Lines Matching +full:reserve +full:- +full:mem +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
12 * Copyright (C) 2004-2006 Ingo Molnar
64 * A selftest will look into the ring-buffer to count the
66 * insertions into the ring-buffer such as trace_printk could have occurred
72 * If boot-time tracing including tracers/events via kernel cmdline
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
262 strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1); in set_ftrace_dump_on_oops()
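set_ftrace_dump_on_oops() above parses the boot-time form of this flag. For reference, the long-standing command-line forms are shown below; newer kernels additionally accept a comma-separated list of instance names, which is why a string (not just an integer) is copied above:

    ftrace_dump_on_oops              dump every CPU's ring buffer on an oops
    ftrace_dump_on_oops=orig_cpu     dump only the CPU that triggered the oops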
286 int left = sizeof(boot_snapshot_info) - boot_snapshot_index; in boot_alloc_snapshot()
292 return -1; in boot_alloc_snapshot()
318 int left = sizeof(boot_instance_info) - boot_instance_index; in boot_instance()
322 return -1; in boot_instance()
392 if (export->flags & flag) { in trace_process_export()
395 export->write(export, entry, size); in trace_process_export()
409 if (export->flags & TRACE_EXPORT_FUNCTION) in ftrace_exports_enable()
412 if (export->flags & TRACE_EXPORT_EVENT) in ftrace_exports_enable()
415 if (export->flags & TRACE_EXPORT_MARKER) in ftrace_exports_enable()
421 if (export->flags & TRACE_EXPORT_FUNCTION) in ftrace_exports_disable()
424 if (export->flags & TRACE_EXPORT_EVENT) in ftrace_exports_disable()
427 if (export->flags & TRACE_EXPORT_MARKER) in ftrace_exports_disable()
440 export = rcu_dereference_raw_check(export->next); in ftrace_exports()
447 rcu_assign_pointer(export->next, *list); in add_trace_export()
451 * the export->next pointer is valid before another CPU sees in add_trace_export()
462 for (p = list; *p != NULL; p = &(*p)->next) in rm_trace_export()
467 return -1; in rm_trace_export()
469 rcu_assign_pointer(*p, (*p)->next); in rm_trace_export()
495 if (WARN_ON_ONCE(!export->write)) in register_ftrace_export()
496 return -1; in register_ftrace_export()
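register_ftrace_export() above refuses an export with no ->write callback. A minimal module-side sketch of the public API from <linux/trace.h>; the sink itself (my_export_write) is invented:

    #include <linux/trace.h>

    /* called once per function-trace record after registration */
    static void my_export_write(struct trace_export *export,
                                const void *entry, unsigned int size)
    {
            /* forward the binary trace record to a custom sink */
    }

    static struct trace_export my_export = {
            .write  = my_export_write,
            .flags  = TRACE_EXPORT_FUNCTION,
    };

    /* module init: register_ftrace_export(&my_export);
     * module exit: unregister_ftrace_export(&my_export); */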
533 * The global_trace is the descriptor that holds the top-level tracing
553 return !(tr->flags & TRACE_ARRAY_FL_BOOT); in printk_binsafe()
561 printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK; in update_printk_trace()
563 tr->trace_flags |= TRACE_ITER_TRACE_PRINTK; in update_printk_trace()
572 if (!list_empty(&tr->marker_list)) in update_marker_trace()
575 list_add_rcu(&tr->marker_list, &marker_copies); in update_marker_trace()
576 tr->trace_flags |= TRACE_ITER_COPY_MARKER; in update_marker_trace()
580 if (list_empty(&tr->marker_list)) in update_marker_trace()
583 list_del_init(&tr->marker_list); in update_marker_trace()
584 tr->trace_flags &= ~TRACE_ITER_COPY_MARKER; in update_marker_trace()
592 tr->ring_buffer_expanded = true; in trace_set_ring_buffer_expanded()
604 tr->ref++; in trace_array_get()
609 return -ENODEV; in trace_array_get()
614 WARN_ON(!this_tr->ref); in __trace_array_put()
615 this_tr->ref--; in __trace_array_put()
619 * trace_array_put - Decrement the reference counter for this trace array.
646 return -ENODEV; in tracing_check_open_get_tr()
649 return -ENODEV; in tracing_check_open_get_tr()
655 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
668 * trace_ignore_this_task - should a task be ignored for tracing
691 !trace_find_filtered_pid(filtered_pids, task->pid)) || in trace_ignore_this_task()
693 trace_find_filtered_pid(filtered_no_pids, task->pid)); in trace_ignore_this_task()
697 * trace_filter_add_remove_task - Add or remove a task from a pid_list
717 if (!trace_find_filtered_pid(pid_list, self->pid)) in trace_filter_add_remove_task()
723 trace_pid_list_set(pid_list, task->pid); in trace_filter_add_remove_task()
725 trace_pid_list_clear(pid_list, task->pid); in trace_filter_add_remove_task()
729 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
758 * trace_pid_start - Used for seq_file to start reading pid lists
787 * trace_pid_show - show the current pid in seq_file processing
796 unsigned long pid = (unsigned long)v - 1; in trace_pid_show()
819 return -ENOMEM; in trace_pid_write()
830 return -ENOMEM; in trace_pid_write()
857 cnt -= ret; in trace_pid_write()
862 ret = -EINVAL; in trace_pid_write()
869 ret = -1; in trace_pid_write()
901 if (!buf->buffer) in buffer_ftrace_now()
904 ts = ring_buffer_time_stamp(buf->buffer); in buffer_ftrace_now()
905 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); in buffer_ftrace_now()
916 * tracing_is_enabled - Show if global_trace has been enabled
974 * These primitives don't distinguish read-only and read-consume access.
975 * Multiple read-only accesses are also serialized.
1090 if (tr->array_buffer.buffer) in tracer_tracing_on()
1091 ring_buffer_record_on(tr->array_buffer.buffer); in tracer_tracing_on()
1100 tr->buffer_disabled = 0; in tracer_tracing_on()
1104 * tracing_on - enable tracing buffers
1123 /* Length is in event->array[0] */ in __buffer_unlock_commit()
1124 ring_buffer_write(buffer, event->array[0], &event->array[1]); in __buffer_unlock_commit()
1142 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in __trace_array_puts()
1154 buffer = tr->array_buffer.buffer; in __trace_array_puts()
1162 entry->ip = ip; in __trace_array_puts()
1164 memcpy(&entry->buf, str, size); in __trace_array_puts()
1167 if (entry->buf[size - 1] != '\n') { in __trace_array_puts()
1168 entry->buf[size] = '\n'; in __trace_array_puts()
1169 entry->buf[size + 1] = '\0'; in __trace_array_puts()
1171 entry->buf[size] = '\0'; in __trace_array_puts()
1180 * __trace_puts - write a constant string into the trace buffer.
1192 * __trace_bputs - write the pointer to a constant string into trace buffer
1208 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in __trace_bputs()
1215 buffer = tr->array_buffer.buffer; in __trace_bputs()
1224 entry->ip = ip; in __trace_bputs()
1225 entry->str = str; in __trace_bputs()
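__trace_puts() and __trace_bputs() above are the slow paths behind the trace_puts() macro, which stores only a string pointer when the argument is a build-time constant. Typical debugging use (qlen is a made-up variable):

    trace_puts("hit the slow path\n");  /* constant string: lands in __trace_bputs() */
    trace_printk("qlen=%d\n", qlen);    /* formatted: see trace_vbprintk() further down */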
1238 struct tracer *tracer = tr->current_trace; in tracing_snapshot_instance_cond()
1247 if (!tr->allocated_snapshot) { in tracing_snapshot_instance_cond()
1255 if (tracer->use_max_tr) { in tracing_snapshot_instance_cond()
1261 if (tr->mapped) { in tracing_snapshot_instance_cond()
1278 * tracing_snapshot - take a snapshot of the current buffer.
1300 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1305 * conditional - the snapshot will only happen if the
1319 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1323 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1327 * the tr->max_lock lock, which the code calling
1337 arch_spin_lock(&tr->max_lock); in tracing_cond_snapshot_data()
1339 if (tr->cond_snapshot) in tracing_cond_snapshot_data()
1340 cond_data = tr->cond_snapshot->cond_data; in tracing_cond_snapshot_data()
1342 arch_spin_unlock(&tr->max_lock); in tracing_cond_snapshot_data()
1358 if (!tr->allocated_snapshot) { in tracing_alloc_snapshot_instance()
1361 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); in tracing_alloc_snapshot_instance()
1362 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); in tracing_alloc_snapshot_instance()
1367 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_alloc_snapshot_instance()
1368 &tr->array_buffer, RING_BUFFER_ALL_CPUS); in tracing_alloc_snapshot_instance()
1372 tr->allocated_snapshot = true; in tracing_alloc_snapshot_instance()
1382 * The max_tr ring buffer has some state (e.g. ring->clock) and in free_snapshot()
1385 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0); in free_snapshot()
1386 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); in free_snapshot()
1387 set_buffer_entries(&tr->max_buffer, 1); in free_snapshot()
1388 tracing_reset_online_cpus(&tr->max_buffer); in free_snapshot()
1389 tr->allocated_snapshot = false; in free_snapshot()
1398 spin_lock(&tr->snapshot_trigger_lock); in tracing_arm_snapshot_locked()
1399 if (tr->snapshot == UINT_MAX || tr->mapped) { in tracing_arm_snapshot_locked()
1400 spin_unlock(&tr->snapshot_trigger_lock); in tracing_arm_snapshot_locked()
1401 return -EBUSY; in tracing_arm_snapshot_locked()
1404 tr->snapshot++; in tracing_arm_snapshot_locked()
1405 spin_unlock(&tr->snapshot_trigger_lock); in tracing_arm_snapshot_locked()
1409 spin_lock(&tr->snapshot_trigger_lock); in tracing_arm_snapshot_locked()
1410 tr->snapshot--; in tracing_arm_snapshot_locked()
1411 spin_unlock(&tr->snapshot_trigger_lock); in tracing_arm_snapshot_locked()
1425 spin_lock(&tr->snapshot_trigger_lock); in tracing_disarm_snapshot()
1426 if (!WARN_ON(!tr->snapshot)) in tracing_disarm_snapshot()
1427 tr->snapshot--; in tracing_disarm_snapshot()
1428 spin_unlock(&tr->snapshot_trigger_lock); in tracing_disarm_snapshot()
1432 * tracing_alloc_snapshot - allocate snapshot buffer.
1435 * allocated - it doesn't also take a snapshot.
1454 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
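Both snapshot helpers are exported to kernel code. A minimal sketch, assuming CONFIG_TRACER_SNAPSHOT=y and an invented trigger condition:

    /* once, at init time: make sure the spare buffer exists */
    if (tracing_alloc_snapshot())
            pr_warn("snapshot buffer unavailable\n");

    /* later, at the interesting moment */
    if (hit_rare_condition())       /* hypothetical predicate */
            tracing_snapshot();     /* swap the live buffer into the snapshot buffer */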
1477 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1484 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1497 return -ENOMEM; in tracing_snapshot_cond_enable()
1499 cond_snapshot->cond_data = cond_data; in tracing_snapshot_cond_enable()
1500 cond_snapshot->update = update; in tracing_snapshot_cond_enable()
1504 if (tr->current_trace->use_max_tr) in tracing_snapshot_cond_enable()
1505 return -EBUSY; in tracing_snapshot_cond_enable()
1515 if (tr->cond_snapshot) in tracing_snapshot_cond_enable()
1516 return -EBUSY; in tracing_snapshot_cond_enable()
1523 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_enable()
1524 tr->cond_snapshot = no_free_ptr(cond_snapshot); in tracing_snapshot_cond_enable()
1525 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_enable()
1533 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1538 * otherwise return -EINVAL.
1547 arch_spin_lock(&tr->max_lock); in tracing_snapshot_cond_disable()
1549 if (!tr->cond_snapshot) in tracing_snapshot_cond_disable()
1550 ret = -EINVAL; in tracing_snapshot_cond_disable()
1552 kfree(tr->cond_snapshot); in tracing_snapshot_cond_disable()
1553 tr->cond_snapshot = NULL; in tracing_snapshot_cond_disable()
1556 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_cond_disable()
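The conditional variant gates the buffer swap on caller state via an update callback of type bool (*)(struct trace_array *, void *). A hedged sketch; the latency bookkeeping here is invented:

    static bool lat_update(struct trace_array *tr, void *cond_data)
    {
            u64 *worst = cond_data;
            u64 now = this_sample_latency();        /* hypothetical measurement */

            if (now <= *worst)
                    return false;                   /* skip this snapshot */
            *worst = now;
            return true;                            /* take the snapshot */
    }

    static u64 worst_latency;

    /* tracing_snapshot_cond_enable(tr, &worst_latency, lat_update);
     *   ... tracing_snapshot_cond(tr, &worst_latency); ...
     * tracing_snapshot_cond_disable(tr); */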
1578 return -ENODEV; in tracing_alloc_snapshot()
1594 return -ENODEV; in tracing_snapshot_cond_enable()
1603 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1608 if (tr->array_buffer.buffer) in tracer_tracing_off()
1609 ring_buffer_record_off(tr->array_buffer.buffer); in tracer_tracing_off()
1618 tr->buffer_disabled = 1; in tracer_tracing_off()
1622 * tracer_tracing_disable() - temporarily disable writes to the buffer
1625 * Expects tracer_tracing_enable() to re-enable tracing.
1633 if (WARN_ON_ONCE(!tr->array_buffer.buffer)) in tracer_tracing_disable()
1636 ring_buffer_record_disable(tr->array_buffer.buffer); in tracer_tracing_disable()
1640 * tracer_tracing_enable() - counterpart of tracer_tracing_disable()
1644 * when it's safe to re-enable tracing.
1648 if (WARN_ON_ONCE(!tr->array_buffer.buffer)) in tracer_tracing_enable()
1651 ring_buffer_record_enable(tr->array_buffer.buffer); in tracer_tracing_enable()
1655 * tracing_off - turn off tracing buffers
1678 * tracer_tracing_is_on - show real state of ring buffer enabled
1685 if (tr->array_buffer.buffer) in tracer_tracing_is_on()
1686 return ring_buffer_record_is_set_on(tr->array_buffer.buffer); in tracer_tracing_is_on()
1687 return !tr->buffer_disabled; in tracer_tracing_is_on()
1691 * tracing_is_on - show state of ring buffers enabled
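tracing_off()/tracing_on(), built on the tracer_tracing_*() helpers above, are the classic way to freeze the buffer the instant a bug is detected so it can be read post-mortem (the corruption check is hypothetical):

    if (WARN_ON_ONCE(state == BROKEN))      /* hypothetical check */
            tracing_off();                  /* writers stop; buffer keeps its last events */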
1770 if (trace_clocks[tr->clock_id].in_ns) in trace_clock_in_ns()
1777 * trace_parser_get_init - gets the buffer for trace parser
1783 parser->buffer = kmalloc(size, GFP_KERNEL); in trace_parser_get_init()
1784 if (!parser->buffer) in trace_parser_get_init()
1787 parser->size = size; in trace_parser_get_init()
1792 * trace_parser_put - frees the buffer for trace parser
1796 kfree(parser->buffer); in trace_parser_put()
1797 parser->buffer = NULL; in trace_parser_put()
1801 * trace_get_user - reads the user input string separated by space
1826 cnt--; in trace_get_user()
1832 if (!parser->cont) { in trace_get_user()
1839 cnt--; in trace_get_user()
1842 parser->idx = 0; in trace_get_user()
1851 /* read the non-space input */ in trace_get_user()
1853 if (parser->idx < parser->size - 1) in trace_get_user()
1854 parser->buffer[parser->idx++] = ch; in trace_get_user()
1856 ret = -EINVAL; in trace_get_user()
1864 cnt--; in trace_get_user()
1869 parser->buffer[parser->idx] = 0; in trace_get_user()
1870 parser->cont = false; in trace_get_user()
1871 } else if (parser->idx < parser->size - 1) { in trace_get_user()
1872 parser->cont = true; in trace_get_user()
1873 parser->buffer[parser->idx++] = ch; in trace_get_user()
1875 parser->buffer[parser->idx] = 0; in trace_get_user()
1877 ret = -EINVAL; in trace_get_user()
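A sketch of the consumer pattern for trace_get_user(), as used by the ftrace filter files inside a file_operations .write handler (internal API from kernel/trace/trace.h; handle_token() is invented):

    struct trace_parser parser;
    int read;

    if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
            return -ENOMEM;

    read = trace_get_user(&parser, ubuf, cnt, ppos);
    if (read >= 0 && trace_parser_loaded(&parser))
            handle_token(parser.buffer);    /* one NUL-terminated, space-delimited word */

    trace_parser_put(&parser);
    return read;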
1893 if (trace_seq_used(s) <= s->readpos) in trace_seq_to_buffer()
1894 return -EBUSY; in trace_seq_to_buffer()
1896 len = trace_seq_used(s) - s->readpos; in trace_seq_to_buffer()
1899 memcpy(buf, s->buffer + s->readpos, cnt); in trace_seq_to_buffer()
1901 s->readpos += cnt; in trace_seq_to_buffer()
1918 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); in latency_fsnotify_workfn()
1925 queue_work(fsnotify_wq, &tr->fsnotify_work); in latency_fsnotify_workfn_irq()
1931 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); in trace_create_maxlat_file()
1932 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); in trace_create_maxlat_file()
1933 tr->d_max_latency = trace_create_file("tracing_max_latency", in trace_create_maxlat_file()
1945 return -ENOMEM; in latency_fsnotify_init()
1957 * We cannot call queue_work(&tr->fsnotify_work) from here because it's in latency_fsnotify()
1961 irq_work_queue(&tr->fsnotify_irqwork); in latency_fsnotify()
1973 * Copy the new maximum trace into the separate maximum-trace
1980 struct array_buffer *trace_buf = &tr->array_buffer; in __update_max_tr()
1981 struct array_buffer *max_buf = &tr->max_buffer; in __update_max_tr()
1982 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); in __update_max_tr()
1983 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); in __update_max_tr()
1985 max_buf->cpu = cpu; in __update_max_tr()
1986 max_buf->time_start = data->preempt_timestamp; in __update_max_tr()
1988 max_data->saved_latency = tr->max_latency; in __update_max_tr()
1989 max_data->critical_start = data->critical_start; in __update_max_tr()
1990 max_data->critical_end = data->critical_end; in __update_max_tr()
1992 strscpy(max_data->comm, tsk->comm); in __update_max_tr()
1993 max_data->pid = tsk->pid; in __update_max_tr()
1999 max_data->uid = current_uid(); in __update_max_tr()
2001 max_data->uid = task_uid(tsk); in __update_max_tr()
2003 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; in __update_max_tr()
2004 max_data->policy = tsk->policy; in __update_max_tr()
2005 max_data->rt_priority = tsk->rt_priority; in __update_max_tr()
2013 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
2026 if (tr->stop_count) in update_max_tr()
2031 if (!tr->allocated_snapshot) { in update_max_tr()
2033 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr()
2037 arch_spin_lock(&tr->max_lock); in update_max_tr()
2040 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) in update_max_tr()
2041 ring_buffer_record_on(tr->max_buffer.buffer); in update_max_tr()
2043 ring_buffer_record_off(tr->max_buffer.buffer); in update_max_tr()
2046 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { in update_max_tr()
2047 arch_spin_unlock(&tr->max_lock); in update_max_tr()
2051 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
2055 arch_spin_unlock(&tr->max_lock); in update_max_tr()
2058 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS); in update_max_tr()
2062 * update_max_tr_single - only copy one trace over, and reset the rest
2074 if (tr->stop_count) in update_max_tr_single()
2078 if (!tr->allocated_snapshot) { in update_max_tr_single()
2080 WARN_ON_ONCE(tr->current_trace != &nop_trace); in update_max_tr_single()
2084 arch_spin_lock(&tr->max_lock); in update_max_tr_single()
2086 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); in update_max_tr_single()
2088 if (ret == -EBUSY) { in update_max_tr_single()
2096 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, in update_max_tr_single()
2100 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); in update_max_tr_single()
2103 arch_spin_unlock(&tr->max_lock); in update_max_tr_single()
2116 struct trace_iterator *iter = pwait->iter; in wait_pipe_cond()
2118 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) in wait_pipe_cond()
2121 return iter->closed; in wait_pipe_cond()
2130 if (trace_buffer_iter(iter, iter->cpu_file)) in wait_on_pipe()
2133 pwait.wait_index = atomic_read_acquire(&iter->wait_index); in wait_on_pipe()
2136 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, in wait_on_pipe()
2144 if (iter->snapshot) in wait_on_pipe()
2145 iter->array_buffer = &iter->tr->max_buffer; in wait_on_pipe()
2166 return -ENOMEM; in save_selftest()
2168 selftest->type = type; in save_selftest()
2169 list_add(&selftest->list, &postponed_selftests); in save_selftest()
2176 struct tracer *saved_tracer = tr->current_trace; in run_tracer_selftest()
2179 if (!type->selftest || tracing_selftest_disabled) in run_tracer_selftest()
2192 type->name); in run_tracer_selftest()
2203 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
2205 tr->current_trace = type; in run_tracer_selftest()
2208 if (type->use_max_tr) { in run_tracer_selftest()
2210 if (tr->ring_buffer_expanded) in run_tracer_selftest()
2211 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, in run_tracer_selftest()
2213 tr->allocated_snapshot = true; in run_tracer_selftest()
2218 pr_info("Testing tracer %s: ", type->name); in run_tracer_selftest()
2219 ret = type->selftest(type, tr); in run_tracer_selftest()
2221 tr->current_trace = saved_tracer; in run_tracer_selftest()
2226 return -1; in run_tracer_selftest()
2229 tracing_reset_online_cpus(&tr->array_buffer); in run_tracer_selftest()
2232 if (type->use_max_tr) { in run_tracer_selftest()
2233 tr->allocated_snapshot = false; in run_tracer_selftest()
2236 if (tr->ring_buffer_expanded) in run_tracer_selftest()
2237 ring_buffer_resize(tr->max_buffer.buffer, 1, in run_tracer_selftest()
2285 ret = run_tracer_selftest(p->type); in init_trace_selftests()
2289 p->type->name); in init_trace_selftests()
2291 for (t = trace_types; t; t = t->next) { in init_trace_selftests()
2292 if (t == p->type) { in init_trace_selftests()
2293 *last = t->next; in init_trace_selftests()
2296 last = &t->next; in init_trace_selftests()
2299 list_del(&p->list); in init_trace_selftests()
2319 * register_tracer - register a tracer with the ftrace system.
2329 if (!type->name) { in register_tracer()
2331 return -1; in register_tracer()
2334 if (strlen(type->name) >= MAX_TRACER_SIZE) { in register_tracer()
2336 return -1; in register_tracer()
2341 type->name); in register_tracer()
2342 return -EPERM; in register_tracer()
2347 for (t = trace_types; t; t = t->next) { in register_tracer()
2348 if (strcmp(type->name, t->name) == 0) { in register_tracer()
2351 type->name); in register_tracer()
2352 ret = -1; in register_tracer()
2357 if (!type->set_flag) in register_tracer()
2358 type->set_flag = &dummy_set_flag; in register_tracer()
2359 if (!type->flags) { in register_tracer()
2361 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); in register_tracer()
2362 if (!type->flags) { in register_tracer()
2363 ret = -ENOMEM; in register_tracer()
2366 type->flags->val = 0; in register_tracer()
2367 type->flags->opts = dummy_tracer_opt; in register_tracer()
2369 if (!type->flags->opts) in register_tracer()
2370 type->flags->opts = dummy_tracer_opt; in register_tracer()
2373 type->flags->trace = type; in register_tracer()
2379 type->next = trace_types; in register_tracer()
2389 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) in register_tracer()
2392 printk(KERN_INFO "Starting tracer '%s'\n", type->name); in register_tracer()
2394 tracing_set_tracer(&global_trace, type->name); in register_tracer()
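For context, a skeleton of what a tracer plugin hands to register_tracer(), using the internal struct tracer from kernel/trace/trace.h (names invented; only the fields the checks above rely on are shown):

    static int mytrace_init(struct trace_array *tr)
    {
            return 0;       /* arm the tracer */
    }

    static void mytrace_reset(struct trace_array *tr)
    {
            /* disarm, undo init */
    }

    static struct tracer mytrace __read_mostly = {
            .name   = "mytrace",    /* unique, shorter than MAX_TRACER_SIZE */
            .init   = mytrace_init,
            .reset  = mytrace_reset,
    };

    /* boot/module init: register_tracer(&mytrace); */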
2407 struct trace_buffer *buffer = buf->buffer; in tracing_reset_cpu()
2423 struct trace_buffer *buffer = buf->buffer; in tracing_reset_online_cpus()
2433 buf->time_start = buffer_ftrace_now(buf, buf->cpu); in tracing_reset_online_cpus()
2442 struct trace_buffer *buffer = buf->buffer; in tracing_reset_all_cpus()
2452 buf->time_start = buffer_ftrace_now(buf, buf->cpu); in tracing_reset_all_cpus()
2467 if (!tr->clear_trace) in tracing_reset_all_online_cpus_unlocked()
2469 tr->clear_trace = false; in tracing_reset_all_online_cpus_unlocked()
2470 tracing_reset_online_cpus(&tr->array_buffer); in tracing_reset_all_online_cpus_unlocked()
2472 tracing_reset_online_cpus(&tr->max_buffer); in tracing_reset_all_online_cpus_unlocked()
2495 guard(raw_spinlock_irqsave)(&tr->start_lock); in tracing_start_tr()
2496 if (--tr->stop_count) { in tracing_start_tr()
2497 if (WARN_ON_ONCE(tr->stop_count < 0)) { in tracing_start_tr()
2499 tr->stop_count = 0; in tracing_start_tr()
2505 arch_spin_lock(&tr->max_lock); in tracing_start_tr()
2507 buffer = tr->array_buffer.buffer; in tracing_start_tr()
2512 buffer = tr->max_buffer.buffer; in tracing_start_tr()
2517 arch_spin_unlock(&tr->max_lock); in tracing_start_tr()
2521 * tracing_start - quick start of the tracer
2536 guard(raw_spinlock_irqsave)(&tr->start_lock); in tracing_stop_tr()
2537 if (tr->stop_count++) in tracing_stop_tr()
2541 arch_spin_lock(&tr->max_lock); in tracing_stop_tr()
2543 buffer = tr->array_buffer.buffer; in tracing_stop_tr()
2548 buffer = tr->max_buffer.buffer; in tracing_stop_tr()
2553 arch_spin_unlock(&tr->max_lock); in tracing_stop_tr()
2557 * tracing_stop - quick stop of the tracer
2582 return current->migration_disabled; in migration_disable_value()
2628 * trace_buffered_event_enable - enable buffering events
2686 * trace_buffered_event_disable - disable buffering events
2702 if (--trace_buffered_event_ref) in trace_buffered_event_disable()
2722 * could wrongly decide to use the pointed-to buffer which is now freed. in trace_buffered_event_disable()
2740 struct trace_array *tr = trace_file->tr; in trace_event_buffer_lock_reserve()
2743 *current_rb = tr->array_buffer.buffer; in trace_event_buffer_lock_reserve()
2745 if (!tr->no_filter_buffering_ref && in trace_event_buffer_lock_reserve()
2746 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { in trace_event_buffer_lock_reserve()
2753 * (see include/linux/ring-buffer.h for details on in trace_event_buffer_lock_reserve()
2766 int max_len = PAGE_SIZE - struct_size(entry, array, 1); in trace_event_buffer_lock_reserve()
2781 * If that happens, then the reserve is pretty much in trace_event_buffer_lock_reserve()
2784 * change in the future, so let the ring buffer reserve in trace_event_buffer_lock_reserve()
2789 entry->array[0] = len; in trace_event_buffer_lock_reserve()
2807 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { in trace_event_buffer_lock_reserve()
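trace_event_buffer_lock_reserve() wraps the ring buffer's two-phase reserve/commit protocol. The bare pattern, for reference: buffer is a struct trace_buffer *, struct my_entry is invented, and ring_buffer_unlock_commit() dropped its event argument in v6.2:

    struct ring_buffer_event *event;
    struct my_entry *entry;

    event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
    if (!event)
            return;                         /* buffer full or writing disabled */
    entry = ring_buffer_event_data(event);
    entry->value = 42;                      /* fill the payload */
    ring_buffer_unlock_commit(buffer);      /* publish it to readers */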
2831 event_call = fbuffer->trace_file->event_call; in output_printk()
2832 if (!event_call || !event_call->event.funcs || in output_printk()
2833 !event_call->event.funcs->trace) in output_printk()
2836 file = fbuffer->trace_file; in output_printk()
2837 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || in output_printk()
2838 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && in output_printk()
2839 !filter_match_preds(file->filter, fbuffer->entry))) in output_printk()
2842 event = &fbuffer->trace_file->event_call->event; in output_printk()
2845 trace_seq_init(&iter->seq); in output_printk()
2846 iter->ent = fbuffer->entry; in output_printk()
2847 event_call->event.funcs->trace(iter, 0, event); in output_printk()
2848 trace_seq_putc(&iter->seq, 0); in output_printk()
2849 printk("%s", iter->seq.buffer); in output_printk()
2887 struct trace_event_file *file = fbuffer->trace_file; in trace_event_buffer_commit()
2889 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, in trace_event_buffer_commit()
2890 fbuffer->entry, &tt)) in trace_event_buffer_commit()
2897 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); in trace_event_buffer_commit()
2899 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, in trace_event_buffer_commit()
2900 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); in trace_event_buffer_commit()
2950 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_function()
2962 entry->ip = ip; in trace_function()
2963 entry->parent_ip = parent_ip; in trace_function()
2968 entry->args[i] = ftrace_regs_get_argument(fregs, i); in trace_function()
3018 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; in __ftrace_trace_stack()
3034 size = ARRAY_SIZE(fstack->calls); in __ftrace_trace_stack()
3037 nr_entries = stack_trace_save_regs(regs, fstack->calls, in __ftrace_trace_stack()
3040 nr_entries = stack_trace_save(fstack->calls, size, skip); in __ftrace_trace_stack()
3045 if (tr->ops && tr->ops->trampoline) { in __ftrace_trace_stack()
3046 unsigned long tramp_start = tr->ops->trampoline; in __ftrace_trace_stack()
3047 unsigned long tramp_end = tramp_start + tr->ops->trampoline_size; in __ftrace_trace_stack()
3048 unsigned long *calls = fstack->calls; in __ftrace_trace_stack()
3064 entry->size = nr_entries; in __ftrace_trace_stack()
3065 memcpy(&entry->caller, fstack->calls, in __ftrace_trace_stack()
3081 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) in ftrace_trace_stack()
3090 struct trace_buffer *buffer = tr->array_buffer.buffer; in __trace_stack()
3115 * trace_dump_stack - record a stack back trace in the trace buffer
3127 __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer, in trace_dump_stack()
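trace_dump_stack() is exported for ad-hoc debugging; its argument is the number of leading stack frames to skip:

    trace_dump_stack(0);    /* record the full current call chain into the trace buffer */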
3142 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) in ftrace_trace_userstack()
3168 entry->tgid = current->tgid; in ftrace_trace_userstack()
3169 memset(&entry->caller, 0, sizeof(entry->caller)); in ftrace_trace_userstack()
3171 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); in ftrace_trace_userstack()
3191 entry->bottom_delta_ts = delta & U32_MAX; in func_repeats_set_delta_ts()
3192 entry->top_delta_ts = (delta >> 32); in func_repeats_set_delta_ts()
3199 struct trace_buffer *buffer = tr->array_buffer.buffer; in trace_last_func_repeats()
3209 delta = ring_buffer_event_time_stamp(buffer, event) - in trace_last_func_repeats()
3210 last_info->ts_last_call; in trace_last_func_repeats()
3213 entry->ip = last_info->ip; in trace_last_func_repeats()
3214 entry->parent_ip = last_info->parent_ip; in trace_last_func_repeats()
3215 entry->count = last_info->count; in trace_last_func_repeats()
3237 if (!trace_percpu_buffer || buffer->nesting >= 4) in get_trace_buf()
3240 buffer->nesting++; in get_trace_buf()
3244 return &buffer->buffer[buffer->nesting - 1][0]; in get_trace_buf()
3251 this_cpu_dec(trace_percpu_buffer->nesting); in put_trace_buf()
3263 return -ENOMEM; in alloc_percpu_trace_buffer()
3332 * trace_vbprintk - write binary msg to tracing buffer
3371 buffer = tr->array_buffer.buffer; in trace_vbprintk()
3378 entry->ip = ip; in trace_vbprintk()
3379 entry->fmt = fmt; in trace_vbprintk()
3381 memcpy(entry->buf, tbuffer, sizeof(u32) * len); in trace_vbprintk()
3430 entry->ip = ip; in __trace_array_vprintk()
3432 memcpy(&entry->buf, tbuffer, len + 1); in __trace_array_vprintk()
3451 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); in trace_array_vprintk()
3455 * trace_array_printk - Print a message to a specific instance
3481 return -ENOENT; in trace_array_printk()
3487 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) in trace_array_printk()
3498 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3508 return -ENOENT; in trace_array_init_printk()
3512 return -EINVAL; in trace_array_init_printk()
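trace_array_printk()/trace_array_init_printk() pair with trace_array_get_by_name() to form the in-kernel instance API (cf. samples/ftrace/sample-trace-array.c). A hedged sketch; note the second "systems" argument of trace_array_get_by_name() only exists on recent kernels (NULL enables all events):

    struct trace_array *tr;

    tr = trace_array_get_by_name("my-instance", NULL);  /* creates it if needed */
    if (!tr)
            return -ENOMEM;

    if (trace_array_init_printk(tr) == 0)               /* required before printk use */
            trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);

    trace_array_put(tr);                                /* drop our reference */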
3524 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK)) in trace_array_printk_buf()
3541 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); in trace_iterator_increment()
3543 iter->idx++; in trace_iterator_increment()
3559 (unsigned long)-1 : 0; in peek_next_entry()
3561 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, in peek_next_entry()
3566 iter->ent_size = ring_buffer_event_length(event); in peek_next_entry()
3569 iter->ent_size = 0; in peek_next_entry()
3577 struct trace_buffer *buffer = iter->array_buffer->buffer; in __find_next_entry()
3580 int cpu_file = iter->cpu_file; in __find_next_entry()
3582 int next_cpu = -1; in __find_next_entry()
3615 next_size = iter->ent_size; in __find_next_entry()
3619 iter->ent_size = next_size; in __find_next_entry()
3641 * iter->tr is NULL when used with tp_printk, which makes in trace_iter_expand_format()
3644 if (!iter->tr || iter->fmt == static_fmt_buf) in trace_iter_expand_format()
3647 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE, in trace_iter_expand_format()
3650 iter->fmt_size += STATIC_FMT_BUF_SIZE; in trace_iter_expand_format()
3651 iter->fmt = tmp; in trace_iter_expand_format()
3665 if ((addr >= (unsigned long)iter->ent) && in trace_safe_str()
3666 (addr < (unsigned long)iter->ent + iter->ent_size)) in trace_safe_str()
3670 if ((addr >= (unsigned long)iter->tmp_seq.buffer) && in trace_safe_str()
3671 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE)) in trace_safe_str()
3685 if (!iter->ent) in trace_safe_str()
3688 trace_event = ftrace_find_event(iter->ent->type); in trace_safe_str()
3693 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) in trace_safe_str()
3697 if (within_module_core(addr, event->module)) in trace_safe_str()
3704 * ignore_event - Check dereferenced fields while writing to the seq buffer
3718 * If it is found that a field is unsafe, it will write into the @iter->seq
3733 trace_event = ftrace_find_event(iter->ent->type); in ignore_event()
3735 seq = &iter->seq; in ignore_event()
3738 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); in ignore_event()
3743 if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) in ignore_event()
3753 /* Offsets are from the iter->ent that points to the raw event */ in ignore_event()
3754 ptr = iter->ent; in ignore_event()
3760 if (!field->needs_test) in ignore_event()
3763 str = *(const char **)(ptr + field->offset); in ignore_event()
3773 * instead. See samples/trace_events/trace-events-sample.h in ignore_event()
3777 trace_event_name(event), field->name)) { in ignore_event()
3779 trace_event_name(event), field->name); in ignore_event()
3794 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) in trace_event_format()
3798 new_fmt = q = iter->fmt; in trace_event_format()
3800 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { in trace_event_format()
3804 q += iter->fmt - new_fmt; in trace_event_format()
3805 new_fmt = iter->fmt; in trace_event_format()
3811 if (p[-1] == '%') { in trace_event_format()
3833 int ent_size = iter->ent_size; in trace_find_next_entry()
3837 * If called from ftrace_dump(), then the iter->temp buffer in trace_find_next_entry()
3844 if (iter->temp == static_temp_buf && in trace_find_next_entry()
3850 * call ring_buffer_peek() that may make the contents of iter->ent in trace_find_next_entry()
3851 * undefined. Need to copy iter->ent now. in trace_find_next_entry()
3853 if (iter->ent && iter->ent != iter->temp) { in trace_find_next_entry()
3854 if ((!iter->temp || iter->temp_size < iter->ent_size) && in trace_find_next_entry()
3855 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { in trace_find_next_entry()
3857 temp = kmalloc(iter->ent_size, GFP_KERNEL); in trace_find_next_entry()
3860 kfree(iter->temp); in trace_find_next_entry()
3861 iter->temp = temp; in trace_find_next_entry()
3862 iter->temp_size = iter->ent_size; in trace_find_next_entry()
3864 memcpy(iter->temp, iter->ent, iter->ent_size); in trace_find_next_entry()
3865 iter->ent = iter->temp; in trace_find_next_entry()
3869 iter->ent_size = ent_size; in trace_find_next_entry()
3877 iter->ent = __find_next_entry(iter, &iter->cpu, in trace_find_next_entry_inc()
3878 &iter->lost_events, &iter->ts); in trace_find_next_entry_inc()
3880 if (iter->ent) in trace_find_next_entry_inc()
3883 return iter->ent ? iter : NULL; in trace_find_next_entry_inc()
3888 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, in trace_consume()
3889 &iter->lost_events); in trace_consume()
3894 struct trace_iterator *iter = m->private; in s_next()
3898 WARN_ON_ONCE(iter->leftover); in s_next()
3903 if (iter->idx > i) in s_next()
3906 if (iter->idx < 0) in s_next()
3911 while (ent && iter->idx < i) in s_next()
3914 iter->pos = *pos; in s_next()
3925 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
3939 if (ts >= iter->array_buffer->time_start) in tracing_iter_reset()
3947 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
3956 struct trace_iterator *iter = m->private; in s_start()
3957 struct trace_array *tr = iter->tr; in s_start()
3958 int cpu_file = iter->cpu_file; in s_start()
3964 if (unlikely(tr->current_trace != iter->trace)) { in s_start()
3965 /* Close iter->trace before switching to the new current tracer */ in s_start()
3966 if (iter->trace->close) in s_start()
3967 iter->trace->close(iter); in s_start()
3968 iter->trace = tr->current_trace; in s_start()
3970 if (iter->trace->open) in s_start()
3971 iter->trace->open(iter); in s_start()
3976 if (iter->snapshot && iter->trace->use_max_tr) in s_start()
3977 return ERR_PTR(-EBUSY); in s_start()
3980 if (*pos != iter->pos) { in s_start()
3981 iter->ent = NULL; in s_start()
3982 iter->cpu = 0; in s_start()
3983 iter->idx = -1; in s_start()
3991 iter->leftover = 0; in s_start()
4000 if (iter->leftover) in s_start()
4003 l = *pos - 1; in s_start()
4015 struct trace_iterator *iter = m->private; in s_stop()
4018 if (iter->snapshot && iter->trace->use_max_tr) in s_stop()
4022 trace_access_unlock(iter->cpu_file); in s_stop()
4032 count = ring_buffer_entries_cpu(buf->buffer, cpu); in get_total_entries_cpu()
4038 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { in get_total_entries_cpu()
4039 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; in get_total_entries_cpu()
4044 ring_buffer_overrun_cpu(buf->buffer, cpu); in get_total_entries_cpu()
4072 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); in trace_total_entries_cpu()
4084 get_total_entries(&tr->array_buffer, &total, &entries); in trace_total_entries()
4091 seq_puts(m, "# _------=> CPU# \n" in print_lat_help_header()
4092 "# / _-----=> irqs-off/BH-disabled\n" in print_lat_help_header()
4093 "# | / _----=> need-resched \n" in print_lat_help_header()
4094 "# || / _---=> hardirq/softirq \n" in print_lat_help_header()
4095 "# ||| / _--=> preempt-depth \n" in print_lat_help_header()
4096 "# |||| / _-=> migrate-disable \n" in print_lat_help_header()
4108 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", in print_event_info()
4120 seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : ""); in print_func_help_header()
4133 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); in print_func_help_header_irq()
4134 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); in print_func_help_header_irq()
4135 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); in print_func_help_header_irq()
4136 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); in print_func_help_header_irq()
4137 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); in print_func_help_header_irq()
4139 seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   "); in print_func_help_header_irq()
4147 struct array_buffer *buf = iter->array_buffer; in print_trace_header()
4148 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); in print_trace_header()
4149 struct tracer *type = iter->trace; in print_trace_header()
4152 const char *name = type->name; in print_trace_header()
4156 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", in print_trace_header()
4157 name, init_utsname()->release); in print_trace_header()
4158 seq_puts(m, "# -----------------------------------" in print_trace_header()
4159 "---------------------------------\n"); in print_trace_header()
4162 nsecs_to_usecs(data->saved_latency), in print_trace_header()
4165 buf->cpu, in print_trace_header()
4174 seq_puts(m, "# -----------------\n"); in print_trace_header()
4175 seq_printf(m, "# | task: %.16s-%d " in print_trace_header()
4177 data->comm, data->pid, in print_trace_header()
4178 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, in print_trace_header()
4179 data->policy, data->rt_priority); in print_trace_header()
4180 seq_puts(m, "# -----------------\n"); in print_trace_header()
4182 if (data->critical_start) { in print_trace_header()
4184 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); in print_trace_header()
4185 trace_print_seq(m, &iter->seq); in print_trace_header()
4187 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); in print_trace_header()
4188 trace_print_seq(m, &iter->seq); in print_trace_header()
4197 struct trace_seq *s = &iter->seq; in test_cpu_buff_start()
4198 struct trace_array *tr = iter->tr; in test_cpu_buff_start()
4200 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) in test_cpu_buff_start()
4203 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) in test_cpu_buff_start()
4206 if (cpumask_available(iter->started) && in test_cpu_buff_start()
4207 cpumask_test_cpu(iter->cpu, iter->started)) in test_cpu_buff_start()
4210 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
4213 if (cpumask_available(iter->started)) in test_cpu_buff_start()
4214 cpumask_set_cpu(iter->cpu, iter->started); in test_cpu_buff_start()
4217 if (iter->idx > 1) in test_cpu_buff_start()
4219 iter->cpu); in test_cpu_buff_start()
4224 struct trace_array *tr = iter->tr; in print_trace_fmt()
4225 struct trace_seq *s = &iter->seq; in print_trace_fmt()
4226 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); in print_trace_fmt()
4230 entry = iter->ent; in print_trace_fmt()
4234 event = ftrace_find_event(entry->type); in print_trace_fmt()
4236 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_trace_fmt()
4237 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in print_trace_fmt()
4247 if (tr->trace_flags & TRACE_ITER_FIELDS) in print_trace_fmt()
4254 if ((tr->text_delta) && in print_trace_fmt()
4255 event->type > __TRACE_LAST_TYPE) in print_trace_fmt()
4258 return event->funcs->trace(iter, sym_flags, event); in print_trace_fmt()
4261 trace_seq_printf(s, "Unknown type %d\n", entry->type); in print_trace_fmt()
4268 struct trace_array *tr = iter->tr; in print_raw_fmt()
4269 struct trace_seq *s = &iter->seq; in print_raw_fmt()
4273 entry = iter->ent; in print_raw_fmt()
4275 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) in print_raw_fmt()
4277 entry->pid, iter->cpu, iter->ts); in print_raw_fmt()
4282 event = ftrace_find_event(entry->type); in print_raw_fmt()
4284 return event->funcs->raw(iter, 0, event); in print_raw_fmt()
4286 trace_seq_printf(s, "%d ?\n", entry->type); in print_raw_fmt()
4293 struct trace_array *tr = iter->tr; in print_hex_fmt()
4294 struct trace_seq *s = &iter->seq; in print_hex_fmt()
4299 entry = iter->ent; in print_hex_fmt()
4301 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_hex_fmt()
4302 SEQ_PUT_HEX_FIELD(s, entry->pid); in print_hex_fmt()
4303 SEQ_PUT_HEX_FIELD(s, iter->cpu); in print_hex_fmt()
4304 SEQ_PUT_HEX_FIELD(s, iter->ts); in print_hex_fmt()
4309 event = ftrace_find_event(entry->type); in print_hex_fmt()
4311 enum print_line_t ret = event->funcs->hex(iter, 0, event); in print_hex_fmt()
4323 struct trace_array *tr = iter->tr; in print_bin_fmt()
4324 struct trace_seq *s = &iter->seq; in print_bin_fmt()
4328 entry = iter->ent; in print_bin_fmt()
4330 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { in print_bin_fmt()
4331 SEQ_PUT_FIELD(s, entry->pid); in print_bin_fmt()
4332 SEQ_PUT_FIELD(s, iter->cpu); in print_bin_fmt()
4333 SEQ_PUT_FIELD(s, iter->ts); in print_bin_fmt()
4338 event = ftrace_find_event(entry->type); in print_bin_fmt()
4339 return event ? event->funcs->binary(iter, 0, event) : in print_bin_fmt()
4349 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { in trace_empty()
4350 cpu = iter->cpu_file; in trace_empty()
4356 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) in trace_empty()
4368 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) in trace_empty()
4379 struct trace_array *tr = iter->tr; in print_trace_line()
4380 unsigned long trace_flags = tr->trace_flags; in print_trace_line()
4383 if (iter->lost_events) { in print_trace_line()
4384 if (iter->lost_events == (unsigned long)-1) in print_trace_line()
4385 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", in print_trace_line()
4386 iter->cpu); in print_trace_line()
4388 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", in print_trace_line()
4389 iter->cpu, iter->lost_events); in print_trace_line()
4390 if (trace_seq_has_overflowed(&iter->seq)) in print_trace_line()
4394 if (iter->trace && iter->trace->print_line) { in print_trace_line()
4395 ret = iter->trace->print_line(iter); in print_trace_line()
4400 if (iter->ent->type == TRACE_BPUTS && in print_trace_line()
4405 if (iter->ent->type == TRACE_BPRINT && in print_trace_line()
4410 if (iter->ent->type == TRACE_PRINT && in print_trace_line()
4429 struct trace_iterator *iter = m->private; in trace_latency_header()
4430 struct trace_array *tr = iter->tr; in trace_latency_header()
4436 if (iter->iter_flags & TRACE_FILE_LAT_FMT) in trace_latency_header()
4439 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) in trace_latency_header()
4445 struct trace_iterator *iter = m->private; in trace_default_header()
4446 struct trace_array *tr = iter->tr; in trace_default_header()
4447 unsigned long trace_flags = tr->trace_flags; in trace_default_header()
4452 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { in trace_default_header()
4462 print_func_help_header_irq(iter->array_buffer, in trace_default_header()
4465 print_func_help_header(iter->array_buffer, m, in trace_default_header()
4507 if (iter->tr->allocated_snapshot) in print_snapshot_help()
4513 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in print_snapshot_help()
4528 if (iter->ent == NULL) { in s_show()
4529 if (iter->tr) { in s_show()
4530 seq_printf(m, "# tracer: %s\n", iter->trace->name); in s_show()
4534 if (iter->snapshot && trace_empty(iter)) in s_show()
4536 else if (iter->trace && iter->trace->print_header) in s_show()
4537 iter->trace->print_header(m); in s_show()
4541 } else if (iter->leftover) { in s_show()
4546 ret = trace_print_seq(m, &iter->seq); in s_show()
4549 iter->leftover = ret; in s_show()
4554 iter->seq.full = 0; in s_show()
4555 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); in s_show()
4557 ret = trace_print_seq(m, &iter->seq); in s_show()
4563 * -1 otherwise. in s_show()
4565 iter->leftover = ret; in s_show()
4577 if (inode->i_cdev) /* See trace_create_cpu_file() */ in tracing_get_cpu()
4578 return (long)inode->i_cdev - 1; in tracing_get_cpu()
4599 if (iter->fmt != static_fmt_buf) in free_trace_iter_content()
4600 kfree(iter->fmt); in free_trace_iter_content()
4602 kfree(iter->temp); in free_trace_iter_content()
4603 kfree(iter->buffer_iter); in free_trace_iter_content()
4604 mutex_destroy(&iter->mutex); in free_trace_iter_content()
4605 free_cpumask_var(iter->started); in free_trace_iter_content()
4611 struct trace_array *tr = inode->i_private; in __tracing_open()
4616 return ERR_PTR(-ENODEV); in __tracing_open()
4620 return ERR_PTR(-ENOMEM); in __tracing_open()
4622 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), in __tracing_open()
4624 if (!iter->buffer_iter) in __tracing_open()
4628 * trace_find_next_entry() may need to save off iter->ent. in __tracing_open()
4629 * It will place it into the iter->temp buffer. As most in __tracing_open()
4632 * allocate a new buffer to adjust for the bigger iter->ent. in __tracing_open()
4635 iter->temp = kmalloc(128, GFP_KERNEL); in __tracing_open()
4636 if (iter->temp) in __tracing_open()
4637 iter->temp_size = 128; in __tracing_open()
4646 iter->fmt = NULL; in __tracing_open()
4647 iter->fmt_size = 0; in __tracing_open()
4650 iter->trace = tr->current_trace; in __tracing_open()
4652 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) in __tracing_open()
4655 iter->tr = tr; in __tracing_open()
4659 if (tr->current_trace->print_max || snapshot) in __tracing_open()
4660 iter->array_buffer = &tr->max_buffer; in __tracing_open()
4663 iter->array_buffer = &tr->array_buffer; in __tracing_open()
4664 iter->snapshot = snapshot; in __tracing_open()
4665 iter->pos = -1; in __tracing_open()
4666 iter->cpu_file = tracing_get_cpu(inode); in __tracing_open()
4667 mutex_init(&iter->mutex); in __tracing_open()
4670 if (iter->trace->open) in __tracing_open()
4671 iter->trace->open(iter); in __tracing_open()
4674 if (ring_buffer_overruns(iter->array_buffer->buffer)) in __tracing_open()
4675 iter->iter_flags |= TRACE_FILE_ANNOTATE; in __tracing_open()
4678 if (trace_clocks[tr->clock_id].in_ns) in __tracing_open()
4679 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in __tracing_open()
4682 * If pause-on-trace is enabled, then stop the trace while in __tracing_open()
4685 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) in __tracing_open()
4688 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { in __tracing_open()
4690 iter->buffer_iter[cpu] = in __tracing_open()
4691 ring_buffer_read_start(iter->array_buffer->buffer, in __tracing_open()
4696 cpu = iter->cpu_file; in __tracing_open()
4697 iter->buffer_iter[cpu] = in __tracing_open()
4698 ring_buffer_read_start(iter->array_buffer->buffer, in __tracing_open()
4712 return ERR_PTR(-ENOMEM); in __tracing_open()
4723 filp->private_data = inode->i_private; in tracing_open_generic()
4738 struct trace_array *tr = inode->i_private; in tracing_open_generic_tr()
4745 filp->private_data = inode->i_private; in tracing_open_generic_tr()
4756 struct trace_event_file *file = inode->i_private; in tracing_open_file_tr()
4759 ret = tracing_check_open_get_tr(file->tr); in tracing_open_file_tr()
4766 if (file->flags & EVENT_FILE_FL_FREED) { in tracing_open_file_tr()
4767 trace_array_put(file->tr); in tracing_open_file_tr()
4768 return -ENODEV; in tracing_open_file_tr()
4773 filp->private_data = inode->i_private; in tracing_open_file_tr()
4780 struct trace_event_file *file = inode->i_private; in tracing_release_file_tr()
4782 trace_array_put(file->tr); in tracing_release_file_tr()
4796 struct trace_array *tr = inode->i_private; in tracing_release()
4797 struct seq_file *m = file->private_data; in tracing_release()
4801 if (!(file->f_mode & FMODE_READ)) { in tracing_release()
4807 iter = m->private; in tracing_release()
4811 if (iter->buffer_iter[cpu]) in tracing_release()
4812 ring_buffer_read_finish(iter->buffer_iter[cpu]); in tracing_release()
4815 if (iter->trace && iter->trace->close) in tracing_release()
4816 iter->trace->close(iter); in tracing_release()
4818 if (!iter->snapshot && tr->stop_count) in tracing_release()
4834 struct trace_array *tr = inode->i_private; in tracing_release_generic_tr()
4842 struct trace_array *tr = inode->i_private; in tracing_single_release_tr()
4851 struct trace_array *tr = inode->i_private; in tracing_open()
4860 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { in tracing_open()
4862 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_open()
4865 if (tr->current_trace->print_max) in tracing_open()
4866 trace_buf = &tr->max_buffer; in tracing_open()
4875 if (file->f_mode & FMODE_READ) { in tracing_open()
4879 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open()
4880 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open()
4899 if (tr->range_addr_start && t->use_max_tr) in trace_ok_for_array()
4902 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; in trace_ok_for_array()
4910 t = t->next; in get_tracer_for_array()
4918 struct trace_array *tr = m->private; in t_next()
4924 t = get_tracer_for_array(tr, t->next); in t_next()
4931 struct trace_array *tr = m->private; in t_start()
4956 seq_puts(m, t->name); in t_show()
4957 if (t->next) in t_show()
4974 struct trace_array *tr = inode->i_private; in show_traces_open()
4988 m = file->private_data; in show_traces_open()
4989 m->private = tr; in show_traces_open()
4996 struct trace_array *tr = inode->i_private; in tracing_seq_release()
5013 if (file->f_mode & FMODE_READ) in tracing_lseek()
5016 file->f_pos = ret = 0; in tracing_lseek()
5042 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_read()
5047 cpumask_pr_args(tr->tracing_cpumask)) + 1; in tracing_cpumask_read()
5050 return -ENOMEM; in tracing_cpumask_read()
5053 cpumask_pr_args(tr->tracing_cpumask)); in tracing_cpumask_read()
5055 return -EINVAL; in tracing_cpumask_read()
5066 return -EINVAL; in tracing_set_cpumask()
5069 arch_spin_lock(&tr->max_lock); in tracing_set_cpumask()
5075 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
5077 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
5079 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); in tracing_set_cpumask()
5082 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && in tracing_set_cpumask()
5084 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); in tracing_set_cpumask()
5086 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); in tracing_set_cpumask()
5090 arch_spin_unlock(&tr->max_lock); in tracing_set_cpumask()
5093 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); in tracing_set_cpumask()
5102 struct trace_array *tr = file_inode(filp)->i_private; in tracing_cpumask_write()
5107 return -EINVAL; in tracing_cpumask_write()
5110 return -ENOMEM; in tracing_cpumask_write()
5141 struct trace_array *tr = m->private; in tracing_trace_options_show()
5147 tracer_flags = tr->current_trace->flags->val; in tracing_trace_options_show()
5148 trace_opts = tr->current_trace->flags->opts; in tracing_trace_options_show()
5151 if (tr->trace_flags & (1 << i)) in tracing_trace_options_show()
5171 struct tracer *trace = tracer_flags->trace; in __set_tracer_option()
5174 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); in __set_tracer_option()
5179 tracer_flags->val &= ~opts->bit; in __set_tracer_option()
5181 tracer_flags->val |= opts->bit; in __set_tracer_option()
5188 struct tracer *trace = tr->current_trace; in set_tracer_option()
5189 struct tracer_flags *tracer_flags = trace->flags; in set_tracer_option()
5193 for (i = 0; tracer_flags->opts[i].name; i++) { in set_tracer_option()
5194 opts = &tracer_flags->opts[i]; in set_tracer_option()
5196 if (strcmp(cmp, opts->name) == 0) in set_tracer_option()
5197 return __set_tracer_option(tr, trace->flags, opts, neg); in set_tracer_option()
5200 return -EINVAL; in set_tracer_option()
5206 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) in trace_keep_overwrite()
5207 return -1; in trace_keep_overwrite()
5221 if (!!(tr->trace_flags & mask) == !!enabled) in set_tracer_flag()
5225 if (tr->current_trace->flag_changed) in set_tracer_flag()
5226 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) in set_tracer_flag()
5227 return -EINVAL; in set_tracer_flag()
5238 return -EINVAL; in set_tracer_flag()
5252 tr->trace_flags |= mask; in set_tracer_flag()
5254 tr->trace_flags &= ~mask; in set_tracer_flag()
5262 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; in set_tracer_flag()
5263 return -ENOMEM; in set_tracer_flag()
5276 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); in set_tracer_flag()
5278 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); in set_tracer_flag()
5309 ret = match_string(trace_options, -1, cmp); in trace_set_options()
5345 *(buf - 1) = ','; in apply_trace_boot_options()
5353 struct seq_file *m = filp->private_data; in tracing_trace_options_write()
5354 struct trace_array *tr = m->private; in tracing_trace_options_write()
5359 return -EINVAL; in tracing_trace_options_write()
5362 return -EFAULT; in tracing_trace_options_write()
5377 struct trace_array *tr = inode->i_private; in tracing_trace_options_open()
5384 ret = single_open(file, tracing_trace_options_show, inode->i_private); in tracing_trace_options_open()
5400 "tracing mini-HOWTO:\n\n"
5404 "\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5406 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5408 " trace\t\t\t- The static contents of the buffer\n"
5410 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5411 " current_tracer\t- function and latency tracers\n"
5412 " available_tracers\t- list of configured tracers for current_tracer\n"
5413 " error_log\t- error log for failed commands (that support it)\n"
5414 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5415 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5416 " trace_clock\t\t- change the clock used to order events\n"
5423 " x86-tsc: TSC cycle counter\n"
5425 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5426 " delta: Delta difference against a buffer-wide timestamp\n"
5428 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5429 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5430 " tracing_cpumask\t- Limit which CPUs to trace\n"
5431 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5432 "\t\t\t Remove sub-buffer with rmdir\n"
5433 " trace_options\t\t- Set format or modify how tracing happens\n"
5436 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5438 "\n available_filter_functions - list of functions that can be filtered on\n"
5439 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5441 "\t accepts: func_full_name or glob-matching-pattern\n"
5443 "\t Format: :mod:<module-name>\n"
5470 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5476 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5478 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5482 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5483 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5484 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5487 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5492 " stack_trace\t\t- Shows the max stack trace when active\n"
5493 " stack_max_size\t- Shows current max stack size that was traced\n"
5497 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5502 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5506 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5510 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5515 "\t accepts: event-definitions (one definition per line)\n"
5521 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5527 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5528 "\t -:[<group>/][<event>]\n"
5537 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5541 "\t <argname>[->field[->field|.field...]],\n"
5546 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5549 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5550 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5557 "\t of the <attached-group>/<attached-event>.\n"
5559 " set_event\t\t- Enables events by name written into it\n"
5561 " events/\t\t- Directory containing all trace event subsystems:\n"
5562 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5563 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5564 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5566 " filter\t\t- If set, only events passing filter are traced\n"
5567 " events/<system>/<event>/\t- Directory containing control files for\n"
5569 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5570 " filter\t\t- If set, only events passing filter are traced\n"
5571 " trigger\t\t- If set, a command to perform when event is hit\n"
5605 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5617 "\t common_timestamp - to record current timestamp\n"
5618 "\t common_cpu - to record the CPU the event happened on\n"
5621 "\t - a reference to a field e.g. x=current_timestamp,\n"
5622 "\t - a reference to another variable e.g. y=$x,\n"
5623 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5624 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5626 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5655 "\t .sym-offset display an address as a symbol and offset\n"
5662 "\t .graph display a bar-graph of a value\n\n"
5674 "\t already-attached hist trigger. The syntax is analogous to\n"
5680 "\t onmatch(matching.event) - invoke on addition or update\n"
5681 "\t onmax(var) - invoke if var exceeds current max\n"
5682 "\t onchange(var) - invoke action if var changes\n\n"
5684 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5685 "\t save(field,...) - save current event fields\n"
5687 "\t snapshot() - snapshot the trace buffer\n\n"
5690 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5715 if (!ptr->map.eval_string) { in update_eval_map()
5716 if (ptr->tail.next) { in update_eval_map()
5717 ptr = ptr->tail.next; in update_eval_map()
5773 ptr->map.eval_string, ptr->map.eval_value, in eval_map_show()
5774 ptr->map.system); in eval_map_show()
5808 return ptr + ptr->head.length + 1; in trace_eval_jmp_to_tail()
5841 if (!ptr->tail.next) in trace_insert_eval_map_file()
5843 ptr = ptr->tail.next; in trace_insert_eval_map_file()
5846 ptr->tail.next = map_array; in trace_insert_eval_map_file()
5848 map_array->head.mod = mod; in trace_insert_eval_map_file()
5849 map_array->head.length = len; in trace_insert_eval_map_file()
5853 map_array->map = **map; in trace_insert_eval_map_file()
5900 struct trace_array *tr = filp->private_data; in tracing_set_trace_read()
5905 r = sprintf(buf, "%s\n", tr->current_trace->name); in tracing_set_trace_read()
5913 tracing_reset_online_cpus(&tr->array_buffer); in tracer_init()
5914 return t->init(tr); in tracer_init()
5922 per_cpu_ptr(buf->data, cpu)->entries = val; in set_buffer_entries()
5928 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); in update_buffer_entries()
5930 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); in update_buffer_entries()
5943 ret = ring_buffer_resize(trace_buf->buffer, in resize_buffer_duplicate_size()
5944 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); in resize_buffer_duplicate_size()
5947 per_cpu_ptr(trace_buf->data, cpu)->entries = in resize_buffer_duplicate_size()
5948 per_cpu_ptr(size_buf->data, cpu)->entries; in resize_buffer_duplicate_size()
5951 ret = ring_buffer_resize(trace_buf->buffer, in resize_buffer_duplicate_size()
5952 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); in resize_buffer_duplicate_size()
5954 per_cpu_ptr(trace_buf->data, cpu_id)->entries = in resize_buffer_duplicate_size()
5955 per_cpu_ptr(size_buf->data, cpu_id)->entries; in resize_buffer_duplicate_size()
5975 if (!tr->array_buffer.buffer) in __tracing_resize_ring_buffer()
5981 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5986 if (!tr->allocated_snapshot) in __tracing_resize_ring_buffer()
5989 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); in __tracing_resize_ring_buffer()
5991 int r = resize_buffer_duplicate_size(&tr->array_buffer, in __tracing_resize_ring_buffer()
5992 &tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
6014 update_buffer_entries(&tr->max_buffer, cpu); in __tracing_resize_ring_buffer()
6019 update_buffer_entries(&tr->array_buffer, cpu); in __tracing_resize_ring_buffer()
6033 return -EINVAL; in tracing_resize_ring_buffer()
6061 return addr - ent->mod_addr; in cmp_mod_entry()
6065 * trace_adjust_address() - Adjust a previous boot's address to the current address.
6078 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) in trace_adjust_address()
6081 /* tr->module_delta must be protected by RCU. */ in trace_adjust_address()
6083 tscratch = tr->scratch; in trace_adjust_address()
6085 module_delta = READ_ONCE(tr->module_delta); in trace_adjust_address()
6086 if (!module_delta || !tscratch->nr_entries || in trace_adjust_address()
6087 tscratch->entries[0].mod_addr > addr) { in trace_adjust_address()
6088 raddr = addr + tr->text_delta; in trace_adjust_address()
6094 nr_entries = tscratch->nr_entries; in trace_adjust_address()
6096 tscratch->entries[nr_entries - 1].mod_addr < addr) in trace_adjust_address()
6097 idx = nr_entries - 1; in trace_adjust_address()
6100 tscratch->entries, in trace_adjust_address()
6101 nr_entries - 1, in trace_adjust_address()
6102 sizeof(tscratch->entries[0]), in trace_adjust_address()
6105 idx = entry - tscratch->entries; in trace_adjust_address()
6108 return addr + module_delta->delta[idx]; in trace_adjust_address()
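/*
 * Illustrative sketch (hypothetical caller): an event recorded in a
 * previous boot carries that boot's instruction pointer, so it must be
 * shifted by the kernel's (or the owning module's) relocation delta
 * before symbol lookup. trace_adjust_address() performs that mapping
 * and simply returns the address unchanged unless tr holds
 * TRACE_ARRAY_FL_LAST_BOOT data.
 */
static void show_last_boot_ip(struct trace_array *tr, struct seq_file *m,
			      unsigned long ip)
{
	seq_printf(m, "%ps\n", (void *)trace_adjust_address(tr, ip));
}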
6119 tscratch = tr->scratch; in save_mod()
6121 return -1; in save_mod()
6122 size = tr->scratch_size; in save_mod()
6124 if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size) in save_mod()
6125 return -1; in save_mod()
6127 entry = &tscratch->entries[tscratch->nr_entries]; in save_mod()
6129 tscratch->nr_entries++; in save_mod()
6131 entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base; in save_mod()
6132 strscpy(entry->mod_name, mod->name); in save_mod()
6148 if (!(tr->flags & TRACE_ARRAY_FL_BOOT)) in update_last_data()
6151 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) in update_last_data()
6155 tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT; in update_last_data()
6158 if (tr->scratch) { in update_last_data()
6159 struct trace_scratch *tscratch = tr->scratch; in update_last_data()
6161 tscratch->clock_id = tr->clock_id; in update_last_data()
6162 memset(tscratch->entries, 0, in update_last_data()
6163 flex_array_size(tscratch, entries, tscratch->nr_entries)); in update_last_data()
6164 tscratch->nr_entries = 0; in update_last_data()
6176 tracing_reset_all_cpus(&tr->array_buffer); in update_last_data()
6179 tr->text_delta = 0; in update_last_data()
6181 if (!tr->scratch) in update_last_data()
6184 tscratch = tr->scratch; in update_last_data()
6185 module_delta = READ_ONCE(tr->module_delta); in update_last_data()
6186 WRITE_ONCE(tr->module_delta, NULL); in update_last_data()
6190 tscratch->text_addr = (unsigned long)_text; in update_last_data()
6194 * tracing_update_buffers - used by the tracing facility to expand ring buffers
6212 if (!tr->ring_buffer_expanded) in tracing_update_buffers()
6229 if (tr->current_trace == &nop_trace) in tracing_set_nop()
6232 tr->current_trace->enabled--; in tracing_set_nop()
6234 if (tr->current_trace->reset) in tracing_set_nop()
6235 tr->current_trace->reset(tr); in tracing_set_nop()
6237 tr->current_trace = &nop_trace; in tracing_set_nop()
6245 if (!tr->dir && !(tr->flags & TRACE_ARRAY_FL_GLOBAL)) in add_tracer_options()
6267 if (!tr->ring_buffer_expanded) { in tracing_set_tracer()
6275 for (t = trace_types; t; t = t->next) { in tracing_set_tracer()
6276 if (strcmp(t->name, buf) == 0) in tracing_set_tracer()
6280 return -EINVAL; in tracing_set_tracer()
6282 if (t == tr->current_trace) in tracing_set_tracer()
6286 if (t->use_max_tr) { in tracing_set_tracer()
6288 arch_spin_lock(&tr->max_lock); in tracing_set_tracer()
6289 ret = tr->cond_snapshot ? -EBUSY : 0; in tracing_set_tracer()
6290 arch_spin_unlock(&tr->max_lock); in tracing_set_tracer()
6297 if (system_state < SYSTEM_RUNNING && t->noboot) { in tracing_set_tracer()
6299 t->name); in tracing_set_tracer()
6300 return -EINVAL; in tracing_set_tracer()
6305 return -EINVAL; in tracing_set_tracer()
6308 if (tr->trace_ref) in tracing_set_tracer()
6309 return -EBUSY; in tracing_set_tracer()
6313 tr->current_trace->enabled--; in tracing_set_tracer()
6315 if (tr->current_trace->reset) in tracing_set_tracer()
6316 tr->current_trace->reset(tr); in tracing_set_tracer()
6319 had_max_tr = tr->current_trace->use_max_tr; in tracing_set_tracer()
6322 tr->current_trace = &nop_trace; in tracing_set_tracer()
6324 if (had_max_tr && !t->use_max_tr) { in tracing_set_tracer()
6337 if (!had_max_tr && t->use_max_tr) { in tracing_set_tracer()
6343 tr->current_trace = &nop_trace; in tracing_set_tracer()
6346 if (t->init) { in tracing_set_tracer()
6350 if (t->use_max_tr) in tracing_set_tracer()
6357 tr->current_trace = t; in tracing_set_tracer()
6358 tr->current_trace->enabled++; in tracing_set_tracer()
6368 struct trace_array *tr = filp->private_data; in tracing_set_trace_write()
6380 return -EFAULT; in tracing_set_trace_write()
6403 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); in tracing_nsecs_read()
6436 struct trace_array *tr = filp->private_data; in tracing_thresh_write()
6444 if (tr->current_trace->update_thresh) { in tracing_thresh_write()
6445 ret = tr->current_trace->update_thresh(tr); in tracing_thresh_write()
6459 struct trace_array *tr = filp->private_data; in tracing_max_lat_read()
6461 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); in tracing_max_lat_read()
6468 struct trace_array *tr = filp->private_data; in tracing_max_lat_write()
6470 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); in tracing_max_lat_write()
6478 if (cpumask_empty(tr->pipe_cpumask)) { in open_pipe_on_cpu()
6479 cpumask_setall(tr->pipe_cpumask); in open_pipe_on_cpu()
6482 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { in open_pipe_on_cpu()
6483 cpumask_set_cpu(cpu, tr->pipe_cpumask); in open_pipe_on_cpu()
6486 return -EBUSY; in open_pipe_on_cpu()
6492 WARN_ON(!cpumask_full(tr->pipe_cpumask)); in close_pipe_on_cpu()
6493 cpumask_clear(tr->pipe_cpumask); in close_pipe_on_cpu()
6495 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); in close_pipe_on_cpu()
6496 cpumask_clear_cpu(cpu, tr->pipe_cpumask); in close_pipe_on_cpu()
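/*
 * Taken together, open_pipe_on_cpu() and close_pipe_on_cpu() make the
 * trace_pipe readers mutually exclusive: the all-CPU reader may only
 * open when pipe_cpumask is empty and then sets every bit, while a
 * per-CPU reader sets just its own bit, which in turn blocks the
 * all-CPU open.
 */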
6502 struct trace_array *tr = inode->i_private; in tracing_open_pipe()
6520 ret = -ENOMEM; in tracing_open_pipe()
6524 trace_seq_init(&iter->seq); in tracing_open_pipe()
6525 iter->trace = tr->current_trace; in tracing_open_pipe()
6527 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { in tracing_open_pipe()
6528 ret = -ENOMEM; in tracing_open_pipe()
6533 cpumask_setall(iter->started); in tracing_open_pipe()
6535 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) in tracing_open_pipe()
6536 iter->iter_flags |= TRACE_FILE_LAT_FMT; in tracing_open_pipe()
6539 if (trace_clocks[tr->clock_id].in_ns) in tracing_open_pipe()
6540 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in tracing_open_pipe()
6542 iter->tr = tr; in tracing_open_pipe()
6543 iter->array_buffer = &tr->array_buffer; in tracing_open_pipe()
6544 iter->cpu_file = cpu; in tracing_open_pipe()
6545 mutex_init(&iter->mutex); in tracing_open_pipe()
6546 filp->private_data = iter; in tracing_open_pipe()
6548 if (iter->trace->pipe_open) in tracing_open_pipe()
6549 iter->trace->pipe_open(iter); in tracing_open_pipe()
6553 tr->trace_ref++; in tracing_open_pipe()
6568 struct trace_iterator *iter = file->private_data; in tracing_release_pipe()
6569 struct trace_array *tr = inode->i_private; in tracing_release_pipe()
6572 tr->trace_ref--; in tracing_release_pipe()
6574 if (iter->trace->pipe_close) in tracing_release_pipe()
6575 iter->trace->pipe_close(iter); in tracing_release_pipe()
6576 close_pipe_on_cpu(tr, iter->cpu_file); in tracing_release_pipe()
6590 struct trace_array *tr = iter->tr; in trace_poll()
6593 if (trace_buffer_iter(iter, iter->cpu_file)) in trace_poll()
6596 if (tr->trace_flags & TRACE_ITER_BLOCK) in trace_poll()
6602 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, in trace_poll()
6603 filp, poll_table, iter->tr->buffer_percent); in trace_poll()
6609 struct trace_iterator *iter = filp->private_data; in tracing_poll_pipe()
6614 /* Must be called with iter->mutex held. */
6617 struct trace_iterator *iter = filp->private_data; in tracing_wait_pipe()
6622 if ((filp->f_flags & O_NONBLOCK)) { in tracing_wait_pipe()
6623 return -EAGAIN; in tracing_wait_pipe()
6633 * iter->pos will be 0 if we haven't read anything. in tracing_wait_pipe()
6635 if (!tracer_tracing_is_on(iter->tr) && iter->pos) in tracing_wait_pipe()
6638 mutex_unlock(&iter->mutex); in tracing_wait_pipe()
6642 mutex_lock(&iter->mutex); in tracing_wait_pipe()
6653 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) in update_last_data_if_empty()
6656 if (!ring_buffer_empty(tr->array_buffer.buffer)) in update_last_data_if_empty()
6660 * If the buffer contains the last boot data and all per-cpu in update_last_data_if_empty()
6674 struct trace_iterator *iter = filp->private_data; in tracing_read_pipe()
6682 guard(mutex)(&iter->mutex); in tracing_read_pipe()
6685 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
6686 if (sret != -EBUSY) in tracing_read_pipe()
6689 trace_seq_init(&iter->seq); in tracing_read_pipe()
6691 if (iter->trace->read) { in tracing_read_pipe()
6692 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); in tracing_read_pipe()
6698 if (update_last_data_if_empty(iter->tr)) in tracing_read_pipe()
6710 cnt = TRACE_SEQ_BUFFER_SIZE - 1; in tracing_read_pipe()
6714 cpumask_clear(iter->started); in tracing_read_pipe()
6715 trace_seq_init(&iter->seq); in tracing_read_pipe()
6718 trace_access_lock(iter->cpu_file); in tracing_read_pipe()
6721 int save_len = iter->seq.seq.len; in tracing_read_pipe()
6727 * trace_seq_to_user() will return -EBUSY because save_len == 0, in tracing_read_pipe()
6732 iter->seq.full = 0; in tracing_read_pipe()
6733 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); in tracing_read_pipe()
6739 iter->seq.seq.len = save_len; in tracing_read_pipe()
6745 if (trace_seq_used(&iter->seq) >= cnt) in tracing_read_pipe()
6753 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", in tracing_read_pipe()
6754 iter->ent->type); in tracing_read_pipe()
6756 trace_access_unlock(iter->cpu_file); in tracing_read_pipe()
6760 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); in tracing_read_pipe()
6761 if (iter->seq.readpos >= trace_seq_used(&iter->seq)) in tracing_read_pipe()
6762 trace_seq_init(&iter->seq); in tracing_read_pipe()
6768 if (sret == -EBUSY) in tracing_read_pipe()
6777 __free_page(spd->pages[idx]); in tracing_spd_release_pipe()
6787 /* Seq buffer is page-sized, exactly what we need. */ in tracing_fill_pipe_page()
6789 save_len = iter->seq.seq.len; in tracing_fill_pipe_page()
6792 if (trace_seq_has_overflowed(&iter->seq)) { in tracing_fill_pipe_page()
6793 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6799 * be set if the iter->seq overflowed. But check it in tracing_fill_pipe_page()
6803 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6807 count = trace_seq_used(&iter->seq) - save_len; in tracing_fill_pipe_page()
6810 iter->seq.seq.len = save_len; in tracing_fill_pipe_page()
6816 rem -= count; in tracing_fill_pipe_page()
6819 iter->ent = NULL; in tracing_fill_pipe_page()
6835 struct trace_iterator *iter = filp->private_data; in tracing_splice_read_pipe()
6849 return -ENOMEM; in tracing_splice_read_pipe()
6851 mutex_lock(&iter->mutex); in tracing_splice_read_pipe()
6853 if (iter->trace->splice_read) { in tracing_splice_read_pipe()
6854 ret = iter->trace->splice_read(iter, filp, in tracing_splice_read_pipe()
6864 if (!iter->ent && !trace_find_next_entry_inc(iter)) { in tracing_splice_read_pipe()
6865 ret = -EFAULT; in tracing_splice_read_pipe()
6870 trace_access_lock(iter->cpu_file); in tracing_splice_read_pipe()
6881 ret = trace_seq_to_buffer(&iter->seq, in tracing_splice_read_pipe()
6883 min((size_t)trace_seq_used(&iter->seq), in tracing_splice_read_pipe()
6892 trace_seq_init(&iter->seq); in tracing_splice_read_pipe()
6895 trace_access_unlock(iter->cpu_file); in tracing_splice_read_pipe()
6897 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
6910 mutex_unlock(&iter->mutex); in tracing_splice_read_pipe()
6919 struct trace_array *tr = inode->i_private; in tracing_entries_read()
6937 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; in tracing_entries_read()
6938 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { in tracing_entries_read()
6945 if (!tr->ring_buffer_expanded) in tracing_entries_read()
6954 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); in tracing_entries_read()
6967 struct trace_array *tr = inode->i_private; in tracing_entries_write()
6977 return -EINVAL; in tracing_entries_write()
6994 struct trace_array *tr = filp->private_data; in tracing_total_entries_read()
7001 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; in tracing_total_entries_read()
7002 if (!tr->ring_buffer_expanded) in tracing_total_entries_read()
7005 if (tr->ring_buffer_expanded) in tracing_total_entries_read()
7018 struct trace_array *tr = m->private; in l_next()
7019 struct trace_scratch *tscratch = tr->scratch; in l_next()
7028 if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) in l_next()
7032 index--; in l_next()
7034 if (index >= tscratch->nr_entries) in l_next()
7037 return &tscratch->entries[index]; in l_next()
7054 struct trace_scratch *tscratch = tr->scratch; in show_last_boot_header()
7063 if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) in show_last_boot_header()
7064 seq_printf(m, "%lx\t[kernel]\n", tscratch->text_addr); in show_last_boot_header()
7071 struct trace_array *tr = m->private; in l_show()
7079 seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name); in l_show()
7092 struct trace_array *tr = inode->i_private; in tracing_last_boot_open()
7106 m = file->private_data; in tracing_last_boot_open()
7107 m->private = tr; in tracing_last_boot_open()
7114 struct trace_array *tr = inode->i_private; in tracing_buffer_meta_open()
7122 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); in tracing_buffer_meta_open()
7145 struct trace_array *tr = inode->i_private; in tracing_free_buffer_release()
7148 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) in tracing_free_buffer_release()
7175 buffer = tr->array_buffer.buffer; in write_marker_to_buffer()
7184 cnt = ring_buffer_max_event_size(buffer) - meta_size; in write_marker_to_buffer()
7187 return -EBADF; in write_marker_to_buffer()
7192 return -EBADF; in write_marker_to_buffer()
7196 entry->ip = ip; in write_marker_to_buffer()
7197 memcpy(&entry->buf, buf, cnt); in write_marker_to_buffer()
7200 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { in write_marker_to_buffer()
7202 entry->buf[cnt] = '\0'; in write_marker_to_buffer()
7203 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); in write_marker_to_buffer()
7206 if (entry->buf[cnt - 1] != '\n') { in write_marker_to_buffer()
7207 entry->buf[cnt] = '\n'; in write_marker_to_buffer()
7208 entry->buf[cnt + 1] = '\0'; in write_marker_to_buffer()
7210 entry->buf[cnt] = '\0'; in write_marker_to_buffer()
7217 event_triggers_post_call(tr->trace_marker_file, tt); in write_marker_to_buffer()
7241 buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf; in trace_user_fault_buffer_free()
7244 free_percpu(tinfo->tbuf); in trace_user_fault_buffer_free()
7257 trace_user_buffer->ref++; in trace_user_fault_buffer_enable()
7263 return -ENOMEM; in trace_user_fault_buffer_enable()
7265 tinfo->tbuf = alloc_percpu(struct trace_user_buf); in trace_user_fault_buffer_enable()
7266 if (!tinfo->tbuf) { in trace_user_fault_buffer_enable()
7268 return -ENOMEM; in trace_user_fault_buffer_enable()
7271 tinfo->ref = 1; in trace_user_fault_buffer_enable()
7275 per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL; in trace_user_fault_buffer_enable()
7283 return -ENOMEM; in trace_user_fault_buffer_enable()
7285 per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf; in trace_user_fault_buffer_enable()
7304 if (--tinfo->ref) in trace_user_fault_buffer_disable()
7317 char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf; in trace_user_fault_read()
7389 struct trace_array *tr = filp->private_data; in tracing_mark_write()
7390 ssize_t written = -ENODEV; in tracing_mark_write()
7396 return -EINVAL; in tracing_mark_write()
7398 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_write()
7399 return -EINVAL; in tracing_mark_write()
7402 return -EINVAL; in tracing_mark_write()
7412 return -EFAULT; in tracing_mark_write()
7444 /* cnt includes both the entry->id and the data behind it. */ in write_raw_marker_to_buffer()
7445 size = struct_size(entry, buf, cnt - sizeof(entry->id)); in write_raw_marker_to_buffer()
7447 buffer = tr->array_buffer.buffer; in write_raw_marker_to_buffer()
7450 return -EINVAL; in write_raw_marker_to_buffer()
7456 return -EBADF; in write_raw_marker_to_buffer()
7459 unsafe_memcpy(&entry->id, buf, cnt, in write_raw_marker_to_buffer()
7474 struct trace_array *tr = filp->private_data; in tracing_mark_raw_write()
7475 ssize_t written = -ENODEV; in tracing_mark_raw_write()
7480 return -EINVAL; in tracing_mark_raw_write()
7482 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) in tracing_mark_raw_write()
7483 return -EINVAL; in tracing_mark_raw_write()
7487 return -EINVAL; in tracing_mark_raw_write()
7494 return -EFAULT; in tracing_mark_raw_write()
7498 return -EINVAL; in tracing_mark_raw_write()
7538 struct trace_array *tr = m->private; in tracing_clock_show()
7544 i == tr->clock_id ? "[" : "", trace_clocks[i].name, in tracing_clock_show()
7545 i == tr->clock_id ? "]" : ""); in tracing_clock_show()
7560 return -EINVAL; in tracing_set_clock()
7564 tr->clock_id = i; in tracing_set_clock()
7566 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
7572 tracing_reset_online_cpus(&tr->array_buffer); in tracing_set_clock()
7575 if (tr->max_buffer.buffer) in tracing_set_clock()
7576 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); in tracing_set_clock()
7577 tracing_reset_online_cpus(&tr->max_buffer); in tracing_set_clock()
7580 if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) { in tracing_set_clock()
7581 struct trace_scratch *tscratch = tr->scratch; in tracing_set_clock()
7583 tscratch->clock_id = i; in tracing_set_clock()
7592 struct seq_file *m = filp->private_data; in tracing_clock_write()
7593 struct trace_array *tr = m->private; in tracing_clock_write()
7599 return -EINVAL; in tracing_clock_write()
7602 return -EFAULT; in tracing_clock_write()
7619 struct trace_array *tr = inode->i_private; in tracing_clock_open()
7626 ret = single_open(file, tracing_clock_show, inode->i_private); in tracing_clock_open()
7635 struct trace_array *tr = m->private; in tracing_time_stamp_mode_show()
7639 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) in tracing_time_stamp_mode_show()
7649 struct trace_array *tr = inode->i_private; in tracing_time_stamp_mode_open()
7656 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); in tracing_time_stamp_mode_open()
7678 if (set && tr->no_filter_buffering_ref++) in tracing_set_filter_buffering()
7682 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) in tracing_set_filter_buffering()
7683 return -EINVAL; in tracing_set_filter_buffering()
7685 --tr->no_filter_buffering_ref; in tracing_set_filter_buffering()
7702 struct trace_array *tr = inode->i_private; in tracing_snapshot_open()
7711 if (file->f_mode & FMODE_READ) { in tracing_snapshot_open()
7717 ret = -ENOMEM; in tracing_snapshot_open()
7728 iter->tr = tr; in tracing_snapshot_open()
7729 iter->array_buffer = &tr->max_buffer; in tracing_snapshot_open()
7730 iter->cpu_file = tracing_get_cpu(inode); in tracing_snapshot_open()
7731 m->private = iter; in tracing_snapshot_open()
7732 file->private_data = m; in tracing_snapshot_open()
7750 struct seq_file *m = filp->private_data; in tracing_snapshot_write()
7751 struct trace_iterator *iter = m->private; in tracing_snapshot_write()
7752 struct trace_array *tr = iter->tr; in tracing_snapshot_write()
7766 if (tr->current_trace->use_max_tr) in tracing_snapshot_write()
7767 return -EBUSY; in tracing_snapshot_write()
7770 arch_spin_lock(&tr->max_lock); in tracing_snapshot_write()
7771 if (tr->cond_snapshot) in tracing_snapshot_write()
7772 ret = -EBUSY; in tracing_snapshot_write()
7773 arch_spin_unlock(&tr->max_lock); in tracing_snapshot_write()
7780 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
7781 return -EINVAL; in tracing_snapshot_write()
7782 if (tr->allocated_snapshot) in tracing_snapshot_write()
7786 /* Only allow per-cpu swap if the ring buffer supports it */ in tracing_snapshot_write()
7788 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
7789 return -EINVAL; in tracing_snapshot_write()
7791 if (tr->allocated_snapshot) in tracing_snapshot_write()
7792 ret = resize_buffer_duplicate_size(&tr->max_buffer, in tracing_snapshot_write()
7793 &tr->array_buffer, iter->cpu_file); in tracing_snapshot_write()
7800 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { in tracing_snapshot_write()
7805 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, in tracing_snapshot_write()
7811 if (tr->allocated_snapshot) { in tracing_snapshot_write()
7812 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) in tracing_snapshot_write()
7813 tracing_reset_online_cpus(&tr->max_buffer); in tracing_snapshot_write()
7815 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); in tracing_snapshot_write()
7830 struct seq_file *m = file->private_data; in tracing_snapshot_release()
7835 if (file->f_mode & FMODE_READ) in tracing_snapshot_release()
7840 kfree(m->private); in tracing_snapshot_release()
7863 info = filp->private_data; in snapshot_raw_open()
7865 if (info->iter.trace->use_max_tr) { in snapshot_raw_open()
7867 return -EBUSY; in snapshot_raw_open()
7870 info->iter.snapshot = true; in snapshot_raw_open()
7871 info->iter.array_buffer = &info->iter.tr->max_buffer; in snapshot_raw_open()
7993 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
8000 * The filp->private_data must point to a trace_min_max_param structure that
8007 struct trace_min_max_param *param = filp->private_data; in trace_min_max_write()
8012 return -EFAULT; in trace_min_max_write()
8018 if (param->lock) in trace_min_max_write()
8019 mutex_lock(param->lock); in trace_min_max_write()
8021 if (param->min && val < *param->min) in trace_min_max_write()
8022 err = -EINVAL; in trace_min_max_write()
8024 if (param->max && val > *param->max) in trace_min_max_write()
8025 err = -EINVAL; in trace_min_max_write()
8028 *param->val = val; in trace_min_max_write()
8030 if (param->lock) in trace_min_max_write()
8031 mutex_unlock(param->lock); in trace_min_max_write()
8040 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
8047 * The filp->private_data must point to a trace_min_max_param struct with valid
8053 struct trace_min_max_param *param = filp->private_data; in trace_min_max_read()
8059 return -EFAULT; in trace_min_max_read()
8061 val = *param->val; in trace_min_max_read()
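/*
 * Illustrative sketch (names hypothetical): wiring a clamped u64 knob
 * to the trace_min_max handlers above via trace_min_max_fops. The min
 * and max pointers may be NULL to leave that bound unchecked.
 */
static u64 my_knob = 10, my_knob_min = 1, my_knob_max = 100;

static struct trace_min_max_param my_knob_param = {
	.lock	= NULL,		/* no serialization needed for this toy value */
	.val	= &my_knob,
	.min	= &my_knob_min,
	.max	= &my_knob_max,
};

/* trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *		      &my_knob_param, &trace_min_max_fops); */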
8083 const char **errs; /* ptr to loc-specific array of err strings */
8084 u8 type; /* index into errs -> specific err string */
8104 return ERR_PTR(-ENOMEM); in alloc_tracing_log_err()
8106 err->cmd = kzalloc(len, GFP_KERNEL); in alloc_tracing_log_err()
8107 if (!err->cmd) { in alloc_tracing_log_err()
8109 return ERR_PTR(-ENOMEM); in alloc_tracing_log_err()
8117 kfree(err->cmd); in free_tracing_log_err()
8127 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { in get_tracing_log_err()
8129 if (PTR_ERR(err) != -ENOMEM) in get_tracing_log_err()
8130 tr->n_err_log_entries++; in get_tracing_log_err()
8136 return ERR_PTR(-ENOMEM); in get_tracing_log_err()
8137 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); in get_tracing_log_err()
8138 kfree(err->cmd); in get_tracing_log_err()
8139 err->cmd = cmd; in get_tracing_log_err()
8140 list_del(&err->list); in get_tracing_log_err()
8146 * err_pos - find the position of a string within a command for error-caret placement
8166 return found - cmd; in err_pos()
8172 * tracing_log_err - write an error to the tracing error log
8176 * @errs: The array of loc-specific static error strings
8193 * produce a static error string - this string is not copied and saved
8194 * when the error is logged - only a pointer to it is saved. See
8213 if (PTR_ERR(err) == -ENOMEM) in tracing_log_err()
8216 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); in tracing_log_err()
8217 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); in tracing_log_err()
8219 err->info.errs = errs; in tracing_log_err()
8220 err->info.type = type; in tracing_log_err()
8221 err->info.pos = pos; in tracing_log_err()
8222 err->info.ts = local_clock(); in tracing_log_err()
8224 list_add_tail(&err->list, &tr->err_log); in tracing_log_err()
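/*
 * Illustrative sketch (error table and strings hypothetical): command
 * parsers pair err_pos() with tracing_log_err() so that the caret shown
 * in the error_log file points at the offending token.
 */
static const char *my_parse_errs[] = { "Field not found", "Duplicate name" };

static void my_report_bad_field(struct trace_array *tr, char *cmd,
				const char *field)
{
	tracing_log_err(tr, "my_parser", cmd, my_parse_errs,
			0 /* index of "Field not found" */,
			err_pos(cmd, field));
}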
8233 list_for_each_entry_safe(err, next, &tr->err_log, list) { in clear_tracing_err_log()
8234 list_del(&err->list); in clear_tracing_err_log()
8238 tr->n_err_log_entries = 0; in clear_tracing_err_log()
8243 struct trace_array *tr = m->private; in tracing_err_log_seq_start()
8247 return seq_list_start(&tr->err_log, *pos); in tracing_err_log_seq_start()
8252 struct trace_array *tr = m->private; in tracing_err_log_seq_next()
8254 return seq_list_next(v, &tr->err_log, pos); in tracing_err_log_seq_next()
8266 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) in tracing_err_log_show_pos()
8278 const char *err_text = err->info.errs[err->info.type]; in tracing_err_log_seq_show()
8279 u64 sec = err->info.ts; in tracing_err_log_seq_show()
8284 err->loc, err_text); in tracing_err_log_seq_show()
8285 seq_printf(m, "%s", err->cmd); in tracing_err_log_seq_show()
8286 tracing_err_log_show_pos(m, err->info.pos); in tracing_err_log_seq_show()
8301 struct trace_array *tr = inode->i_private; in tracing_err_log_open()
8309 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) in tracing_err_log_open()
8312 if (file->f_mode & FMODE_READ) { in tracing_err_log_open()
8315 struct seq_file *m = file->private_data; in tracing_err_log_open()
8316 m->private = tr; in tracing_err_log_open()
8333 struct trace_array *tr = inode->i_private; in tracing_err_log_release()
8337 if (file->f_mode & FMODE_READ) in tracing_err_log_release()
8353 struct trace_array *tr = inode->i_private; in tracing_buffers_open()
8364 return -ENOMEM; in tracing_buffers_open()
8369 info->iter.tr = tr; in tracing_buffers_open()
8370 info->iter.cpu_file = tracing_get_cpu(inode); in tracing_buffers_open()
8371 info->iter.trace = tr->current_trace; in tracing_buffers_open()
8372 info->iter.array_buffer = &tr->array_buffer; in tracing_buffers_open()
8373 info->spare = NULL; in tracing_buffers_open()
8375 info->read = (unsigned int)-1; in tracing_buffers_open()
8377 filp->private_data = info; in tracing_buffers_open()
8379 tr->trace_ref++; in tracing_buffers_open()
8393 struct ftrace_buffer_info *info = filp->private_data; in tracing_buffers_poll()
8394 struct trace_iterator *iter = &info->iter; in tracing_buffers_poll()
8403 struct ftrace_buffer_info *info = filp->private_data; in tracing_buffers_read()
8404 struct trace_iterator *iter = &info->iter; in tracing_buffers_read()
8414 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_read()
8415 return -EBUSY; in tracing_buffers_read()
8418 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); in tracing_buffers_read()
8421 if (info->spare) { in tracing_buffers_read()
8422 if (page_size != info->spare_size) { in tracing_buffers_read()
8423 ring_buffer_free_read_page(iter->array_buffer->buffer, in tracing_buffers_read()
8424 info->spare_cpu, info->spare); in tracing_buffers_read()
8425 info->spare = NULL; in tracing_buffers_read()
8429 if (!info->spare) { in tracing_buffers_read()
8430 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, in tracing_buffers_read()
8431 iter->cpu_file); in tracing_buffers_read()
8432 if (IS_ERR(info->spare)) { in tracing_buffers_read()
8433 ret = PTR_ERR(info->spare); in tracing_buffers_read()
8434 info->spare = NULL; in tracing_buffers_read()
8436 info->spare_cpu = iter->cpu_file; in tracing_buffers_read()
8437 info->spare_size = page_size; in tracing_buffers_read()
8440 if (!info->spare) in tracing_buffers_read()
8444 if (info->read < page_size) in tracing_buffers_read()
8448 trace_access_lock(iter->cpu_file); in tracing_buffers_read()
8449 ret = ring_buffer_read_page(iter->array_buffer->buffer, in tracing_buffers_read()
8450 info->spare, in tracing_buffers_read()
8452 iter->cpu_file, 0); in tracing_buffers_read()
8453 trace_access_unlock(iter->cpu_file); in tracing_buffers_read()
8456 if (trace_empty(iter) && !iter->closed) { in tracing_buffers_read()
8457 if (update_last_data_if_empty(iter->tr)) in tracing_buffers_read()
8460 if ((filp->f_flags & O_NONBLOCK)) in tracing_buffers_read()
8461 return -EAGAIN; in tracing_buffers_read()
8472 info->read = 0; in tracing_buffers_read()
8474 size = page_size - info->read; in tracing_buffers_read()
8477 trace_data = ring_buffer_read_page_data(info->spare); in tracing_buffers_read()
8478 ret = copy_to_user(ubuf, trace_data + info->read, size); in tracing_buffers_read()
8480 return -EFAULT; in tracing_buffers_read()
8482 size -= ret; in tracing_buffers_read()
8485 info->read += size; in tracing_buffers_read()
8492 struct ftrace_buffer_info *info = file->private_data; in tracing_buffers_flush()
8493 struct trace_iterator *iter = &info->iter; in tracing_buffers_flush()
8495 iter->closed = true; in tracing_buffers_flush()
8497 (void)atomic_fetch_inc_release(&iter->wait_index); in tracing_buffers_flush()
8499 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_flush()
8506 struct ftrace_buffer_info *info = file->private_data; in tracing_buffers_release()
8507 struct trace_iterator *iter = &info->iter; in tracing_buffers_release()
8511 iter->tr->trace_ref--; in tracing_buffers_release()
8513 __trace_array_put(iter->tr); in tracing_buffers_release()
8515 if (info->spare) in tracing_buffers_release()
8516 ring_buffer_free_read_page(iter->array_buffer->buffer, in tracing_buffers_release()
8517 info->spare_cpu, info->spare); in tracing_buffers_release()
8532 if (!refcount_dec_and_test(&ref->refcount)) in buffer_ref_release()
8534 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); in buffer_ref_release()
8541 struct buffer_ref *ref = (struct buffer_ref *)buf->private; in buffer_pipe_buf_release()
8544 buf->private = 0; in buffer_pipe_buf_release()
8550 struct buffer_ref *ref = (struct buffer_ref *)buf->private; in buffer_pipe_buf_get()
8552 if (refcount_read(&ref->refcount) > INT_MAX/2) in buffer_pipe_buf_get()
8555 refcount_inc(&ref->refcount); in buffer_pipe_buf_get()
8572 (struct buffer_ref *)spd->partial[i].private; in buffer_spd_release()
8575 spd->partial[i].private = 0; in buffer_spd_release()
8583 struct ftrace_buffer_info *info = file->private_data; in tracing_buffers_splice_read()
8584 struct trace_iterator *iter = &info->iter; in tracing_buffers_splice_read()
8601 if (iter->snapshot && iter->tr->current_trace->use_max_tr) in tracing_buffers_splice_read()
8602 return -EBUSY; in tracing_buffers_splice_read()
8605 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); in tracing_buffers_splice_read()
8606 if (*ppos & (page_size - 1)) in tracing_buffers_splice_read()
8607 return -EINVAL; in tracing_buffers_splice_read()
8609 if (len & (page_size - 1)) { in tracing_buffers_splice_read()
8611 return -EINVAL; in tracing_buffers_splice_read()
8612 len &= (~(page_size - 1)); in tracing_buffers_splice_read()
8616 return -ENOMEM; in tracing_buffers_splice_read()
8619 trace_access_lock(iter->cpu_file); in tracing_buffers_splice_read()
8620 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
8622 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) { in tracing_buffers_splice_read()
8628 ret = -ENOMEM; in tracing_buffers_splice_read()
8632 refcount_set(&ref->refcount, 1); in tracing_buffers_splice_read()
8633 ref->buffer = iter->array_buffer->buffer; in tracing_buffers_splice_read()
8634 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); in tracing_buffers_splice_read()
8635 if (IS_ERR(ref->page)) { in tracing_buffers_splice_read()
8636 ret = PTR_ERR(ref->page); in tracing_buffers_splice_read()
8637 ref->page = NULL; in tracing_buffers_splice_read()
8641 ref->cpu = iter->cpu_file; in tracing_buffers_splice_read()
8643 r = ring_buffer_read_page(ref->buffer, ref->page, in tracing_buffers_splice_read()
8644 len, iter->cpu_file, 1); in tracing_buffers_splice_read()
8646 ring_buffer_free_read_page(ref->buffer, ref->cpu, in tracing_buffers_splice_read()
8647 ref->page); in tracing_buffers_splice_read()
8652 page = virt_to_page(ring_buffer_read_page_data(ref->page)); in tracing_buffers_splice_read()
8661 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_splice_read()
8664 trace_access_unlock(iter->cpu_file); in tracing_buffers_splice_read()
8676 ret = -EAGAIN; in tracing_buffers_splice_read()
8677 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) in tracing_buffers_splice_read()
8680 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent); in tracing_buffers_splice_read()
8685 if (!tracer_tracing_is_on(iter->tr)) in tracing_buffers_splice_read()
8703 struct ftrace_buffer_info *info = file->private_data; in tracing_buffers_ioctl()
8704 struct trace_iterator *iter = &info->iter; in tracing_buffers_ioctl()
8708 if (!(file->f_flags & O_NONBLOCK)) { in tracing_buffers_ioctl()
8709 err = ring_buffer_wait(iter->array_buffer->buffer, in tracing_buffers_ioctl()
8710 iter->cpu_file, in tracing_buffers_ioctl()
8711 iter->tr->buffer_percent, in tracing_buffers_ioctl()
8717 return ring_buffer_map_get_reader(iter->array_buffer->buffer, in tracing_buffers_ioctl()
8718 iter->cpu_file); in tracing_buffers_ioctl()
8720 return -ENOTTY; in tracing_buffers_ioctl()
8730 (void)atomic_fetch_inc_release(&iter->wait_index); in tracing_buffers_ioctl()
8732 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); in tracing_buffers_ioctl()
8747 spin_lock(&tr->snapshot_trigger_lock); in get_snapshot_map()
8749 if (tr->snapshot || tr->mapped == UINT_MAX) in get_snapshot_map()
8750 err = -EBUSY; in get_snapshot_map()
8752 tr->mapped++; in get_snapshot_map()
8754 spin_unlock(&tr->snapshot_trigger_lock); in get_snapshot_map()
8756 /* Wait for update_max_tr() to observe iter->tr->mapped */ in get_snapshot_map()
8757 if (tr->mapped == 1) in get_snapshot_map()
8765 spin_lock(&tr->snapshot_trigger_lock); in put_snapshot_map()
8766 if (!WARN_ON(!tr->mapped)) in put_snapshot_map()
8767 tr->mapped--; in put_snapshot_map()
8768 spin_unlock(&tr->snapshot_trigger_lock); in put_snapshot_map()
8777 struct ftrace_buffer_info *info = vma->vm_file->private_data; in tracing_buffers_mmap_close()
8778 struct trace_iterator *iter = &info->iter; in tracing_buffers_mmap_close()
8780 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file)); in tracing_buffers_mmap_close()
8781 put_snapshot_map(iter->tr); in tracing_buffers_mmap_close()
8790 struct ftrace_buffer_info *info = filp->private_data; in tracing_buffers_mmap()
8791 struct trace_iterator *iter = &info->iter; in tracing_buffers_mmap()
8795 if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP) in tracing_buffers_mmap()
8796 return -ENODEV; in tracing_buffers_mmap()
8798 ret = get_snapshot_map(iter->tr); in tracing_buffers_mmap()
8802 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma); in tracing_buffers_mmap()
8804 put_snapshot_map(iter->tr); in tracing_buffers_mmap()
8806 vma->vm_ops = &tracing_buffers_vmops; in tracing_buffers_mmap()
8827 struct trace_array *tr = inode->i_private; in tracing_stats_read()
8828 struct array_buffer *trace_buf = &tr->array_buffer; in tracing_stats_read()
8837 return -ENOMEM; in tracing_stats_read()
8841 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8844 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8847 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8850 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8853 if (trace_clocks[tr->clock_id].in_ns) { in tracing_stats_read()
8855 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); in tracing_stats_read()
8860 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); in tracing_stats_read()
8866 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); in tracing_stats_read()
8869 ring_buffer_time_stamp(trace_buf->buffer)); in tracing_stats_read()
8872 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8875 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); in tracing_stats_read()
8879 s->buffer, trace_seq_used(s)); in tracing_stats_read()
8908 return -ENOMEM; in tracing_read_dyn_info()
8957 (*count)--; in ftrace_count_snapshot()
8994 return -ENOMEM; in ftrace_snapshot_init()
9034 void *count = (void *)-1; in ftrace_trace_snapshot_callback()
9039 return -ENODEV; in ftrace_trace_snapshot_callback()
9043 return -EINVAL; in ftrace_trace_snapshot_callback()
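/*
 * This callback is reached via the function-trigger syntax documented
 * in ftrace.rst, e.g.:
 *   echo 'schedule:snapshot:1' > set_ftrace_filter
 * where the optional trailing count limits how many hits may trigger
 * a snapshot.
 */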
9099 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) in tracing_get_dentry()
9102 if (WARN_ON(!tr->dir)) in tracing_get_dentry()
9103 return ERR_PTR(-ENODEV); in tracing_get_dentry()
9106 return tr->dir; in tracing_get_dentry()
9113 if (tr->percpu_dir) in tracing_dentry_percpu()
9114 return tr->percpu_dir; in tracing_dentry_percpu()
9120 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); in tracing_dentry_percpu()
9122 MEM_FAIL(!tr->percpu_dir, in tracing_dentry_percpu()
9125 return tr->percpu_dir; in tracing_dentry_percpu()
9135 d_inode(ret)->i_cdev = (void *)(cpu + 1); in trace_create_cpu_file()
9173 if (tr->range_addr_start) in tracing_init_tracefs_percpu()
9177 if (!tr->range_addr_start) { in tracing_init_tracefs_percpu()
9196 struct trace_option_dentry *topt = filp->private_data; in trace_options_read()
9199 if (topt->flags->val & topt->opt->bit) in trace_options_read()
9211 struct trace_option_dentry *topt = filp->private_data; in trace_options_write()
9220 return -EINVAL; in trace_options_write()
9222 if (!!(topt->flags->val & topt->opt->bit) != val) { in trace_options_write()
9224 ret = __set_tracer_option(topt->tr, topt->flags, in trace_options_write()
9225 topt->opt, !val); in trace_options_write()
9237 struct trace_option_dentry *topt = inode->i_private; in tracing_open_options()
9240 ret = tracing_check_open_get_tr(topt->tr); in tracing_open_options()
9244 filp->private_data = inode->i_private; in tracing_open_options()
9250 struct trace_option_dentry *topt = file->private_data; in tracing_release_options()
9252 trace_array_put(topt->tr); in tracing_release_options()
9283 * ptr - idx == &index[0]
9293 *ptr = container_of(data - *pindex, struct trace_array, in get_tr_index()
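/*
 * Worked example of the pointer trick above: each options/<flag> core
 * file stores &tr->trace_flags_index[bit] as its private data. Because
 * init_trace_flags_index() fills the array so that
 * trace_flags_index[i] == i, subtracting the byte's own value from its
 * address lands on &trace_flags_index[0], from which container_of()
 * recovers the owning trace_array.
 */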
9301 void *tr_index = filp->private_data; in trace_options_core_read()
9308 if (tr->trace_flags & (1 << index)) in trace_options_core_read()
9320 void *tr_index = filp->private_data; in trace_options_core_write()
9333 return -EINVAL; in trace_options_core_write()
9376 if (tr->options) in trace_options_init_dentry()
9377 return tr->options; in trace_options_init_dentry()
9383 tr->options = tracefs_create_dir("options", d_tracer); in trace_options_init_dentry()
9384 if (!tr->options) { in trace_options_init_dentry()
9389 return tr->options; in trace_options_init_dentry()
9404 topt->flags = flags; in create_trace_option_file()
9405 topt->opt = opt; in create_trace_option_file()
9406 topt->tr = tr; in create_trace_option_file()
9408 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, in create_trace_option_file()
9426 flags = tracer->flags; in create_trace_option_files()
9428 if (!flags || !flags->opts) in create_trace_option_files()
9438 for (i = 0; i < tr->nr_topts; i++) { in create_trace_option_files()
9440 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) in create_trace_option_files()
9444 opts = flags->opts; in create_trace_option_files()
9453 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), in create_trace_option_files()
9460 tr->topts = tr_topts; in create_trace_option_files()
9461 tr->topts[tr->nr_topts].tracer = tracer; in create_trace_option_files()
9462 tr->topts[tr->nr_topts].topts = topts; in create_trace_option_files()
9463 tr->nr_topts++; in create_trace_option_files()
9485 (void *)&tr->trace_flags_index[index], in create_trace_option_core_file()
9510 struct trace_array *tr = filp->private_data; in rb_simple_read()
9524 struct trace_array *tr = filp->private_data; in rb_simple_write()
9525 struct trace_buffer *buffer = tr->array_buffer.buffer; in rb_simple_write()
9539 if (tr->current_trace->start) in rb_simple_write()
9540 tr->current_trace->start(tr); in rb_simple_write()
9543 if (tr->current_trace->stop) in rb_simple_write()
9544 tr->current_trace->stop(tr); in rb_simple_write()
9567 struct trace_array *tr = filp->private_data; in buffer_percent_read()
9571 r = tr->buffer_percent; in buffer_percent_read()
9581 struct trace_array *tr = filp->private_data; in buffer_percent_write()
9590 return -EINVAL; in buffer_percent_write()
9592 tr->buffer_percent = val; in buffer_percent_write()
9610 struct trace_array *tr = filp->private_data; in buffer_subbuf_size_read()
9616 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); in buffer_subbuf_size_read()
9628 struct trace_array *tr = filp->private_data; in buffer_subbuf_size_write()
9642 order = fls(pages - 1); in buffer_subbuf_size_write()
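	/*
	 * fls(pages - 1) rounds the requested sub-buffer page count up to
	 * the next power of two, e.g. pages == 5: fls(4) == 3, giving
	 * 1 << 3 == 8 pages per sub-buffer.
	 */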
9646 return -EINVAL; in buffer_subbuf_size_write()
9651 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); in buffer_subbuf_size_write()
9655 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order); in buffer_subbuf_size_write()
9661 if (!tr->allocated_snapshot) in buffer_subbuf_size_write()
9664 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); in buffer_subbuf_size_write()
9667 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order); in buffer_subbuf_size_write()
9718 tscratch = tr->scratch; in make_mod_delta()
9719 module_delta = READ_ONCE(tr->module_delta); in make_mod_delta()
9720 for (i = 0; i < tscratch->nr_entries; i++) { in make_mod_delta()
9721 entry = &tscratch->entries[i]; in make_mod_delta()
9722 if (strcmp(mod->name, entry->mod_name)) in make_mod_delta()
9724 if (mod->state == MODULE_STATE_GOING) in make_mod_delta()
9725 module_delta->delta[i] = 0; in make_mod_delta()
9727 module_delta->delta[i] = (unsigned long)mod->mem[MOD_TEXT].base in make_mod_delta()
9728 - entry->mod_addr; in make_mod_delta()
9745 return e1->mod_addr > e2->mod_addr ? 1 : -1; in mod_addr_comp()
9758 tr->scratch = tscratch; in setup_trace_scratch()
9759 tr->scratch_size = size; in setup_trace_scratch()
9761 if (tscratch->text_addr) in setup_trace_scratch()
9762 tr->text_delta = (unsigned long)_text - tscratch->text_addr; in setup_trace_scratch()
9764 if (struct_size(tscratch, entries, tscratch->nr_entries) > size) in setup_trace_scratch()
9768 for (i = 0; i < tscratch->nr_entries; i++) { in setup_trace_scratch()
9771 entry = &tscratch->entries[i]; in setup_trace_scratch()
9774 if (entry->mod_name[n] == '\0') in setup_trace_scratch()
9776 if (!isprint(entry->mod_name[n])) in setup_trace_scratch()
9784 nr_entries = tscratch->nr_entries; in setup_trace_scratch()
9785 sort_r(tscratch->entries, nr_entries, sizeof(struct trace_mod_entry), in setup_trace_scratch()
9794 init_rcu_head(&module_delta->rcu); in setup_trace_scratch()
9797 WRITE_ONCE(tr->module_delta, module_delta); in setup_trace_scratch()
9803 if (tscratch->clock_id != tr->clock_id) { in setup_trace_scratch()
9804 if (tscratch->clock_id >= ARRAY_SIZE(trace_clocks) || in setup_trace_scratch()
9805 tracing_set_clock(tr, trace_clocks[tscratch->clock_id].name) < 0) { in setup_trace_scratch()
9823 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; in allocate_trace_buffer()
9825 buf->tr = tr; in allocate_trace_buffer()
9827 if (tr->range_addr_start && tr->range_addr_size) { in allocate_trace_buffer()
9829 buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0, in allocate_trace_buffer()
9830 tr->range_addr_start, in allocate_trace_buffer()
9831 tr->range_addr_size, in allocate_trace_buffer()
9834 tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size); in allocate_trace_buffer()
9841 tr->mapped++; in allocate_trace_buffer()
9843 buf->buffer = ring_buffer_alloc(size, rb_flags); in allocate_trace_buffer()
9845 if (!buf->buffer) in allocate_trace_buffer()
9846 return -ENOMEM; in allocate_trace_buffer()
9848 buf->data = alloc_percpu(struct trace_array_cpu); in allocate_trace_buffer()
9849 if (!buf->data) { in allocate_trace_buffer()
9850 ring_buffer_free(buf->buffer); in allocate_trace_buffer()
9851 buf->buffer = NULL; in allocate_trace_buffer()
9852 return -ENOMEM; in allocate_trace_buffer()
9856 set_buffer_entries(&tr->array_buffer, in allocate_trace_buffer()
9857 ring_buffer_size(tr->array_buffer.buffer, 0)); in allocate_trace_buffer()
9864 if (buf->buffer) { in free_trace_buffer()
9865 ring_buffer_free(buf->buffer); in free_trace_buffer()
9866 buf->buffer = NULL; in free_trace_buffer()
9867 free_percpu(buf->data); in free_trace_buffer()
9868 buf->data = NULL; in free_trace_buffer()
9876 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); in allocate_trace_buffers()
9882 if (tr->range_addr_start) in allocate_trace_buffers()
9885 ret = allocate_trace_buffer(tr, &tr->max_buffer, in allocate_trace_buffers()
9888 free_trace_buffer(&tr->array_buffer); in allocate_trace_buffers()
9889 return -ENOMEM; in allocate_trace_buffers()
9891 tr->allocated_snapshot = allocate_snapshot; in allocate_trace_buffers()
9904 free_trace_buffer(&tr->array_buffer); in free_trace_buffers()
9905 kfree(tr->module_delta); in free_trace_buffers()
9908 free_trace_buffer(&tr->max_buffer); in free_trace_buffers()
9918 tr->trace_flags_index[i] = i; in init_trace_flags_index()
9925 for (t = trace_types; t; t = t->next) in __update_tracer_options()
9942 if (tr->name && strcmp(tr->name, instance) == 0) { in trace_array_find()
9958 tr->ref++; in trace_array_find_get()
9967 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); in trace_array_create_dir()
9968 if (!tr->dir) in trace_array_create_dir()
9969 return -EINVAL; in trace_array_create_dir()
9971 ret = event_trace_add_tracer(tr->dir, tr); in trace_array_create_dir()
9973 tracefs_remove(tr->dir); in trace_array_create_dir()
9977 init_tracer_tracefs(tr, tr->dir); in trace_array_create_dir()
9991 ret = -ENOMEM; in trace_array_create_systems()
9996 tr->name = kstrdup(name, GFP_KERNEL); in trace_array_create_systems()
9997 if (!tr->name) in trace_array_create_systems()
10000 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) in trace_array_create_systems()
10003 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) in trace_array_create_systems()
10007 tr->system_names = kstrdup_const(systems, GFP_KERNEL); in trace_array_create_systems()
10008 if (!tr->system_names) in trace_array_create_systems()
10013 tr->range_addr_start = range_addr_start; in trace_array_create_systems()
10014 tr->range_addr_size = range_addr_size; in trace_array_create_systems()
10016 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; in trace_array_create_systems()
10018 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); in trace_array_create_systems()
10020 raw_spin_lock_init(&tr->start_lock); in trace_array_create_systems()
10022 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in trace_array_create_systems()
10024 spin_lock_init(&tr->snapshot_trigger_lock); in trace_array_create_systems()
10026 tr->current_trace = &nop_trace; in trace_array_create_systems()
10028 INIT_LIST_HEAD(&tr->systems); in trace_array_create_systems()
10029 INIT_LIST_HEAD(&tr->events); in trace_array_create_systems()
10030 INIT_LIST_HEAD(&tr->hist_vars); in trace_array_create_systems()
10031 INIT_LIST_HEAD(&tr->err_log); in trace_array_create_systems()
10032 INIT_LIST_HEAD(&tr->marker_list); in trace_array_create_systems()
10035 INIT_LIST_HEAD(&tr->mod_events); in trace_array_create_systems()
10058 list_add(&tr->list, &ftrace_trace_arrays); in trace_array_create_systems()
10060 tr->ref++; in trace_array_create_systems()
10067 free_cpumask_var(tr->pipe_cpumask); in trace_array_create_systems()
10068 free_cpumask_var(tr->tracing_cpumask); in trace_array_create_systems()
10069 kfree_const(tr->system_names); in trace_array_create_systems()
10070 kfree(tr->range_name); in trace_array_create_systems()
10071 kfree(tr->name); in trace_array_create_systems()
10090 ret = -EEXIST; in instance_mkdir()
10092 return -EEXIST; in instance_mkdir()
10112 vmap_start = (unsigned long) area->addr; in map_pages()
10132 * trace_array_get_by_name - Create or look up a trace array, given its name.
10156 if (tr->name && strcmp(tr->name, name) == 0) { in trace_array_get_by_name()
10157 tr->ref++; in trace_array_get_by_name()
10167 tr->ref++; in trace_array_get_by_name()
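/*
 * Illustrative module-side sketch: look up (or create) a named instance
 * and drop the reference when done. The two-argument form with a NULL
 * "systems" string (selecting all event systems) is an assumption based
 * on the trace_array_create_systems() code above.
 */
static void my_instance_user(void)
{
	struct trace_array *tr = trace_array_get_by_name("my_instance", NULL);

	if (!tr)
		return;
	/* ... use tr ... */
	trace_array_put(tr);	/* undo the tr->ref++ taken by the lookup */
}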
10178 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) in __remove_instance()
10179 return -EBUSY; in __remove_instance()
10181 list_del(&tr->list); in __remove_instance()
10200 tracefs_remove(tr->dir); in __remove_instance()
10201 free_percpu(tr->last_func_repeats); in __remove_instance()
10205 if (tr->range_name) { in __remove_instance()
10206 reserve_mem_release_by_name(tr->range_name); in __remove_instance()
10207 kfree(tr->range_name); in __remove_instance()
10210 for (i = 0; i < tr->nr_topts; i++) { in __remove_instance()
10211 kfree(tr->topts[i].topts); in __remove_instance()
10213 kfree(tr->topts); in __remove_instance()
10215 free_cpumask_var(tr->pipe_cpumask); in __remove_instance()
10216 free_cpumask_var(tr->tracing_cpumask); in __remove_instance()
10217 kfree_const(tr->system_names); in __remove_instance()
10218 kfree(tr->name); in __remove_instance()
10229 return -EINVAL; in trace_array_destroy()
10241 return -ENODEV; in trace_array_destroy()
10254 return -ENODEV; in instance_rmdir()
10273 if (!tr->name) in create_trace_instances()
10316 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); in init_tracer_tracefs()
10330 tr->buffer_percent = 50; in init_tracer_tracefs()
10347 if (tr->range_addr_start) { in init_tracer_tracefs()
10402 * tracing_init_dentry - initialize top level trace array
10414 return -EPERM; in tracing_init_dentry()
10418 if (tr->dir) in tracing_init_dentry()
10422 return -ENODEV; in tracing_init_dentry()
10431 tr->dir = debugfs_create_automount("tracing", NULL, in tracing_init_dentry()
10449 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; in eval_map_work_func()
10462 return -ENOMEM; in trace_eval_init()
10494 if (n > sizeof(modname) - 1) in module_exists()
10511 trace_event_update_with_eval_map(mod, mod->trace_evals, mod->num_trace_evals); in trace_module_add_evals()
10520 if (!mod->num_trace_evals) in trace_module_remove_evals()
10528 if (map->head.mod == mod) in trace_module_remove_evals()
10531 last = &map->tail.next; in trace_module_remove_evals()
10532 map = map->tail.next; in trace_module_remove_evals()
10537 *last = trace_eval_jmp_to_tail(map)->tail.next; in trace_module_remove_evals()
10550 flags = tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT); in trace_module_record()
10655 .priority = INT_MAX - 1,
10660 .priority = INT_MAX - 1,
10701 if (s->seq.len >= TRACE_MAX_PRINT) in trace_printk_seq()
10702 s->seq.len = TRACE_MAX_PRINT; in trace_printk_seq()
10709 if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) in trace_printk_seq()
10710 s->seq.len = s->seq.size - 1; in trace_printk_seq()
10713 s->buffer[s->seq.len] = 0; in trace_printk_seq()
10715 printk(KERN_TRACE "%s", s->buffer); in trace_printk_seq()
10722 iter->tr = tr; in trace_init_iter()
10723 iter->trace = iter->tr->current_trace; in trace_init_iter()
10724 iter->cpu_file = RING_BUFFER_ALL_CPUS; in trace_init_iter()
10725 iter->array_buffer = &tr->array_buffer; in trace_init_iter()
10727 if (iter->trace && iter->trace->open) in trace_init_iter()
10728 iter->trace->open(iter); in trace_init_iter()
10731 if (ring_buffer_overruns(iter->array_buffer->buffer)) in trace_init_iter()
10732 iter->iter_flags |= TRACE_FILE_ANNOTATE; in trace_init_iter()
10735 if (trace_clocks[iter->tr->clock_id].in_ns) in trace_init_iter()
10736 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; in trace_init_iter()
10739 iter->temp = static_temp_buf; in trace_init_iter()
10740 iter->temp_size = STATIC_TEMP_BUF_SIZE; in trace_init_iter()
10741 iter->fmt = static_fmt_buf; in trace_init_iter()
10742 iter->fmt_size = STATIC_FMT_BUF_SIZE; in trace_init_iter()
10763 * If the user does a sysrq-z, then they can re-enable in ftrace_dump_one()
10776 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; in ftrace_dump_one()
10779 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; in ftrace_dump_one()
10789 printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name); in ftrace_dump_one()
10807 printk(KERN_TRACE "---------------------------------\n"); in ftrace_dump_one()
10829 printk(KERN_TRACE "---------------------------------\n"); in ftrace_dump_one()
10831 tr->trace_flags |= old_userobj; in ftrace_dump_one()
10923 return -ENOMEM; in trace_parse_run_command()
10926 size = count - done; in trace_parse_run_command()
10929 size = WRITE_BUFSIZE - 1; in trace_parse_run_command()
10932 return -EFAULT; in trace_parse_run_command()
10940 size = tmp - buf + 1; in trace_parse_run_command()
10946 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ in trace_parse_run_command()
10948 WRITE_BUFSIZE - 2); in trace_parse_run_command()
10949 return -EINVAL; in trace_parse_run_command()
11021 boot_instance_info[boot_instance_index - 1] = '\0'; in enable_instances()
11137 tr->flags |= TRACE_ARRAY_FL_MEMMAP; in enable_instances()
11138 tr->ref++; in enable_instances()
11142 tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT; in enable_instances()
11143 tr->range_name = no_free_ptr(rname); in enable_instances()
11155 int ret = -ENOMEM; in tracer_alloc_buffers()
11160 return -EPERM; in tracer_alloc_buffers()
11170 return -ENOMEM; in tracer_alloc_buffers()
11203 ret = -ENOMEM; in tracer_alloc_buffers()
11308 if (!tr->allocated_snapshot) in ftrace_boot_snapshot()