Selected lines from kernel/trace/trace.c (the ftrace tracing core)
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
12 * Copyright (C) 2004-2006 Ingo Molnar
64 * A selftest will lurk into the ring-buffer to count the
66 * insertions into the ring-buffer such as trace_printk could occur
67 * at the same time, giving false positive or negative results.
72 * If boot-time tracing including tracers/events via kernel cmdline
126 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
262 strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
286 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
292 return -1;
318 int left = sizeof(boot_instance_info) - boot_instance_index;
322 return -1;
392 if (export->flags & flag) {
395 export->write(export, entry, size);
409 if (export->flags & TRACE_EXPORT_FUNCTION)
412 if (export->flags & TRACE_EXPORT_EVENT)
415 if (export->flags & TRACE_EXPORT_MARKER)
421 if (export->flags & TRACE_EXPORT_FUNCTION)
424 if (export->flags & TRACE_EXPORT_EVENT)
427 if (export->flags & TRACE_EXPORT_MARKER)
440 export = rcu_dereference_raw_check(export->next);
447 rcu_assign_pointer(export->next, *list);
451 * the export->next pointer is valid before another CPU sees
462 for (p = list; *p != NULL; p = &(*p)->next)
467 return -1;
469 rcu_assign_pointer(*p, (*p)->next);
495 if (WARN_ON_ONCE(!export->write))
496 return -1;
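
/*
 * Hedged sketch (not from this file): the shape of a trace export
 * registered against the ftrace_exports() hooks above, similar to what
 * the stm_ftrace bridge does. All names here are illustrative.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward the raw ring-buffer entry to an external sink */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT,
};

/* register_ftrace_export(&example_export) adds it to the RCU list
 * walked above; unregister_ftrace_export() removes it again. */
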
533 * The global_trace is the descriptor that holds the top-level tracing
553 return !(tr->flags & TRACE_ARRAY_FL_BOOT);
561 printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
563 tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
572 if (!list_empty(&tr->marker_list))
575 list_add_rcu(&tr->marker_list, &marker_copies);
576 tr->trace_flags |= TRACE_ITER_COPY_MARKER;
580 if (list_empty(&tr->marker_list))
583 list_del_init(&tr->marker_list);
584 tr->trace_flags &= ~TRACE_ITER_COPY_MARKER;
592 tr->ring_buffer_expanded = true;
604 tr->ref++;
609 return -ENODEV;
614 WARN_ON(!this_tr->ref);
615 this_tr->ref--;
619 * trace_array_put - Decrement the reference counter for this trace array.
646 return -ENODEV;
649 return -ENODEV;
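
/*
 * Hedged usage sketch, modeled on samples/ftrace/sample-trace-array.c:
 * a module must balance the tr->ref counting shown above. The instance
 * name is illustrative; note that trace_array_get_by_name() takes an
 * extra "systems" argument on recent kernels.
 */
struct trace_array *tr;

tr = trace_array_get_by_name("sample-instance", NULL);	/* tr->ref++ */
if (!tr)
	return -ENOMEM;
/* ... use the instance ... */
trace_array_put(tr);		/* tr->ref-- */
trace_array_destroy(tr);	/* -EBUSY if a reference is still held */
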
655 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
668 * trace_ignore_this_task - should a task be ignored for tracing
691 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
693 trace_find_filtered_pid(filtered_no_pids, task->pid));
697 * trace_filter_add_remove_task - Add or remove a task from a pid_list
717 if (!trace_find_filtered_pid(pid_list, self->pid))
723 trace_pid_list_set(pid_list, task->pid);
725 trace_pid_list_clear(pid_list, task->pid);
729 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
758 * trace_pid_start - Used for seq_file to start reading pid lists
787 * trace_pid_show - show the current pid in seq_file processing
796 unsigned long pid = (unsigned long)v - 1;
819 return -ENOMEM;
830 return -ENOMEM;
857 cnt -= ret;
862 ret = -EINVAL;
869 ret = -1;
901 if (!buf->buffer)
904 ts = ring_buffer_time_stamp(buf->buffer);
905 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
916 * tracing_is_enabled - Show if global_trace has been enabled
942 * boot time and run time configurable.
974 * These primitives don't distinguish read-only and read-consume access.
975 * Multiple read-only accesses are also serialized.
1090 if (tr->array_buffer.buffer)
1091 ring_buffer_record_on(tr->array_buffer.buffer);
1100 tr->buffer_disabled = 0;
1104 * tracing_on - enable tracing buffers
1123 /* Length is in event->array[0] */
1124 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1142 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1154 buffer = tr->array_buffer.buffer;
1162 entry->ip = ip;
1164 memcpy(&entry->buf, str, size);
1167 if (entry->buf[size - 1] != '\n') {
1168 entry->buf[size] = '\n';
1169 entry->buf[size + 1] = '\0';
1171 entry->buf[size] = '\0';
1180 * __trace_puts - write a constant string into the trace buffer.
1192 * __trace_bputs - write the pointer to a constant string into the trace buffer
1208 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1215 buffer = tr->array_buffer.buffer;
1224 entry->ip = ip;
1225 entry->str = str;
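
/*
 * Hedged sketch: the usual front ends to the two functions above.
 * trace_puts() resolves to __trace_puts() or __trace_bputs() depending
 * on whether the string is a build-time constant, and trace_printk()
 * does the analogous split for formatted output.
 */
static void example_debug_hook(int cpu)
{
	trace_puts("entered example_debug_hook\n");
	trace_printk("cpu=%d jiffies=%lu\n", cpu, jiffies);
}
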
1238 struct tracer *tracer = tr->current_trace;
1247 if (!tr->allocated_snapshot) {
1255 if (tracer->use_max_tr) {
1261 if (tr->mapped) {
1278 * tracing_snapshot - take a snapshot of the current buffer.
1300 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1305 * conditional - the snapshot will only happen if the
1319 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1323 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1327 * the tr->max_lock lock, which the code calling
1337 arch_spin_lock(&tr->max_lock);
1339 if (tr->cond_snapshot)
1340 cond_data = tr->cond_snapshot->cond_data;
1342 arch_spin_unlock(&tr->max_lock);
1358 if (!tr->allocated_snapshot) {
1361 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1362 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1367 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1368 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1372 tr->allocated_snapshot = true;
1382 * The max_tr ring buffer has some state (e.g. ring->clock) and
1385 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1386 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1387 set_buffer_entries(&tr->max_buffer, 1);
1388 tracing_reset_online_cpus(&tr->max_buffer);
1389 tr->allocated_snapshot = false;
1398 spin_lock(&tr->snapshot_trigger_lock);
1399 if (tr->snapshot == UINT_MAX || tr->mapped) {
1400 spin_unlock(&tr->snapshot_trigger_lock);
1401 return -EBUSY;
1404 tr->snapshot++;
1405 spin_unlock(&tr->snapshot_trigger_lock);
1409 spin_lock(&tr->snapshot_trigger_lock);
1410 tr->snapshot--;
1411 spin_unlock(&tr->snapshot_trigger_lock);
1425 spin_lock(&tr->snapshot_trigger_lock);
1426 if (!WARN_ON(!tr->snapshot))
1427 tr->snapshot--;
1428 spin_unlock(&tr->snapshot_trigger_lock);
1432 * tracing_alloc_snapshot - allocate snapshot buffer.
1435 * allocated - it doesn't also take a snapshot.
1454 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1477 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1484 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1497 return -ENOMEM;
1499 cond_snapshot->cond_data = cond_data;
1500 cond_snapshot->update = update;
1504 if (tr->current_trace->use_max_tr)
1505 return -EBUSY;
1515 if (tr->cond_snapshot)
1516 return -EBUSY;
1523 arch_spin_lock(&tr->max_lock);
1524 tr->cond_snapshot = no_free_ptr(cond_snapshot);
1525 arch_spin_unlock(&tr->max_lock);
1533 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1538 * otherwise return -EINVAL.
1547 arch_spin_lock(&tr->max_lock);
1549 if (!tr->cond_snapshot)
1550 ret = -EINVAL;
1552 kfree(tr->cond_snapshot);
1553 tr->cond_snapshot = NULL;
1556 arch_spin_unlock(&tr->max_lock);
1578 return -ENODEV;
1594 return -ENODEV;
1603 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
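
/*
 * Hedged usage sketch (CONFIG_TRACER_SNAPSHOT): allocate the snapshot
 * buffer once, then swap it in at the point of interest. Without the
 * allocation, tracing_snapshot() only emits the warning seen above.
 */
static void example_capture(void)
{
	if (tracing_alloc_snapshot())	/* returns 0 on success */
		return;
	/* ... when the condition of interest fires: */
	tracing_snapshot();		/* live buffer -> max buffer */
}
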
1608 if (tr->array_buffer.buffer)
1609 ring_buffer_record_off(tr->array_buffer.buffer);
1618 tr->buffer_disabled = 1;
1622 * tracer_tracing_disable() - temporarily disable writes to the buffer
1625 * Expects tracer_tracing_enable() to re-enable tracing.
1633 if (WARN_ON_ONCE(!tr->array_buffer.buffer))
1636 ring_buffer_record_disable(tr->array_buffer.buffer);
1640 * tracer_tracing_enable() - counterpart of tracer_tracing_disable()
1644 * when it's safe to re-enable tracing.
1648 if (WARN_ON_ONCE(!tr->array_buffer.buffer))
1651 ring_buffer_record_enable(tr->array_buffer.buffer);
1655 * tracing_off - turn off tracing buffers
1678 * tracer_tracing_is_on - show real state of ring buffer enabled
1685 if (tr->array_buffer.buffer)
1686 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1687 return !tr->buffer_disabled;
1691 * tracing_is_on - show state of ring buffers enabled
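
/*
 * Hedged sketch: freezing the ring buffer on a detected bad state so
 * the events leading up to it survive. The predicate is made up.
 */
if (bad_state_detected()) {
	tracing_off();	/* tracing_is_on() now returns 0 */
	pr_warn("tracing stopped; see /sys/kernel/tracing/trace\n");
}
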
1731 unsigned long nsecs_to_usecs(unsigned long nsecs)
1733 return nsecs / 1000;
1770 if (trace_clocks[tr->clock_id].in_ns)
1777 * trace_parser_get_init - gets the buffer for trace parser
1783 parser->buffer = kmalloc(size, GFP_KERNEL);
1784 if (!parser->buffer)
1787 parser->size = size;
1792 * trace_parser_put - frees the buffer for trace parser
1796 kfree(parser->buffer);
1797 parser->buffer = NULL;
1801 * trace_get_user - reads the user input string separated by space
1826 cnt--;
1832 if (!parser->cont) {
1839 cnt--;
1842 parser->idx = 0;
1851 /* read the non-space input */
1853 if (parser->idx < parser->size - 1)
1854 parser->buffer[parser->idx++] = ch;
1856 ret = -EINVAL;
1864 cnt--;
1869 parser->buffer[parser->idx] = 0;
1870 parser->cont = false;
1871 } else if (parser->idx < parser->size - 1) {
1872 parser->cont = true;
1873 parser->buffer[parser->idx++] = ch;
1875 parser->buffer[parser->idx] = 0;
1877 ret = -EINVAL;
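
/*
 * Hedged sketch of the pattern the filter-file write handlers build on
 * top of trace_get_user(): one space-separated token per call.
 * process_token() is a placeholder.
 */
struct trace_parser parser;
ssize_t read;

if (trace_parser_get_init(&parser, PAGE_SIZE))
	return -ENOMEM;

read = trace_get_user(&parser, ubuf, cnt, ppos);
if (read >= 0 && trace_parser_loaded(&parser))
	process_token(parser.buffer);	/* NUL-terminated token */

trace_parser_put(&parser);
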
1893 if (trace_seq_used(s) <= s->readpos)
1894 return -EBUSY;
1896 len = trace_seq_used(s) - s->readpos;
1899 memcpy(buf, s->buffer + s->readpos, cnt);
1901 s->readpos += cnt;
1918 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1925 queue_work(fsnotify_wq, &tr->fsnotify_work);
1931 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1932 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1933 tr->d_max_latency = trace_create_file("tracing_max_latency",
1945 return -ENOMEM;
1957 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1961 irq_work_queue(&tr->fsnotify_irqwork);
1973 * Copy the new maximum trace into the separate maximum-trace
1980 struct array_buffer *trace_buf = &tr->array_buffer;
1981 struct array_buffer *max_buf = &tr->max_buffer;
1982 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1983 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1985 max_buf->cpu = cpu;
1986 max_buf->time_start = data->preempt_timestamp;
1988 max_data->saved_latency = tr->max_latency;
1989 max_data->critical_start = data->critical_start;
1990 max_data->critical_end = data->critical_end;
1992 strscpy(max_data->comm, tsk->comm);
1993 max_data->pid = tsk->pid;
1999 max_data->uid = current_uid();
2001 max_data->uid = task_uid(tsk);
2003 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
2004 max_data->policy = tsk->policy;
2005 max_data->rt_priority = tsk->rt_priority;
2013 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
2026 if (tr->stop_count)
2031 if (!tr->allocated_snapshot) {
2033 WARN_ON_ONCE(tr->current_trace != &nop_trace);
2037 arch_spin_lock(&tr->max_lock);
2040 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
2041 ring_buffer_record_on(tr->max_buffer.buffer);
2043 ring_buffer_record_off(tr->max_buffer.buffer);
2046 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
2047 arch_spin_unlock(&tr->max_lock);
2051 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
2055 arch_spin_unlock(&tr->max_lock);
2058 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
2062 * update_max_tr_single - only copy one trace over, and reset the rest
2074 if (tr->stop_count)
2078 if (!tr->allocated_snapshot) {
2080 WARN_ON_ONCE(tr->current_trace != &nop_trace);
2084 arch_spin_lock(&tr->max_lock);
2086 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2088 if (ret == -EBUSY) {
2096 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2100 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2103 arch_spin_unlock(&tr->max_lock);
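
/*
 * Hedged sketch: how a latency tracer drives the two helpers above
 * (the wakeup and irqsoff tracers do essentially this).
 */
if (delta > tr->max_latency) {
	tr->max_latency = delta;		/* record new worst case */
	update_max_tr(tr, current, cpu, NULL);	/* swap buffers, save task info */
}
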
2116 struct trace_iterator *iter = pwait->iter;
2118 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2121 return iter->closed;
2130 if (trace_buffer_iter(iter, iter->cpu_file))
2133 pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2136 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2144 if (iter->snapshot)
2145 iter->array_buffer = &iter->tr->max_buffer;
2166 return -ENOMEM;
2168 selftest->type = type;
2169 list_add(&selftest->list, &postponed_selftests);
2176 struct tracer *saved_tracer = tr->current_trace;
2179 if (!type->selftest || tracing_selftest_disabled)
2192 type->name);
2203 tracing_reset_online_cpus(&tr->array_buffer);
2205 tr->current_trace = type;
2208 if (type->use_max_tr) {
2210 if (tr->ring_buffer_expanded)
2211 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2213 tr->allocated_snapshot = true;
2218 pr_info("Testing tracer %s: ", type->name);
2219 ret = type->selftest(type, tr);
2221 tr->current_trace = saved_tracer;
2226 return -1;
2229 tracing_reset_online_cpus(&tr->array_buffer);
2232 if (type->use_max_tr) {
2233 tr->allocated_snapshot = false;
2236 if (tr->ring_buffer_expanded)
2237 ring_buffer_resize(tr->max_buffer.buffer, 1,
2251 * Tests can take a long time, especially if they are run one after the
2285 ret = run_tracer_selftest(p->type);
2289 p->type->name);
2291 for (t = trace_types; t; t = t->next) {
2292 if (t == p->type) {
2293 *last = t->next;
2296 last = &t->next;
2299 list_del(&p->list);
2319 * register_tracer - register a tracer with the ftrace system.
2329 if (!type->name) {
2331 return -1;
2334 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2336 return -1;
2341 type->name);
2342 return -EPERM;
2347 for (t = trace_types; t; t = t->next) {
2348 if (strcmp(type->name, t->name) == 0) {
2351 type->name);
2352 ret = -1;
2357 if (!type->set_flag)
2358 type->set_flag = &dummy_set_flag;
2359 if (!type->flags) {
2361 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2362 if (!type->flags) {
2363 ret = -ENOMEM;
2366 type->flags->val = 0;
2367 type->flags->opts = dummy_tracer_opt;
2369 if (!type->flags->opts)
2370 type->flags->opts = dummy_tracer_opt;
2373 type->flags->trace = type;
2379 type->next = trace_types;
2389 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2392 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2394 tracing_set_tracer(&global_trace, type->name);
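
/*
 * Hedged sketch: the minimal tracer shape accepted by register_tracer(),
 * patterned after kernel/trace/trace_nop.c. Names are illustrative.
 */
static int example_trace_init(struct trace_array *tr)
{
	return 0;
}

static void example_trace_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name		 = "example",
	.init		 = example_trace_init,
	.reset		 = example_trace_reset,
	.allow_instances = true,
};

/* register_tracer(&example_tracer) from an initcall makes "example"
 * selectable via the current_tracer file. */
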
2407 struct trace_buffer *buffer = buf->buffer;
2423 struct trace_buffer *buffer = buf->buffer;
2433 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2442 struct trace_buffer *buffer = buf->buffer;
2452 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2467 if (!tr->clear_trace)
2469 tr->clear_trace = false;
2470 tracing_reset_online_cpus(&tr->array_buffer);
2472 tracing_reset_online_cpus(&tr->max_buffer);
2495 guard(raw_spinlock_irqsave)(&tr->start_lock);
2496 if (--tr->stop_count) {
2497 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2499 tr->stop_count = 0;
2505 arch_spin_lock(&tr->max_lock);
2507 buffer = tr->array_buffer.buffer;
2512 buffer = tr->max_buffer.buffer;
2517 arch_spin_unlock(&tr->max_lock);
2521 * tracing_start - quick start of the tracer
2536 guard(raw_spinlock_irqsave)(&tr->start_lock);
2537 if (tr->stop_count++)
2541 arch_spin_lock(&tr->max_lock);
2543 buffer = tr->array_buffer.buffer;
2548 buffer = tr->max_buffer.buffer;
2553 arch_spin_unlock(&tr->max_lock);
2557 * tracing_stop - quick stop of the tracer
2582 return current->migration_disabled;
2628 * trace_buffered_event_enable - enable buffering events
2686 * trace_buffered_event_disable - disable buffering events
2702 if (--trace_buffered_event_ref)
2722 * could wrongly decide to use the pointed-to buffer which is now freed.
2740 struct trace_array *tr = trace_file->tr;
2743 *current_rb = tr->array_buffer.buffer;
2745 if (!tr->no_filter_buffering_ref &&
2746 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2753 * (see include/linux/ring-buffer.h for details on
2766 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2789 entry->array[0] = len;
2807 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2831 event_call = fbuffer->trace_file->event_call;
2832 if (!event_call || !event_call->event.funcs ||
2833 !event_call->event.funcs->trace)
2836 file = fbuffer->trace_file;
2837 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2838 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2839 !filter_match_preds(file->filter, fbuffer->entry)))
2842 event = &fbuffer->trace_file->event_call->event;
2845 trace_seq_init(&iter->seq);
2846 iter->ent = fbuffer->entry;
2847 event_call->event.funcs->trace(iter, 0, event);
2848 trace_seq_putc(&iter->seq, 0);
2849 printk("%s", iter->seq.buffer);
2887 struct trace_event_file *file = fbuffer->trace_file;
2889 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2890 fbuffer->entry, &tt))
2897 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2899 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2900 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2950 struct trace_buffer *buffer = tr->array_buffer.buffer;
2962 entry->ip = ip;
2963 entry->parent_ip = parent_ip;
2968 entry->args[i] = ftrace_regs_get_argument(fregs, i);
3018 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3034 size = ARRAY_SIZE(fstack->calls);
3037 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3040 nr_entries = stack_trace_save(fstack->calls, size, skip);
3045 if (tr->ops && tr->ops->trampoline) {
3046 unsigned long tramp_start = tr->ops->trampoline;
3047 unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
3048 unsigned long *calls = fstack->calls;
3064 entry->size = nr_entries;
3065 memcpy(&entry->caller, fstack->calls,
3081 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3090 struct trace_buffer *buffer = tr->array_buffer.buffer;
3115 * trace_dump_stack - record a stack back trace in the trace buffer
3127 __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3142 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3168 entry->tgid = current->tgid;
3169 memset(&entry->caller, 0, sizeof(entry->caller));
3171 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3191 entry->bottom_delta_ts = delta & U32_MAX;
3192 entry->top_delta_ts = (delta >> 32);
3199 struct trace_buffer *buffer = tr->array_buffer.buffer;
3209 delta = ring_buffer_event_time_stamp(buffer, event) -
3210 last_info->ts_last_call;
3213 entry->ip = last_info->ip;
3214 entry->parent_ip = last_info->parent_ip;
3215 entry->count = last_info->count;
3237 if (!trace_percpu_buffer || buffer->nesting >= 4)
3240 buffer->nesting++;
3244 return &buffer->buffer[buffer->nesting - 1][0];
3251 this_cpu_dec(trace_percpu_buffer->nesting);
3263 return -ENOMEM;
3332 * trace_vbprintk - write binary msg to tracing buffer
3371 buffer = tr->array_buffer.buffer;
3378 entry->ip = ip;
3379 entry->fmt = fmt;
3381 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3430 entry->ip = ip;
3432 memcpy(&entry->buf, tbuffer, len + 1);
3451 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3455 * trace_array_printk - Print a message to a specific instance
3481 return -ENOENT;
3487 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3498 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3508 return -ENOENT;
3512 return -EINVAL;
3524 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
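
/*
 * Hedged sketch: printing into a specific instance. For instances the
 * caller did not create via trace_array_get_by_name() (e.g. boot-time
 * instances), trace_array_init_printk() must run first so the
 * temporary print buffers exist.
 */
if (!trace_array_init_printk(tr))
	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
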
3541 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3543 iter->idx++;
3559 (unsigned long)-1 : 0;
3561 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3566 iter->ent_size = ring_buffer_event_length(event);
3569 iter->ent_size = 0;
3577 struct trace_buffer *buffer = iter->array_buffer->buffer;
3580 int cpu_file = iter->cpu_file;
3582 int next_cpu = -1;
3615 next_size = iter->ent_size;
3619 iter->ent_size = next_size;
3641 * iter->tr is NULL when used with tp_printk, which makes
3644 if (!iter->tr || iter->fmt == static_fmt_buf)
3647 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3650 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3651 iter->fmt = tmp;
3665 if ((addr >= (unsigned long)iter->ent) &&
3666 (addr < (unsigned long)iter->ent + iter->ent_size))
3670 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3671 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3685 if (!iter->ent)
3688 trace_event = ftrace_find_event(iter->ent->type);
3693 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3697 if (within_module_core(addr, event->module))
3704 * ignore_event - Check dereferenced fields while writing to the seq buffer
3712 * by the time the user reads the trace. This would cause a bad memory
3718 * If it is found that a field is unsafe, it will write into the @iter->seq
3733 trace_event = ftrace_find_event(iter->ent->type);
3735 seq = &iter->seq;
3738 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3743 if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3753 /* Offsets are from the iter->ent that points to the raw event */
3754 ptr = iter->ent;
3760 if (!field->needs_test)
3763 str = *(const char **)(ptr + field->offset);
3770 * was saved at the time of the event, but may not be
3773 * instead. See samples/trace_events/trace-events-sample.h
3777 trace_event_name(event), field->name)) {
3779 trace_event_name(event), field->name);
3794 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3798 new_fmt = q = iter->fmt;
3800 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3804 q += iter->fmt - new_fmt;
3805 new_fmt = iter->fmt;
3811 if (p[-1] == '%') {
3833 int ent_size = iter->ent_size;
3837 * If called from ftrace_dump(), then the iter->temp buffer
3841 * used to add markers when two consecutive events' time
3844 if (iter->temp == static_temp_buf &&
3850 * call ring_buffer_peek() that may make the contents of iter->ent
3851 * undefined. Need to copy iter->ent now.
3853 if (iter->ent && iter->ent != iter->temp) {
3854 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3855 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3857 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3860 kfree(iter->temp);
3861 iter->temp = temp;
3862 iter->temp_size = iter->ent_size;
3864 memcpy(iter->temp, iter->ent, iter->ent_size);
3865 iter->ent = iter->temp;
3869 iter->ent_size = ent_size;
3877 iter->ent = __find_next_entry(iter, &iter->cpu,
3878 &iter->lost_events, &iter->ts);
3880 if (iter->ent)
3883 return iter->ent ? iter : NULL;
3888 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3889 &iter->lost_events);
3894 struct trace_iterator *iter = m->private;
3898 WARN_ON_ONCE(iter->leftover);
3903 if (iter->idx > i)
3906 if (iter->idx < 0)
3911 while (ent && iter->idx < i)
3914 iter->pos = *pos;
3925 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3939 if (ts >= iter->array_buffer->time_start)
3947 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3956 struct trace_iterator *iter = m->private;
3957 struct trace_array *tr = iter->tr;
3958 int cpu_file = iter->cpu_file;
3964 if (unlikely(tr->current_trace != iter->trace)) {
3965 /* Close iter->trace before switching to the new current tracer */
3966 if (iter->trace->close)
3967 iter->trace->close(iter);
3968 iter->trace = tr->current_trace;
3970 if (iter->trace->open)
3971 iter->trace->open(iter);
3976 if (iter->snapshot && iter->trace->use_max_tr)
3977 return ERR_PTR(-EBUSY);
3980 if (*pos != iter->pos) {
3981 iter->ent = NULL;
3982 iter->cpu = 0;
3983 iter->idx = -1;
3991 iter->leftover = 0;
4000 if (iter->leftover)
4003 l = *pos - 1;
4015 struct trace_iterator *iter = m->private;
4018 if (iter->snapshot && iter->trace->use_max_tr)
4022 trace_access_unlock(iter->cpu_file);
4032 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4036 * ones before the time stamp.
4038 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4039 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4044 ring_buffer_overrun_cpu(buf->buffer, cpu);
4072 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4084 get_total_entries(&tr->array_buffer, &total, &entries);
4091 seq_puts(m, "# _------=> CPU# \n"
4092 "# / _-----=> irqs-off/BH-disabled\n"
4093 "# | / _----=> need-resched \n"
4094 "# || / _---=> hardirq/softirq \n"
4095 "# ||| / _--=> preempt-depth \n"
4096 "# |||| / _-=> migrate-disable \n"
4098 "# cmd pid |||||| time | caller \n"
4108 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4120 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4133 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4134 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4135 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4136 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4137 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4139 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4147 struct array_buffer *buf = iter->array_buffer;
4148 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4149 struct tracer *type = iter->trace;
4152 const char *name = type->name;
4157 name, init_utsname()->release);
4158 seq_puts(m, "# -----------------------------------"
4159 "---------------------------------\n");
4162 nsecs_to_usecs(data->saved_latency),
4165 buf->cpu,
4174 seq_puts(m, "# -----------------\n");
4175 seq_printf(m, "# | task: %.16s-%d "
4177 data->comm, data->pid,
4178 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4179 data->policy, data->rt_priority);
4180 seq_puts(m, "# -----------------\n");
4182 if (data->critical_start) {
4184 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4185 trace_print_seq(m, &iter->seq);
4187 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4188 trace_print_seq(m, &iter->seq);
4197 struct trace_seq *s = &iter->seq;
4198 struct trace_array *tr = iter->tr;
4200 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4203 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4206 if (cpumask_available(iter->started) &&
4207 cpumask_test_cpu(iter->cpu, iter->started))
4210 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4213 if (cpumask_available(iter->started))
4214 cpumask_set_cpu(iter->cpu, iter->started);
4217 if (iter->idx > 1)
4219 iter->cpu);
4224 struct trace_array *tr = iter->tr;
4225 struct trace_seq *s = &iter->seq;
4226 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4230 entry = iter->ent;
4234 event = ftrace_find_event(entry->type);
4236 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4237 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4247 if (tr->trace_flags & TRACE_ITER_FIELDS)
4254 if ((tr->text_delta) &&
4255 event->type > __TRACE_LAST_TYPE)
4258 return event->funcs->trace(iter, sym_flags, event);
4261 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4268 struct trace_array *tr = iter->tr;
4269 struct trace_seq *s = &iter->seq;
4273 entry = iter->ent;
4275 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4277 entry->pid, iter->cpu, iter->ts);
4282 event = ftrace_find_event(entry->type);
4284 return event->funcs->raw(iter, 0, event);
4286 trace_seq_printf(s, "%d ?\n", entry->type);
4293 struct trace_array *tr = iter->tr;
4294 struct trace_seq *s = &iter->seq;
4299 entry = iter->ent;
4301 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4302 SEQ_PUT_HEX_FIELD(s, entry->pid);
4303 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4304 SEQ_PUT_HEX_FIELD(s, iter->ts);
4309 event = ftrace_find_event(entry->type);
4311 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4323 struct trace_array *tr = iter->tr;
4324 struct trace_seq *s = &iter->seq;
4328 entry = iter->ent;
4330 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4331 SEQ_PUT_FIELD(s, entry->pid);
4332 SEQ_PUT_FIELD(s, iter->cpu);
4333 SEQ_PUT_FIELD(s, iter->ts);
4338 event = ftrace_find_event(entry->type);
4339 return event ? event->funcs->binary(iter, 0, event) :
4349 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4350 cpu = iter->cpu_file;
4356 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4368 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4379 struct trace_array *tr = iter->tr;
4380 unsigned long trace_flags = tr->trace_flags;
4383 if (iter->lost_events) {
4384 if (iter->lost_events == (unsigned long)-1)
4385 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4386 iter->cpu);
4388 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4389 iter->cpu, iter->lost_events);
4390 if (trace_seq_has_overflowed(&iter->seq))
4394 if (iter->trace && iter->trace->print_line) {
4395 ret = iter->trace->print_line(iter);
4400 if (iter->ent->type == TRACE_BPUTS &&
4405 if (iter->ent->type == TRACE_BPRINT &&
4410 if (iter->ent->type == TRACE_PRINT &&
4429 struct trace_iterator *iter = m->private;
4430 struct trace_array *tr = iter->tr;
4436 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4439 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4445 struct trace_iterator *iter = m->private;
4446 struct trace_array *tr = iter->tr;
4447 unsigned long trace_flags = tr->trace_flags;
4452 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4462 print_func_help_header_irq(iter->array_buffer,
4465 print_func_help_header(iter->array_buffer, m,
4507 if (iter->tr->allocated_snapshot)
4513 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4528 if (iter->ent == NULL) {
4529 if (iter->tr) {
4530 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4534 if (iter->snapshot && trace_empty(iter))
4536 else if (iter->trace && iter->trace->print_header)
4537 iter->trace->print_header(m);
4541 } else if (iter->leftover) {
4546 ret = trace_print_seq(m, &iter->seq);
4548 /* ret should this time be zero, but you never know */
4549 iter->leftover = ret;
4554 iter->seq.full = 0;
4555 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4557 ret = trace_print_seq(m, &iter->seq);
4563 * -1 otherwise.
4565 iter->leftover = ret;
4577 if (inode->i_cdev) /* See trace_create_cpu_file() */
4578 return (long)inode->i_cdev - 1;
4599 if (iter->fmt != static_fmt_buf)
4600 kfree(iter->fmt);
4602 kfree(iter->temp);
4603 kfree(iter->buffer_iter);
4604 mutex_destroy(&iter->mutex);
4605 free_cpumask_var(iter->started);
4611 struct trace_array *tr = inode->i_private;
4616 return ERR_PTR(-ENODEV);
4620 return ERR_PTR(-ENOMEM);
4622 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4624 if (!iter->buffer_iter)
4628 * trace_find_next_entry() may need to save off iter->ent.
4629 * It will place it into the iter->temp buffer. As most
4632 * allocate a new buffer to adjust for the bigger iter->ent.
4635 iter->temp = kmalloc(128, GFP_KERNEL);
4636 if (iter->temp)
4637 iter->temp_size = 128;
4646 iter->fmt = NULL;
4647 iter->fmt_size = 0;
4650 iter->trace = tr->current_trace;
4652 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4655 iter->tr = tr;
4659 if (tr->current_trace->print_max || snapshot)
4660 iter->array_buffer = &tr->max_buffer;
4663 iter->array_buffer = &tr->array_buffer;
4664 iter->snapshot = snapshot;
4665 iter->pos = -1;
4666 iter->cpu_file = tracing_get_cpu(inode);
4667 mutex_init(&iter->mutex);
4670 if (iter->trace->open)
4671 iter->trace->open(iter);
4674 if (ring_buffer_overruns(iter->array_buffer->buffer))
4675 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4678 if (trace_clocks[tr->clock_id].in_ns)
4679 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4682 * If pause-on-trace is enabled, then stop the trace while
4685 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4688 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4690 iter->buffer_iter[cpu] =
4691 ring_buffer_read_start(iter->array_buffer->buffer,
4696 cpu = iter->cpu_file;
4697 iter->buffer_iter[cpu] =
4698 ring_buffer_read_start(iter->array_buffer->buffer,
4712 return ERR_PTR(-ENOMEM);
4723 filp->private_data = inode->i_private;
4738 struct trace_array *tr = inode->i_private;
4745 filp->private_data = inode->i_private;
4756 struct trace_event_file *file = inode->i_private;
4759 ret = tracing_check_open_get_tr(file->tr);
4766 if (file->flags & EVENT_FILE_FL_FREED) {
4767 trace_array_put(file->tr);
4768 return -ENODEV;
4773 filp->private_data = inode->i_private;
4780 struct trace_event_file *file = inode->i_private;
4782 trace_array_put(file->tr);
4796 struct trace_array *tr = inode->i_private;
4797 struct seq_file *m = file->private_data;
4801 if (!(file->f_mode & FMODE_READ)) {
4807 iter = m->private;
4811 if (iter->buffer_iter[cpu])
4812 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4815 if (iter->trace && iter->trace->close)
4816 iter->trace->close(iter);
4818 if (!iter->snapshot && tr->stop_count)
4834 struct trace_array *tr = inode->i_private;
4842 struct trace_array *tr = inode->i_private;
4851 struct trace_array *tr = inode->i_private;
4860 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4862 struct array_buffer *trace_buf = &tr->array_buffer;
4865 if (tr->current_trace->print_max)
4866 trace_buf = &tr->max_buffer;
4875 if (file->f_mode & FMODE_READ) {
4879 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4880 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4899 if (tr->range_addr_start && t->use_max_tr)
4902 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4910 t = t->next;
4918 struct trace_array *tr = m->private;
4924 t = get_tracer_for_array(tr, t->next);
4931 struct trace_array *tr = m->private;
4956 seq_puts(m, t->name);
4957 if (t->next)
4974 struct trace_array *tr = inode->i_private;
4988 m = file->private_data;
4989 m->private = tr;
4996 struct trace_array *tr = inode->i_private;
5013 if (file->f_mode & FMODE_READ)
5016 file->f_pos = ret = 0;
5042 struct trace_array *tr = file_inode(filp)->i_private;
5047 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5050 return -ENOMEM;
5053 cpumask_pr_args(tr->tracing_cpumask));
5055 return -EINVAL;
5066 return -EINVAL;
5069 arch_spin_lock(&tr->max_lock);
5075 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5077 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5079 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5082 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5084 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5086 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5090 arch_spin_unlock(&tr->max_lock);
5093 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5102 struct trace_array *tr = file_inode(filp)->i_private;
5107 return -EINVAL;
5110 return -ENOMEM;
5141 struct trace_array *tr = m->private;
5147 tracer_flags = tr->current_trace->flags->val;
5148 trace_opts = tr->current_trace->flags->opts;
5151 if (tr->trace_flags & (1 << i))
5171 struct tracer *trace = tracer_flags->trace;
5174 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5179 tracer_flags->val &= ~opts->bit;
5181 tracer_flags->val |= opts->bit;
5188 struct tracer *trace = tr->current_trace;
5189 struct tracer_flags *tracer_flags = trace->flags;
5193 for (i = 0; tracer_flags->opts[i].name; i++) {
5194 opts = &tracer_flags->opts[i];
5196 if (strcmp(cmp, opts->name) == 0)
5197 return __set_tracer_option(tr, trace->flags, opts, neg);
5200 return -EINVAL;
5206 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5207 return -1;
5221 if (!!(tr->trace_flags & mask) == !!enabled)
5225 if (tr->current_trace->flag_changed)
5226 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5227 return -EINVAL;
5238 return -EINVAL;
5252 tr->trace_flags |= mask;
5254 tr->trace_flags &= ~mask;
5262 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5263 return -ENOMEM;
5276 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5278 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5309 ret = match_string(trace_options, -1, cmp);
5345 *(buf - 1) = ',';
5353 struct seq_file *m = filp->private_data;
5354 struct trace_array *tr = m->private;
5359 return -EINVAL;
5362 return -EFAULT;
5377 struct trace_array *tr = inode->i_private;
5384 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5400 "tracing mini-HOWTO:\n\n"
5404 "\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5406 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5408 " trace\t\t\t- The static contents of the buffer\n"
5410 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5411 " current_tracer\t- function and latency tracers\n"
5412 " available_tracers\t- list of configured tracers for current_tracer\n"
5413 " error_log\t- error log for failed commands (that support it)\n"
5414 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5415 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5416 " trace_clock\t\t- change the clock used to order events\n"
5420 " uptime: Jiffy counter from time of boot\n"
5423 " x86-tsc: TSC cycle counter\n"
5425 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5426 " delta: Delta difference against a buffer-wide timestamp\n"
5428 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5429 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5430 " tracing_cpumask\t- Limit which CPUs to trace\n"
5431 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5432 "\t\t\t Remove sub-buffer with rmdir\n"
5433 " trace_options\t\t- Set format or modify how tracing happens\n"
5436 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5438 "\n available_filter_functions - list of functions that can be filtered on\n"
5439 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5441 "\t accepts: func_full_name or glob-matching-pattern\n"
5443 "\t Format: :mod:<module-name>\n"
5460 "\t The first one will disable tracing every time do_fault is hit\n"
5462 "\t The first time do trap is hit and it disables tracing, the\n"
5470 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5476 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5478 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5482 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5483 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5484 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5487 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5492 " stack_trace\t\t- Shows the max stack trace when active\n"
5493 " stack_max_size\t- Shows current max stack size that was traced\n"
5497 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5502 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5506 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5510 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5515 "\t accepts: event-definitions (one definition per line)\n"
5521 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5527 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5528 "\t -:[<group>/][<event>]\n"
5537 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5541 "\t <argname>[->field[->field|.field...]],\n"
5546 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5549 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5550 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5557 "\t of the <attached-group>/<attached-event>.\n"
5559 " set_event\t\t- Enables events by name written into it\n"
5561 " events/\t\t- Directory containing all trace event subsystems:\n"
5562 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5563 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5564 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5566 " filter\t\t- If set, only events passing filter are traced\n"
5567 " events/<system>/<event>/\t- Directory containing control files for\n"
5569 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5570 " filter\t\t- If set, only events passing filter are traced\n"
5571 " trigger\t\t- If set, a command to perform when event is hit\n"
5593 "\t The first disables tracing every time block_unplug is hit.\n"
5605 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5617 "\t common_timestamp - to record current timestamp\n"
5618 "\t common_cpu - to record the CPU the event happened on\n"
5621 "\t - a reference to a field e.g. x=current_timestamp,\n"
5622 "\t - a reference to another variable e.g. y=$x,\n"
5623 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5624 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5626 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5655 "\t .sym-offset display an address as a symbol and offset\n"
5662 "\t .graph display a bar-graph of a value\n\n"
5674 "\t already-attached hist trigger. The syntax is analogous to\n"
5680 "\t onmatch(matching.event) - invoke on addition or update\n"
5681 "\t onmax(var) - invoke if var exceeds current max\n"
5682 "\t onchange(var) - invoke action if var changes\n\n"
5684 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5685 "\t save(field,...) - save current event fields\n"
5687 "\t snapshot() - snapshot the trace buffer\n\n"
5690 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5715 if (!ptr->map.eval_string) {
5716 if (ptr->tail.next) {
5717 ptr = ptr->tail.next;
5773 ptr->map.eval_string, ptr->map.eval_value,
5774 ptr->map.system);
5808 return ptr + ptr->head.length + 1;
5841 if (!ptr->tail.next)
5843 ptr = ptr->tail.next;
5846 ptr->tail.next = map_array;
5848 map_array->head.mod = mod;
5849 map_array->head.length = len;
5853 map_array->map = **map;
5900 struct trace_array *tr = filp->private_data;
5905 r = sprintf(buf, "%s\n", tr->current_trace->name);
5913 tracing_reset_online_cpus(&tr->array_buffer);
5914 return t->init(tr);
5922 per_cpu_ptr(buf->data, cpu)->entries = val;
5928 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5930 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5943 ret = ring_buffer_resize(trace_buf->buffer,
5944 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5947 per_cpu_ptr(trace_buf->data, cpu)->entries =
5948 per_cpu_ptr(size_buf->data, cpu)->entries;
5951 ret = ring_buffer_resize(trace_buf->buffer,
5952 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5954 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5955 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5975 if (!tr->array_buffer.buffer)
5981 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5986 if (!tr->allocated_snapshot)
5989 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5991 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5992 &tr->array_buffer, cpu);
6014 update_buffer_entries(&tr->max_buffer, cpu);
6019 update_buffer_entries(&tr->array_buffer, cpu);
6033 return -EINVAL;
6061 return addr - ent->mod_addr;
6065 * trace_adjust_address() - Adjust prev boot address to current address.
6078 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6081 /* tr->module_delta must be protected by rcu. */
6083 tscratch = tr->scratch;
6085 module_delta = READ_ONCE(tr->module_delta);
6086 if (!module_delta || !tscratch->nr_entries ||
6087 tscratch->entries[0].mod_addr > addr) {
6088 raddr = addr + tr->text_delta;
6094 nr_entries = tscratch->nr_entries;
6096 tscratch->entries[nr_entries - 1].mod_addr < addr)
6097 idx = nr_entries - 1;
6100 tscratch->entries,
6101 nr_entries - 1,
6102 sizeof(tscratch->entries[0]),
6105 idx = entry - tscratch->entries;
6108 return addr + module_delta->delta[idx];
6119 tscratch = tr->scratch;
6121 return -1;
6122 size = tr->scratch_size;
6124 if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
6125 return -1;
6127 entry = &tscratch->entries[tscratch->nr_entries];
6129 tscratch->nr_entries++;
6131 entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
6132 strscpy(entry->mod_name, mod->name);
6148 if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
6151 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6155 tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
6158 if (tr->scratch) {
6159 struct trace_scratch *tscratch = tr->scratch;
6161 tscratch->clock_id = tr->clock_id;
6162 memset(tscratch->entries, 0,
6163 flex_array_size(tscratch, entries, tscratch->nr_entries));
6164 tscratch->nr_entries = 0;
6176 tracing_reset_all_cpus(&tr->array_buffer);
6179 tr->text_delta = 0;
6181 if (!tr->scratch)
6184 tscratch = tr->scratch;
6185 module_delta = READ_ONCE(tr->module_delta);
6186 WRITE_ONCE(tr->module_delta, NULL);
6190 tscratch->text_addr = (unsigned long)_text;
6194 * tracing_update_buffers - used by tracing facility to expand ring buffers
6212 if (!tr->ring_buffer_expanded)
6229 if (tr->current_trace == &nop_trace)
6232 tr->current_trace->enabled--;
6234 if (tr->current_trace->reset)
6235 tr->current_trace->reset(tr);
6237 tr->current_trace = &nop_trace;
6245 if (!tr->dir && !(tr->flags & TRACE_ARRAY_FL_GLOBAL))
6267 if (!tr->ring_buffer_expanded) {
6275 for (t = trace_types; t; t = t->next) {
6276 if (strcmp(t->name, buf) == 0)
6280 return -EINVAL;
6282 if (t == tr->current_trace)
6286 if (t->use_max_tr) {
6288 arch_spin_lock(&tr->max_lock);
6289 ret = tr->cond_snapshot ? -EBUSY : 0;
6290 arch_spin_unlock(&tr->max_lock);
6297 if (system_state < SYSTEM_RUNNING && t->noboot) {
6299 t->name);
6300 return -EINVAL;
6305 return -EINVAL;
6308 if (tr->trace_ref)
6309 return -EBUSY;
6313 tr->current_trace->enabled--;
6315 if (tr->current_trace->reset)
6316 tr->current_trace->reset(tr);
6319 had_max_tr = tr->current_trace->use_max_tr;
6322 tr->current_trace = &nop_trace;
6324 if (had_max_tr && !t->use_max_tr) {
6337 if (!had_max_tr && t->use_max_tr) {
6343 tr->current_trace = &nop_trace;
6346 if (t->init) {
6350 if (t->use_max_tr)
6357 tr->current_trace = t;
6358 tr->current_trace->enabled++;
6368 struct trace_array *tr = filp->private_data;
6380 return -EFAULT;
6403 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6436 struct trace_array *tr = filp->private_data;
6444 if (tr->current_trace->update_thresh) {
6445 ret = tr->current_trace->update_thresh(tr);
6459 struct trace_array *tr = filp->private_data;
6461 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6468 struct trace_array *tr = filp->private_data;
6470 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6478 if (cpumask_empty(tr->pipe_cpumask)) {
6479 cpumask_setall(tr->pipe_cpumask);
6482 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6483 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6486 return -EBUSY;
6492 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6493 cpumask_clear(tr->pipe_cpumask);
6495 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6496 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6502 struct trace_array *tr = inode->i_private;
6520 ret = -ENOMEM;
6524 trace_seq_init(&iter->seq);
6525 iter->trace = tr->current_trace;
6527 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6528 ret = -ENOMEM;
6533 cpumask_setall(iter->started);
6535 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6536 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6539 if (trace_clocks[tr->clock_id].in_ns)
6540 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6542 iter->tr = tr;
6543 iter->array_buffer = &tr->array_buffer;
6544 iter->cpu_file = cpu;
6545 mutex_init(&iter->mutex);
6546 filp->private_data = iter;
6548 if (iter->trace->pipe_open)
6549 iter->trace->pipe_open(iter);
6553 tr->trace_ref++;
6568 struct trace_iterator *iter = file->private_data;
6569 struct trace_array *tr = inode->i_private;
6572 tr->trace_ref--;
6574 if (iter->trace->pipe_close)
6575 iter->trace->pipe_close(iter);
6576 close_pipe_on_cpu(tr, iter->cpu_file);
6590 struct trace_array *tr = iter->tr;
6593 if (trace_buffer_iter(iter, iter->cpu_file))
6596 if (tr->trace_flags & TRACE_ITER_BLOCK)
6602 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6603 filp, poll_table, iter->tr->buffer_percent);
6609 struct trace_iterator *iter = filp->private_data;
6614 /* Must be called with iter->mutex held. */
6617 struct trace_iterator *iter = filp->private_data;
6622 if ((filp->f_flags & O_NONBLOCK)) {
6623 return -EAGAIN;
6633 * iter->pos will be 0 if we haven't read anything.
6635 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6638 mutex_unlock(&iter->mutex);
6642 mutex_lock(&iter->mutex);
6653 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6656 if (!ring_buffer_empty(tr->array_buffer.buffer))
6660 * If the buffer contains the last boot data and all per-cpu
6674 struct trace_iterator *iter = filp->private_data;
6682 guard(mutex)(&iter->mutex);
6685 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6686 if (sret != -EBUSY)
6689 trace_seq_init(&iter->seq);
6691 if (iter->trace->read) {
6692 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6698 if (update_last_data_if_empty(iter->tr))
6710 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6714 cpumask_clear(iter->started);
6715 trace_seq_init(&iter->seq);
6718 trace_access_lock(iter->cpu_file);
6721 int save_len = iter->seq.seq.len;
6727 * trace_seq_to_user() will return -EBUSY because save_len == 0,
6729 * this event next time, resulting in an infinite loop.
6732 iter->seq.full = 0;
6733 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6739 iter->seq.seq.len = save_len;
6745 if (trace_seq_used(&iter->seq) >= cnt)
6753 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6754 iter->ent->type);
6756 trace_access_unlock(iter->cpu_file);
6760 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6761 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6762 trace_seq_init(&iter->seq);
6768 if (sret == -EBUSY)
6777 __free_page(spd->pages[idx]);
6787 /* Seq buffer is page-sized, exactly what we need. */
6789 save_len = iter->seq.seq.len;
6792 if (trace_seq_has_overflowed(&iter->seq)) {
6793 iter->seq.seq.len = save_len;
6799 * be set if the iter->seq overflowed. But check it
6803 iter->seq.seq.len = save_len;
6807 count = trace_seq_used(&iter->seq) - save_len;
6810 iter->seq.seq.len = save_len;
6816 rem -= count;
6819 iter->ent = NULL;
6835 struct trace_iterator *iter = filp->private_data;
6849 return -ENOMEM;
6851 mutex_lock(&iter->mutex);
6853 if (iter->trace->splice_read) {
6854 ret = iter->trace->splice_read(iter, filp,
6864 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6865 ret = -EFAULT;
6870 trace_access_lock(iter->cpu_file);
6881 ret = trace_seq_to_buffer(&iter->seq,
6883 min((size_t)trace_seq_used(&iter->seq),
6892 trace_seq_init(&iter->seq);
6895 trace_access_unlock(iter->cpu_file);
6897 mutex_unlock(&iter->mutex);
6910 mutex_unlock(&iter->mutex);
6919 struct trace_array *tr = inode->i_private;
6937 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6938 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6945 if (!tr->ring_buffer_expanded)
6954 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6967 struct trace_array *tr = inode->i_private;
6977 return -EINVAL;
6994 struct trace_array *tr = filp->private_data;
7001 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7002 if (!tr->ring_buffer_expanded)
7005 if (tr->ring_buffer_expanded)
7018 struct trace_array *tr = m->private;
7019 struct trace_scratch *tscratch = tr->scratch;
7028 if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
7032 index--;
7034 if (index >= tscratch->nr_entries)
7037 return &tscratch->entries[index];
7054 struct trace_scratch *tscratch = tr->scratch;
7063 if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
7064 seq_printf(m, "%lx\t[kernel]\n", tscratch->text_addr);
7071 struct trace_array *tr = m->private;
7079 seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name);
7092 struct trace_array *tr = inode->i_private;
7106 m = file->private_data;
7107 m->private = tr;
7114 struct trace_array *tr = inode->i_private;
7122 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
7145 struct trace_array *tr = inode->i_private;
7148 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7175 buffer = tr->array_buffer.buffer;
7184 cnt = ring_buffer_max_event_size(buffer) - meta_size;
7187 return -EBADF;
7192 return -EBADF;
7196 entry->ip = ip;
7197 memcpy(&entry->buf, buf, cnt);
7200 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7202 entry->buf[cnt] = '\0';
7203 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7206 if (entry->buf[cnt - 1] != '\n') {
7207 entry->buf[cnt] = '\n';
7208 entry->buf[cnt + 1] = '\0';
7210 entry->buf[cnt] = '\0';
7217 event_triggers_post_call(tr->trace_marker_file, tt);
7241 buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
7244 free_percpu(tinfo->tbuf);
7257 trace_user_buffer->ref++;
7263 return -ENOMEM;
7265 tinfo->tbuf = alloc_percpu(struct trace_user_buf);
7266 if (!tinfo->tbuf) {
7268 return -ENOMEM;
7271 tinfo->ref = 1;
7275 per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL;
7283 return -ENOMEM;
7285 per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf;
7304 if (--tinfo->ref)
7317 char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
7389 struct trace_array *tr = filp->private_data;
7390 ssize_t written = -ENODEV;
7396 return -EINVAL;
7398 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7399 return -EINVAL;
7402 return -EINVAL;
7412 return -EFAULT;
7444 /* cnt includes both the entry->id and the data behind it. */
7445 size = struct_size(entry, buf, cnt - sizeof(entry->id));
7447 buffer = tr->array_buffer.buffer;
7450 return -EINVAL;
7456 return -EBADF;
7459 unsafe_memcpy(&entry->id, buf, cnt,
7474 struct trace_array *tr = filp->private_data;
7475 ssize_t written = -ENODEV;
7480 return -EINVAL;
7482 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7483 return -EINVAL;
7487 return -EINVAL;
7494 return -EFAULT;
7498 return -EINVAL;
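
/*
 * Hedged sketch (userspace): annotating the trace through the
 * tracing_mark_write() handler above. Assumes tracefs is mounted at
 * /sys/kernel/tracing.
 */
int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

if (fd >= 0) {
	write(fd, "frame start\n", 12);	/* appears as a print event */
	close(fd);
}
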
7538 struct trace_array *tr = m->private;
7544 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7545 i == tr->clock_id ? "]" : "");
7560 return -EINVAL;
7564 tr->clock_id = i;
7566 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7572 tracing_reset_online_cpus(&tr->array_buffer);
7575 if (tr->max_buffer.buffer)
7576 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7577 tracing_reset_online_cpus(&tr->max_buffer);
7580 if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) {
7581 struct trace_scratch *tscratch = tr->scratch;
7583 tscratch->clock_id = i;
7592 struct seq_file *m = filp->private_data;
7593 struct trace_array *tr = m->private;
7599 return -EINVAL;
7602 return -EFAULT;
7619 struct trace_array *tr = inode->i_private;
7626 ret = single_open(file, tracing_clock_show, inode->i_private);
7635 struct trace_array *tr = m->private;
7639 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7649 struct trace_array *tr = inode->i_private;
7656 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7678 if (set && tr->no_filter_buffering_ref++)
7682 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7683 return -EINVAL;
7685 --tr->no_filter_buffering_ref;
7702 struct trace_array *tr = inode->i_private;
7711 if (file->f_mode & FMODE_READ) {
7717 ret = -ENOMEM;
7728 iter->tr = tr;
7729 iter->array_buffer = &tr->max_buffer;
7730 iter->cpu_file = tracing_get_cpu(inode);
7731 m->private = iter;
7732 file->private_data = m;
7750 struct seq_file *m = filp->private_data;
7751 struct trace_iterator *iter = m->private;
7752 struct trace_array *tr = iter->tr;
7766 if (tr->current_trace->use_max_tr)
7767 return -EBUSY;
7770 arch_spin_lock(&tr->max_lock);
7771 if (tr->cond_snapshot)
7772 ret = -EBUSY;
7773 arch_spin_unlock(&tr->max_lock);
7780 if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7781 return -EINVAL;
7782 if (tr->allocated_snapshot)
7786 /* Only allow per-cpu swap if the ring buffer supports it */
7788 if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7789 return -EINVAL;
7791 if (tr->allocated_snapshot)
7792 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7793 &tr->array_buffer, iter->cpu_file);
7800 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7805 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7811 if (tr->allocated_snapshot) {
7812 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7813 tracing_reset_online_cpus(&tr->max_buffer);
7815 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7830 struct seq_file *m = file->private_data;
7835 if (file->f_mode & FMODE_READ)
7840 kfree(m->private);
7863 info = filp->private_data;
7865 if (info->iter.trace->use_max_tr) {
7867 return -EBUSY;
7870 info->iter.snapshot = true;
7871 info->iter.array_buffer = &info->iter.tr->max_buffer;
7993 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
8000 * The filp->private_data must point to a trace_min_max_param structure that
8001 * defines where to write the value, the min and the max acceptable values,
8007 struct trace_min_max_param *param = filp->private_data;
8012 return -EFAULT;
8018 if (param->lock)
8019 mutex_lock(param->lock);
8021 if (param->min && val < *param->min)
8022 err = -EINVAL;
8024 if (param->max && val > *param->max)
8025 err = -EINVAL;
8028 *param->val = val;
8030 if (param->lock)
8031 mutex_unlock(param->lock);
8040 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
8047 * The filp->private_data must point to a trace_min_max_param struct with valid
8053 struct trace_min_max_param *param = filp->private_data;
8059 return -EFAULT;
8061 val = *param->val;
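/*
 * Editor's sketch (not part of trace.c): how a tracer typically wires up
 * the two helpers above.  A trace_min_max_param bundles the value with
 * its optional bounds and lock, and trace_min_max_fops (declared in
 * trace.h) supplies the read/write handlers.  The names below
 * ("my_period_us" etc.) are hypothetical; the pattern follows
 * kernel/trace/trace_osnoise.c.
 */
static DEFINE_MUTEX(my_param_lock);
static u64 my_period_us  = 1000;
static u64 my_period_min = 100;
static u64 my_period_max = 10000000;

static struct trace_min_max_param my_period = {
	.lock	= &my_param_lock,	/* optional: serializes updates */
	.val	= &my_period_us,
	.min	= &my_period_min,	/* NULL would mean "no lower bound" */
	.max	= &my_period_max,	/* NULL would mean "no upper bound" */
};

/* then, during init:
 *	trace_create_file("period_us", TRACE_MODE_WRITE, parent,
 *			  &my_period, &trace_min_max_fops);
 */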
8083 const char **errs; /* ptr to loc-specific array of err strings */
8084 u8 type; /* index into errs -> specific err string */
8104 return ERR_PTR(-ENOMEM);
8106 err->cmd = kzalloc(len, GFP_KERNEL);
8107 if (!err->cmd) {
8109 return ERR_PTR(-ENOMEM);
8117 kfree(err->cmd);
8127 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8129 if (PTR_ERR(err) != -ENOMEM)
8130 tr->n_err_log_entries++;
8136 return ERR_PTR(-ENOMEM);
8137 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8138 kfree(err->cmd);
8139 err->cmd = cmd;
8140 list_del(&err->list);
8146  * err_pos - find the position of a string within a command for error caret placement
8166 return found - cmd;
8172 * tracing_log_err - write an error to the tracing error log
8176 * @errs: The array of loc-specific static error strings
8193  * produce a static error string; the string is not copied when the
8194  * error is logged, only a pointer to it is saved. See
8213 if (PTR_ERR(err) == -ENOMEM)
8216 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8217 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8219 err->info.errs = errs;
8220 err->info.type = type;
8221 err->info.pos = pos;
8222 err->info.ts = local_clock();
8224 list_add_tail(&err->list, &tr->err_log);
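/*
 * Editor's sketch (not part of trace.c): a typical caller.  The error
 * table must be static for the lifetime of the kernel, as the kerneldoc
 * above requires; "foo" and its strings are hypothetical, and the
 * calling convention mirrors kernel/trace/trace_events_hist.c.
 */
enum { FOO_ERR_INVAL, FOO_ERR_DUP };
static const char *foo_err_text[] = { "Invalid argument", "Duplicate name" };

static void foo_complain(struct trace_array *tr, char *cmd, const char *bad)
{
	/* err_pos() locates @bad within @cmd so the log can caret it */
	tracing_log_err(tr, "foo", cmd, foo_err_text,
			FOO_ERR_INVAL, err_pos(cmd, bad));
}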
8233 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8234 list_del(&err->list);
8238 tr->n_err_log_entries = 0;
8243 struct trace_array *tr = m->private;
8247 return seq_list_start(&tr->err_log, *pos);
8252 struct trace_array *tr = m->private;
8254 return seq_list_next(v, &tr->err_log, pos);
8266 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8278 const char *err_text = err->info.errs[err->info.type];
8279 u64 sec = err->info.ts;
8284 err->loc, err_text);
8285 seq_printf(m, "%s", err->cmd);
8286 tracing_err_log_show_pos(m, err->info.pos);
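/*
 * Editor's note: the seq_printf calls above produce error_log entries of
 * the form shown in Documentation/trace/ftrace.rst, e.g.:
 *
 *   [  116.143563] hist:sched:sched_switch: error: Couldn't find field
 *     Command: hist:keys=next_pid:vals=hitcount:sort=next_pidx
 *                                                    ^
 */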
8301 struct trace_array *tr = inode->i_private;
8309 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8312 if (file->f_mode & FMODE_READ) {
8315 struct seq_file *m = file->private_data;
8316 m->private = tr;
8333 struct trace_array *tr = inode->i_private;
8337 if (file->f_mode & FMODE_READ)
8353 struct trace_array *tr = inode->i_private;
8364 return -ENOMEM;
8369 info->iter.tr = tr;
8370 info->iter.cpu_file = tracing_get_cpu(inode);
8371 info->iter.trace = tr->current_trace;
8372 info->iter.array_buffer = &tr->array_buffer;
8373 info->spare = NULL;
8375 info->read = (unsigned int)-1;
8377 filp->private_data = info;
8379 tr->trace_ref++;
8393 struct ftrace_buffer_info *info = filp->private_data;
8394 struct trace_iterator *iter = &info->iter;
8403 struct ftrace_buffer_info *info = filp->private_data;
8404 struct trace_iterator *iter = &info->iter;
8414 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8415 return -EBUSY;
8418 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8421 if (info->spare) {
8422 if (page_size != info->spare_size) {
8423 ring_buffer_free_read_page(iter->array_buffer->buffer,
8424 info->spare_cpu, info->spare);
8425 info->spare = NULL;
8429 if (!info->spare) {
8430 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8431 iter->cpu_file);
8432 if (IS_ERR(info->spare)) {
8433 ret = PTR_ERR(info->spare);
8434 info->spare = NULL;
8436 info->spare_cpu = iter->cpu_file;
8437 info->spare_size = page_size;
8440 if (!info->spare)
8444 if (info->read < page_size)
8448 trace_access_lock(iter->cpu_file);
8449 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8450 info->spare,
8452 iter->cpu_file, 0);
8453 trace_access_unlock(iter->cpu_file);
8456 if (trace_empty(iter) && !iter->closed) {
8457 if (update_last_data_if_empty(iter->tr))
8460 if ((filp->f_flags & O_NONBLOCK))
8461 return -EAGAIN;
8472 info->read = 0;
8474 size = page_size - info->read;
8477 trace_data = ring_buffer_read_page_data(info->spare);
8478 ret = copy_to_user(ubuf, trace_data + info->read, size);
8480 return -EFAULT;
8482 size -= ret;
8485 info->read += size;
8492 struct ftrace_buffer_info *info = file->private_data;
8493 struct trace_iterator *iter = &info->iter;
8495 iter->closed = true;
8497 (void)atomic_fetch_inc_release(&iter->wait_index);
8499 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8506 struct ftrace_buffer_info *info = file->private_data;
8507 struct trace_iterator *iter = &info->iter;
8511 iter->tr->trace_ref--;
8513 __trace_array_put(iter->tr);
8515 if (info->spare)
8516 ring_buffer_free_read_page(iter->array_buffer->buffer,
8517 info->spare_cpu, info->spare);
8532 if (!refcount_dec_and_test(&ref->refcount))
8534 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8541 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8544 buf->private = 0;
8550 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8552 if (refcount_read(&ref->refcount) > INT_MAX/2)
8555 refcount_inc(&ref->refcount);
8572 (struct buffer_ref *)spd->partial[i].private;
8575 spd->partial[i].private = 0;
8583 struct ftrace_buffer_info *info = file->private_data;
8584 struct trace_iterator *iter = &info->iter;
8601 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8602 return -EBUSY;
8605 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8606 if (*ppos & (page_size - 1))
8607 return -EINVAL;
8609 if (len & (page_size - 1)) {
8611 return -EINVAL;
8612 len &= (~(page_size - 1));
8616 return -ENOMEM;
8619 trace_access_lock(iter->cpu_file);
8620 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8622 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8628 ret = -ENOMEM;
8632 refcount_set(&ref->refcount, 1);
8633 ref->buffer = iter->array_buffer->buffer;
8634 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8635 if (IS_ERR(ref->page)) {
8636 ret = PTR_ERR(ref->page);
8637 ref->page = NULL;
8641 ref->cpu = iter->cpu_file;
8643 r = ring_buffer_read_page(ref->buffer, ref->page,
8644 len, iter->cpu_file, 1);
8646 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8647 ref->page);
8652 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8661 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8664 trace_access_unlock(iter->cpu_file);
8676 ret = -EAGAIN;
8677 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8680 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8685 if (!tracer_tracing_is_on(iter->tr))
8688		/* Iterate one more time to collect any new data, then exit */
8703 struct ftrace_buffer_info *info = file->private_data;
8704 struct trace_iterator *iter = &info->iter;
8708 if (!(file->f_flags & O_NONBLOCK)) {
8709 err = ring_buffer_wait(iter->array_buffer->buffer,
8710 iter->cpu_file,
8711 iter->tr->buffer_percent,
8717 return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8718 iter->cpu_file);
8720 return -ENOTTY;
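/*
 * Editor's sketch (userspace side, not part of trace.c): a minimal
 * consumer of a memory-mapped per-cpu ring buffer, following
 * Documentation/trace/ring-buffer-map.rst.  Compile separately with
 * <fcntl.h>, <unistd.h>, <sys/ioctl.h>, <sys/mman.h> and
 * <linux/trace_mmap.h>; guarded out here since this is kernel code.
 */
#if 0
static int map_cpu0_reader(void)
{
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	struct trace_buffer_meta *meta;

	if (fd < 0)
		return -1;
	/* the first page of the mapping is the meta page */
	meta = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
	if (meta == MAP_FAILED)
		return -1;
	/* ask the kernel to swap in a fresh reader page (handled above) */
	return ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);
}
#endif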
8730 (void)atomic_fetch_inc_release(&iter->wait_index);
8732 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8747 spin_lock(&tr->snapshot_trigger_lock);
8749 if (tr->snapshot || tr->mapped == UINT_MAX)
8750 err = -EBUSY;
8752 tr->mapped++;
8754 spin_unlock(&tr->snapshot_trigger_lock);
8756 /* Wait for update_max_tr() to observe iter->tr->mapped */
8757 if (tr->mapped == 1)
8765 spin_lock(&tr->snapshot_trigger_lock);
8766 if (!WARN_ON(!tr->mapped))
8767 tr->mapped--;
8768 spin_unlock(&tr->snapshot_trigger_lock);
8777 struct ftrace_buffer_info *info = vma->vm_file->private_data;
8778 struct trace_iterator *iter = &info->iter;
8780 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8781 put_snapshot_map(iter->tr);
8790 struct ftrace_buffer_info *info = filp->private_data;
8791 struct trace_iterator *iter = &info->iter;
8795 if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
8796 return -ENODEV;
8798 ret = get_snapshot_map(iter->tr);
8802 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8804 put_snapshot_map(iter->tr);
8806 vma->vm_ops = &tracing_buffers_vmops;
8827 struct trace_array *tr = inode->i_private;
8828 struct array_buffer *trace_buf = &tr->array_buffer;
8837 return -ENOMEM;
8841 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8844 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8847 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8850 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8853 if (trace_clocks[tr->clock_id].in_ns) {
8855 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8860 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8866 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8869 ring_buffer_time_stamp(trace_buf->buffer));
8872 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8875 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8879 s->buffer, trace_seq_used(s));
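/*
 * Editor's note: the handler above backs per_cpu/cpuN/stats, emitting
 * one "name: value" pair per counter (values below illustrative):
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5220
 *   oldest event ts: 115.448822
 *   now ts: 146.948203
 *   dropped events: 0
 *   read events: 129
 */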
8908 return -ENOMEM;
8912 "ftrace boot update time = %llu (ns)\n"
8913 "ftrace module total update time = %llu (ns)\n",
8957 (*count)--;
8994 return -ENOMEM;
9034 void *count = (void *)-1;
9039 return -ENODEV;
9043 return -EINVAL;
9099 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
9102 if (WARN_ON(!tr->dir))
9103 return ERR_PTR(-ENODEV);
9106 return tr->dir;
9113 if (tr->percpu_dir)
9114 return tr->percpu_dir;
9120 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
9122 MEM_FAIL(!tr->percpu_dir,
9125 return tr->percpu_dir;
9135 d_inode(ret)->i_cdev = (void *)(cpu + 1);
9173 if (tr->range_addr_start)
9177 if (!tr->range_addr_start) {
9196 struct trace_option_dentry *topt = filp->private_data;
9199 if (topt->flags->val & topt->opt->bit)
9211 struct trace_option_dentry *topt = filp->private_data;
9220 return -EINVAL;
9222 if (!!(topt->flags->val & topt->opt->bit) != val) {
9224 ret = __set_tracer_option(topt->tr, topt->flags,
9225 topt->opt, !val);
9237 struct trace_option_dentry *topt = inode->i_private;
9240 ret = tracing_check_open_get_tr(topt->tr);
9244 filp->private_data = inode->i_private;
9250 struct trace_option_dentry *topt = file->private_data;
9252 trace_array_put(topt->tr);
9283 * ptr - idx == &index[0]
9293 *ptr = container_of(data - *pindex, struct trace_array,
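/*
 * Editor's note: trace_flags_index[] stores i at index i (see the init
 * loop later in this file), so passing &tr->trace_flags_index[i] as file
 * private data encodes both the trace_array and the flag bit in one
 * pointer: *pindex recovers i, subtracting i recovers &index[0], and
 * container_of() then yields the owning trace_array.
 */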
9301 void *tr_index = filp->private_data;
9308 if (tr->trace_flags & (1 << index))
9320 void *tr_index = filp->private_data;
9333 return -EINVAL;
9376 if (tr->options)
9377 return tr->options;
9383 tr->options = tracefs_create_dir("options", d_tracer);
9384 if (!tr->options) {
9389 return tr->options;
9404 topt->flags = flags;
9405 topt->opt = opt;
9406 topt->tr = tr;
9408 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9426 flags = tracer->flags;
9428 if (!flags || !flags->opts)
9438 for (i = 0; i < tr->nr_topts; i++) {
9440 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9444 opts = flags->opts;
9453 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9460 tr->topts = tr_topts;
9461 tr->topts[tr->nr_topts].tracer = tracer;
9462 tr->topts[tr->nr_topts].topts = topts;
9463 tr->nr_topts++;
9485 (void *)&tr->trace_flags_index[index],
9510 struct trace_array *tr = filp->private_data;
9524 struct trace_array *tr = filp->private_data;
9525 struct trace_buffer *buffer = tr->array_buffer.buffer;
9539 if (tr->current_trace->start)
9540 tr->current_trace->start(tr);
9543 if (tr->current_trace->stop)
9544 tr->current_trace->stop(tr);
9567 struct trace_array *tr = filp->private_data;
9571 r = tr->buffer_percent;
9581 struct trace_array *tr = filp->private_data;
9590 return -EINVAL;
9592 tr->buffer_percent = val;
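/*
 * Editor's note: buffer_percent controls when blocked readers of
 * trace_pipe_raw are woken: 0 wakes them as soon as any data is
 * available, 100 only once the buffer is full.  The default, set later
 * in this file when the instance is created, is 50.
 */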
9610 struct trace_array *tr = filp->private_data;
9616 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9628 struct trace_array *tr = filp->private_data;
9642 order = fls(pages - 1);
9646 return -EINVAL;
9651 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9655 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9661 if (!tr->allocated_snapshot)
9664 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9667 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
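/*
 * Editor's note (worked example): buffer_subbuf_size_kb is rounded up to
 * a power-of-two page count.  On a 4K-page system a request of 10KB
 * gives pages = DIV_ROUND_UP(10 << 10, 4096) = 3, so order =
 * fls(3 - 1) = 2, i.e. a 16KB (4-page) sub-buffer.
 */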
9718 tscratch = tr->scratch;
9719 module_delta = READ_ONCE(tr->module_delta);
9720 for (i = 0; i < tscratch->nr_entries; i++) {
9721 entry = &tscratch->entries[i];
9722 if (strcmp(mod->name, entry->mod_name))
9724 if (mod->state == MODULE_STATE_GOING)
9725 module_delta->delta[i] = 0;
9727 module_delta->delta[i] = (unsigned long)mod->mem[MOD_TEXT].base
9728 - entry->mod_addr;
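/*
 * Editor's note: for a persistent (previous-boot) buffer, the text
 * addresses recorded in the scratch area are only meaningful relative to
 * where each module was loaded last boot; delta[i] rebases them to this
 * boot's load address, or to 0 while the module is going away.
 */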
9745 return e1->mod_addr > e2->mod_addr ? 1 : -1;
9758 tr->scratch = tscratch;
9759 tr->scratch_size = size;
9761 if (tscratch->text_addr)
9762 tr->text_delta = (unsigned long)_text - tscratch->text_addr;
9764 if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
9768 for (i = 0; i < tscratch->nr_entries; i++) {
9771 entry = &tscratch->entries[i];
9774 if (entry->mod_name[n] == '\0')
9776 if (!isprint(entry->mod_name[n]))
9784 nr_entries = tscratch->nr_entries;
9785 sort_r(tscratch->entries, nr_entries, sizeof(struct trace_mod_entry),
9794 init_rcu_head(&module_delta->rcu);
9797 WRITE_ONCE(tr->module_delta, module_delta);
9803 if (tscratch->clock_id != tr->clock_id) {
9804 if (tscratch->clock_id >= ARRAY_SIZE(trace_clocks) ||
9805 tracing_set_clock(tr, trace_clocks[tscratch->clock_id].name) < 0) {
9823 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9825 buf->tr = tr;
9827 if (tr->range_addr_start && tr->range_addr_size) {
9829 buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9830 tr->range_addr_start,
9831 tr->range_addr_size,
9834 tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
9841 tr->mapped++;
9843 buf->buffer = ring_buffer_alloc(size, rb_flags);
9845 if (!buf->buffer)
9846 return -ENOMEM;
9848 buf->data = alloc_percpu(struct trace_array_cpu);
9849 if (!buf->data) {
9850 ring_buffer_free(buf->buffer);
9851 buf->buffer = NULL;
9852 return -ENOMEM;
9856 set_buffer_entries(&tr->array_buffer,
9857 ring_buffer_size(tr->array_buffer.buffer, 0));
9864 if (buf->buffer) {
9865 ring_buffer_free(buf->buffer);
9866 buf->buffer = NULL;
9867 free_percpu(buf->data);
9868 buf->data = NULL;
9876 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9882 if (tr->range_addr_start)
9885 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9888 free_trace_buffer(&tr->array_buffer);
9889 return -ENOMEM;
9891 tr->allocated_snapshot = allocate_snapshot;
9904 free_trace_buffer(&tr->array_buffer);
9905 kfree(tr->module_delta);
9908 free_trace_buffer(&tr->max_buffer);
9918 tr->trace_flags_index[i] = i;
9925 for (t = trace_types; t; t = t->next)
9942 if (tr->name && strcmp(tr->name, instance) == 0) {
9958 tr->ref++;
9967 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9968 if (!tr->dir)
9969 return -EINVAL;
9971 ret = event_trace_add_tracer(tr->dir, tr);
9973 tracefs_remove(tr->dir);
9977 init_tracer_tracefs(tr, tr->dir);
9991 ret = -ENOMEM;
9996 tr->name = kstrdup(name, GFP_KERNEL);
9997 if (!tr->name)
10000 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
10003 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
10007 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
10008 if (!tr->system_names)
10013 tr->range_addr_start = range_addr_start;
10014 tr->range_addr_size = range_addr_size;
10016 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
10018 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
10020 raw_spin_lock_init(&tr->start_lock);
10022 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10024 spin_lock_init(&tr->snapshot_trigger_lock);
10026 tr->current_trace = &nop_trace;
10028 INIT_LIST_HEAD(&tr->systems);
10029 INIT_LIST_HEAD(&tr->events);
10030 INIT_LIST_HEAD(&tr->hist_vars);
10031 INIT_LIST_HEAD(&tr->err_log);
10032 INIT_LIST_HEAD(&tr->marker_list);
10035 INIT_LIST_HEAD(&tr->mod_events);
10058 list_add(&tr->list, &ftrace_trace_arrays);
10060 tr->ref++;
10067 free_cpumask_var(tr->pipe_cpumask);
10068 free_cpumask_var(tr->tracing_cpumask);
10069 kfree_const(tr->system_names);
10070 kfree(tr->range_name);
10071 kfree(tr->name);
10090 ret = -EEXIST;
10092 return -EEXIST;
10112 vmap_start = (unsigned long) area->addr;
10132 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
10156 if (tr->name && strcmp(tr->name, name) == 0) {
10157 tr->ref++;
10167 tr->ref++;
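/*
 * Editor's sketch (not part of trace.c): typical in-kernel use of the
 * instance API from a module.  The instance name is hypothetical; a NULL
 * @systems argument selects all event systems, and all calls below are
 * exported by this file or by trace_events.c.
 */
static int my_tracer_init(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("my_instance", NULL);
	if (!tr)
		return -ENOMEM;

	/* enable one event in just this instance */
	trace_array_set_clr_event(tr, "sched", "sched_switch", true);

	trace_array_put(tr);	/* drop our reference when done */
	/* trace_array_destroy(tr) would remove an instance we created */
	return 0;
}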
10178 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
10179 return -EBUSY;
10181 list_del(&tr->list);
10200 tracefs_remove(tr->dir);
10201 free_percpu(tr->last_func_repeats);
10205 if (tr->range_name) {
10206 reserve_mem_release_by_name(tr->range_name);
10207 kfree(tr->range_name);
10210 for (i = 0; i < tr->nr_topts; i++) {
10211 kfree(tr->topts[i].topts);
10213 kfree(tr->topts);
10215 free_cpumask_var(tr->pipe_cpumask);
10216 free_cpumask_var(tr->tracing_cpumask);
10217 kfree_const(tr->system_names);
10218 kfree(tr->name);
10229 return -EINVAL;
10241 return -ENODEV;
10254 return -ENODEV;
10273 if (!tr->name)
10316 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
10330 tr->buffer_percent = 50;
10347 if (tr->range_addr_start) {
10402 * tracing_init_dentry - initialize top level trace array
10414 return -EPERM;
10418 if (tr->dir)
10422 return -ENODEV;
10431 tr->dir = debugfs_create_automount("tracing", NULL,
10449 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
10462 return -ENOMEM;
10494 if (n > sizeof(modname) - 1)
10511 trace_event_update_with_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
10520 if (!mod->num_trace_evals)
10528 if (map->head.mod == mod)
10531 last = &map->tail.next;
10532 map = map->tail.next;
10537 *last = trace_eval_jmp_to_tail(map)->tail.next;
10550 flags = tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT);
10655 .priority = INT_MAX - 1,
10660 .priority = INT_MAX - 1,
10701 if (s->seq.len >= TRACE_MAX_PRINT)
10702 s->seq.len = TRACE_MAX_PRINT;
10709 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10710 s->seq.len = s->seq.size - 1;
10713 s->buffer[s->seq.len] = 0;
10715 printk(KERN_TRACE "%s", s->buffer);
10722 iter->tr = tr;
10723 iter->trace = iter->tr->current_trace;
10724 iter->cpu_file = RING_BUFFER_ALL_CPUS;
10725 iter->array_buffer = &tr->array_buffer;
10727 if (iter->trace && iter->trace->open)
10728 iter->trace->open(iter);
10731 if (ring_buffer_overruns(iter->array_buffer->buffer))
10732 iter->iter_flags |= TRACE_FILE_ANNOTATE;
10735 if (trace_clocks[iter->tr->clock_id].in_ns)
10736 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10739 iter->temp = static_temp_buf;
10740 iter->temp_size = STATIC_TEMP_BUF_SIZE;
10741 iter->fmt = static_fmt_buf;
10742 iter->fmt_size = STATIC_FMT_BUF_SIZE;
10763 * If the user does a sysrq-z, then they can re-enable
10776 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10779 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10789 printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10807 printk(KERN_TRACE "---------------------------------\n");
10829 printk(KERN_TRACE "---------------------------------\n");
10831 tr->trace_flags |= old_userobj;
10882 /* Only allow one dump user at a time. */
10923 return -ENOMEM;
10926 size = count - done;
10929 size = WRITE_BUFSIZE - 1;
10932 return -EFAULT;
10940 size = tmp - buf + 1;
10946 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10948 WRITE_BUFSIZE - 2);
10949 return -EINVAL;
11021 boot_instance_info[boot_instance_index - 1] = '\0';
11137 tr->flags |= TRACE_ARRAY_FL_MEMMAP;
11138 tr->ref++;
11142 tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
11143 tr->range_name = no_free_ptr(rname);
11155 int ret = -ENOMEM;
11160 return -EPERM;
11170 return -ENOMEM;
11203 ret = -ENOMEM;
11308 if (!tr->allocated_snapshot)