--- trace.c (b3ca59f6fe79eb83a360e885f49730d07d31bf79)
+++ trace.c (36590c50b2d0729952511129916beeea30d31d81)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:

--- 162 unchanged lines hidden ---

171};
172
173static union trace_eval_map_item *trace_eval_maps;
174#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
175
176int tracing_set_tracer(struct trace_array *tr, const char *buf);
177static void ftrace_trace_userstack(struct trace_array *tr,
178 struct trace_buffer *buffer,
-179 unsigned long flags, int pc);
+179 unsigned int trace_ctx);
180
181#define MAX_TRACER_SIZE 100
182static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183static char *default_bootup_tracer;
184
185static bool allocate_snapshot;
186
187static int __init set_cmdline_ftrace(char *str)

--- 713 unchanged lines hidden ---

901static inline void trace_access_lock_init(void)
902{
903}
904
905#endif
906
907#ifdef CONFIG_STACKTRACE
908static void __ftrace_trace_stack(struct trace_buffer *buffer,
-909 unsigned long flags,
-910 int skip, int pc, struct pt_regs *regs);
+909 unsigned int trace_ctx,
+910 int skip, struct pt_regs *regs);
911static inline void ftrace_trace_stack(struct trace_array *tr,
912 struct trace_buffer *buffer,
-913 unsigned long flags,
-914 int skip, int pc, struct pt_regs *regs);
+913 unsigned int trace_ctx,
+914 int skip, struct pt_regs *regs);
915
916#else
917static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-918 unsigned long flags,
-919 int skip, int pc, struct pt_regs *regs)
+918 unsigned int trace_ctx,
+919 int skip, struct pt_regs *regs)
920{
921}
922static inline void ftrace_trace_stack(struct trace_array *tr,
923 struct trace_buffer *buffer,
-924 unsigned long flags,
-925 int skip, int pc, struct pt_regs *regs)
+924 unsigned long trace_ctx,
+925 int skip, struct pt_regs *regs)
926{
927}
928
929#endif
930
931static __always_inline void
932trace_event_setup(struct ring_buffer_event *event,
-933 int type, unsigned long flags, int pc)
+933 int type, unsigned int trace_ctx)
934{
935 struct trace_entry *ent = ring_buffer_event_data(event);
936
-937 tracing_generic_entry_update(ent, type, flags, pc);
+937 tracing_generic_entry_update(ent, type, trace_ctx);
938}
939
940static __always_inline struct ring_buffer_event *
941__trace_buffer_lock_reserve(struct trace_buffer *buffer,
942 int type,
943 unsigned long len,
-944 unsigned long flags, int pc)
+944 unsigned int trace_ctx)
945{
946 struct ring_buffer_event *event;
947
948 event = ring_buffer_lock_reserve(buffer, len);
949 if (event != NULL)
-950 trace_event_setup(event, type, flags, pc);
+950 trace_event_setup(event, type, trace_ctx);
951
952 return event;
953}
954
955void tracer_tracing_on(struct trace_array *tr)
956{
957 if (tr->array_buffer.buffer)
958 ring_buffer_record_on(tr->array_buffer.buffer);

--- 44 unchanged lines hidden ---

1003 * @str: The constant string to write
1004 * @size: The size of the string.
1005 */
1006int __trace_puts(unsigned long ip, const char *str, int size)
1007{
1008 struct ring_buffer_event *event;
1009 struct trace_buffer *buffer;
1010 struct print_entry *entry;
-1011 unsigned long irq_flags;
+1011 unsigned int trace_ctx;
1012 int alloc;
-1013 int pc;
1014
1015 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1016 return 0;
1017
-1018 pc = preempt_count();
-1019
1020 if (unlikely(tracing_selftest_running || tracing_disabled))
1021 return 0;
1022
1023 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1024
-1025 local_save_flags(irq_flags);
+1022 trace_ctx = tracing_gen_ctx();
1026 buffer = global_trace.array_buffer.buffer;
1027 ring_buffer_nest_start(buffer);
-1028 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-1029 irq_flags, pc);
+1025 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+1026 trace_ctx);
1030 if (!event) {
1031 size = 0;
1032 goto out;
1033 }
1034
1035 entry = ring_buffer_event_data(event);
1036 entry->ip = ip;
1037
1038 memcpy(&entry->buf, str, size);
1039
1040 /* Add a newline if necessary */
1041 if (entry->buf[size - 1] != '\n') {
1042 entry->buf[size] = '\n';
1043 entry->buf[size + 1] = '\0';
1044 } else
1045 entry->buf[size] = '\0';
1046
1047 __buffer_unlock_commit(buffer, event);
-1048 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+1045 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1049 out:
1050 ring_buffer_nest_end(buffer);
1051 return size;
1052}
1053EXPORT_SYMBOL_GPL(__trace_puts);
1054
1055/**
1056 * __trace_bputs - write the pointer to a constant string into trace buffer
1057 * @ip: The address of the caller
1058 * @str: The constant string to write to the buffer to
1059 */
1060int __trace_bputs(unsigned long ip, const char *str)
1061{
1062 struct ring_buffer_event *event;
1063 struct trace_buffer *buffer;
1064 struct bputs_entry *entry;
-1065 unsigned long irq_flags;
+1062 unsigned int trace_ctx;
1066 int size = sizeof(struct bputs_entry);
1067 int ret = 0;
-1068 int pc;
1069
1070 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1071 return 0;
1072
-1073 pc = preempt_count();
-1074
1075 if (unlikely(tracing_selftest_running || tracing_disabled))
1076 return 0;
1077
-1078 local_save_flags(irq_flags);
+1072 trace_ctx = tracing_gen_ctx();
1079 buffer = global_trace.array_buffer.buffer;
1080
1081 ring_buffer_nest_start(buffer);
1082 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-1083 irq_flags, pc);
+1077 trace_ctx);
1084 if (!event)
1085 goto out;
1086
1087 entry = ring_buffer_event_data(event);
1088 entry->ip = ip;
1089 entry->str = str;
1090
1091 __buffer_unlock_commit(buffer, event);
-1092 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+1086 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1093
1094 ret = 1;
1095 out:
1096 ring_buffer_nest_end(buffer);
1097 return ret;
1098}
1099EXPORT_SYMBOL_GPL(__trace_bputs);
1100
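[Editor's note: every call site converted in this patch follows the same mechanical pattern. A condensed before/after sketch of that pattern (illustrative only, not an additional hunk of the diff):]

	/* before: two values captured separately and threaded through */
	local_save_flags(irq_flags);
	pc = preempt_count();
	event = __trace_buffer_lock_reserve(buffer, type, len, irq_flags, pc);

	/* after: one packed word generated once */
	trace_ctx = tracing_gen_ctx();
	event = __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);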

--- 1479 unchanged lines hidden ---

2580 */
2581enum print_line_t trace_handle_return(struct trace_seq *s)
2582{
2583 return trace_seq_has_overflowed(s) ?
2584 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2585}
2586EXPORT_SYMBOL_GPL(trace_handle_return);
2587
-2588void
-2589tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
-2590 unsigned long flags, int pc)
-2591{
-2592 struct task_struct *tsk = current;
-2593
-2594 entry->preempt_count = pc & 0xff;
-2595 entry->pid = (tsk) ? tsk->pid : 0;
-2596 entry->type = type;
-2597 entry->flags =
-2598#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-2599 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
-2600#else
-2601 TRACE_FLAG_IRQS_NOSUPPORT |
-2602#endif
-2603 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
-2604 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-2605 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
-2606 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
-2607 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
-2608}
-2609EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-2610
+2582unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+2583{
+2584 unsigned int trace_flags = 0;
+2585 unsigned int pc;
+2586
+2587 pc = preempt_count();
+2588
+2589#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+2590 if (irqs_disabled_flags(irqflags))
+2591 trace_flags |= TRACE_FLAG_IRQS_OFF;
+2592#else
+2593 trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
+2594#endif
+2595
+2596 if (pc & NMI_MASK)
+2597 trace_flags |= TRACE_FLAG_NMI;
+2598 if (pc & HARDIRQ_MASK)
+2599 trace_flags |= TRACE_FLAG_HARDIRQ;
+2600
+2601 if (pc & SOFTIRQ_OFFSET)
+2602 trace_flags |= TRACE_FLAG_SOFTIRQ;
+2603
+2604 if (tif_need_resched())
+2605 trace_flags |= TRACE_FLAG_NEED_RESCHED;
+2606 if (test_preempt_need_resched())
+2607 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+2608 return (trace_flags << 16) | (pc & 0xff);
+2609}
+2610
+2611unsigned int tracing_gen_ctx(void)
+2612{
+2613 unsigned long irqflags;
+2614
+2615#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+2616 local_save_flags(irqflags);
+2617#else
+2618 irqflags = 0;
+2619#endif
+2620 return tracing_gen_ctx_flags(irqflags);
+2621}
+2622
+2623unsigned int tracing_gen_ctx_dec(void)
+2624{
+2625 unsigned int trace_ctx;
+2626
+2627 trace_ctx = tracing_gen_ctx();
+2628
+2629 /*
+2630 * Subtract one from the preemption counter if preemption is enabled,
+2631 * see trace_event_buffer_reserve() for details.
+2632 */
+2633 if (IS_ENABLED(CONFIG_PREEMPTION))
+2634 trace_ctx--;
+2635 return trace_ctx;
+2636}
+2637
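[Editor's note: the packed format is defined by tracing_gen_ctx_flags() above: the low byte of trace_ctx carries the preempt count, and bits 16 and up carry the TRACE_FLAG_* bits, so a single unsigned int replaces the old (flags, pc) argument pair. A minimal sketch of the round trip (illustrative only; the unpacking side, tracing_generic_entry_update(), is no longer defined in this file, but its callers here still pass it the packed word):]

	/* pack, as done by tracing_gen_ctx_flags() above */
	unsigned int trace_ctx = (trace_flags << 16) | (preempt_count() & 0xff);

	/* unpack into a struct trace_entry */
	entry->preempt_count = trace_ctx & 0xff;	/* low byte: preempt count */
	entry->flags = trace_ctx >> 16;			/* high bits: TRACE_FLAG_* */

[tracing_gen_ctx_dec() subtracts one from that low byte so that a preempt_disable taken inside the event-reserve path does not show up in the recorded count, per the comment in the function.]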
2611struct ring_buffer_event *
2612trace_buffer_lock_reserve(struct trace_buffer *buffer,
2613 int type,
2614 unsigned long len,
-2615 unsigned long flags, int pc)
+2642 unsigned int trace_ctx)
2616{
-2617 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
+2644 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2618}
2619
2620DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2621DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2622static int trace_buffered_event_ref;
2623
2624/**
2625 * trace_buffered_event_enable - enable buffering events

--- 103 unchanged lines hidden ---

2729}
2730
2731static struct trace_buffer *temp_buffer;
2732
2733struct ring_buffer_event *
2734trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2735 struct trace_event_file *trace_file,
2736 int type, unsigned long len,
-2737 unsigned long flags, int pc)
+2764 unsigned int trace_ctx)
2738{
2739 struct ring_buffer_event *entry;
2740 int val;
2741
2742 *current_rb = trace_file->tr->array_buffer.buffer;
2743
2744 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2745 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2746 (entry = this_cpu_read(trace_buffered_event))) {
2747 /* Try to use the per cpu buffer first */
2748 val = this_cpu_inc_return(trace_buffered_event_cnt);
2749 if (val == 1) {
-2750 trace_event_setup(entry, type, flags, pc);
+2777 trace_event_setup(entry, type, trace_ctx);
2751 entry->array[0] = len;
2752 return entry;
2753 }
2754 this_cpu_dec(trace_buffered_event_cnt);
2755 }
2756
-2757 entry = __trace_buffer_lock_reserve(*current_rb,
-2758 type, len, flags, pc);
+2784 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+2785 trace_ctx);
2759 /*
2760 * If tracing is off, but we have triggers enabled
2761 * we still need to look at the event data. Use the temp_buffer
2762 * to store the trace event for the trigger to use. It's recursive
2763 * safe and will not be recorded anywhere.
2764 */
2765 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2766 *current_rb = temp_buffer;
-2767 entry = __trace_buffer_lock_reserve(*current_rb,
-2768 type, len, flags, pc);
+2794 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+2795 trace_ctx);
2769 }
2770 return entry;
2771}
2772EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2773
2774static DEFINE_SPINLOCK(tracepoint_iter_lock);
2775static DEFINE_MUTEX(tracepoint_printk_mutex);
2776

--- 69 unchanged lines hidden ---

2846{
2847 if (static_key_false(&tracepoint_printk_key.key))
2848 output_printk(fbuffer);
2849
2850 if (static_branch_unlikely(&trace_event_exports_enabled))
2851 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2852 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2853 fbuffer->event, fbuffer->entry,
-2854 fbuffer->flags, fbuffer->pc, fbuffer->regs);
+2881 fbuffer->trace_ctx, fbuffer->regs);
2855}
2856EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2857
2858/*
2859 * Skip 3:
2860 *
2861 * trace_buffer_unlock_commit_regs()
2862 * trace_event_buffer_commit()
2863 * trace_event_raw_event_xxx()
2864 */
2865# define STACK_SKIP 3
2866
2867void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2868 struct trace_buffer *buffer,
2869 struct ring_buffer_event *event,
-2870 unsigned long flags, int pc,
+2897 unsigned int trace_ctx,
2871 struct pt_regs *regs)
2872{
2873 __buffer_unlock_commit(buffer, event);
2874
2875 /*
2876 * If regs is not set, then skip the necessary functions.
2877 * Note, we can still get here via blktrace, wakeup tracer
2878 * and mmiotrace, but that's ok if they lose a function or
2879 * two. They are not that meaningful.
2880 */
-2881 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
-2882 ftrace_trace_userstack(tr, buffer, flags, pc);
+2908 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
+2909 ftrace_trace_userstack(tr, buffer, trace_ctx);
2883}
2884
2885/*
2886 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2887 */
2888void
2889trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2890 struct ring_buffer_event *event)
2891{
2892 __buffer_unlock_commit(buffer, event);
2893}
2894
2895void
-2896trace_function(struct trace_array *tr,
-2897 unsigned long ip, unsigned long parent_ip, unsigned long flags,
-2898 int pc)
+2923trace_function(struct trace_array *tr, unsigned long ip, unsigned long
+2924 parent_ip, unsigned int trace_ctx)
2899{
2900 struct trace_event_call *call = &event_function;
2901 struct trace_buffer *buffer = tr->array_buffer.buffer;
2902 struct ring_buffer_event *event;
2903 struct ftrace_entry *entry;
2904
2905 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-2906 flags, pc);
+2932 trace_ctx);
2907 if (!event)
2908 return;
2909 entry = ring_buffer_event_data(event);
2910 entry->ip = ip;
2911 entry->parent_ip = parent_ip;
2912
2913 if (!call_filter_check_discard(call, entry, buffer, event)) {
2914 if (static_branch_unlikely(&trace_function_exports_enabled))

--- 17 unchanged lines hidden ---

2932struct ftrace_stacks {
2933 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2934};
2935
2936static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2937static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2938
2939static void __ftrace_trace_stack(struct trace_buffer *buffer,
-2940 unsigned long flags,
-2941 int skip, int pc, struct pt_regs *regs)
+2966 unsigned int trace_ctx,
+2967 int skip, struct pt_regs *regs)
2942{
2943 struct trace_event_call *call = &event_kernel_stack;
2944 struct ring_buffer_event *event;
2945 unsigned int size, nr_entries;
2946 struct ftrace_stack *fstack;
2947 struct stack_entry *entry;
2948 int stackidx;
2949

--- 30 unchanged lines hidden ---

2980 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2981 size, skip);
2982 } else {
2983 nr_entries = stack_trace_save(fstack->calls, size, skip);
2984 }
2985
2986 size = nr_entries * sizeof(unsigned long);
2987 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-2988 sizeof(*entry) + size, flags, pc);
+3014 sizeof(*entry) + size, trace_ctx);
2989 if (!event)
2990 goto out;
2991 entry = ring_buffer_event_data(event);
2992
2993 memcpy(&entry->caller, fstack->calls, size);
2994 entry->size = nr_entries;
2995
2996 if (!call_filter_check_discard(call, entry, buffer, event))

--- 4 unchanged lines hidden ---

3001 barrier();
3002 __this_cpu_dec(ftrace_stack_reserve);
3003 preempt_enable_notrace();
3004
3005}
3006
3007static inline void ftrace_trace_stack(struct trace_array *tr,
3008 struct trace_buffer *buffer,
-3009 unsigned long flags,
-3010 int skip, int pc, struct pt_regs *regs)
+3035 unsigned int trace_ctx,
+3036 int skip, struct pt_regs *regs)
3011{
3012 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3013 return;
3014
-3015 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
+3041 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3016}
3017
-3018void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-3019 int pc)
+3044void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+3045 int skip)
3020{
3021 struct trace_buffer *buffer = tr->array_buffer.buffer;
3022
3023 if (rcu_is_watching()) {
-3024 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+3050 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3025 return;
3026 }
3027
3028 /*
3029 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3030 * but if the above rcu_is_watching() failed, then the NMI
3031 * triggered someplace critical, and rcu_irq_enter() should
3032 * not be called from NMI.
3033 */
3034 if (unlikely(in_nmi()))
3035 return;
3036
3037 rcu_irq_enter_irqson();
-3038 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+3064 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3039 rcu_irq_exit_irqson();
3040}
3041
3042/**
3043 * trace_dump_stack - record a stack back trace in the trace buffer
3044 * @skip: Number of functions to skip (helper handlers)
3045 */
3046void trace_dump_stack(int skip)
3047{
-3048 unsigned long flags;
-3049
3050 if (tracing_disabled || tracing_selftest_running)
3051 return;
3052
-3053 local_save_flags(flags);
-3054
3055#ifndef CONFIG_UNWINDER_ORC
3056 /* Skip 1 to skip this function. */
3057 skip++;
3058#endif
3059 __ftrace_trace_stack(global_trace.array_buffer.buffer,
-3060 flags, skip, preempt_count(), NULL);
+3082 tracing_gen_ctx(), skip, NULL);
3061}
3062EXPORT_SYMBOL_GPL(trace_dump_stack);
3063
3064#ifdef CONFIG_USER_STACKTRACE_SUPPORT
3065static DEFINE_PER_CPU(int, user_stack_count);
3066
3067static void
3068ftrace_trace_userstack(struct trace_array *tr,
-3069 struct trace_buffer *buffer, unsigned long flags, int pc)
+3091 struct trace_buffer *buffer, unsigned int trace_ctx)
3070{
3071 struct trace_event_call *call = &event_user_stack;
3072 struct ring_buffer_event *event;
3073 struct userstack_entry *entry;
3074
3075 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3076 return;
3077

--- 10 unchanged lines hidden ---

3088 */
3089 preempt_disable();
3090 if (__this_cpu_read(user_stack_count))
3091 goto out;
3092
3093 __this_cpu_inc(user_stack_count);
3094
3095 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3118 sizeof(*entry), trace_ctx);
3097 if (!event)
3098 goto out_drop_count;
3099 entry = ring_buffer_event_data(event);
3100
3101 entry->tgid = current->tgid;
3102 memset(&entry->caller, 0, sizeof(entry->caller));
3103
3104 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3105 if (!call_filter_check_discard(call, entry, buffer, event))
3106 __buffer_unlock_commit(buffer, event);
3107
3108 out_drop_count:
3109 __this_cpu_dec(user_stack_count);
3110 out:
3111 preempt_enable();
3112}
3113#else /* CONFIG_USER_STACKTRACE_SUPPORT */
3114static void ftrace_trace_userstack(struct trace_array *tr,
3115 struct trace_buffer *buffer,
-3116 unsigned long flags, int pc)
+3138 unsigned int trace_ctx)
3117{
3118}
3119#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3120
3121#endif /* CONFIG_STACKTRACE */
3122
3123/* created for use with alloc_percpu */
3124struct trace_buffer_struct {

--- 113 unchanged lines hidden ---

3238 */
3239int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3240{
3241 struct trace_event_call *call = &event_bprint;
3242 struct ring_buffer_event *event;
3243 struct trace_buffer *buffer;
3244 struct trace_array *tr = &global_trace;
3245 struct bprint_entry *entry;
-3246 unsigned long flags;
+3268 unsigned int trace_ctx;
3247 char *tbuffer;
-3248 int len = 0, size, pc;
+3270 int len = 0, size;
3249
3250 if (unlikely(tracing_selftest_running || tracing_disabled))
3251 return 0;
3252
3253 /* Don't pollute graph traces with trace_vprintk internals */
3254 pause_graph_tracing();
3255
-3256 pc = preempt_count();
+3278 trace_ctx = tracing_gen_ctx();
3257 preempt_disable_notrace();
3258
3259 tbuffer = get_trace_buf();
3260 if (!tbuffer) {
3261 len = 0;
3262 goto out_nobuffer;
3263 }
3264
3265 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3266
3267 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3268 goto out_put;
3269
-3270 local_save_flags(flags);
3271 size = sizeof(*entry) + sizeof(u32) * len;
3272 buffer = tr->array_buffer.buffer;
3273 ring_buffer_nest_start(buffer);
3274 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-3275 flags, pc);
+3296 trace_ctx);
3276 if (!event)
3277 goto out;
3278 entry = ring_buffer_event_data(event);
3279 entry->ip = ip;
3280 entry->fmt = fmt;
3281
3282 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3283 if (!call_filter_check_discard(call, entry, buffer, event)) {
3284 __buffer_unlock_commit(buffer, event);
-3285 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
+3306 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3286 }
3287
3288out:
3289 ring_buffer_nest_end(buffer);
3290out_put:
3291 put_trace_buf();
3292
3293out_nobuffer:

--- 6 unchanged lines hidden ---

3300
3301__printf(3, 0)
3302static int
3303__trace_array_vprintk(struct trace_buffer *buffer,
3304 unsigned long ip, const char *fmt, va_list args)
3305{
3306 struct trace_event_call *call = &event_print;
3307 struct ring_buffer_event *event;
-3308 int len = 0, size, pc;
+3329 int len = 0, size;
3309 struct print_entry *entry;
-3310 unsigned long flags;
+3331 unsigned int trace_ctx;
3311 char *tbuffer;
3312
3313 if (tracing_disabled || tracing_selftest_running)
3314 return 0;
3315
3316 /* Don't pollute graph traces with trace_vprintk internals */
3317 pause_graph_tracing();
3318
-3319 pc = preempt_count();
+3340 trace_ctx = tracing_gen_ctx();
3320 preempt_disable_notrace();
3321
3322
3323 tbuffer = get_trace_buf();
3324 if (!tbuffer) {
3325 len = 0;
3326 goto out_nobuffer;
3327 }
3328
3329 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3330
-3331 local_save_flags(flags);
3332 size = sizeof(*entry) + len + 1;
3333 ring_buffer_nest_start(buffer);
3334 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-3335 flags, pc);
+3355 trace_ctx);
3336 if (!event)
3337 goto out;
3338 entry = ring_buffer_event_data(event);
3339 entry->ip = ip;
3340
3341 memcpy(&entry->buf, tbuffer, len + 1);
3342 if (!call_filter_check_discard(call, entry, buffer, event)) {
3343 __buffer_unlock_commit(buffer, event);
-3344 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
+3364 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3345 }
3346
3347out:
3348 ring_buffer_nest_end(buffer);
3349 put_trace_buf();
3350
3351out_nobuffer:
3352 preempt_enable_notrace();

--- 3296 unchanged lines hidden (view full) ---

6649tracing_mark_write(struct file *filp, const char __user *ubuf,
6650 size_t cnt, loff_t *fpos)
6651{
6652 struct trace_array *tr = filp->private_data;
6653 struct ring_buffer_event *event;
6654 enum event_trigger_type tt = ETT_NONE;
6655 struct trace_buffer *buffer;
6656 struct print_entry *entry;
-6657 unsigned long irq_flags;
6658 ssize_t written;
6659 int size;
6660 int len;
6661
6662/* Used in tracing_mark_raw_write() as well */
6663#define FAULTED_STR "<faulted>"
6664#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6665
6666 if (tracing_disabled)
6667 return -EINVAL;
6668
6669 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6670 return -EINVAL;
6671
6672 if (cnt > TRACE_BUF_SIZE)
6673 cnt = TRACE_BUF_SIZE;
6674
6675 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6676
-6677 local_save_flags(irq_flags);
6678 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6679
6680 /* If less than "<faulted>", then make sure we can still add that */
6681 if (cnt < FAULTED_SIZE)
6682 size += FAULTED_SIZE - cnt;
6683
6684 buffer = tr->array_buffer.buffer;
6685 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-6686 irq_flags, preempt_count());
+6704 tracing_gen_ctx());
6687 if (unlikely(!event))
6688 /* Ring buffer disabled, return as if not open for write */
6689 return -EBADF;
6690
6691 entry = ring_buffer_event_data(event);
6692 entry->ip = _THIS_IP_;
6693
6694 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);

--- 35 unchanged lines hidden ---

6730static ssize_t
6731tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6732 size_t cnt, loff_t *fpos)
6733{
6734 struct trace_array *tr = filp->private_data;
6735 struct ring_buffer_event *event;
6736 struct trace_buffer *buffer;
6737 struct raw_data_entry *entry;
-6738 unsigned long irq_flags;
6739 ssize_t written;
6740 int size;
6741 int len;
6742
6743#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6744
6745 if (tracing_disabled)
6746 return -EINVAL;

--- 5 unchanged lines hidden ---

6752 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6753 return -EINVAL;
6754
6755 if (cnt > TRACE_BUF_SIZE)
6756 cnt = TRACE_BUF_SIZE;
6757
6758 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6759
-6760 local_save_flags(irq_flags);
6761 size = sizeof(*entry) + cnt;
6762 if (cnt < FAULT_SIZE_ID)
6763 size += FAULT_SIZE_ID - cnt;
6764
6765 buffer = tr->array_buffer.buffer;
6766 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-6767 irq_flags, preempt_count());
+6783 tracing_gen_ctx());
6768 if (!event)
6769 /* Ring buffer disabled, return as if not open for write */
6770 return -EBADF;
6771
6772 entry = ring_buffer_event_data(event);
6773
6774 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6775 if (len) {

--- 2935 unchanged lines hidden ---