trace.c (8438f5211479e4b8433f641634362264bc3bbd9e) trace.c (8ab7a2b7055c88c3da5e4684dfa015c6a8987c28)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:

--- 237 unchanged lines hidden (view full) ---

246
/*
 * ns2usecs - convert a nanosecond timestamp to microseconds.
 * The +500 rounds to the nearest microsecond instead of truncating;
 * do_div() divides @nsec in place (64-bit divide usable on 32-bit
 * architectures) and the quotient is returned.
 */
247unsigned long long ns2usecs(u64 nsec)
248{
249 nsec += 500;
250 do_div(nsec, 1000);
251 return nsec;
252}
253
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:

--- 237 unchanged lines hidden (view full) ---

246
/*
 * ns2usecs - convert a nanosecond timestamp to microseconds.
 * The +500 rounds to the nearest microsecond instead of truncating;
 * do_div() divides @nsec in place (64-bit divide usable on 32-bit
 * architectures) and the quotient is returned.
 */
247unsigned long long ns2usecs(u64 nsec)
248{
249 nsec += 500;
250 do_div(nsec, 1000);
251 return nsec;
252}
253
/*
 * trace_process_export - hand one ring buffer event to a single exporter.
 * The event payload and its length are passed to export->write() only
 * if the exporter registered interest in this event class (@flag is
 * set in export->flags); otherwise the event is silently skipped.
 */
254static void
255trace_process_export(struct trace_export *export,
256 struct ring_buffer_event *event, int flag)
257{
258 struct trace_entry *entry;
259 unsigned int size = 0;
260
261 if (export->flags & flag) {
262 entry = ring_buffer_event_data(event);
263 size = ring_buffer_event_length(event);
264 export->write(export, entry, size);
265 }
266}
267
/* Serializes additions to / removals from ftrace_exports_list. */
268static DEFINE_MUTEX(ftrace_export_lock);
269
/* RCU-protected, singly linked list of registered trace exporters. */
270static struct trace_export __rcu *ftrace_exports_list __read_mostly;
271
/*
 * Static keys gating the function/event export hot paths, so the
 * export hooks are patched-out no-ops while no exporter of the
 * corresponding class is registered.
 */
272static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
273static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
274
/*
 * ftrace_exports_enable - bump the static-key count for each export
 * class this exporter handles, enabling the corresponding export hook
 * branch.  Counted (inc/dec) because multiple exporters may share a
 * class.
 */
275static inline void ftrace_exports_enable(struct trace_export *export)
276{
277 if (export->flags & TRACE_EXPORT_FUNCTION)
278 static_branch_inc(&trace_function_exports_enabled);
279
280 if (export->flags & TRACE_EXPORT_EVENT)
281 static_branch_inc(&trace_event_exports_enabled);
282}
283
/*
 * ftrace_exports_disable - drop the static-key count for each export
 * class this exporter handled; the hook branch is patched out again
 * when the count reaches zero.  Mirror of ftrace_exports_enable().
 */
284static inline void ftrace_exports_disable(struct trace_export *export)
285{
286 if (export->flags & TRACE_EXPORT_FUNCTION)
287 static_branch_dec(&trace_function_exports_enabled);
288
289 if (export->flags & TRACE_EXPORT_EVENT)
290 static_branch_dec(&trace_event_exports_enabled);
291}
292
/*
 * ftrace_exports - pass @event to every registered exporter interested
 * in @flag.  Disabling preemption serves as the RCU read-side critical
 * section for the list walk; the _notrace variants keep the tracer
 * from recursing into itself.  rcu_dereference_raw_check() is used
 * rather than rcu_dereference() — NOTE(review): presumably because
 * callers may run in contexts where lockdep's RCU checks don't apply;
 * confirm against the list-update side.
 */
293static void ftrace_exports(struct ring_buffer_event *event, int flag)
294{
295 struct trace_export *export;
296
297 preempt_disable_notrace();
298
299 export = rcu_dereference_raw_check(ftrace_exports_list);
300 while (export) {
301 trace_process_export(export, event, flag);
302 export = rcu_dereference_raw_check(export->next);
303 }
304
305 preempt_enable_notrace();
306}
307
/*
 * add_trace_export - link @export at the head of the RCU list @list.
 * Caller must hold ftrace_export_lock; readers may be walking the
 * list concurrently, hence the publish ordering explained below.
 */
308static inline void
309add_trace_export(struct trace_export **list, struct trace_export *export)
310{
311 rcu_assign_pointer(export->next, *list);
312 /*
313 * We are entering export into the list but another
314 * CPU might be walking that list. We need to make sure
315 * the export->next pointer is valid before another CPU sees
316 * the export pointer included into the list.
317 */
318 rcu_assign_pointer(*list, export);
319}
320
/*
 * rm_trace_export - unlink @export from the RCU list @list.
 * Returns 0 on success, -1 if @export is not on the list.
 * Caller must hold ftrace_export_lock; the unlink uses
 * rcu_assign_pointer() so concurrent readers always see a
 * consistent list.
 */
321static inline int
322rm_trace_export(struct trace_export **list, struct trace_export *export)
323{
324 struct trace_export **p;
325
/* Find the link (head or a ->next field) that points at export. */
326 for (p = list; *p != NULL; p = &(*p)->next)
327 if (*p == export)
328 break;
329
330 if (*p != export)
331 return -1;
332
333 rcu_assign_pointer(*p, (*p)->next);
334
335 return 0;
336}
337
/*
 * add_ftrace_export - enable the exporter's static-key branches and
 * publish it on @list.  Enable happens first so no event is missed
 * once the exporter becomes visible to readers.
 */
338static inline void
339add_ftrace_export(struct trace_export **list, struct trace_export *export)
340{
341 ftrace_exports_enable(export);
342
343 add_trace_export(list, export);
344}
345
/*
 * rm_ftrace_export - unlink @export and drop its static-key counts.
 * Returns the rm_trace_export() result (0 or -1).
 * NOTE(review): ftrace_exports_disable() runs even when the export
 * was not found (ret == -1), which would unbalance the static key for
 * a never-registered export — presumably callers only pass registered
 * exports; confirm before relying on the error path.
 */
346static inline int
347rm_ftrace_export(struct trace_export **list, struct trace_export *export)
348{
349 int ret;
350
351 ret = rm_trace_export(list, export);
352 ftrace_exports_disable(export);
353
354 return ret;
355}
356
/*
 * register_ftrace_export - add an exporter to the global list.
 * Returns 0 on success, or -1 (after a one-time WARN) if the
 * mandatory ->write() callback is missing.  Serialized against other
 * register/unregister calls by ftrace_export_lock.
 */
357int register_ftrace_export(struct trace_export *export)
358{
359 if (WARN_ON_ONCE(!export->write))
360 return -1;
361
362 mutex_lock(&ftrace_export_lock);
363
364 add_ftrace_export(&ftrace_exports_list, export);
365
366 mutex_unlock(&ftrace_export_lock);
367
368 return 0;
369}
370EXPORT_SYMBOL_GPL(register_ftrace_export);
371
/*
 * unregister_ftrace_export - remove an exporter from the global list.
 * Returns 0 on success, -1 if @export was not registered.  Serialized
 * by ftrace_export_lock; concurrent readers may still be using the
 * exporter until they leave their read-side section.
 */
372int unregister_ftrace_export(struct trace_export *export)
373{
374 int ret;
375
376 mutex_lock(&ftrace_export_lock);
377
378 ret = rm_ftrace_export(&ftrace_exports_list, export);
379
380 mutex_unlock(&ftrace_export_lock);
381
382 return ret;
383}
384EXPORT_SYMBOL_GPL(unregister_ftrace_export);
385
/*
 * Bitmask of trace options that are enabled by default for new trace
 * instances; each TRACE_ITER_* bit toggles one option.
 */
254/* trace_flags holds trace_options default values */
255#define TRACE_DEFAULT_FLAGS \
256 (FUNCTION_DEFAULT_FLAGS | \
257 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
258 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
259 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
260 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
261

--- 2432 unchanged lines hidden (view full) ---

2694 return ret;
2695}
2696
2697void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2698{
2699 if (static_key_false(&tracepoint_printk_key.key))
2700 output_printk(fbuffer);
2701
/*
 * Bitmask of trace options that are enabled by default for new trace
 * instances; each TRACE_ITER_* bit toggles one option.
 */
386/* trace_flags holds trace_options default values */
387#define TRACE_DEFAULT_FLAGS \
388 (FUNCTION_DEFAULT_FLAGS | \
389 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
390 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
391 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
392 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
393

--- 2432 unchanged lines hidden (view full) ---

2826 return ret;
2827}
2828
2829void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2830{
2831 if (static_key_false(&tracepoint_printk_key.key))
2832 output_printk(fbuffer);
2833
2834 if (static_branch_unlikely(&trace_event_exports_enabled))
2835 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2702 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2703 fbuffer->event, fbuffer->entry,
2704 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2705}
2706EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2707
2708/*
2709 * Skip 3:

--- 27 unchanged lines hidden (view full) ---

2737 */
/*
 * trace_buffer_unlock_commit_nostack - commit a previously reserved
 * ring buffer event.  Per its name this is the variant that records
 * no stack trace — the extra work is skipped in __buffer_unlock_commit
 * (defined elsewhere; NOTE(review): confirm its exact semantics there).
 */
2738void
2739trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2740 struct ring_buffer_event *event)
2741{
2742 __buffer_unlock_commit(buffer, event);
2743}
2744
2836 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2837 fbuffer->event, fbuffer->entry,
2838 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2839}
2840EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2841
2842/*
2843 * Skip 3:

--- 27 unchanged lines hidden (view full) ---

2871 */
/*
 * trace_buffer_unlock_commit_nostack - commit a previously reserved
 * ring buffer event.  Per its name this is the variant that records
 * no stack trace — the extra work is skipped in __buffer_unlock_commit
 * (defined elsewhere; NOTE(review): confirm its exact semantics there).
 */
2872void
2873trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2874 struct ring_buffer_event *event)
2875{
2876 __buffer_unlock_commit(buffer, event);
2877}
2878
/*
 * trace_process_export - hand one ring buffer event to a single exporter.
 * The event payload and its length are passed to export->write() only
 * if the exporter registered interest in this event class (@flag is
 * set in export->flags); otherwise the event is silently skipped.
 */
2745static void
2746trace_process_export(struct trace_export *export,
2747 struct ring_buffer_event *event, int flag)
2748{
2749 struct trace_entry *entry;
2750 unsigned int size = 0;
2751
2752 if (export->flags & flag) {
2753 entry = ring_buffer_event_data(event);
2754 size = ring_buffer_event_length(event);
2755 export->write(export, entry, size);
2756 }
2757}
2758
/* Serializes additions to / removals from ftrace_exports_list. */
2759static DEFINE_MUTEX(ftrace_export_lock);
2760
/* RCU-protected, singly linked list of registered trace exporters. */
2761static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2762
/*
 * Static key gating the function-export hot path; patched-out no-op
 * while no TRACE_EXPORT_FUNCTION exporter is registered.
 */
2763static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
2764
/*
 * ftrace_exports_enable - bump the function-export static-key count
 * if this exporter handles function events, enabling the export hook
 * branch.  Counted (inc/dec) because multiple exporters may register.
 */
2765static inline void ftrace_exports_enable(struct trace_export *export)
2766{
2767 if (export->flags & TRACE_EXPORT_FUNCTION)
2768 static_branch_inc(&trace_function_exports_enabled);
2769}
2770
/*
 * ftrace_exports_disable - drop the function-export static-key count;
 * the hook branch is patched out again when the count reaches zero.
 * Mirror of ftrace_exports_enable().
 */
2771static inline void ftrace_exports_disable(struct trace_export *export)
2772{
2773 if (export->flags & TRACE_EXPORT_FUNCTION)
2774 static_branch_dec(&trace_function_exports_enabled);
2775}
2776
/*
 * ftrace_exports - pass @event to every registered exporter interested
 * in @flag.  Disabling preemption serves as the RCU read-side critical
 * section for the list walk; the _notrace variants keep the tracer
 * from recursing into itself.  rcu_dereference_raw_check() is used
 * rather than rcu_dereference() — NOTE(review): presumably because
 * callers may run in contexts where lockdep's RCU checks don't apply;
 * confirm against the list-update side.
 */
2777static void ftrace_exports(struct ring_buffer_event *event, int flag)
2778{
2779 struct trace_export *export;
2780
2781 preempt_disable_notrace();
2782
2783 export = rcu_dereference_raw_check(ftrace_exports_list);
2784 while (export) {
2785 trace_process_export(export, event, flag);
2786 export = rcu_dereference_raw_check(export->next);
2787 }
2788
2789 preempt_enable_notrace();
2790}
2791
/*
 * add_trace_export - link @export at the head of the RCU list @list.
 * Caller must hold ftrace_export_lock; readers may be walking the
 * list concurrently, hence the publish ordering explained below.
 */
2792static inline void
2793add_trace_export(struct trace_export **list, struct trace_export *export)
2794{
2795 rcu_assign_pointer(export->next, *list);
2796 /*
2797 * We are entering export into the list but another
2798 * CPU might be walking that list. We need to make sure
2799 * the export->next pointer is valid before another CPU sees
2800 * the export pointer included into the list.
2801 */
2802 rcu_assign_pointer(*list, export);
2803}
2804
/*
 * rm_trace_export - unlink @export from the RCU list @list.
 * Returns 0 on success, -1 if @export is not on the list.
 * Caller must hold ftrace_export_lock; the unlink uses
 * rcu_assign_pointer() so concurrent readers always see a
 * consistent list.
 */
2805static inline int
2806rm_trace_export(struct trace_export **list, struct trace_export *export)
2807{
2808 struct trace_export **p;
2809
/* Find the link (head or a ->next field) that points at export. */
2810 for (p = list; *p != NULL; p = &(*p)->next)
2811 if (*p == export)
2812 break;
2813
2814 if (*p != export)
2815 return -1;
2816
2817 rcu_assign_pointer(*p, (*p)->next);
2818
2819 return 0;
2820}
2821
/*
 * add_ftrace_export - enable the exporter's static-key branch and
 * publish it on @list.  Enable happens first so no event is missed
 * once the exporter becomes visible to readers.
 */
2822static inline void
2823add_ftrace_export(struct trace_export **list, struct trace_export *export)
2824{
2825 ftrace_exports_enable(export);
2826
2827 add_trace_export(list, export);
2828}
2829
/*
 * rm_ftrace_export - unlink @export and drop its static-key count.
 * Returns the rm_trace_export() result (0 or -1).
 * NOTE(review): ftrace_exports_disable() runs even when the export
 * was not found (ret == -1), which would unbalance the static key for
 * a never-registered export — presumably callers only pass registered
 * exports; confirm before relying on the error path.
 */
2830static inline int
2831rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2832{
2833 int ret;
2834
2835 ret = rm_trace_export(list, export);
2836 ftrace_exports_disable(export);
2837
2838 return ret;
2839}
2840
/*
 * register_ftrace_export - add an exporter to the global list.
 * Returns 0 on success, or -1 (after a one-time WARN) if the
 * mandatory ->write() callback is missing.  Serialized against other
 * register/unregister calls by ftrace_export_lock.
 */
2841int register_ftrace_export(struct trace_export *export)
2842{
2843 if (WARN_ON_ONCE(!export->write))
2844 return -1;
2845
2846 mutex_lock(&ftrace_export_lock);
2847
2848 add_ftrace_export(&ftrace_exports_list, export);
2849
2850 mutex_unlock(&ftrace_export_lock);
2851
2852 return 0;
2853}
2854EXPORT_SYMBOL_GPL(register_ftrace_export);
2855
/*
 * unregister_ftrace_export - remove an exporter from the global list.
 * Returns 0 on success, -1 if @export was not registered.  Serialized
 * by ftrace_export_lock; concurrent readers may still be using the
 * exporter until they leave their read-side section.
 */
2856int unregister_ftrace_export(struct trace_export *export)
2857{
2858 int ret;
2859
2860 mutex_lock(&ftrace_export_lock);
2861
2862 ret = rm_ftrace_export(&ftrace_exports_list, export);
2863
2864 mutex_unlock(&ftrace_export_lock);
2865
2866 return ret;
2867}
2868EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2869
2870void
2871trace_function(struct trace_array *tr,
2872 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2873 int pc)
2874{
2875 struct trace_event_call *call = &event_function;
2876 struct trace_buffer *buffer = tr->array_buffer.buffer;
2877 struct ring_buffer_event *event;

--- 6737 unchanged lines hidden ---
2879void
2880trace_function(struct trace_array *tr,
2881 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2882 int pc)
2883{
2884 struct trace_event_call *call = &event_function;
2885 struct trace_buffer *buffer = tr->array_buffer.buffer;
2886 struct ring_buffer_event *event;

--- 6737 unchanged lines hidden ---