/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

#include <uapi/linux/trace_mmap.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);
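
/*
 * Example use (a minimal sketch; "struct my_payload" is a hypothetical
 * record layout, and "event" is an event returned by one of the read
 * interfaces declared below, e.g. ring_buffer_consume()):
 *
 *  struct my_payload *p = ring_buffer_event_data(event);
 *  unsigned len = ring_buffer_event_length(event);
 */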

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
					       int order, unsigned long start,
					       unsigned long range_size,
					       struct lock_class_key *key);

bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
				 long *data);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
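
/*
 * Example use (a minimal sketch; error handling trimmed):
 *
 *  struct trace_buffer *buffer;
 *
 *  buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *  if (!buffer)
 *    return -ENOMEM;
 *  ...
 *  ring_buffer_free(buffer);
 */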

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc_range(size, flags, order, start, range_size)	\
({									\
	static struct lock_class_key __key;				\
	__ring_buffer_alloc_range((size), (flags), (order), (start),	\
				  (range_size), &__key);		\
})

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
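
/*
 * Example use (a minimal sketch; "struct my_payload" is a hypothetical
 * record layout, and context/recursion handling is omitted):
 *
 *  struct ring_buffer_event *event;
 *  struct my_payload *p;
 *
 *  event = ring_buffer_lock_reserve(buffer, sizeof(*p));
 *  if (event) {
 *    p = ring_buffer_event_data(event);
 *    p->value = 42;
 *    ring_buffer_unlock_commit(buffer);
 *  }
 *
 * ring_buffer_write() performs the same reserve/copy/commit sequence in
 * one call, copying "length" bytes from "data" into the event.
 */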

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);
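
/*
 * Example use (a minimal sketch): allow a nested write to the same buffer
 * from within an active write, so the recursion protection does not reject
 * the inner event. Call nest_start before the nested reserve and nest_end
 * after the nested commit.
 *
 *  ring_buffer_nest_start(buffer);
 *  event = ring_buffer_lock_reserve(buffer, len);
 *  if (event) {
 *    ...
 *    ring_buffer_unlock_commit(buffer);
 *  }
 *  ring_buffer_nest_end(buffer);
 */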

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
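
/*
 * Example use (a minimal sketch; error handling trimmed): iterate over one
 * CPU buffer without consuming its events.
 *
 *  struct ring_buffer_iter *iter;
 *  struct ring_buffer_event *event;
 *  u64 ts;
 *
 *  iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *  ring_buffer_read_prepare_sync();
 *  ring_buffer_read_start(iter);
 *  while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *    ...
 *    ring_buffer_iter_advance(iter);
 *  }
 *  ring_buffer_read_finish(iter);
 */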

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

struct buffer_data_read_page;
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *page);
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full);
void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
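
/*
 * Example use (a minimal sketch; error handling trimmed): pull a block of
 * data out of the ring buffer for bulk reading, similar to what the
 * trace_pipe_raw read path does.
 *
 *  struct buffer_data_read_page *rpage;
 *  void *data;
 *
 *  rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *  if (ring_buffer_read_page(buffer, rpage, PAGE_SIZE, cpu, 0) >= 0)
 *    data = ring_buffer_read_page_data(rpage);
 *  ring_buffer_free_read_page(buffer, cpu, rpage);
 */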

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);

int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif
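
/*
 * Example use (a minimal sketch; the state name string is illustrative):
 * register trace_rb_cpu_prepare() with the multi-instance CPU hotplug
 * machinery so per-CPU buffers are set up when a CPU comes online.
 *
 *  ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				  "trace/RB:prepare",
 *				  trace_rb_cpu_prepare, NULL);
 */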

int ring_buffer_map(struct trace_buffer *buffer, int cpu,
		    struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */