/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct ring_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non-zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *	if (some_condition)
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct ring_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
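
/*
 * Example use of the write side (an illustrative sketch only, not taken
 * from an in-tree user; "struct my_entry" and its "value" field are made
 * up for the example and error handling is trimmed):
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	buffer = ring_buffer_alloc(PAGE_SIZE << 4, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */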

int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table);


#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct ring_buffer *buffer);

int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event);
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length, void *data);

void ring_buffer_nest_start(struct ring_buffer *buffer);
void ring_buffer_nest_end(struct ring_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
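
/*
 * Example use of the consuming read side (an illustrative sketch only;
 * "process_event" is a made-up helper, and "buffer" and "cpu" are assumed
 * to be set up already):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost_events;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		process_event(data, len, ts, lost_events);
 *	}
 */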

unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset(struct ring_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
		     struct ring_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct ring_buffer *buffer);
bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);

size_t ring_buffer_page_len(void *page);


void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

#endif /* _LINUX_RING_BUFFER_H */