xref: /linux/kernel/trace/simple_ring_buffer.c (revision 34e5b958bdad0f9cf16306368bbc2dc5b2a50143)
1*34e5b958SVincent Donnefort // SPDX-License-Identifier: GPL-2.0
2*34e5b958SVincent Donnefort /*
3*34e5b958SVincent Donnefort  * Copyright (C) 2025 - Google LLC
4*34e5b958SVincent Donnefort  * Author: Vincent Donnefort <vdonnefort@google.com>
5*34e5b958SVincent Donnefort  */
6*34e5b958SVincent Donnefort 
7*34e5b958SVincent Donnefort #include <linux/atomic.h>
8*34e5b958SVincent Donnefort #include <linux/simple_ring_buffer.h>
9*34e5b958SVincent Donnefort 
10*34e5b958SVincent Donnefort #include <asm/barrier.h>
11*34e5b958SVincent Donnefort #include <asm/local.h>
12*34e5b958SVincent Donnefort 
/*
 * Tag stored in the two low bits of a simple_buffer_page's link.next
 * pointer (struct list_head is pointer-aligned, so those bits are free).
 * The tag marks the relationship of the *next* page to the head.
 */
enum simple_rb_link_type {
	SIMPLE_RB_LINK_NORMAL		= 0,	/* plain page in the ring */
	SIMPLE_RB_LINK_HEAD		= 1,	/* next page is the head page */
	SIMPLE_RB_LINK_HEAD_MOVING		/* writer is moving the head */
};
18*34e5b958SVincent Donnefort 
/*
 * Mask that clears the link-type tag bits from a tagged link.next value,
 * leaving the real struct list_head pointer. The expansion is fully
 * parenthesized so the `~` cannot bind unexpectedly at the use site.
 */
#define SIMPLE_RB_LINK_MASK (~(SIMPLE_RB_LINK_HEAD | SIMPLE_RB_LINK_HEAD_MOVING))
20*34e5b958SVincent Donnefort 
21*34e5b958SVincent Donnefort static void simple_bpage_set_head_link(struct simple_buffer_page *bpage)
22*34e5b958SVincent Donnefort {
23*34e5b958SVincent Donnefort 	unsigned long link = (unsigned long)bpage->link.next;
24*34e5b958SVincent Donnefort 
25*34e5b958SVincent Donnefort 	link &= SIMPLE_RB_LINK_MASK;
26*34e5b958SVincent Donnefort 	link |= SIMPLE_RB_LINK_HEAD;
27*34e5b958SVincent Donnefort 
28*34e5b958SVincent Donnefort 	/*
29*34e5b958SVincent Donnefort 	 * Paired with simple_rb_find_head() to order access between the head
30*34e5b958SVincent Donnefort 	 * link and overrun. It ensures we always report an up-to-date value
31*34e5b958SVincent Donnefort 	 * after swapping the reader page.
32*34e5b958SVincent Donnefort 	 */
33*34e5b958SVincent Donnefort 	smp_store_release(&bpage->link.next, (struct list_head *)link);
34*34e5b958SVincent Donnefort }
35*34e5b958SVincent Donnefort 
36*34e5b958SVincent Donnefort static bool simple_bpage_unset_head_link(struct simple_buffer_page *bpage,
37*34e5b958SVincent Donnefort 					 struct simple_buffer_page *dst,
38*34e5b958SVincent Donnefort 					 enum simple_rb_link_type new_type)
39*34e5b958SVincent Donnefort {
40*34e5b958SVincent Donnefort 	unsigned long *link = (unsigned long *)(&bpage->link.next);
41*34e5b958SVincent Donnefort 	unsigned long old = (*link & SIMPLE_RB_LINK_MASK) | SIMPLE_RB_LINK_HEAD;
42*34e5b958SVincent Donnefort 	unsigned long new = (unsigned long)(&dst->link) | new_type;
43*34e5b958SVincent Donnefort 
44*34e5b958SVincent Donnefort 	return try_cmpxchg(link, &old, new);
45*34e5b958SVincent Donnefort }
46*34e5b958SVincent Donnefort 
47*34e5b958SVincent Donnefort static void simple_bpage_set_normal_link(struct simple_buffer_page *bpage)
48*34e5b958SVincent Donnefort {
49*34e5b958SVincent Donnefort 	unsigned long link = (unsigned long)bpage->link.next;
50*34e5b958SVincent Donnefort 
51*34e5b958SVincent Donnefort 	WRITE_ONCE(bpage->link.next, (struct list_head *)(link & SIMPLE_RB_LINK_MASK));
52*34e5b958SVincent Donnefort }
53*34e5b958SVincent Donnefort 
54*34e5b958SVincent Donnefort static struct simple_buffer_page *simple_bpage_from_link(struct list_head *link)
55*34e5b958SVincent Donnefort {
56*34e5b958SVincent Donnefort 	unsigned long ptr = (unsigned long)link & SIMPLE_RB_LINK_MASK;
57*34e5b958SVincent Donnefort 
58*34e5b958SVincent Donnefort 	return container_of((struct list_head *)ptr, struct simple_buffer_page, link);
59*34e5b958SVincent Donnefort }
60*34e5b958SVincent Donnefort 
61*34e5b958SVincent Donnefort static struct simple_buffer_page *simple_bpage_next_page(struct simple_buffer_page *bpage)
62*34e5b958SVincent Donnefort {
63*34e5b958SVincent Donnefort 	return simple_bpage_from_link(bpage->link.next);
64*34e5b958SVincent Donnefort }
65*34e5b958SVincent Donnefort 
66*34e5b958SVincent Donnefort static void simple_bpage_reset(struct simple_buffer_page *bpage)
67*34e5b958SVincent Donnefort {
68*34e5b958SVincent Donnefort 	bpage->write = 0;
69*34e5b958SVincent Donnefort 	bpage->entries = 0;
70*34e5b958SVincent Donnefort 
71*34e5b958SVincent Donnefort 	local_set(&bpage->page->commit, 0);
72*34e5b958SVincent Donnefort }
73*34e5b958SVincent Donnefort 
74*34e5b958SVincent Donnefort static void simple_bpage_init(struct simple_buffer_page *bpage, unsigned long page)
75*34e5b958SVincent Donnefort {
76*34e5b958SVincent Donnefort 	INIT_LIST_HEAD(&bpage->link);
77*34e5b958SVincent Donnefort 	bpage->page = (struct buffer_data_page *)page;
78*34e5b958SVincent Donnefort 
79*34e5b958SVincent Donnefort 	simple_bpage_reset(bpage);
80*34e5b958SVincent Donnefort }
81*34e5b958SVincent Donnefort 
/*
 * Increment a meta-page counter with a marked store (the meta page is
 * read concurrently by the reader side). Arguments are parenthesized in
 * the expansion so expressions like `a + b` passed as @__inc keep their
 * intended precedence.
 */
#define simple_rb_meta_inc(__meta, __inc)		\
	WRITE_ONCE((__meta), (__meta) + (__inc))
84*34e5b958SVincent Donnefort 
85*34e5b958SVincent Donnefort static bool simple_rb_loaded(struct simple_rb_per_cpu *cpu_buffer)
86*34e5b958SVincent Donnefort {
87*34e5b958SVincent Donnefort 	return !!cpu_buffer->bpages;
88*34e5b958SVincent Donnefort }
89*34e5b958SVincent Donnefort 
/*
 * Catch up with the writer and locate the current head page.
 *
 * Starting from the last known head, follow next links for at most
 * 2 * nr_pages hops, since the writer may be moving the head while we
 * scan. The HEAD tag lives on the *previous* page's next link.
 *
 * Returns 0 with cpu_buffer->head_page updated on success, or -EBUSY if
 * the head could not be caught within the retry budget.
 */
static int simple_rb_find_head(struct simple_rb_per_cpu *cpu_buffer)
{
	int retry = cpu_buffer->nr_pages * 2;
	struct simple_buffer_page *head;

	head = cpu_buffer->head_page;

	while (retry--) {
		unsigned long link;

spin:
		/* See smp_store_release in simple_bpage_set_head_link() */
		link = (unsigned long)smp_load_acquire(&head->link.prev->next);

		/* Inspect only the tag bits of the prev page's next link */
		switch (link & ~SIMPLE_RB_LINK_MASK) {
		/* Found the head */
		case SIMPLE_RB_LINK_HEAD:
			cpu_buffer->head_page = head;
			return 0;
		/* The writer caught the head, we can spin, that won't be long */
		case SIMPLE_RB_LINK_HEAD_MOVING:
			goto spin;
		}

		/* Not the head: advance to the next page and keep looking */
		head = simple_bpage_next_page(head);
	}

	return -EBUSY;
}
119*34e5b958SVincent Donnefort 
120*34e5b958SVincent Donnefort /**
121*34e5b958SVincent Donnefort  * simple_ring_buffer_swap_reader_page - Swap ring-buffer head with the reader
122*34e5b958SVincent Donnefort  * @cpu_buffer: A simple_rb_per_cpu
123*34e5b958SVincent Donnefort  *
124*34e5b958SVincent Donnefort  * This function enables consuming reading. It ensures the current head page will not be overwritten
125*34e5b958SVincent Donnefort  * and can be safely read.
126*34e5b958SVincent Donnefort  *
127*34e5b958SVincent Donnefort  * Returns 0 on success, -ENODEV if @cpu_buffer was unloaded or -EBUSY if we failed to catch the
128*34e5b958SVincent Donnefort  * head page.
129*34e5b958SVincent Donnefort  */
130*34e5b958SVincent Donnefort int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer)
131*34e5b958SVincent Donnefort {
132*34e5b958SVincent Donnefort 	struct simple_buffer_page *last, *head, *reader;
133*34e5b958SVincent Donnefort 	unsigned long overrun;
134*34e5b958SVincent Donnefort 	int retry = 8;
135*34e5b958SVincent Donnefort 	int ret;
136*34e5b958SVincent Donnefort 
137*34e5b958SVincent Donnefort 	if (!simple_rb_loaded(cpu_buffer))
138*34e5b958SVincent Donnefort 		return -ENODEV;
139*34e5b958SVincent Donnefort 
140*34e5b958SVincent Donnefort 	reader = cpu_buffer->reader_page;
141*34e5b958SVincent Donnefort 
142*34e5b958SVincent Donnefort 	do {
143*34e5b958SVincent Donnefort 		/* Run after the writer to find the head */
144*34e5b958SVincent Donnefort 		ret = simple_rb_find_head(cpu_buffer);
145*34e5b958SVincent Donnefort 		if (ret)
146*34e5b958SVincent Donnefort 			return ret;
147*34e5b958SVincent Donnefort 
148*34e5b958SVincent Donnefort 		head = cpu_buffer->head_page;
149*34e5b958SVincent Donnefort 
150*34e5b958SVincent Donnefort 		/* Connect the reader page around the header page */
151*34e5b958SVincent Donnefort 		reader->link.next = head->link.next;
152*34e5b958SVincent Donnefort 		reader->link.prev = head->link.prev;
153*34e5b958SVincent Donnefort 
154*34e5b958SVincent Donnefort 		/* The last page before the head */
155*34e5b958SVincent Donnefort 		last = simple_bpage_from_link(head->link.prev);
156*34e5b958SVincent Donnefort 
157*34e5b958SVincent Donnefort 		/* The reader page points to the new header page */
158*34e5b958SVincent Donnefort 		simple_bpage_set_head_link(reader);
159*34e5b958SVincent Donnefort 
160*34e5b958SVincent Donnefort 		overrun = cpu_buffer->meta->overrun;
161*34e5b958SVincent Donnefort 	} while (!simple_bpage_unset_head_link(last, reader, SIMPLE_RB_LINK_NORMAL) && retry--);
162*34e5b958SVincent Donnefort 
163*34e5b958SVincent Donnefort 	if (!retry)
164*34e5b958SVincent Donnefort 		return -EINVAL;
165*34e5b958SVincent Donnefort 
166*34e5b958SVincent Donnefort 	cpu_buffer->head_page = simple_bpage_from_link(reader->link.next);
167*34e5b958SVincent Donnefort 	cpu_buffer->head_page->link.prev = &reader->link;
168*34e5b958SVincent Donnefort 	cpu_buffer->reader_page = head;
169*34e5b958SVincent Donnefort 	cpu_buffer->meta->reader.lost_events = overrun - cpu_buffer->last_overrun;
170*34e5b958SVincent Donnefort 	cpu_buffer->meta->reader.id = cpu_buffer->reader_page->id;
171*34e5b958SVincent Donnefort 	cpu_buffer->last_overrun = overrun;
172*34e5b958SVincent Donnefort 
173*34e5b958SVincent Donnefort 	return 0;
174*34e5b958SVincent Donnefort }
175*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_swap_reader_page);
176*34e5b958SVincent Donnefort 
/*
 * Advance the writer to the next page in the ring, stealing the head page
 * (and dropping its unread events) if the writer has caught up with it.
 *
 * Returns the new, reset tail page.
 */
static struct simple_buffer_page *simple_rb_move_tail(struct simple_rb_per_cpu *cpu_buffer)
{
	struct simple_buffer_page *tail, *new_tail;

	tail = cpu_buffer->tail_page;
	new_tail = simple_bpage_next_page(tail);

	/* Only succeeds if new_tail currently is the head page */
	if (simple_bpage_unset_head_link(tail, new_tail, SIMPLE_RB_LINK_HEAD_MOVING)) {
		/*
		 * Oh no! we've caught the head. There is none anymore and
		 * swap_reader will spin until we set the new one. Overrun must
		 * be written first, to make sure we report the correct number
		 * of lost events.
		 */
		simple_rb_meta_inc(cpu_buffer->meta->overrun, new_tail->entries);
		simple_rb_meta_inc(cpu_buffer->meta->pages_lost, 1);

		simple_bpage_set_head_link(new_tail);
		simple_bpage_set_normal_link(tail);
	}

	simple_bpage_reset(new_tail);
	cpu_buffer->tail_page = new_tail;

	simple_rb_meta_inc(cpu_buffer->meta->pages_touched, 1);

	return new_tail;
}
205*34e5b958SVincent Donnefort 
206*34e5b958SVincent Donnefort static unsigned long rb_event_size(unsigned long length)
207*34e5b958SVincent Donnefort {
208*34e5b958SVincent Donnefort 	struct ring_buffer_event *event;
209*34e5b958SVincent Donnefort 
210*34e5b958SVincent Donnefort 	return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
211*34e5b958SVincent Donnefort }
212*34e5b958SVincent Donnefort 
213*34e5b958SVincent Donnefort static struct ring_buffer_event *
214*34e5b958SVincent Donnefort rb_event_add_ts_extend(struct ring_buffer_event *event, u64 delta)
215*34e5b958SVincent Donnefort {
216*34e5b958SVincent Donnefort 	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
217*34e5b958SVincent Donnefort 	event->time_delta = delta & TS_MASK;
218*34e5b958SVincent Donnefort 	event->array[0] = delta >> TS_SHIFT;
219*34e5b958SVincent Donnefort 
220*34e5b958SVincent Donnefort 	return (struct ring_buffer_event *)((unsigned long)event + 8);
221*34e5b958SVincent Donnefort }
222*34e5b958SVincent Donnefort 
/*
 * Reserve space in the tail page for one event of @length payload bytes at
 * @timestamp, moving to a fresh page when it does not fit and emitting a
 * TIME_EXTEND event first when the delta since write_stamp is too large
 * for the event header.
 *
 * Returns the event header the caller's payload follows.
 */
static struct ring_buffer_event *
simple_rb_reserve_next(struct simple_rb_per_cpu *cpu_buffer, unsigned long length, u64 timestamp)
{
	unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
	struct simple_buffer_page *tail = cpu_buffer->tail_page;
	struct ring_buffer_event *event;
	u32 write, prev_write;
	u64 time_delta;

	time_delta = timestamp - cpu_buffer->write_stamp;

	/* Delta too big for the header? Budget 8 bytes for a TIME_EXTEND */
	if (test_time_stamp(time_delta))
		ts_ext_size = 8;

	prev_write = tail->write;
	write = prev_write + event_size + ts_ext_size;

	/* Event does not fit in the current page: move to the next one */
	if (unlikely(write > (PAGE_SIZE - BUF_PAGE_HDR_SIZE)))
		tail = simple_rb_move_tail(cpu_buffer);

	/* First event on the page: the page carries the absolute timestamp */
	if (!tail->entries) {
		tail->page->time_stamp = timestamp;
		time_delta = 0;
		ts_ext_size = 0;
		write = event_size;
		prev_write = 0;
	}

	tail->write = write;
	tail->entries++;

	cpu_buffer->write_stamp = timestamp;

	event = (struct ring_buffer_event *)(tail->page->data + prev_write);
	if (ts_ext_size) {
		/* Delta now lives in the TIME_EXTEND event; header gets 0 */
		event = rb_event_add_ts_extend(event, time_delta);
		time_delta = 0;
	}

	event->type_len = 0;
	event->time_delta = time_delta;
	event->array[0] = event_size - RB_EVNT_HDR_SIZE;

	return event;
}
268*34e5b958SVincent Donnefort 
269*34e5b958SVincent Donnefort /**
270*34e5b958SVincent Donnefort  * simple_ring_buffer_reserve - Reserve an entry in @cpu_buffer
271*34e5b958SVincent Donnefort  * @cpu_buffer:	A simple_rb_per_cpu
272*34e5b958SVincent Donnefort  * @length:	Size of the entry in bytes
273*34e5b958SVincent Donnefort  * @timestamp:	Timestamp of the entry
274*34e5b958SVincent Donnefort  *
275*34e5b958SVincent Donnefort  * Returns the address of the entry where to write data or NULL
276*34e5b958SVincent Donnefort  */
277*34e5b958SVincent Donnefort void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
278*34e5b958SVincent Donnefort 				 u64 timestamp)
279*34e5b958SVincent Donnefort {
280*34e5b958SVincent Donnefort 	struct ring_buffer_event *rb_event;
281*34e5b958SVincent Donnefort 
282*34e5b958SVincent Donnefort 	if (cmpxchg(&cpu_buffer->status, SIMPLE_RB_READY, SIMPLE_RB_WRITING) != SIMPLE_RB_READY)
283*34e5b958SVincent Donnefort 		return NULL;
284*34e5b958SVincent Donnefort 
285*34e5b958SVincent Donnefort 	rb_event = simple_rb_reserve_next(cpu_buffer, length, timestamp);
286*34e5b958SVincent Donnefort 
287*34e5b958SVincent Donnefort 	return &rb_event->array[1];
288*34e5b958SVincent Donnefort }
289*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_reserve);
290*34e5b958SVincent Donnefort 
291*34e5b958SVincent Donnefort /**
292*34e5b958SVincent Donnefort  * simple_ring_buffer_commit - Commit the entry reserved with simple_ring_buffer_reserve()
293*34e5b958SVincent Donnefort  * @cpu_buffer:	The simple_rb_per_cpu where the entry has been reserved
294*34e5b958SVincent Donnefort  */
295*34e5b958SVincent Donnefort void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer)
296*34e5b958SVincent Donnefort {
297*34e5b958SVincent Donnefort 	local_set(&cpu_buffer->tail_page->page->commit,
298*34e5b958SVincent Donnefort 		  cpu_buffer->tail_page->write);
299*34e5b958SVincent Donnefort 	simple_rb_meta_inc(cpu_buffer->meta->entries, 1);
300*34e5b958SVincent Donnefort 
301*34e5b958SVincent Donnefort 	/*
302*34e5b958SVincent Donnefort 	 * Paired with simple_rb_enable_tracing() to ensure data is
303*34e5b958SVincent Donnefort 	 * written to the ring-buffer before teardown.
304*34e5b958SVincent Donnefort 	 */
305*34e5b958SVincent Donnefort 	smp_store_release(&cpu_buffer->status, SIMPLE_RB_READY);
306*34e5b958SVincent Donnefort }
307*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_commit);
308*34e5b958SVincent Donnefort 
/*
 * Flip tracing on or off.
 *
 * Enabling is a single UNAVAILABLE -> READY transition. Disabling spins
 * while a writer holds the buffer (WRITING) and only completes the
 * READY -> UNAVAILABLE transition once the buffer is released.
 *
 * Returns the status observed before the transition.
 */
static u32 simple_rb_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
{
	u32 prev_status;

	if (enable)
		return cmpxchg(&cpu_buffer->status, SIMPLE_RB_UNAVAILABLE, SIMPLE_RB_READY);

	/* Wait for the buffer to be released */
	do {
		/* Acquire pairs with the release in simple_ring_buffer_commit() */
		prev_status = cmpxchg_acquire(&cpu_buffer->status,
					      SIMPLE_RB_READY,
					      SIMPLE_RB_UNAVAILABLE);
	} while (prev_status == SIMPLE_RB_WRITING);

	return prev_status;
}
325*34e5b958SVincent Donnefort 
/**
 * simple_ring_buffer_reset - Reset @cpu_buffer
 * @cpu_buffer: A simple_rb_per_cpu
 *
 * This will not clear the content of the data, only reset counters and pointers
 *
 * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded.
 */
int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
{
	struct simple_buffer_page *bpage;
	u32 prev_status;
	int ret;

	if (!simple_rb_loaded(cpu_buffer))
		return -ENODEV;

	/* Block writers while the counters and pointers are rewound */
	prev_status = simple_rb_enable_tracing(cpu_buffer, false);

	ret = simple_rb_find_head(cpu_buffer);
	if (ret)
		/* NOTE(review): tracing stays disabled on this error path — confirm intended */
		return ret;

	/* Restart writing from the head page and reset every page in the ring */
	bpage = cpu_buffer->tail_page = cpu_buffer->head_page;
	do {
		simple_bpage_reset(bpage);
		bpage = simple_bpage_next_page(bpage);
	} while (bpage != cpu_buffer->head_page);

	simple_bpage_reset(cpu_buffer->reader_page);

	cpu_buffer->last_overrun = 0;
	cpu_buffer->write_stamp = 0;

	cpu_buffer->meta->reader.read = 0;
	cpu_buffer->meta->reader.lost_events = 0;
	cpu_buffer->meta->entries = 0;
	cpu_buffer->meta->overrun = 0;
	cpu_buffer->meta->read = 0;
	cpu_buffer->meta->pages_lost = 0;
	cpu_buffer->meta->pages_touched = 0;

	/* Re-enable tracing only if it was enabled before the reset */
	if (prev_status == SIMPLE_RB_READY)
		simple_rb_enable_tracing(cpu_buffer, true);

	return 0;
}
EXPORT_SYMBOL_GPL(simple_ring_buffer_reset);
374*34e5b958SVincent Donnefort 
375*34e5b958SVincent Donnefort /**
376*34e5b958SVincent Donnefort  * simple_ring_buffer_init - Init @cpu_buffer based on @desc
377*34e5b958SVincent Donnefort  * @cpu_buffer:	A simple_rb_per_cpu buffer to init, allocated by the caller.
378*34e5b958SVincent Donnefort  * @bpages:	Array of simple_buffer_pages, with as many elements as @desc->nr_page_va
379*34e5b958SVincent Donnefort  * @desc:	A ring_buffer_desc
380*34e5b958SVincent Donnefort  *
381*34e5b958SVincent Donnefort  * Returns 0 on success or -EINVAL if the content of @desc is invalid
382*34e5b958SVincent Donnefort  */
383*34e5b958SVincent Donnefort int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
384*34e5b958SVincent Donnefort 			    const struct ring_buffer_desc *desc)
385*34e5b958SVincent Donnefort {
386*34e5b958SVincent Donnefort 	struct simple_buffer_page *bpage = bpages;
387*34e5b958SVincent Donnefort 	int i;
388*34e5b958SVincent Donnefort 
389*34e5b958SVincent Donnefort 	/* At least 1 reader page and two pages in the ring-buffer */
390*34e5b958SVincent Donnefort 	if (desc->nr_page_va < 3)
391*34e5b958SVincent Donnefort 		return -EINVAL;
392*34e5b958SVincent Donnefort 
393*34e5b958SVincent Donnefort 	memset(cpu_buffer, 0, sizeof(*cpu_buffer));
394*34e5b958SVincent Donnefort 
395*34e5b958SVincent Donnefort 	cpu_buffer->bpages = bpages;
396*34e5b958SVincent Donnefort 
397*34e5b958SVincent Donnefort 	cpu_buffer->meta = (void *)desc->meta_va;
398*34e5b958SVincent Donnefort 	memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
399*34e5b958SVincent Donnefort 	cpu_buffer->meta->meta_page_size = PAGE_SIZE;
400*34e5b958SVincent Donnefort 	cpu_buffer->meta->nr_subbufs = cpu_buffer->nr_pages;
401*34e5b958SVincent Donnefort 
402*34e5b958SVincent Donnefort 	/* The reader page is not part of the ring initially */
403*34e5b958SVincent Donnefort 	simple_bpage_init(bpage, desc->page_va[0]);
404*34e5b958SVincent Donnefort 	bpage->id = 0;
405*34e5b958SVincent Donnefort 
406*34e5b958SVincent Donnefort 	cpu_buffer->nr_pages = 1;
407*34e5b958SVincent Donnefort 
408*34e5b958SVincent Donnefort 	cpu_buffer->reader_page = bpage;
409*34e5b958SVincent Donnefort 	cpu_buffer->tail_page = bpage + 1;
410*34e5b958SVincent Donnefort 	cpu_buffer->head_page = bpage + 1;
411*34e5b958SVincent Donnefort 
412*34e5b958SVincent Donnefort 	for (i = 1; i < desc->nr_page_va; i++) {
413*34e5b958SVincent Donnefort 		simple_bpage_init(++bpage, desc->page_va[i]);
414*34e5b958SVincent Donnefort 
415*34e5b958SVincent Donnefort 		bpage->link.next = &(bpage + 1)->link;
416*34e5b958SVincent Donnefort 		bpage->link.prev = &(bpage - 1)->link;
417*34e5b958SVincent Donnefort 		bpage->id = i;
418*34e5b958SVincent Donnefort 
419*34e5b958SVincent Donnefort 		cpu_buffer->nr_pages = i + 1;
420*34e5b958SVincent Donnefort 	}
421*34e5b958SVincent Donnefort 
422*34e5b958SVincent Donnefort 	/* Close the ring */
423*34e5b958SVincent Donnefort 	bpage->link.next = &cpu_buffer->tail_page->link;
424*34e5b958SVincent Donnefort 	cpu_buffer->tail_page->link.prev = &bpage->link;
425*34e5b958SVincent Donnefort 
426*34e5b958SVincent Donnefort 	/* The last init'ed page points to the head page */
427*34e5b958SVincent Donnefort 	simple_bpage_set_head_link(bpage);
428*34e5b958SVincent Donnefort 
429*34e5b958SVincent Donnefort 	return 0;
430*34e5b958SVincent Donnefort }
431*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_init);
432*34e5b958SVincent Donnefort 
433*34e5b958SVincent Donnefort /**
434*34e5b958SVincent Donnefort  * simple_ring_buffer_unload - Prepare @cpu_buffer for deletion
435*34e5b958SVincent Donnefort  * @cpu_buffer:	A simple_rb_per_cpu that will be deleted.
436*34e5b958SVincent Donnefort  */
437*34e5b958SVincent Donnefort void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
438*34e5b958SVincent Donnefort {
439*34e5b958SVincent Donnefort 	if (!simple_rb_loaded(cpu_buffer))
440*34e5b958SVincent Donnefort 		return;
441*34e5b958SVincent Donnefort 
442*34e5b958SVincent Donnefort 	simple_rb_enable_tracing(cpu_buffer, false);
443*34e5b958SVincent Donnefort 
444*34e5b958SVincent Donnefort 	cpu_buffer->bpages = NULL;
445*34e5b958SVincent Donnefort }
446*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_unload);
447*34e5b958SVincent Donnefort 
448*34e5b958SVincent Donnefort /**
449*34e5b958SVincent Donnefort  * simple_ring_buffer_enable_tracing - Enable or disable writing to @cpu_buffer
450*34e5b958SVincent Donnefort  * @cpu_buffer: A simple_rb_per_cpu
451*34e5b958SVincent Donnefort  * @enable:	True to enable tracing, False to disable it
452*34e5b958SVincent Donnefort  *
453*34e5b958SVincent Donnefort  * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded
454*34e5b958SVincent Donnefort  */
455*34e5b958SVincent Donnefort int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
456*34e5b958SVincent Donnefort {
457*34e5b958SVincent Donnefort 	if (!simple_rb_loaded(cpu_buffer))
458*34e5b958SVincent Donnefort 		return -ENODEV;
459*34e5b958SVincent Donnefort 
460*34e5b958SVincent Donnefort 	simple_rb_enable_tracing(cpu_buffer, enable);
461*34e5b958SVincent Donnefort 
462*34e5b958SVincent Donnefort 	return 0;
463*34e5b958SVincent Donnefort }
464*34e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_enable_tracing);
465