// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

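/*
 * Mirrors the layout of a ring buffer sub-buffer: a timestamp, a commit
 * counter, then the data area.  The 4080-byte data size assumes a 4K
 * page minus the 16-byte header (u64 ts plus local_t commit) on a
 * 64-bit system.
 */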
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo;
static int consumer_fifo;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");
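
/*
 * Example invocation (a sketch; assumes the module is built as
 * ring_buffer_benchmark.ko and tracefs is mounted at /sys/kernel/tracing):
 *
 *   modprobe ring_buffer_benchmark producer_fifo=1 wakeup_interval=200
 *   sleep 30
 *   cat /sys/kernel/tracing/trace
 *
 * Results are reported via trace_printk(), so they show up in the trace
 * file rather than in dmesg.
 */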

static int read_events;

static int test_error;

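/*
 * Record the first failure only: warn once and latch test_error, which
 * both threads poll through break_test() to wind the test down.
 */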
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static bool break_test(void)
{
	return test_error || kthread_should_stop();
}

static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

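	/*
	 * Consume the next event on this CPU; passing NULL for the
	 * lost-events pointer means overruns are not counted here.
	 */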
	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

static enum event_status read_page(int cpu)
{
	struct buffer_data_read_page *bpage;
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	int page_size;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	page_size = ring_buffer_subbuf_size_get(buffer);
	ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
	if (ret >= 0) {
		rpage = ring_buffer_read_page_data(bpage);
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error; i += inc) {
			if (i >= (page_size - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
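			/*
			 * Decode the raw event header: padding and
			 * time-extend events have fixed meanings, a
			 * type_len of 0 stores the length in array[0],
			 * and any other value encodes a payload of
			 * (type_len + 1) * 4 bytes.
			 */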
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/*
		 * Wait until the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPTION
		/*
		 * If we are a non-preempt kernel, the 10-second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo)
			trace_printk("Running Consumer at SCHED_FIFO %s\n",
				     consumer_fifo == 1 ? "low" : "high");
		else
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
	}
	if (producer_fifo)
		trace_printk("Running Producer at SCHED_FIFO %s\n",
			     producer_fifo == 1 ? "low" : "high");
	else
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);

	/* Let the user know that the test is running at low priority */
	if (!producer_fifo && !consumer_fifo &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time:     %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read:     (reader disabled)\n");
	else
		trace_printk("Read:     %ld  (by %s)\n", read,
			read_events ? "events" : "pages");
	trace_printk("Entries:  %lld\n", entries);
	trace_printk("Total:    %lld\n", entries + overruns + read);
	trace_printk("Missed:   %ld\n", missed);
	trace_printk("Hit:      %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

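	/*
	 * Worked example with made-up numbers: 10,000,000 hits over a
	 * 10,000 ms run gives hit = 1000 entries per millisec, so the
	 * average below comes out to NSEC_PER_MSEC / 1000 = 1000 ns
	 * per entry.
	 */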
	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

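/*
 * Park until kthread_stop(): once a test run ends, the threads idle
 * here instead of exiting so module unload still has live tasks to stop.
 */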
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

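/*
 * One benchmark cycle: reset the buffer, (re)start the consumer, hammer
 * the buffer for RUN_TIME seconds, then sleep for SLEEP_TIME seconds
 * before the next cycle.
 */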
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 2)
			sched_set_fifo(consumer);
		else if (consumer_fifo == 1)
			sched_set_fifo_low(consumer);
		else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 2)
		sched_set_fifo(producer);
	else if (producer_fifo == 1)
		sched_set_fifo_low(producer);
	else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");