// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
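
/*
 * Illustrative kernel command lines for the three cases above
 * (examples only, assuming an 8-CPU system with rcuscale built in):
 *
 *	nr_cpus=8				(case 1: mixed test)
 *	nr_cpus=8 rcuscale.nreaders=0		(case 2: update-only)
 *	nr_cpus=8 rcuscale.nwriters=0		(case 3: read-only)
 */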

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

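/*
 * When rcuscale is built as a module, these settings are supplied as
 * module parameters, for example (illustrative invocation only):
 *
 *	modprobe rcuscale scale_type=srcu nwriters=4 nreaders=0 \
 *		 gp_async=1 gp_async_max=256
 */
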
static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

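/*
 * Each writer records at least MIN_MEAS grace-period durations, and
 * continues (up to at most MAX_MEAS measurements) while waiting for
 * the slowest writer to finish; see rcu_scale_writer() below.
 */
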
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};
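
/*
 * Note: "srcu" tests the statically-allocated srcu_ctl_scale above,
 * while "srcud" exercises the dynamically-initialized path through
 * init_srcu_struct() and cleanup_srcu_struct() on the srcud instance.
 */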

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_trace_scale_read_lock,
	.readunlock	= tasks_trace_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_trace,
	.gp_barrier	= rcu_barrier_tasks_trace,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() has been called, so that normal
	 * grace-period tests do not run while RCU is still unconditionally
	 * expedited during boot.  The system_state test is approximate, but
	 * works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

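	/*
	 * Main measurement loop.  With gp_async, each writer posts
	 * callbacks until its CPU has gp_async_max of them outstanding,
	 * then drains them via cur_ops->gp_barrier(); otherwise each
	 * pass times one synchronous (or expedited) grace period.
	 */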
	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					scale_type, SCALE_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
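
/*
 * For example, with eight CPUs online: compute_real(-1) == 8,
 * compute_real(-2) == 7, and compute_real(3) == 3.
 */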

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for
 * the specified number of iterations, and measure the total time and the
 * number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};
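
/*
 * Note: each allocation in kfree_scale_thread() below is kfree_mult
 * times the size of this structure, so kfree_mult can be raised to
 * approximate workloads that free larger objects.
 */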

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
					(kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
		       (unsigned long long)(end_time - start_time), kfree_loops,
		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}
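
/*
 * Summary of the kfree_rcu() variant selection above:
 *
 *	kfree_rcu_test_single	kfree_rcu_test_double	variant used
 *	false			false			random mix
 *	true			false			single-argument only
 *	false			true			double-argument only
 *	true			true			random mix
 */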

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * Shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_scale_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
			       GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);