// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)
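
/*
 * For example, with the default scale_type of "rcu",
 * SCALEOUT_STRING("Test complete") prints "rcu-scale: Test complete"
 * to the console log.
 */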

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
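
/*
 * For example, use case 2 above might be requested on a 16-CPU system
 * with kernel boot parameters along these lines (illustrative only):
 *
 *	nr_cpus=16 rcuscale.nreaders=0 rcuscale.minruntime=30
 */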

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);	/* Async GPs in flight per CPU. */

#define MAX_MEAS 10000	/* Maximum measurements recorded per writer. */
#define MIN_MEAS 100	/* Minimum measurements before a writer may finish. */

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;				/* Flavor: RCU_FLAVOR, SRCU_FLAVOR, ... */
	void (*init)(void);			/* Per-flavor initialization. */
	void (*cleanup)(void);			/* Per-flavor cleanup, if any. */
	int (*readlock)(void);			/* Enter read-side critical section. */
	void (*readunlock)(int idx);		/* Exit it, given readlock()'s return value. */
	unsigned long (*get_gp_seq)(void);	/* Current grace-period sequence number. */
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);	/* Expedited GP batches completed. */
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);		/* Wait for outstanding async callbacks. */
	void (*sync)(void);			/* Synchronously wait for a GP. */
	void (*exp_sync)(void);			/* Ditto, but expedited. */
	struct task_struct *(*rso_gp_kthread)(void);
	const char *name;			/* Name to match against scale_type. */
};

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu_hurry,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};
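
/*
 * Definitions for dynamically initialized srcu ("srcud") scalability testing.
 */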

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.rso_gp_kthread	= get_rcu_tasks_gp_kthread,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
	return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_rude_ops = {
	.ptype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_rude_scale_read_lock,
	.readunlock	= tasks_rude_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_rude,
	.gp_barrier	= rcu_barrier_tasks_rude,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.rso_gp_kthread	= get_rcu_tasks_rude_gp_kthread,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_trace_scale_read_lock,
	.readunlock	= tasks_trace_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_trace,
	.gp_barrier	= rcu_barrier_tasks_trace,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.rso_gp_kthread	= get_rcu_tasks_trace_gp_kthread,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	unsigned long jdone;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	DEFINE_TORTURE_RANDOM(tr);
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	current->flags |= PF_NO_SETAFFINITY;
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for them.  The system_state
	 * test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	jdone = jiffies + minruntime * HZ;
	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		if (writer_holdoff_jiffies)
			schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
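				/* Too many async GPs in flight: drain them, then retry. */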
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, return the number of online
 * CPUs.  If less than -1, return num_online_cpus() + 1 + n, that is, one
 * CPU fewer for each step below -1, but always at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
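
/*
 * For example, with eight CPUs online: compute_real(4) == 4,
 * compute_real(-1) == 8, and compute_real(-3) == 6.
 */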

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for
 * a given number of iterations, measuring the total time and the number
 * of grace periods required for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

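/* Allocation unit for kfree_rcu() testing; kfree_mult scales the size allocated. */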
struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
	struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

	kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			if (kfree_by_call_rcu) {
				call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
				continue;
			}

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
					(kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu_mightsleep(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

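		/* si_mem_available() counts pages; shifting by (20 - PAGE_SHIFT) converts pages to MB. */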
		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
		       (unsigned long long)(end_time - start_time), kfree_loops,
		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
	jiffies_at_lazy_cb = jiffies;
	WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}

static int __init
kfree_scale_init(void)
{
	int firsterr = 0;
	long i;
	unsigned long jif_start;
	unsigned long orig_jif;

	pr_alert("%s" SCALE_FLAG
		 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
		 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

	// Also, do a quick self-test to ensure laziness is as much as
	// expected.
	if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
		pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
		kfree_by_call_rcu = 0;
	}

	if (kfree_by_call_rcu) {
		/* Self-test: verify that lazy call_rcu() honors the flush timeout. */
		orig_jif = rcu_get_jiffies_lazy_flush();

		rcu_set_jiffies_lazy_flush(2 * HZ);
		rcu_barrier();

		jif_start = jiffies;
		jiffies_at_lazy_cb = 0;
		call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

		rcu_set_jiffies_lazy_flush(orig_jif);

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
			WARN_ON_ONCE(1);
			return -1;
		}

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
			WARN_ON_ONCE(1);
			return -1;
		}
	}

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
			kfree_mult * sizeof(struct kfree_obj),
			kfree_by_call_rcu);

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
			       GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	// If built-in, just report all of the GP kthread's CPU time.
	if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
		kthread_tp = cur_ops->rso_gp_kthread();
	if (kthread_tp) {
		u32 ns;
		u64 us;

		kthread_stime = kthread_tp->stime - kthread_stime;
		us = div_u64_rem(kthread_stime, 1000, &ns);
		pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
		show_rcu_gp_kthreads();
	}
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					scale_type, SCALE_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (cur_ops->rso_gp_kthread) {
		kthread_tp = cur_ops->rso_gp_kthread();
		if (kthread_tp)
			kthread_stime = kthread_tp->stime;
	}
	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

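/*
 * Illustrative module-based usage (not from this file):
 *
 *	modprobe rcuscale scale_type=srcu nwriters=4 minruntime=30
 *
 * Summary statistics appear on the console when the writers finish, and
 * per-writer durations when the module is removed.
 */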
module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);