// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/debug.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1. Specify only the nr_cpus kernel boot parameter.  This will
 *    set both nreaders and nwriters to the value specified by
 *    nr_cpus for a mixed reader/writer test.
 *
 * 2. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nreaders to zero.  This will set nwriters to the
 *    value specified by nr_cpus for an update-only test.
 *
 * 3. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nwriters to zero.  This will set nreaders to the
 *    value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
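
/*
 * For example, booting an eight-CPU system with only "nr_cpus=8" runs
 * eight readers and eight writers (assuming all eight CPUs come online),
 * while additionally passing "rcuscale.nreaders=0" runs an update-only
 * test with eight writers.
 */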

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

// Structure definitions for custom fixed-per-task allocator.
struct writer_mblock {
	struct rcu_head wmb_rh;
	struct llist_node wmb_node;
	struct writer_freelist *wmb_wfl;
};

struct writer_freelist {
	struct llist_head ws_lhg;
	atomic_t ws_inflight;
	struct llist_head ____cacheline_internodealigned_in_smp ws_lhp;
	struct writer_mblock *ws_mblocks;
};

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static bool *writer_done;
static struct writer_freelist *writer_freelists;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	struct task_struct *(*rso_gp_kthread)(void);
	void (*stats)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;
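
/*
 * cur_ops is selected at rcu_scale_init() time by matching the scale_type
 * module parameter against each flavor's ->name string.
 */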

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = rcu_scale_read_lock,
	.readunlock = rcu_scale_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.exp_completed = rcu_exp_batches_completed,
	.async = call_rcu_hurry,
	.gp_barrier = rcu_barrier,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, scale_type, SCALE_FLAG);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.stats = srcu_scale_stats,
	.name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype = SRCU_FLAVOR,
	.init = srcu_sync_scale_init,
	.cleanup = srcu_sync_scale_cleanup,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.stats = srcu_scale_stats,
	.name = "srcud"
};
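
/*
 * Note that the "srcu" flavor above uses the statically allocated
 * srcu_ctl_scale, while "srcud" points srcu_ctlp at a dynamically
 * initialized srcu_struct that is cleaned up when the test ends.
 */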

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static void rcu_tasks_scale_stats(void)
{
	rcu_tasks_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_scale_read_lock,
	.readunlock = tasks_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks,
	.gp_barrier = rcu_barrier_tasks,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.rso_gp_kthread = get_rcu_tasks_gp_kthread,
	.stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_scale_stats,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
	return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static void rcu_tasks_rude_scale_stats(void)
{
	rcu_tasks_rude_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_rude_ops = {
	.ptype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_rude_scale_read_lock,
	.readunlock = tasks_rude_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.rso_gp_kthread = get_rcu_tasks_rude_gp_kthread,
	.stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_rude_scale_stats,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_trace_scale_read_lock,
	.readunlock = tasks_trace_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks_trace,
	.gp_barrier = rcu_barrier_tasks_trace,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
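
/*
 * Compute the number of grace periods between the two gp_seq snapshots,
 * falling back to plain subtraction for flavors without a ->gp_diff helper.
 */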
static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Allocate a writer_mblock structure for the specified rcu_scale_writer
 * task.
 */
static struct writer_mblock *rcu_scale_alloc(long me)
{
	struct llist_node *llnp;
	struct writer_freelist *wflp;
	struct writer_mblock *wmbp;

	if (WARN_ON_ONCE(!writer_freelists))
		return NULL;
	wflp = &writer_freelists[me];
	if (llist_empty(&wflp->ws_lhp)) {
		// ->ws_lhp is private to its rcu_scale_writer task.
		wmbp = container_of(llist_del_all(&wflp->ws_lhg), struct writer_mblock, wmb_node);
		wflp->ws_lhp.first = &wmbp->wmb_node;
	}
	llnp = llist_del_first(&wflp->ws_lhp);
	if (!llnp)
		return NULL;
	return container_of(llnp, struct writer_mblock, wmb_node);
}

/*
 * Free a writer_mblock structure to its rcu_scale_writer task.
 */
static void rcu_scale_free(struct writer_mblock *wmbp)
{
	struct writer_freelist *wflp;

	if (!wmbp)
		return;
	wflp = wmbp->wmb_wfl;
	llist_add(&wmbp->wmb_node, &wflp->ws_lhg);
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	struct writer_mblock *wmbp = container_of(rhp, struct writer_mblock, wmb_rh);
	struct writer_freelist *wflp = wmbp->wmb_wfl;

	atomic_dec(&wflp->ws_inflight);
	rcu_scale_free(wmbp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
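/*
 * Each pass waits for one grace period: via ->async() with a per-writer
 * callback block when gp_async is set (bounded by gp_async_max callbacks
 * in flight, with ->gp_barrier() used to drain when the limit is hit),
 * via ->exp_sync() when gp_exp is set, and via ->sync() otherwise.
 */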
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	unsigned long jdone;
	long me = (long)arg;
	bool selfreport = false;
	bool started = false, done = false, alldone = false;
	u64 t;
	DEFINE_TORTURE_RANDOM(tr);
	u64 *wdp;
	u64 *wdpp = writer_durations[me];
	struct writer_freelist *wflp = &writer_freelists[me];
	struct writer_mblock *wmbp = NULL;

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	current->flags |= PF_NO_SETAFFINITY;
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	jdone = jiffies + minruntime * HZ;
	do {
		bool gp_succeeded = false;

		if (writer_holdoff)
			udelay(writer_holdoff);
		if (writer_holdoff_jiffies)
			schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
			if (!wmbp)
				wmbp = rcu_scale_alloc(me);
			if (wmbp && atomic_read(&wflp->ws_inflight) < gp_async_max) {
				atomic_inc(&wflp->ws_inflight);
				cur_ops->async(&wmbp->wmb_rh, rcu_scale_async_cb);
				wmbp = NULL;
				gp_succeeded = true;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
			} else {
				rcu_scale_free(wmbp); /* Because we are stopping. */
				wmbp = NULL;
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
			gp_succeeded = true;
		} else {
			cur_ops->sync();
			gp_succeeded = true;
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
			done = true;
			WRITE_ONCE(writer_done[me], true);
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (done && !alldone && time_after(jiffies, jdone + HZ * 60)) {
			static atomic_t dumped;
			int i;

			if (!atomic_xchg(&dumped, 1)) {
				for (i = 0; i < nrealwriters; i++) {
					if (writer_done[i])
						continue;
					pr_info("%s: Task %ld flags writer %d:\n", __func__, me, i);
					sched_show_task(writer_tasks[i]);
				}
				if (cur_ops->stats)
					cur_ops->stats();
			}
		}
		if (!selfreport && time_after(jiffies, jdone + HZ * (70 + me))) {
			pr_info("%s: Writer %ld self-report: started %d done %d/%d->%d i %d jdone %lu.\n",
				__func__, me, started, done, writer_done[me],
				atomic_read(&n_rcu_scale_writer_finished), i, jiffies - jdone);
			selfreport = true;
		}
		if (gp_succeeded && started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async && cur_ops->async) {
		rcu_scale_free(wmbp);
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
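/*
 * For example, with eight CPUs online: n = 4 yields 4, n = -1 yields 8,
 * and n = -3 yields 6; n = -20 would be clamped to 1.
 */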
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
 * of iterations and measure total time and number of GP for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
	struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

	kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kcalloc(kfree_mult, sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			if (kfree_by_call_rcu) {
				call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
				continue;
			}

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
			    (kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu_mightsleep(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 PAGES_TO_MB(mem_begin - mem_during));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
		kfree_reader_tasks = NULL;
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}
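
/*
 * The kfree_by_call_rcu path relies on lazy call_rcu() callbacks, so
 * kfree_scale_init() first posts the test callback below and checks that
 * it runs no sooner than the temporarily configured lazy-flush delay of
 * 2*HZ, but not much later than 3*HZ, after it was queued.
 */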
// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
	jiffies_at_lazy_cb = jiffies;
	WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}

static int __init
kfree_scale_init(void)
{
	int firsterr = 0;
	long i;
	unsigned long jif_start;
	unsigned long orig_jif;

	pr_alert("%s" SCALE_FLAG
		 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
		 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

	// Also, do a quick self-test to ensure laziness is as much as
	// expected.
	if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
		pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
		kfree_by_call_rcu = 0;
	}

	if (kfree_by_call_rcu) {
		/* do a test to check the timeout. */
		orig_jif = rcu_get_jiffies_lazy_flush();

		rcu_set_jiffies_lazy_flush(2 * HZ);
		rcu_barrier();

		jif_start = jiffies;
		jiffies_at_lazy_cb = 0;
		call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

		rcu_set_jiffies_lazy_flush(orig_jif);

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
			firsterr = -1;
			goto unwind;
		}

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
			firsterr = -1;
			goto unwind;
		}
	}

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
		 kfree_mult * sizeof(struct kfree_obj),
		 kfree_by_call_rcu);

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

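/*
 * Stop all kthreads, report per-writer grace-period durations, and do any
 * torture-type-specific cleanup.
 */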
static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	// If built-in, just report all of the GP kthread's CPU time.
	if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
		kthread_tp = cur_ops->rso_gp_kthread();
	if (kthread_tp) {
		u32 ns;
		u64 us;

		kthread_stime = kthread_tp->stime - kthread_stime;
		us = div_u64_rem(kthread_stime, 1000, &ns);
		pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
		show_rcu_gp_kthreads();
	}
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 scale_type, SCALE_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
			if (writer_freelists) {
				int ctr = 0;
				struct llist_node *llnp;
				struct writer_freelist *wflp = &writer_freelists[i];

				if (wflp->ws_mblocks) {
					llist_for_each(llnp, wflp->ws_lhg.first)
						ctr++;
					llist_for_each(llnp, wflp->ws_lhp.first)
						ctr++;
					WARN_ONCE(ctr != gp_async_max,
						  "%s: ctr = %d gp_async_max = %d\n",
						  __func__, ctr, gp_async_max);
					kfree(wflp->ws_mblocks);
				}
			}
		}
		kfree(writer_tasks);
		writer_tasks = NULL;
		kfree(writer_durations);
		writer_durations = NULL;
		kfree(writer_n_durations);
		writer_n_durations = NULL;
		kfree(writer_done);
		writer_done = NULL;
		kfree(writer_freelists);
		writer_freelists = NULL;
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	int firsterr = 0;
	long i;
	long j;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (cur_ops->rso_gp_kthread) {
		kthread_tp = cur_ops->rso_gp_kthread();
		if (kthread_tp)
			kthread_stime = kthread_tp->stime;
	}
	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
	writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL);
	writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL);
	if (gp_async) {
		if (gp_async_max <= 0) {
			pr_warn("%s: gp_async_max = %d must be greater than zero.\n",
				__func__, gp_async_max);
			WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
			firsterr = -EINVAL;
			goto unwind;
		}
		writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL);
	}
	if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
	    (gp_async && !writer_freelists)) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		if (writer_freelists) {
			struct writer_freelist *wflp = &writer_freelists[i];

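			/* Start all gp_async_max blocks on this writer's private freelist. */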
			init_llist_head(&wflp->ws_lhg);
			init_llist_head(&wflp->ws_lhp);
			wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]),
						   GFP_KERNEL);
			if (!wflp->ws_mblocks) {
				firsterr = -ENOMEM;
				goto unwind;
			}
			for (j = 0; j < gp_async_max; j++) {
				struct writer_mblock *wmbp = &wflp->ws_mblocks[j];

				wmbp->wmb_wfl = wflp;
				llist_add(&wmbp->wmb_node, &wflp->ws_lhp);
			}
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);