// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0)

#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU wait operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU wait operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU wait operations.");

char *torture_type = "";

#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");

struct scf_statistics {
	struct task_struct *task;
	int cpu;
	long long n_resched;
	long long n_single;
	long long n_single_ofl;
	long long n_single_rpc;
	long long n_single_rpc_ofl;
	long long n_single_wait;
	long long n_single_wait_ofl;
	long long n_many;
	long long n_many_wait;
	long long n_all;
	long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);

// Data for random primitive selection
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  // except for SCF_PRIM_RESCHED and
				  // SCF_PRIM_SINGLE_RPC.

static char *scf_prim_name[] = {
	"resched_cpu",
	"smp_call_function_single",
	"smp_call_function_single_rpc",
	"smp_call_function_many",
	"smp_call_function",
};

struct scf_selector {
	unsigned long scfs_weight;
	int scfs_prim;
	bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
	bool scfc_in;
	bool scfc_out;
	int scfc_cpu; // -1 for not _single().
	bool scfc_wait;
	bool scfc_rpc;
	struct completion scfc_completion;
};
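
// How the scf_check handshake is used: the caller sets scfc_in just before
// issuing the IPI, each handler checks scfc_in on entry (counting failures
// in n_mb_in_errs) and sets scfc_out on exit, and a waiting caller checks
// scfc_out once the smp_call_function*() wait returns (counting failures
// in n_mb_out_errs).  A missed flag indicates a memory-ordering or
// IPI-delivery failure in the underlying smp_call_function() machinery.
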
// Used to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

extern void resched_cpu(int cpu); // An alternative IPI vector.

// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
	int cpu;
	int i;
	long long invoked_count = 0;
	bool isdone = READ_ONCE(scfdone);
	struct scf_statistics scfs = {};

	for_each_possible_cpu(cpu)
		invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
	for (i = 0; i < nthreads; i++) {
		scfs.n_resched += scf_stats_p[i].n_resched;
		scfs.n_single += scf_stats_p[i].n_single;
		scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
		scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
		scfs.n_single_rpc_ofl += scf_stats_p[i].n_single_rpc_ofl;
		scfs.n_single_wait += scf_stats_p[i].n_single_wait;
		scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
		scfs.n_many += scf_stats_p[i].n_many;
		scfs.n_many_wait += scf_stats_p[i].n_many_wait;
		scfs.n_all += scf_stats_p[i].n_all;
		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
	}
	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
	    atomic_read(&n_mb_out_errs) ||
	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
		bangstr = "!!! ";
	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
		 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
		 scfs.n_single_rpc, scfs.n_single_rpc_ofl,
		 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
	torture_onoff_stats();
	pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
		atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
		atomic_read(&n_alloc_errs));
}

// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
	struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

	// If no weight, if array would overflow, if computing three-place
	// percentages would overflow, or if the scf_prim_name[] array would
	// overflow, don't bother.  In the last three cases, complain.
	if (!weight ||
	    WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
	    WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
	    WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
		return;
	scf_sel_totweight += weight;
	scfsp->scfs_weight = scf_sel_totweight;
	scfsp->scfs_prim = prim;
	scfsp->scfs_wait = wait;
	scf_sel_array_len++;
}

// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
	int i;
	unsigned long oldw = 0;
	struct scf_selector *scfsp;
	unsigned long w;

	for (i = 0; i < scf_sel_array_len; i++) {
		scfsp = &scf_sel_array[i];
		w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
		pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
			scf_prim_name[scfsp->scfs_prim],
			scfsp->scfs_wait ? "wait" : "nowait");
		oldw = scfsp->scfs_weight;
	}
}

// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
	int i;
	unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

	for (i = 0; i < scf_sel_array_len; i++)
		if (scf_sel_array[i].scfs_weight >= w)
			return &scf_sel_array[i];
	WARN_ON_ONCE(1);
	return &scf_sel_array[0];
}
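
// Worked example for the selector above (illustrative weights): after
// scf_sel_add() calls with weights 200, 200, and 100, the array holds
// cumulative scfs_weight values 200, 400, and 500, with scf_sel_totweight
// equal to 500.  scf_sel_rand() then draws w uniformly from [0, 500] and
// returns the first entry whose cumulative weight is >= w, so the three
// primitives are selected in very nearly a 2:2:1 ratio.
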
// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
	int i;
	int j;
	unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp)) {
		WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
		if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
			atomic_inc(&n_mb_in_errs);
	}
	this_cpu_inc(scf_invoked_count);
	if (longwait <= 0) {
		if (!(r & 0xffc0)) {
			udelay(r & 0x3f);
			goto out;
		}
	}
	if (r & 0xfff)
		goto out;
	r = (r >> 12);
	if (longwait <= 0) {
		udelay((r & 0xff) + 1);
		goto out;
	}
	r = r % longwait + 1;
	for (i = 0; i < r; i++) {
		for (j = 0; j < 1000; j++) {
			udelay(1000);
			cpu_relax();
		}
	}
out:
	if (unlikely(!scfcp))
		return;
	if (scfcp->scfc_wait) {
		WRITE_ONCE(scfcp->scfc_out, true);
		if (scfcp->scfc_rpc)
			complete(&scfcp->scfc_completion);
	} else {
		kfree(scfcp);
	}
}

// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp) &&
	    WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu,
		      "%s: Wanted CPU %d got CPU %d\n",
		      __func__, scfcp->scfc_cpu, smp_processor_id())) {
		atomic_inc(&n_errs);
	}
	scf_handler(scfcp);
}
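
// Ownership of the scf_check structure, as implemented by scf_handler()
// above and scftorture_invoke_one() below: for no-wait calls the handler
// frees it, while for wait and RPC calls the caller frees it after checking
// scfc_out, deliberately leaking it (rather than risking a use-after-free)
// if the memory-ordering check fails.
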
// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
	bool allocfail = false;
	uintptr_t cpu;
	int ret = 0;
	struct scf_check *scfcp = NULL;
	struct scf_selector *scfsp = scf_sel_rand(trsp);

	if (use_cpus_read_lock)
		cpus_read_lock();
	else
		preempt_disable();
	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (!scfcp) {
			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
			atomic_inc(&n_alloc_errs);
			allocfail = true;
		} else {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = scfsp->scfs_wait;
			scfcp->scfc_out = false;
			scfcp->scfc_rpc = false;
		}
	}
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_RESCHED:
		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
			cpu = torture_random(trsp) % nr_cpu_ids;
			scfp->n_resched++;
			resched_cpu(cpu);
			this_cpu_inc(scf_invoked_count);
		}
		break;
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
		else
			scfp->n_single++;
		if (scfcp) {
			scfcp->scfc_cpu = cpu;
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
		if (ret) {
			if (scfsp->scfs_wait)
				scfp->n_single_wait_ofl++;
			else
				scfp->n_single_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_SINGLE_RPC:
		if (!scfcp)
			break;
		cpu = torture_random(trsp) % nr_cpu_ids;
		scfp->n_single_rpc++;
		scfcp->scfc_cpu = cpu;
		scfcp->scfc_wait = true;
		init_completion(&scfcp->scfc_completion);
		scfcp->scfc_rpc = true;
		barrier(); // Prevent race-reduction compiler optimizations.
		scfcp->scfc_in = true;
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
		if (!ret) {
			if (use_cpus_read_lock)
				cpus_read_unlock();
			else
				preempt_enable();
			wait_for_completion(&scfcp->scfc_completion);
			if (use_cpus_read_lock)
				cpus_read_lock();
			else
				preempt_disable();
		} else {
			scfp->n_single_rpc_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
			scfp->n_many++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
		break;
	case SCF_PRIM_ALL:
		if (scfsp->scfs_wait)
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		break;
	default:
		WARN_ON_ONCE(1);
		if (scfcp)
			scfcp->scfc_out = true;
	}
	if (scfcp && scfsp->scfs_wait) {
		if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
				 !scfcp->scfc_out)) {
			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
		} else {
			kfree(scfcp);
		}
		barrier(); // Prevent race-reduction compiler optimizations.
	}
	if (use_cpus_read_lock)
		cpus_read_unlock();
	else
		preempt_enable();
	if (allocfail)
		schedule_timeout_idle((1 + longwait) * HZ); // Let no-wait handlers complete.
	else if (!(torture_random(trsp) & 0xfff))
		schedule_timeout_uninterruptible(1);
}
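
// A note on the SCF_PRIM_SINGLE_RPC case above: wait_for_completion() can
// sleep, which is illegal with preemption disabled, and holding
// cpus_read_lock() across a potentially long wait would needlessly delay
// CPU-hotplug operations.  The hotplug exclusion is therefore dropped
// around the wait and reacquired afterwards.
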
// SCF test kthread.  Repeatedly does calls to members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
	int cpu;
	int curcpu;
	DEFINE_TORTURE_RANDOM(rand);
	struct scf_statistics *scfp = (struct scf_statistics *)arg;
	bool was_offline = false;

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
	cpu = scfp->cpu % nr_cpu_ids;
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());

	// Make sure that the CPU is affinitized appropriately during testing.
	curcpu = raw_smp_processor_id();
	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
		  __func__, scfp->cpu, curcpu, nr_cpu_ids);

	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started)) {
			if (torture_must_stop()) {
				VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
				goto end;
			}
			schedule_timeout_uninterruptible(1);
		}

	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

	do {
		scftorture_invoke_one(scfp, &rand);
		while (cpu_is_offline(cpu) && !torture_must_stop()) {
			schedule_timeout_interruptible(HZ / 5);
			was_offline = true;
		}
		if (was_offline) {
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			was_offline = false;
		}
		cond_resched();
		stutter_wait("scftorture_invoker");
	} while (!torture_must_stop());

	VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
	torture_kthread_stopping("scftorture_invoker");
	return 0;
}
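
// Note that each invoker kthread is bound to a single CPU.  Should that CPU
// go offline, the kthread is migrated to some other CPU, so the loop above
// polls for the CPU to come back online and then re-binds the kthread with
// set_cpus_allowed_ptr() before resuming the test.
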
static void
scftorture_print_module_parms(const char *tag)
{
	pr_alert(SCFTORT_FLAG
		 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
		 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown_secs, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}

static void scf_cleanup_handler(void *unused)
{
}

static void
scf_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	WRITE_ONCE(scfdone, true);
	if (nthreads && scf_stats_p)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;
	smp_call_function(scf_cleanup_handler, NULL, 0);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print(); // -After- the stats thread is stopped!
	kfree(scf_stats_p); // -After- the last stats print has completed!
	scf_stats_p = NULL;

	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}

static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;
	unsigned long weight_resched1 = weight_resched;
	unsigned long weight_single1 = weight_single;
	unsigned long weight_single_rpc1 = weight_single_rpc;
	unsigned long weight_single_wait1 = weight_single_wait;
	unsigned long weight_many1 = weight_many;
	unsigned long weight_many_wait1 = weight_many_wait;
	unsigned long weight_all1 = weight_all;
	unsigned long weight_all_wait1 = weight_all_wait;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	if (weight_resched <= 0 &&
	    weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
	    weight_many <= 0 && weight_many_wait <= 0 &&
	    weight_all <= 0 && weight_all_wait <= 0) {
		weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
		weight_many1 = weight_many == 0 ? 0 : 2;
		weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
		weight_all1 = weight_all == 0 ? 0 : 1;
		weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
	} else {
		if (weight_resched == -1)
			weight_resched1 = 0;
		if (weight_single == -1)
			weight_single1 = 0;
		if (weight_single_rpc == -1)
			weight_single_rpc1 = 0;
		if (weight_single_wait == -1)
			weight_single_wait1 = 0;
		if (weight_many == -1)
			weight_many1 = 0;
		if (weight_many_wait == -1)
			weight_many_wait1 = 0;
		if (weight_all == -1)
			weight_all1 = 0;
		if (weight_all_wait == -1)
			weight_all_wait1 = 0;
	}
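
	// Worked example of the defaulting above: with every weight left at
	// its default of -1 on a system where nr_cpu_ids == 4, the resched
	// and single-CPU primitives each get weight 2 * nr_cpu_ids == 8, the
	// multi-CPU primitives get weight 2, and the all-CPU primitives get
	// weight 1, for a total weight of 38.  (weight_resched matters only
	// when the test is built in.)  Scaling the single-CPU weights by CPU
	// count keeps cheap single-CPU IPIs dominant over expensive all-CPU
	// broadcasts as system size grows.
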
	if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
	    weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
	    weight_all1 == 0 && weight_all_wait1 == 0) {
		SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
		scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
	else if (weight_resched1)
		SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
	scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
	scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
	scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
	scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
	scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
	scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
	scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
	scf_sel_dump();

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);

	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	scf_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(scf_torture_init);
module_exit(scf_torture_cleanup);
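
// Example invocation (illustrative parameter values):
//
//	modprobe scftorture nthreads=4 stat_interval=15 onoff_interval=10 \
//		weight_single=3 weight_single_wait=3 weight_many_wait=1
//
// The periodic "scf_invoked_count" statistics lines then appear in the
// console log, and "rmmod scftorture" ends the test and prints the final
// SUCCESS/FAILURE verdict.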