Lines Matching +full:no +full:- +full:pbl +full:- +full:x8

1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update module-based torture test facility
54 MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
58 // Bits for ->extendables field, extendables param, and related definitions.
70 #define RCUTORTURE_RDR_UPDOWN 0x80 // ... up-read from task, down-read from timer.
88 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
90 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
94 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
96 "Use conditional/async full-stateexpedited GP wait primitives");
102 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
105 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
106 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
115 torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
117 torture_param(int, nreaders, -1, "Number of RCU reader threads");
118 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
121 torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
129 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
130 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
140 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
143 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
147 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
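Every parameter above comes from the torture_param() helper rather than raw module_param() calls. For reference, its expansion in include/linux/torture.h is approximately the following (a read-only module parameter plus its description):

#define torture_param(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); \
	MODULE_PARM_DESC(name, msg)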
176 // Mailbox-like structure to check RCU global memory ordering.
185 // Update-side data structure used to check RCU readers.
319 * Stop aggressive CPU-hog tests a bit before the end of the test in order
324 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
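The time_after() test above is wraparound-safe: it compares jiffies values through a signed difference instead of a raw '>'. A minimal userspace sketch of the idiom, with time_after_ul() as an illustrative stand-in for the kernel macro:

#include <stdio.h>

/* Wrap-safe "has a passed b?" comparison, as the kernel's time_after() does. */
static int time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long before = (unsigned long)-10;	/* just before wraparound */
	unsigned long after = 5;			/* just after wraparound */

	/* The naive comparison gets this wrong; the signed form does not. */
	printf("naive: %d  wrap-safe: %d\n",
	       after > before, time_after_ul(after, before));
	return 0;
}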
367 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
384 int (*readlock_nesting)(void); // actual nesting, if available, -1 if not.
472 started = cur_ops->get_gp_seq();
477 rtrsp->rt_delay_ms = longdelay_ms;
478 completed = cur_ops->get_gp_seq();
479 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
484 rtrsp->rt_delay_us = shortdelay_us;
502 return -1;
512 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
515 WRITE_ONCE(rp->rtort_chkp, NULL);
516 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
518 i = rp->rtort_pipe_count;
522 WRITE_ONCE(rp->rtort_pipe_count, i + 1);
523 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
525 rp->rtort_mbtest = 0;
532 * Update all callbacks in the pipe. Suitable for synchronous grace-period
542 list_add(&old_rp->rtort_free, &rcu_torture_removed);
545 list_del(&rp->rtort_free);
564 cur_ops->deferred_free(rp);
574 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
650 * buggy-RCU error messages.
655 rcu_torture_cb(&p->rtort_rcu);
737 /* We want there to be long-running readers, but not all the time. */
743 rtrsp->rt_delay_jiffies = longdelay;
753 srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
771 return !!(cur_ops->have_up_down & rf);
780 WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));
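The WARN_ON_ONCE() above insists that reader_flavor have at most one bit set, using the classic x & (x - 1) trick: subtracting one clears the lowest set bit, so the AND is zero exactly when x is zero or a power of two. A runnable illustration:

#include <stdio.h>

static int multiple_bits_set(unsigned int x)
{
	return (x & (x - 1)) != 0;	/* nonzero iff two or more bits set */
}

int main(void)
{
	printf("%d %d %d\n",
	       multiple_bits_set(0x8),	/* 0: single bit */
	       multiple_bits_set(0x9),	/* 1: two bits */
	       multiple_bits_set(0x0));	/* 0: no bits */
	return 0;
}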
801 srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
816 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
964 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
974 torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
983 if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) {
1017 * Definitions for RCU-tasks torture testing.
1031 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
1070 * Definitions for rude RCU-tasks torture testing.
1086 .name = "tasks-rude"
1101 * Definitions for tracing RCU-tasks torture testing.
1117 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
1138 .name = "tasks-tracing"
1152 if (!cur_ops->gp_diff)
1153 return new - old;
1154 return cur_ops->gp_diff(new, old);
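rcutorture_seq_diff() is a small instance of the file's pervasive ops-table pattern: cur_ops gathers per-flavor function pointers, and a NULL hook means "use the generic behavior". A stripped-down sketch with illustrative names:

struct torture_ops {
	/* NULL means the flavor needs no special sequence arithmetic. */
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	const char *name;
};

static unsigned long seq_diff(const struct torture_ops *ops,
			      unsigned long new, unsigned long old)
{
	if (!ops->gp_diff)
		return new - old;		/* generic fallback */
	return ops->gp_diff(new, old);		/* flavor-specific hook */
}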
1158 * RCU torture priority-boost testing. Runs one real-time thread per
1164 static int old_rt_runtime = -1;
1170 * throttled. Only possible if rcutorture is built-in otherwise the
1174 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
1178 sysctl_sched_rt_runtime = -1;
1183 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
1187 old_rt_runtime = -1;
1199 unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
1201 if (end - *start > mininterval) {
1203 smp_mb(); // Time check before grace-period check.
1204 if (cur_ops->poll_gp_state(gp_state))
1206 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
1215 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
1221 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
1222 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
1223 current->rt_priority, gp_state, end - *start);
1224 cur_ops->gp_kthread_dbg();
1226 gp_done = cur_ops->poll_gp_state(gp_state);
1233 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
1260 /* Set real-time priority. */
1263 /* Each pass through the following loop does one boost-test cycle. */
1274 schedule_timeout_interruptible(oldstarttime - jiffies);
1281 // Do one boost-test interval.
1285 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1288 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1289 gp_state = cur_ops->start_gp_poll();
1298 if (cur_ops->poll_gp_state(gp_state))
1306 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1345 * RCU torture force-quiescent-state kthread. Repeatedly induces
1366 cur_ops->fqs();
1368 fqs_burst_remaining -= fqs_holdoff;
1377 // Used by writers to randomly choose from the available grace-period primitives.
1382 * Determine which grace-period primitives are available.
1415 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1418 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1421 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
1424 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
1427 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
1429 pr_info("%s: Testing conditional full-state GPs.\n", __func__);
1430 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
1433 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
1435 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
1437 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
1440 if (gp_exp1 && cur_ops->exp_sync) {
1443 } else if (gp_exp && !cur_ops->exp_sync) {
1446 if (gp_normal1 && cur_ops->deferred_free) {
1449 } else if (gp_normal && !cur_ops->deferred_free) {
1452 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
1453 cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1456 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1459 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
1460 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
1462 pr_info("%s: Testing polling full-state GPs.\n", __func__);
1463 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
1466 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
1469 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
1472 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
1474 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
1476 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
1479 if (gp_sync1 && cur_ops->sync) {
1482 } else if (gp_sync && !cur_ops->sync) {
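The whole of rcu_torture_write_types() reduces to one pattern: append an RTWS_* token to an array for each grace-period primitive the flavor actually provides (warning when a requested one is absent), then let the writer index that array at random. A compressed, runnable sketch with illustrative availability flags:

#include <stdio.h>
#include <stdlib.h>

enum rtws { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_SYNC };

int main(void)
{
	int synctype[8], nsynctypes = 0;
	int have_deferred = 1, have_exp = 1, have_cond = 0, have_sync = 1;

	/* Record only the primitives that are available. */
	if (have_deferred)
		synctype[nsynctypes++] = RTWS_DEF_FREE;
	if (have_exp)
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
	if (have_cond)
		synctype[nsynctypes++] = RTWS_COND_GET;
	if (have_sync)
		synctype[nsynctypes++] = RTWS_SYNC;

	/* Writer loop: each pass picks uniformly from what exists. */
	for (int i = 0; i < 4; i++)
		printf("using primitive %d\n", synctype[rand() % nsynctypes]);
	return 0;
}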
1491 * while also testing out the polled APIs. Note well that the single-CPU
1492 * grace-period optimizations must be accounted for.
1502 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
1503 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
1507 cookie = cur_ops->get_gp_state();
1509 cur_ops->get_gp_state_full(&cookie_full);
1510 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
1513 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
1514 "%s: Cookie check 3 failed %pS() online %*pbl.",
1516 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
1517 "%s: Cookie check 4 failed %pS() online %*pbl",
1562 torture_type, cur_ops->name);
1564 "%s: No update-side primitives.\n", __func__)) {
1566 * No update primitives, so don't try updating.
1574 if (cur_ops->poll_active > 0) {
1575 ulo = kcalloc(cur_ops->poll_active, sizeof(*ulo), GFP_KERNEL);
1577 ulo_size = cur_ops->poll_active;
1579 if (cur_ops->poll_active_full > 0) {
1580 rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL);
1582 rgo_size = cur_ops->poll_active_full;
1593 torture_type, jiffies - j);
1601 rp->rtort_pipe_count = 0;
1602 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
1608 rp->rtort_mbtest = 1;
1612 i = old_rp->rtort_pipe_count;
1616 WRITE_ONCE(old_rp->rtort_pipe_count,
1617 old_rp->rtort_pipe_count + 1);
1618 ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
1621 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1622 idx = cur_ops->readlock();
1623 cookie = cur_ops->get_gp_state();
1624 WARN_ONCE(cur_ops->poll_gp_state(cookie),
1625 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1629 cookie, cur_ops->get_gp_state());
1630 if (cur_ops->get_comp_state) {
1631 cookie = cur_ops->get_comp_state();
1632 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1634 cur_ops->readunlock(idx);
1636 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1637 idx = cur_ops->readlock();
1638 cur_ops->get_gp_state_full(&cookie_full);
1639 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1640 "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1645 if (cur_ops->get_comp_state_full) {
1646 cur_ops->get_comp_state_full(&cookie_full);
1647 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1649 cur_ops->readunlock(idx);
1654 cur_ops->deferred_free(old_rp);
1658 do_rtws_sync(&rand, cur_ops->exp_sync);
1663 gp_snap = cur_ops->get_gp_state();
1667 cur_ops->cond_sync(gp_snap);
1672 gp_snap = cur_ops->get_gp_state_exp();
1676 cur_ops->cond_sync_exp(gp_snap);
1681 cur_ops->get_gp_state_full(&gp_snap_full);
1685 cur_ops->cond_sync_full(&gp_snap_full);
1690 cur_ops->get_gp_state_full(&gp_snap_full);
1694 cur_ops->cond_sync_exp_full(&gp_snap_full);
1700 ulo[i] = cur_ops->get_comp_state();
1701 gp_snap = cur_ops->start_gp_poll();
1703 while (!cur_ops->poll_gp_state(gp_snap)) {
1704 gp_snap1 = cur_ops->get_gp_state();
1706 if (cur_ops->poll_gp_state(ulo[i]) ||
1707 cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1720 cur_ops->get_comp_state_full(&rgo[i]);
1721 cur_ops->start_gp_poll_full(&gp_snap_full);
1723 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1724 cur_ops->get_gp_state_full(&gp_snap1_full);
1726 if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1727 cur_ops->same_gp_state_full(&rgo[i],
1740 gp_snap = cur_ops->start_gp_poll_exp();
1742 while (!cur_ops->poll_gp_state_exp(gp_snap))
1749 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1751 while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1758 do_rtws_sync(&rand, cur_ops->sync);
1770 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1777 expediting = -expediting;
1786 !cur_ops->slow_gps &&
1793 if (cur_ops->gp_kthread_dbg)
1794 cur_ops->gp_kthread_dbg();
1805 expediting = -expediting;
1811 " Dynamic grace-period expediting was disabled.\n",
1835 "%s: No update-side primitives.\n", __func__)) {
1837 * No update primitives, so don't try updating.
1847 if (cur_ops->cb_barrier != NULL &&
1849 cur_ops->cb_barrier();
1855 cur_ops->exp_sync();
1858 gp_snap = cur_ops->get_gp_state();
1860 cur_ops->cond_sync(gp_snap);
1863 gp_snap = cur_ops->get_gp_state_exp();
1865 cur_ops->cond_sync_exp(gp_snap);
1868 cur_ops->get_gp_state_full(&gp_snap_full);
1870 cur_ops->cond_sync_full(&gp_snap_full);
1873 cur_ops->get_gp_state_full(&gp_snap_full);
1875 cur_ops->cond_sync_exp_full(&gp_snap_full);
1878 if (cur_ops->start_poll_irqsoff)
1880 gp_snap = cur_ops->start_gp_poll();
1881 if (cur_ops->start_poll_irqsoff)
1883 while (!cur_ops->poll_gp_state(gp_snap)) {
1889 if (cur_ops->start_poll_irqsoff)
1891 cur_ops->start_gp_poll_full(&gp_snap_full);
1892 if (cur_ops->start_poll_irqsoff)
1894 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1900 gp_snap = cur_ops->start_gp_poll_exp();
1901 while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1907 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1908 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1914 cur_ops->sync();
1951 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1959 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1960 !READ_ONCE(rtp->rtort_chkp) &&
1961 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1962 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1963 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1964 rtrcp->rtc_chkrdr = rdrchked;
1965 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1966 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1967 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1968 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1972 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1973 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1974 return; // No work or work not yet ready.
1975 rdrchked = rtrcp_assigner->rtc_chkrdr;
1979 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1981 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1983 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1984 rtrcp_assigner->rtc_ready = 0;
1985 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1986 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
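The reader-check handshake above is a lock-free claim-then-publish protocol: cmpxchg_relaxed() claims the assigner slot, smp_store_release() publishes readiness, and the consuming side's smp_load_acquire() pairs with it. A condensed C11-atomics sketch of the same shape, with illustrative types and names:

#include <stdatomic.h>
#include <stddef.h>

struct chk { int payload; };

static _Atomic(struct chk *) assigner;	/* NULL while the slot is free */
static atomic_int ready;

/* Assigning side: claim the slot, then publish that work is ready. */
static int assign_work(struct chk *c)
{
	struct chk *expected = NULL;

	if (!atomic_compare_exchange_strong(&assigner, &expected, c))
		return 0;	/* lost the race: back out, try elsewhere */
	atomic_store_explicit(&ready, 1, memory_order_release);
	return 1;
}

/* Checking side: the acquire load pairs with the release store above. */
static struct chk *collect_work(void)
{
	struct chk *c = atomic_load_explicit(&assigner, memory_order_relaxed);

	if (!c || !atomic_load_explicit(&ready, memory_order_acquire))
		return NULL;	/* no work, or work not yet ready */
	atomic_store_explicit(&ready, 0, memory_order_relaxed);
	atomic_store(&assigner, NULL);	/* someone else may now assign */
	return c;
}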
2009 WARN_ONCE(cur_ops->readlock_nesting &&
2011 cur_ops->readlock_nesting() == 0, ROEC_ARGS);
2018 WARN_ONCE(cur_ops->extendables &&
2023 * non-preemptible RCU in a preemptible kernel uses preempt_disable()
2030 WARN_ONCE(cur_ops->extendables && !(curstate & mask) &&
2034 * non-preemptible RCU in a preemptible kernel uses "preempt_count() &
2035 * PREEMPT_MASK" as ->readlock_nesting().
2044 WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) &&
2045 cur_ops->readlock_nesting() > 0, ROEC_ARGS);
2049 * Do one extension of an RCU read-side critical section using the
2053 * and random-number-generator state in trsp. If this is neither the
2055 * change, do a ->read_delay().
2062 int idxnew1 = -1;
2063 int idxnew2 = -1;
2073 rtrsp->rt_readstate = newstate;
2075 /* First, put new protection in place to avoid critical-section gap. */
2087 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
2089 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
2097 rtrsp->rt_cpu = cpu;
2099 rtrsp[-1].rt_end_cpu = cpu;
2100 if (cur_ops->reader_blocked)
2101 rtrsp[-1].rt_preempted = cur_ops->reader_blocked();
2104 // Sample grace-period sequence number, as good a place as any.
2105 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) {
2106 rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs();
2107 rtrsp->rt_ts = ktime_get_mono_fast_ns();
2109 rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq;
2130 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
2131 WARN_ON_ONCE(idxnew2 != -1);
2137 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
2139 raw_spin_lock_irqsave(&current->pi_lock, flags);
2140 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
2141 WARN_ON_ONCE(idxnew1 != -1);
2144 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
2147 cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
2148 WARN_ON_ONCE(idxnew1 != -1);
2154 cur_ops->read_delay(trsp, rtrsp);
2157 if (idxnew1 == -1)
2160 if (idxnew2 == -1)
2176 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
2216 * them on non-RT.
2230 * Do a randomly selected number of extensions of an existing RCU read-side
2240 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
2241 if (!((mask - 1) & mask))
2270 rtorsp->checkpolling = !(torture_random(trsp) & 0xfff);
2271 rtorsp->rtrsp = &rtorsp->rtseg[0];
2275 * Set up the first segment of a series of overlapping read-side
2277 * outermost read-side critical section.
2282 if (rtorsp->checkpolling) {
2283 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2284 rtorsp->cookie = cur_ops->get_gp_state();
2285 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2286 cur_ops->get_gp_state_full(&rtorsp->cookie_full);
2288 rtorsp->started = cur_ops->get_gp_seq();
2289 rtorsp->ts = rcu_trace_clock_local();
2290 rtorsp->p = rcu_dereference_check(rcu_torture_current,
2291 !cur_ops->readlock_held || cur_ops->readlock_held() ||
2292 (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN));
2293 if (rtorsp->p == NULL) {
2295 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
2298 if (rtorsp->p->rtort_mbtest == 0)
2300 rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp);
2305 * Complete the last segment of a series of overlapping read-side
2318 pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count);
2324 completed = cur_ops->get_gp_seq();
2326 do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu,
2327 rtorsp->ts, rtorsp->started, completed);
2331 completed = rcutorture_seq_diff(completed, rtorsp->started);
2338 if (rtorsp->checkpolling) {
2339 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2340 WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie),
2341 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2345 rtorsp->cookie, cur_ops->get_gp_state());
2346 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2347 WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full),
2348 "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2354 if (cur_ops->reader_blocked)
2355 preempted = cur_ops->reader_blocked();
2356 rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
2357 WARN_ON_ONCE(rtorsp->readstate);
2360 WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1);
2365 for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++)
2373 * Do one read-side critical section, returning false if there was
2374 * no data to read. Can be invoked both from process context and
2407 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2410 if (cur_ops->call) {
2414 cur_ops->call(rhp, rcu_torture_timer_cb);
2435 if (irqreader && cur_ops->irq_capable)
2439 if (irqreader && cur_ops->irq_capable) {
2454 if (irqreader && cur_ops->irq_capable) {
2486 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2487 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2488 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2489 WRITE_ONCE(rtorsup->rtorsu_nmigrates,
2490 rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu));
2491 smp_store_release(&rtorsup->rtorsu_inuse, false);
2510 return -ENOMEM;
2533 if (!smp_load_acquire(&rtorsup->rtorsu_inuse))
2535 if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) {
2536 rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2537 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2538 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2539 smp_store_release(&rtorsup->rtorsu_inuse, false);
2554 init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2555 rawidx = cur_ops->down_read();
2556 WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1);
2558 rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN;
2559 rtorsup->rtorsu_rtors.rtrsp++;
2560 rtorsup->rtorsu_cpu = raw_smp_processor_id();
2561 if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) {
2562 WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2563 WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2567 smp_store_release(&rtorsup->rtorsu_inuse, true);
2568 t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million.
2571 hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
2573 rtorsup->rtorsu_j = jiffies; // Not used by hrtimer handler.
2574 rtorsup->rtorsu_kt = t;
2593 j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse.
2594 if (smp_load_acquire(&rtorsup->rtorsu_inuse)) {
2595 WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10),
2596 "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n", rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j);
2610 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to
2616 int maxcpu = -1;
2686 if (cur_ops->get_gpwrap_count)
2687 n_gpwraps += cur_ops->get_gpwrap_count(cpu);
2697 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2728 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2729 pr_cont("nocb-toggles: %ld:%ld ",
2743 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2745 WARN_ON_ONCE(i > 1); // Too-short grace period
2759 pr_cont("Free-Block Circulation: ");
2765 if (cur_ops->stats)
2766 cur_ops->stats();
2774 if (cur_ops->get_gp_data)
2775 cur_ops->get_gp_data(&flags, &gp_seq);
2777 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2780 wtp == NULL ? ~0U : wtp->__state,
2781 wtp == NULL ? -1 : (int)task_cpu(wtp));
2786 if (cur_ops->gp_kthread_dbg)
2787 cur_ops->gp_kthread_dbg();
2834 pr_alert("mem_dump_obj(%px):", &rhp->func);
2835 mem_dump_obj(&rhp->func);
2846 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2847 mem_dump_obj(&rhp->func);
2855 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2856 mem_dump_obj(&rhp->func);
2864 "--- %s: nreaders=%d nfakewriters=%d "
2882 test_boost, cur_ops->can_boost,
2969 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2996 idx = cur_ops->readlock();
3018 cur_ops->readunlock(idx);
3023 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many
3062 /* Spawn CPU-stall kthread, if stall_cpu specified. */
3070 /* State structure for forward-progress self-propagating RCU callback. */
3077 * Forward-progress self-propagating RCU callback function. Because
3078 * callbacks run from softirq, this function is an implicit RCU read-side
3085 if (READ_ONCE(fcsp->stop)) {
3086 WRITE_ONCE(fcsp->stop, 2);
3089 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
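Lines 3085-3089 are the heart of the self-propagating callback: each invocation re-posts itself, yielding exactly one callback per grace period until ->stop is set. Reassembled as one unit from the fragments above, with call_rcu() standing in for cur_ops->call:

#include <linux/rcupdate.h>

struct fwd_cb_state {
	struct rcu_head rh;
	int stop;	/* 0: keep going, 1: stop requested, 2: stopped */
};

/* Runs from softirq, hence an implicit RCU read-side critical section. */
static void fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);	/* acknowledge the stop request */
		return;
	}
	call_rcu(&fcsp->rh, fwd_prog_cb);	/* re-post: one CB per GP */
}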
3092 /* State for continuous-flood RCU callbacks. */
3135 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
3136 if (rfp->n_launders_hist[i].n_launders > 0)
3138 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
3139 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
3140 gps_old = rfp->rcu_launder_gp_seq_start;
3142 gps = rfp->n_launders_hist[j].launder_gp_seq;
3145 rfp->n_launders_hist[j].n_launders,
3152 /* Callback function for continuous-flood RCU callbacks. */
3159 struct rcu_fwd *rfp = rfcp->rfc_rfp;
3161 rfcp->rfc_next = NULL;
3162 rfcp->rfc_gps++;
3163 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
3164 rfcpp = rfp->rcu_fwd_cb_tail;
3165 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
3167 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
3168 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
3169 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
3170 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
3171 rfp->n_launders_hist[i].n_launders++;
3172 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
3173 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3185 // No userspace emulation: CB invocation throttles call_rcu()
3200 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
3201 rfcp = rfp->rcu_fwd_cb_head;
3203 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3206 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
3207 if (!rfp->rcu_fwd_cb_head)
3208 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
3209 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3222 /* Carry out need_resched()/cond_resched() forward-progress testing. */
3237 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3238 if (!cur_ops->sync)
3239 return; // Cannot do need_resched() forward progress testing without ->sync.
3240 if (cur_ops->call && cur_ops->cb_barrier) {
3247 cur_ops->sync(); /* Later readers see above write. */
3250 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
3253 gps = cur_ops->get_gp_seq();
3254 sd = cur_ops->stall_dur() + 1;
3255 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
3256 dur = sd4 + torture_random(&trs) % (sd - sd4);
3257 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
3258 stopat = rfp->rcu_fwd_startat + dur;
3262 idx = cur_ops->readlock();
3264 cur_ops->readunlock(idx);
3273 cver = READ_ONCE(rcu_torture_current_version) - cver;
3274 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
3277 rfp->rcu_fwd_id, dur, cver, gps);
3281 cur_ops->sync(); /* Wait for running CB to complete. */
3282 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
3283 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
3294 /* Carry out call_rcu() forward-progress testing. */
3311 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3313 return; /* Get out of the way quickly, no GP wait! */
3314 if (!cur_ops->call)
3315 return; /* Can't do call_rcu() fwd prog without ->call. */
3319 cur_ops->sync(); /* Later readers see above write. */
3320 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
3321 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
3323 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
3327 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
3328 rfp->n_launders_hist[i].n_launders = 0;
3330 gps = cur_ops->get_gp_seq();
3331 rfp->rcu_launder_gp_seq_start = gps;
3336 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
3339 rfcpn = READ_ONCE(rfcp->rfc_next);
3341 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
3344 rfp->rcu_fwd_cb_head = rfcpn;
3347 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
3355 rfcp->rfc_gps = 0;
3356 rfcp->rfc_rfp = rfp;
3361 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
3370 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
3371 cver = READ_ONCE(rcu_torture_current_version) - cver;
3372 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
3373 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
3374 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
3379 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
3380 cur_ops->gp_kthread_dbg();
3383 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
3384 n_launders + n_max_cbs - n_launders_cb_snap,
3400 * current forward-progress test.
3415 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
3419 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
3427 cur_ops->cb_barrier();
3432 cur_ops->cb_barrier();
3448 /* Carry out grace-period forward-progress testing. */
3466 if (!rfp->rcu_fwd_id) {
3480 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3481 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
3483 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
3486 torture_num_online_cpus() > rfp->rcu_fwd_id)))
3493 /* Short runs might not contain a valid forward-progress attempt. */
3494 if (!rfp->rcu_fwd_id) {
3502 /* If forward-progress checking is requested and feasible, spawn the thread. */
3517 if ((!cur_ops->sync && !cur_ops->call) ||
3518 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
3525 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing");
3528 return -EINVAL; /* In module, can fail back to user. */
3543 return -ENOMEM;
3594 cur_ops->call(rhp, rcu_torture_barrier_cbf);
3619 * is ordered before the following ->call().
3622 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3627 if (cur_ops->cb_barrier != NULL)
3628 cur_ops->cb_barrier();
3653 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3666 cur_ops->cb_barrier();
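The barrier test leans on the cb_barrier() contract (rcu_barrier() for vanilla RCU): it waits for all previously queued callbacks to be invoked, not merely for a grace period. The contract in miniature, using the real API in a hypothetical helper:

#include <linux/rcupdate.h>

static struct rcu_head rh;

static void demo_cb(struct rcu_head *rhp)
{
	/* Invoked from softirq after a grace period. */
}

static void barrier_demo(void)
{
	call_rcu(&rh, demo_cb);	/* queue the callback on this CPU */
	rcu_barrier();		/* returns only after demo_cb() has run */
	/* Now safe to free rh's container or unload the module. */
}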
3691 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3694 torture_type, cur_ops->name);
3708 return -ENOMEM;
3744 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3746 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3778 (void)rcu_torture_one_read(trsp, -1);
3782 // Parent kthread which creates and destroys read-exit child kthreads.
3794 // Each pass through this loop does one read-exit episode.
3870 WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3877 int cpu = -1;
3887 cpu = cpumask_next(-1, cpu_online_mask);
3890 if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
3892 // Preempt at high-ish priority, then reset to normal.
3894 torture_sched_setaffinity(current->pid, cpu_present_mask, true);
3908 /* Timer handler for toggling RCU grace-period sequence overflow test lag value */
3914 pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n");
3915 cur_ops->set_gpwrap_lag(0);
3917 next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0);
3919 pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps);
3920 cur_ops->set_gpwrap_lag(gpwrap_lag_gps);
3938 pr_alert("rcu-torture: lag timing parameters must be positive\n");
3939 return -EINVAL;
3945 ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL);
3953 cur_ops->set_gpwrap_lag(0);
3966 if (cur_ops->cb_barrier != NULL) {
3967 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3968 cur_ops->cb_barrier();
3970 if (cur_ops->gp_slow_unregister)
3971 cur_ops->gp_slow_unregister(NULL);
3981 if (cur_ops->gp_kthread_dbg)
3982 cur_ops->gp_kthread_dbg();
4019 if (cur_ops->get_gp_data)
4020 cur_ops->get_gp_data(&flags, &gp_seq);
4021 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
4022 cur_ops->name, (long)gp_seq, flags,
4030 * Wait for all RCU callbacks to fire, then do torture-type-specific
4033 if (cur_ops->cb_barrier != NULL) {
4034 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
4035 cur_ops->cb_barrier();
4037 if (cur_ops->cleanup != NULL)
4038 cur_ops->cleanup();
4042 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
4045 pr_alert("Failure/close-call rcutorture reader segments:\n");
4047 pr_alert("\t: No segments recorded!!!\n");
4063 pr_cont("->%-2d", err_segs[i].rt_end_cpu);
4068 cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) {
4071 char sepchar = '-';
4073 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq,
4075 cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end,
4082 buf2[j - 1] = ' ';
4127 if (cur_ops->gp_slow_unregister)
4128 cur_ops->gp_slow_unregister(NULL);
4130 if (gpwrap_lag && cur_ops->set_gpwrap_lag)
4141 * This -might- happen due to race conditions, but is unlikely.
4147 * does happen, the debug-objects subsystem won't have splatted.
4153 * Verify that double-free causes debug-objects to complain, but only
4165 KBUILD_MODNAME, cur_ops->name);
4169 if (WARN_ON_ONCE(cur_ops->debug_objects &&
4170 (!cur_ops->call || !cur_ops->cb_barrier)))
4177 pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
4180 idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
4181 cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
4182 cur_ops->call(&rh2, rcu_torture_leak_cb);
4183 cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
4185 cur_ops->call(rhp, rcu_torture_leak_cb);
4186 cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
4188 cur_ops->readunlock(idx);
4191 cur_ops->cb_barrier();
4192 pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
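Condensing lines 4180-4191: the read lock holds the grace period off so the first callback stays queued, which is what lets the duplicate call_rcu() be caught. A sketch of the shape with vanilla RCU (CONFIG_DEBUG_OBJECTS_RCU_HEAD=y assumed; dup_test() and leak_cb() are illustrative):

#include <linux/rcupdate.h>

static struct rcu_head rh;

static void leak_cb(struct rcu_head *rhp)
{
	/* Intentionally empty: detecting the duplicate is debug-objects' job. */
}

static void dup_test(void)
{
	rcu_read_lock();		/* keep the grace period from ending */
	call_rcu(&rh, leak_cb);		/* legitimate enqueue */
	call_rcu(&rh, leak_cb);		/* duplicate: debug-objects should splat */
	rcu_read_unlock();
	rcu_barrier();			/* drain before touching rh again */
}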
4202 if (cur_ops->sync && !(++n & 0xfff))
4203 cur_ops->sync();
4245 j = deadlock ? 0 : -1;
4253 // Test lockdep on SRCU-based deadlock scenarios.
4282 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
4289 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
4290 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4305 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
4306 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4326 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
4327 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4348 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
4349 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4354 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
4364 if (i == cyclelen - 1)
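The test_srcu_lockdep machinery above builds N-way waiting cycles; its simplest instance is the classic two-struct SRCU deadlock that lockdep should flag, sketched here with illustrative names:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(s1);
DEFINE_STATIC_SRCU(s2);

/* Thread A: a reader on s1 waits for a grace period on s2 ... */
static void thread_a(void)
{
	int idx = srcu_read_lock(&s1);

	synchronize_srcu(&s2);
	srcu_read_unlock(&s1, idx);
}

/* ... while thread B holds s2 and waits on s1, closing the cycle. */
static void thread_b(void)
{
	int idx = srcu_read_lock(&s2);

	synchronize_srcu(&s1);
	srcu_read_unlock(&s2, idx);
}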
4403 return -EBUSY;
4408 if (strcmp(torture_type, cur_ops->name) == 0)
4412 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
4414 pr_alert("rcu-torture types:");
4416 pr_cont(" %s", torture_ops[i]->name);
4418 firsterr = -EINVAL;
4422 if (cur_ops->fqs == NULL && fqs_duration != 0) {
4423 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
4428 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
4429 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
4432 if (cur_ops->init)
4433 cur_ops->init();
4440 nrealfakewriters = num_online_cpus() - 2 - nfakewriters;
4448 nrealreaders = num_online_cpus() - 2 - nreaders;
4453 if (cur_ops->get_gp_data)
4454 cur_ops->get_gp_data(&flags, &gp_seq);
4456 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
4457 cur_ops->name, (long)gp_seq, flags);
4503 firsterr = -ENOMEM;
4519 firsterr = -ENOMEM;
4523 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
4547 firsterr = -ENOMEM;
4574 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
4633 if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
4634 cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
4636 if (gpwrap_lag && cur_ops->set_gpwrap_lag) {