Lines matching (full-word search): "b", "-", "side"

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
26 * @lock: Lock protecting per-CPU callback list.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
58 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
61 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
62 * @gp_func: This flavor's grace-period-wait function.
64 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
67 * @gp_start: Most recent grace-period start in jiffies.
70 * @n_ipis_fails: Number of IPI-send failures.
71 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
73 * @pregp_func: This flavor's pre-grace-period function (optional).
74 * @pertask_func: This flavor's per-task scan function (optional).
75 * @postscan_func: This flavor's post-task scan function (optional).
76 * @holdouts_func: This flavor's holdout-list scan function (optional).
77 * @postgp_func: This flavor's post-grace-period function (optional).
78 * @call_func: This flavor's call_rcu()-equivalent function.
79 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
83 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
84 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
85 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
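/*
 * For orientation: each flavor instantiates the structures documented
 * above through this file's DEFINE_RCU_TASKS() macro.  As a sketch
 * (based on the mainline macro; exact arguments may differ by version),
 * the classic Tasks RCU flavor is created roughly as:
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 */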
153 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
180 static int rcu_task_enqueue_lim __read_mostly = -1;
193 /* RCU tasks grace-period state for debugging. */
229 /* Record grace-period phase and time. */
232 rtp->gp_state = newstate;
233 rtp->gp_jiffies = jiffies;
240 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
249 // Initialize per-CPU callback lists for the specified flavor of
267 rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
268 BUG_ON(!rtp->rtpcp_array);
271 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
276 if (rcu_segcblist_empty(&rtpcp->cblist))
277 rcu_segcblist_init(&rtpcp->cblist);
278 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
279 rtpcp->cpu = cpu;
280 rtpcp->rtpp = rtp;
281 rtpcp->index = index;
282 rtp->rtpcp_array[index] = rtpcp;
284 if (!rtpcp->rtp_blkd_tasks.next)
285 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
286 if (!rtpcp->rtp_exit_list.next)
287 INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
288 rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
296 if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
298 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
299 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
300 smp_store_release(&rtp->percpu_enqueue_lim, lim);
303 rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
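/*
 * Worked example (hypothetical numbers, not from the source): with
 * rcu_task_cpu_ids == 8 and lim == 2 callback queues, the shift
 * settles at 2, since (8 - 1) >> 2 == 1 < 2.  Enqueuing then maps
 * CPUs 0-3 to queue 0 and CPUs 4-7 to queue 1 via
 * smp_processor_id() >> percpu_enqueue_shift.
 */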
310 return jiffies + rtp->lazy_jiffies;
322 rtp = rtpcp->rtpp;
324 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
325 if (!rtpcp->urgent_gp)
326 rtpcp->urgent_gp = 1;
328 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
332 rcuwait_wake_up(&rtp->cbs_wait);
335 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
341 rtp = rtpcp->rtpp;
342 rcuwait_wake_up(&rtp->cbs_wait);
351 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
358 rhp->next = NULL;
359 rhp->func = func;
362 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
363 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
365 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
369 if (rtpcp->rtp_jiffies != j) {
370 rtpcp->rtp_jiffies = j;
371 rtpcp->rtp_n_lock_retries = 0;
373 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
374 READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
378 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
379 rcu_segcblist_init(&rtpcp->cblist);
381 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
382 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
383 if (rtp->lazy_jiffies)
384 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
386 needwake = rcu_segcblist_empty(&rtpcp->cblist);
389 rtpcp->urgent_gp = 3;
390 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
393 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
394 if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
395 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
396 WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
397 smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
398 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
400 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
404 if (needwake && READ_ONCE(rtp->kthread_ptr))
405 irq_work_queue(&rtpcp->rtp_irq_work);
414 rhp->next = rhp; // Mark the callback as having been invoked.
416 rtp = rtpcp->rtpp;
417 if (atomic_dec_and_test(&rtp->barrier_q_count))
418 complete(&rtp->barrier_q_completion);
421 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
428 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
430 mutex_lock(&rtp->barrier_q_mutex);
431 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
433 mutex_unlock(&rtp->barrier_q_mutex);
436 rtp->barrier_q_start = jiffies;
437 rcu_seq_start(&rtp->barrier_q_seq);
438 init_completion(&rtp->barrier_q_completion);
439 atomic_set(&rtp->barrier_q_count, 2);
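/*
 * The count starts at 2 so that callbacks entrained below cannot drive
 * it to zero (and complete the barrier) before entrainment finishes.
 * The atomic_sub_and_test(2, ...) after the loop removes this bias and
 * fires the completion directly if nothing needed to be waited for.
 */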
441 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
443 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
444 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
446 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
447 atomic_inc(&rtp->barrier_q_count);
450 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
451 complete(&rtp->barrier_q_completion);
452 wait_for_completion(&rtp->barrier_q_completion);
453 rcu_seq_end(&rtp->barrier_q_seq);
454 mutex_unlock(&rtp->barrier_q_mutex);
464 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
470 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
474 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
477 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
481 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
487 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
488 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
489 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
490 if (rtp->lazy_jiffies)
491 rtpcp->urgent_gp--;
493 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
494 rtpcp->urgent_gp = 0;
496 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
506 // to CPU 0. Note the matching RCU read-side critical section in
509 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
510 if (rtp->percpu_enqueue_lim > 1) {
511 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
512 smp_store_release(&rtp->percpu_enqueue_lim, 1);
513 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
515 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
517 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
520 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
521 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
522 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
523 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
525 if (rtp->percpu_dequeue_lim == 1) {
526 for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
529 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
531 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
534 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
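/*
 * Callback invocation fans out over a balanced binary tree of per-CPU
 * structures: as shown below, the handler for index i first queues
 * workqueue handlers for its children 2*i+1 and 2*i+2 (for CPUs still
 * within the dequeue limit), then invokes its own ready callbacks.
 */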
551 index = rtpcp->index * 2 + 1;
553 rtpcp_next = rtp->rtpcp_array[index];
554 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
555 cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
556 queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
559 rtpcp_next = rtp->rtpcp_array[index];
560 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
561 cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
562 queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
568 if (rcu_segcblist_empty(&rtpcp->cblist))
571 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
572 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
578 rhp->func(rhp);
583 rcu_segcblist_add_len(&rtpcp->cblist, -len);
584 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
594 rtp = rtpcp->rtpp;
603 mutex_lock(&rtp->tasks_gp_mutex);
609 mutex_unlock(&rtp->tasks_gp_mutex);
611 rcuwait_wait_event(&rtp->cbs_wait,
614 mutex_lock(&rtp->tasks_gp_mutex);
620 rtp->gp_start = jiffies;
621 rcu_seq_start(&rtp->tasks_gp_seq);
622 rtp->gp_func(rtp);
623 rcu_seq_end(&rtp->tasks_gp_seq);
628 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
629 mutex_unlock(&rtp->tasks_gp_mutex);
632 // RCU-tasks kthread that detects grace periods and invokes callbacks.
639 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
641 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
642 rtpcp->urgent_gp = 1;
647 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
652 * one RCU-tasks grace period and then invokes the callbacks.
653 * This loop is terminated by the system going down. ;-)
661 schedule_timeout_idle(rtp->gp_sleep);
670 "synchronize_%s() called too soon", rtp->name))
673 // If the grace-period kthread is running, use it.
674 if (READ_ONCE(rtp->kthread_ptr)) {
675 wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
681 /* Spawn RCU-tasks grace-period kthread. */
686 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
687 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
695 * Print any non-default Tasks RCU settings.
703 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
706 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
722 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
731 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
733 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
735 if (data_race(rtpcp->urgent_gp))
737 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
743 rtp->kname,
744 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
745 jiffies - data_race(rtp->gp_jiffies),
746 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
747 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
748 ".k"[!!data_race(rtp->kthread_ptr)],
752 rtp->lazy_jiffies,
756 /* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
766 tt, tf, tst, data_race(rtp->tasks_gp_seq),
767 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
768 data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
770 data_race(rtp->percpu_enqueue_shift),
771 data_race(rtp->percpu_enqueue_lim),
772 data_race(rtp->percpu_dequeue_lim),
773 data_race(rtp->percpu_dequeue_gpseq));
778 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
780 if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
782 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
793 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
794 atomic_read(&rtp->barrier_q_count));
810 // Shared code between task-list-scanning variants of Tasks RCU.
812 /* Wait for one RCU-tasks grace period. */
826 rtp->pregp_func(&holdouts);
829 * There were callbacks, so we need to wait for an RCU-tasks
835 if (rtp->pertask_func) {
838 rtp->pertask_func(t, &holdouts);
843 rtp->postscan_func(&holdouts);
855 fract = rtp->init_fract;
885 rtp->holdouts_func(&holdouts, needreport, &firstreport);
887 // Print pre-stall informational messages if needed.
893 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
898 rtp->postgp_func(rtp);
908 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
910 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
914 // rates from multiple CPUs. If this is required, per-CPU callback lists
923 // Invokes synchronize_rcu() in order to wait for all in-flight
924 // t->on_rq and t->nvcsw transitions to complete. This works because
926 // rcu_tasks_pertask(), invoked on every non-idle task:
927 // For every runnable non-idle task other than the current one, use
932 // Gather per-CPU lists of tasks in do_exit() to ensure that all
936 // will take care of any tasks stuck in the non-preemptible region
944 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
951 // current task to a per-CPU list of tasks that rcu_tasks_postscan() must
955 // Pre-grace-period update-side code is ordered before the grace
956 // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
961 /* Pre-grace-period preparation. */
965 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
968 * synchronize_rcu(), a read-side critical section that started
973 * memory barrier on the first store to t->rcu_tasks_holdout,
986 if (!READ_ONCE(t->on_rq))
990 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
999 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
1008 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
1015 /* Per-task initial processing. */
1020 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
1021 WRITE_ONCE(t->rcu_tasks_holdout, true);
1022 list_add(&t->rcu_tasks_holdout_list, hop);
1044 * read side critical sections:
1050 * 2) An _RCU_ read side starting with the final preempt_disable()
1066 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
1067 if (list_empty(&t->rcu_tasks_holdout_list))
1078 list_add(&tmp, &t->rcu_tasks_exit_list);
1099 if (!READ_ONCE(t->rcu_tasks_holdout) ||
1100 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1103 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
1104 WRITE_ONCE(t->rcu_tasks_holdout, false);
1105 list_del_init(&t->rcu_tasks_holdout_list);
1120 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1121 data_race(t->rcu_tasks_idle_cpu), cpu);
1137 /* Finish off the Tasks-RCU grace period. */
1141 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
1143 * reordering on other CPUs could cause their RCU-tasks read-side
1145 * However, because these ->nvcsw updates are carried out with
1149 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1151 * memory barriers for ->rcu_tasks_holdout accesses.
1157 * read side critical section.
1170 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1178 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1184 * read-side critical sections have completed. call_rcu_tasks() assumes
1185 * that the read-side critical sections end at a voluntary context
1187 * or transition to usermode execution. As such, there are no read-side
1190 * through a safe state, not so much for data-structure synchronization.
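/*
 * Example usage (a minimal sketch, not from this file; the struct and
 * function names are made up for illustration):
 *
 *	#include <linux/rcupdate.h>
 *	#include <linux/slab.h>
 *
 *	struct my_ops {
 *		struct rcu_head rh;
 *		void (*handler)(void);
 *	};
 *
 *	static void my_ops_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_ops, rh));
 *	}
 *
 *	// Unpublish @ops first, then let it be freed once every task
 *	// has passed through a voluntary context switch or usermode.
 *	static void my_ops_retire(struct my_ops *ops)
 *	{
 *		call_rcu_tasks(&ops->rh, my_ops_free_cb);
 *	}
 */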
1202 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1204 * Control will return to the caller some time after a full rcu-tasks
1206 * executing rcu-tasks read-side critical sections have elapsed. These
1207 * read-side critical sections are delimited by calls to schedule(),
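/*
 * Example usage (sketch; the trampoline object and helper are
 * hypothetical):
 *
 *	// All call sites have already been redirected away from
 *	// old_tramp, so any task still running in it will reach a
 *	// voluntary context switch or usermode before this returns.
 *	synchronize_rcu_tasks();
 *	free_trampoline(old_tramp);
 */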
1226 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
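/*
 * Example usage (sketch): a module that queued callbacks with
 * call_rcu_tasks() should flush them before its code goes away
 * (the teardown helper is hypothetical):
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		my_unregister_everything();
 *		rcu_barrier_tasks();	// Wait for queued callbacks to run.
 *	}
 */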
1237 static int rcu_tasks_lazy_ms = -1;
1299 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
1302 t->rcu_tasks_exit_cpu = smp_processor_id();
1304 WARN_ON_ONCE(!rtpcp->rtp_exit_list.next);
1305 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
1312 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
1320 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
1321 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
1323 list_del_init(&t->rcu_tasks_exit_list);
1347 // Ordering is provided by the scheduler's context-switch code.
1354 // Wait for one rude RCU-tasks grace period.
1357 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1366 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1372 * read-side critical sections have completed. call_rcu_tasks_rude()
1373 * assumes that the read-side critical sections end at context switch,
1375 * usermode execution is schedulable). As such, there are no read-side
1378 * through a safe state, not so much for data-structure synchronization.
1392 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1394 * Control will return to the caller some time after a rude rcu-tasks
1396 * executing rcu-tasks read-side critical sections have elapsed. These
1397 * read-side critical sections are delimited by calls to schedule(),
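/*
 * Example usage (sketch; the hook pointer and its state are
 * hypothetical):
 *
 *	WRITE_ONCE(my_hook, NULL);	// Stop new preempt-disabled users.
 *	synchronize_rcu_tasks_rude();	// Wait out existing ones.
 *	kfree(old_hook_state);
 */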
1458 // 1. Has explicit read-side markers to allow finite grace periods
1459 // in the face of in-kernel loops for PREEMPT=n builds.
1462 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1464 // 3. Avoids expensive read-side instructions, having overhead similar
1467 // There are of course downsides. For example, the grace-period code
1485 // or were preempted within their current RCU Tasks Trace read-side
1487 // Finally, this function re-enables CPU hotplug.
1488 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1490 // Invokes synchronize_rcu() to wait for late-stage exiting tasks
1502 // Pre-grace-period update-side code is ordered before the grace period
1503 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1504 // read-side code is ordered before the grace period by atomic operations
1505 // on .b.need_qs flag of each task involved in this process, or by scheduler
1506 // context-switch ordering (for locked-down non-running readers).
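/*
 * Reader-side example (sketch; my_find_rule() and its data are
 * hypothetical).  The explicit markers are rcu_read_lock_trace() and
 * rcu_read_unlock_trace() from <linux/rcupdate_trace.h>; readers may
 * be preempted, and in sleepable-BPF-style uses may even block:
 *
 *	#include <linux/rcupdate_trace.h>
 *
 *	int my_lookup(int key)
 *	{
 *		int ret;
 *
 *		rcu_read_lock_trace();
 *		ret = my_find_rule(key);
 *		rcu_read_unlock_trace();
 *		return ret;
 *	}
 */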
1532 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1535 smp_mb(); // Enforce full grace-period ordering.
1536 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1539 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1542 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1543 smp_mb(); // Enforce full grace-period ordering.
1547 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1548 * the four-byte operand-size restriction of some platforms.
1554 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); // Word-sized cmpxchg, per the size restriction noted above; the .b.need_qs byte of the result is what the caller sees.
1559 * If we are the last reader, signal the grace-period kthread.
1560 * Also remove from the per-CPU list of blocked tasks.
1568 // Open-coded full-word version of rcu_ld_need_qs().
1569 smp_mb(); // Enforce full grace-period ordering.
1570 trs = smp_load_acquire(&t->trc_reader_special);
1572 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1573 smp_mb(); // Pairs with update-side barriers.
1574 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1575 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1579 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1581 if (trs.b.blocked) {
1582 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1584 list_del_init(&t->trc_blkd_node);
1585 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1588 WRITE_ONCE(t->trc_reader_nesting, 0);
1601 t->trc_blkd_cpu = smp_processor_id();
1602 if (!rtpcp->rtp_blkd_tasks.next)
1603 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1604 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1605 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1613 if (list_empty(&t->trc_holdout_list)) {
1615 list_add(&t->trc_holdout_list, bhp);
1623 if (!list_empty(&t->trc_holdout_list)) {
1624 list_del_init(&t->trc_holdout_list);
1626 n_trc_holdouts--;
1641 // If the task is not in a read-side critical section, and
1642 // if this is the last reader, awaken the grace-period kthread.
1643 nesting = READ_ONCE(t->trc_reader_nesting);
1652 // Get here if the task is in a read-side critical section.
1653 // Set its state so that it will update state for the grace-period
1662 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1665 /* Callback function for scheduler to check locked-down task. */
1676 return -EINVAL;
1683 if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
1684 return -EINVAL; // No quiescent state, do it the hard way.
1688 // The task is not running, so C-language access is safe.
1689 nesting = t->trc_reader_nesting;
1695 // If not exiting a read-side critical section, mark as checked
1696 // so that the grace-period kthread will remove it from the
1703 return -EINVAL; // Reader transitioning, try again later.
1705 // The task is in a read-side critical section, so set up its
1720 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1726 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1739 // an RCU read-side critical section. Otherwise, the invocation of
1752 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1756 t->trc_ipi_to_cpu = cpu;
1765 t->trc_ipi_to_cpu = -1;
1771 * Initialize for first-round processing for the specified task.
1777 // is no idle task for the other CPUs. Also, the grace-period
1780 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1784 t->trc_ipi_to_cpu = -1;
1788 /* Do first-round processing for the specified task. */
1795 /* Initialize for a new RCU-tasks-trace grace period. */
1810 // in CPU-hotplug code paths.
1820 // the grace-period kthread will see that task's read-side
1821 // critical section or the task will see the updater's pre-GP
1836 // current RCU tasks trace read-side critical section.
1840 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1844 list_del_init(&t->trc_blkd_node);
1845 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1855 // Re-enable CPU hotplug now that the holdout list is populated.
1864 // Wait for late-stage exiting tasks to finish exiting.
1870 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1886 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1887 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1888 trc_rdrp->needqs = rcu_ld_need_qs(t);
1906 t->pid,
1907 ".I"[t->trc_ipi_to_cpu >= 0],
1911 t->pid,
1915 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1944 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1949 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1957 // Re-enable CPU hotplug now that the holdout list scan has completed.
1992 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1995 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1996 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1999 WRITE_ONCE(t->trc_reader_nesting, 0);
2003 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
2007 * The callback function will be invoked some time after a trace rcu-tasks
2009 * trace rcu-tasks read-side critical sections have completed. These
2010 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
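/*
 * Example usage (sketch; the structure and callback are made up):
 *
 *	static void my_prog_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_prog, rh));
 *	}
 *
 *	// After @prog has been removed from all lookup structures:
 *	call_rcu_tasks_trace(&prog->rh, my_prog_free_cb);
 */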
2023 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
2025 * Control will return to the caller some time after a trace rcu-tasks
2027 * trace rcu-tasks read-side critical sections have elapsed. These read-side
2041 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
2047 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
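/*
 * Example teardown ordering (sketch; the unpublish helper is
 * hypothetical):
 *
 *	my_unpublish_all();
 *	synchronize_rcu_tasks_trace();	// No reader can still see old data.
 *	rcu_barrier_tasks_trace();	// All queued callbacks have finished.
 */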
2058 int rcu_tasks_trace_lazy_ms = -1;
2158 pr_info("Callback from %s invoked.\n", rttd->name);
2160 rttd->notrun = false;
2187 * Return: 0 - test passed
2188 * 1 - test failed, but have not timed out yet
2189 * -1 - test failed and timed out
2202 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2203 ret = -1;
2274 // Run the self-tests.