Lines Matching full:rtp
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
230 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) in set_tasks_gp_state() argument
232 rtp->gp_state = newstate; in set_tasks_gp_state()
233 rtp->gp_jiffies = jiffies; in set_tasks_gp_state()
238 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) in tasks_gp_state_getname() argument
240 int i = data_race(rtp->gp_state); // Let KCSAN detect update races in tasks_gp_state_getname()
251 static void cblist_init_generic(struct rcu_tasks *rtp) in cblist_init_generic() argument
267 rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL); in cblist_init_generic()
268 BUG_ON(!rtp->rtpcp_array); in cblist_init_generic()
271 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in cblist_init_generic()
280 rtpcp->rtpp = rtp; in cblist_init_generic()
282 rtp->rtpcp_array[index] = rtpcp; in cblist_init_generic()
298 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); in cblist_init_generic()
299 WRITE_ONCE(rtp->percpu_dequeue_lim, lim); in cblist_init_generic()
300 smp_store_release(&rtp->percpu_enqueue_lim, lim); in cblist_init_generic()
303 rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), in cblist_init_generic()
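
The cblist_init_generic() lines above set up the callback-queue geometry: percpu_enqueue_shift and percpu_enqueue_lim decide how many per-CPU queues are in use, and call_rcu_tasks_generic() (line 361 below) maps the current CPU to a queue with smp_processor_id() >> percpu_enqueue_shift. A minimal standalone userspace sketch of that mapping, with a local order_base_2()-style helper standing in for the kernel's (all names here are illustrative):

#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): smallest shift that maps all CPUs to queue 0. */
static int order_base_2_demo(unsigned long n)
{
	int o = 0;

	while ((1UL << o) < n)
		o++;
	return o;
}

int main(void)
{
	int nr_cpus = 8;	/* illustrative CPU count */

	/* shift == 0: per-CPU queuing (cpu N -> queue N).
	 * shift == order_base_2(nr_cpus): every CPU funnels to queue 0. */
	for (int shift = 0; shift <= order_base_2_demo(nr_cpus); shift++) {
		printf("shift=%d:", shift);
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			printf(" cpu%d->q%d", cpu, cpu >> shift);
		printf("\n");
	}
	return 0;
}
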
308 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) in rcu_tasks_lazy_time() argument
310 return jiffies + rtp->lazy_jiffies; in rcu_tasks_lazy_time()
318 struct rcu_tasks *rtp; in call_rcu_tasks_generic_timer() local
321 rtp = rtpcp->rtpp; in call_rcu_tasks_generic_timer()
323 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { in call_rcu_tasks_generic_timer()
327 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); in call_rcu_tasks_generic_timer()
331 rcuwait_wake_up(&rtp->cbs_wait); in call_rcu_tasks_generic_timer()
337 struct rcu_tasks *rtp; in call_rcu_tasks_iw_wakeup() local
340 rtp = rtpcp->rtpp; in call_rcu_tasks_iw_wakeup()
341 rcuwait_wake_up(&rtp->cbs_wait); in call_rcu_tasks_iw_wakeup()
346 struct rcu_tasks *rtp) in call_rcu_tasks_generic() argument
350 bool havekthread = smp_load_acquire(&rtp->kthread_ptr); in call_rcu_tasks_generic()
361 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); in call_rcu_tasks_generic()
364 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); in call_rcu_tasks_generic()
373 READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids) in call_rcu_tasks_generic()
382 if (rtp->lazy_jiffies) in call_rcu_tasks_generic()
383 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); in call_rcu_tasks_generic()
392 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
393 if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) { in call_rcu_tasks_generic()
394 WRITE_ONCE(rtp->percpu_enqueue_shift, 0); in call_rcu_tasks_generic()
395 WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids); in call_rcu_tasks_generic()
396 smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids); in call_rcu_tasks_generic()
397 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); in call_rcu_tasks_generic()
399 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
403 if (needwake && READ_ONCE(rtp->kthread_ptr)) in call_rcu_tasks_generic()
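
When call_rcu_tasks_generic() switches to per-CPU queuing (lines 392-399 above), it first updates percpu_enqueue_shift and percpu_dequeue_lim with plain WRITE_ONCE() stores and only then publishes percpu_enqueue_lim with smp_store_release(); readers such as rcu_barrier_tasks_generic() and rcu_tasks_need_gpcb() pair that with smp_load_acquire(). A small single-threaded userspace sketch of the same publish/consume pattern using C11 atomics (the names and the standalone setting are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

static int enqueue_shift = 3;		/* data written before the limit is published */
static int dequeue_lim = 1;
static _Atomic int enqueue_lim = 1;	/* the published limit */

/* Writer side, mirroring the WRITE_ONCE()/smp_store_release() sequence above. */
static void switch_to_percpu_queuing(int nr_cpus)
{
	enqueue_shift = 0;
	dequeue_lim = nr_cpus;
	atomic_store_explicit(&enqueue_lim, nr_cpus, memory_order_release);
}

/* Reader side: an acquire load of the limit that observes the new value also
 * guarantees visibility of the shift and dequeue limit written before it. */
static int read_limit(void)
{
	return atomic_load_explicit(&enqueue_lim, memory_order_acquire);
}

int main(void)
{
	switch_to_percpu_queuing(8);
	printf("enqueue_lim=%d enqueue_shift=%d dequeue_lim=%d\n",
	       read_limit(), enqueue_shift, dequeue_lim);
	return 0;
}
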
410 struct rcu_tasks *rtp; in rcu_barrier_tasks_generic_cb() local
415 rtp = rtpcp->rtpp; in rcu_barrier_tasks_generic_cb()
416 if (atomic_dec_and_test(&rtp->barrier_q_count)) in rcu_barrier_tasks_generic_cb()
417 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic_cb()
422 static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp) in rcu_barrier_tasks_generic() argument
427 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
429 mutex_lock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
430 if (rcu_seq_done(&rtp->barrier_q_seq, s)) { in rcu_barrier_tasks_generic()
432 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
435 rtp->barrier_q_start = jiffies; in rcu_barrier_tasks_generic()
436 rcu_seq_start(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
437 init_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
438 atomic_set(&rtp->barrier_q_count, 2); in rcu_barrier_tasks_generic()
440 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) in rcu_barrier_tasks_generic()
442 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_barrier_tasks_generic()
446 atomic_inc(&rtp->barrier_q_count); in rcu_barrier_tasks_generic()
449 if (atomic_sub_and_test(2, &rtp->barrier_q_count)) in rcu_barrier_tasks_generic()
450 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
451 wait_for_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
452 rcu_seq_end(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
453 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
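
rcu_barrier_tasks_generic() uses a biased completion count: barrier_q_count starts at 2, each barrier callback queued on a non-empty per-CPU list adds one, every callback that runs does an atomic_dec_and_test(), and the caller finally drops the bias with atomic_sub_and_test(2, ...). Whoever takes the count to zero completes the barrier, and the bias keeps that from happening while callbacks are still being queued. A compact single-threaded sketch of the counting scheme (C11 atomics, illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int barrier_count;
static bool barrier_done;

/* One of these is queued per non-empty callback list. */
static void barrier_cb(void)
{
	if (atomic_fetch_sub(&barrier_count, 1) == 1)	/* atomic_dec_and_test() */
		barrier_done = true;			/* complete()            */
}

int main(void)
{
	int nonempty_queues = 3;			/* illustrative */

	atomic_store(&barrier_count, 2);		/* initial bias */
	for (int i = 0; i < nonempty_queues; i++)
		atomic_fetch_add(&barrier_count, 1);	/* one per queued callback */

	for (int i = 0; i < nonempty_queues; i++)
		barrier_cb();				/* callbacks eventually run */

	/* Drop the bias; if that takes the count to zero (as here, where all
	 * callbacks have already run), the caller completes the barrier itself. */
	if (atomic_fetch_sub(&barrier_count, 2) == 2)	/* atomic_sub_and_test(2, ...) */
		barrier_done = true;

	printf("count=%d done=%d\n", atomic_load(&barrier_count), barrier_done);
	return 0;
}
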
458 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) in rcu_tasks_need_gpcb() argument
463 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); in rcu_tasks_need_gpcb()
469 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); in rcu_tasks_need_gpcb()
473 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
486 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
487 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
489 if (rtp->lazy_jiffies) in rcu_tasks_need_gpcb()
508 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
509 if (rtp->percpu_enqueue_lim > 1) { in rcu_tasks_need_gpcb()
510 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids)); in rcu_tasks_need_gpcb()
511 smp_store_release(&rtp->percpu_enqueue_lim, 1); in rcu_tasks_need_gpcb()
512 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); in rcu_tasks_need_gpcb()
514 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
516 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
519 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
520 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { in rcu_tasks_need_gpcb()
521 WRITE_ONCE(rtp->percpu_dequeue_lim, 1); in rcu_tasks_need_gpcb()
522 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
524 if (rtp->percpu_dequeue_lim == 1) { in rcu_tasks_need_gpcb()
525 for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) { in rcu_tasks_need_gpcb()
528 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
533 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
540 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) in rcu_tasks_invoke_cbs() argument
552 rtpcp_next = rtp->rtpcp_array[index]; in rcu_tasks_invoke_cbs()
553 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
558 rtpcp_next = rtp->rtpcp_array[index]; in rcu_tasks_invoke_cbs()
559 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
570 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
583 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
590 struct rcu_tasks *rtp; in rcu_tasks_invoke_cbs_wq() local
593 rtp = rtpcp->rtpp; in rcu_tasks_invoke_cbs_wq()
594 rcu_tasks_invoke_cbs(rtp, rtpcp); in rcu_tasks_invoke_cbs_wq()
598 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) in rcu_tasks_one_gp() argument
602 mutex_lock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
608 mutex_unlock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
609 set_tasks_gp_state(rtp, RTGS_WAIT_CBS); in rcu_tasks_one_gp()
610 rcuwait_wait_event(&rtp->cbs_wait, in rcu_tasks_one_gp()
611 (needgpcb = rcu_tasks_need_gpcb(rtp)), in rcu_tasks_one_gp()
613 mutex_lock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
618 set_tasks_gp_state(rtp, RTGS_WAIT_GP); in rcu_tasks_one_gp()
619 rtp->gp_start = jiffies; in rcu_tasks_one_gp()
620 rcu_seq_start(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
621 rtp->gp_func(rtp); in rcu_tasks_one_gp()
622 rcu_seq_end(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
626 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); in rcu_tasks_one_gp()
627 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); in rcu_tasks_one_gp()
628 mutex_unlock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
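
rcu_tasks_one_gp() brackets each grace period with rcu_seq_start()/rcu_seq_end() on tasks_gp_seq, and the barrier code above polls barrier_q_seq with rcu_seq_snap()/rcu_seq_done(). The sketch below reduces that sequence-counter idea to a single low-order in-progress bit; the kernel's rcu_seq_*() helpers keep a two-bit state field, but the cookie logic is the same. This is a simplified illustration, not the kernel implementation:

#include <stdio.h>

static unsigned long gp_seq;			/* even: idle, odd: GP in progress */

static void seq_start(void) { gp_seq++; }	/* rcu_seq_start(): counter goes odd  */
static void seq_end(void)   { gp_seq++; }	/* rcu_seq_end():   counter goes even */

/* Cookie that is reached only after one full grace period from now. */
static unsigned long seq_snap(void) { return (gp_seq + 3) & ~1UL; }

/* Has the counter reached the cookie? */
static int seq_done(unsigned long s) { return (long)(gp_seq - s) >= 0; }

int main(void)
{
	unsigned long s1 = seq_snap();		/* snapped while idle */

	seq_start();
	unsigned long s2 = seq_snap();		/* snapped while a GP is already running */
	seq_end();

	printf("s1 done=%d, s2 done=%d\n", seq_done(s1), seq_done(s2));	/* 1, 0 */
	seq_start();
	seq_end();				/* one more full grace period */
	printf("s1 done=%d, s2 done=%d\n", seq_done(s1), seq_done(s2));	/* 1, 1 */
	return 0;
}
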
635 struct rcu_tasks *rtp = arg; in rcu_tasks_kthread() local
638 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_kthread()
646 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! in rcu_tasks_kthread()
657 rcu_tasks_one_gp(rtp, false); in rcu_tasks_kthread()
660 schedule_timeout_idle(rtp->gp_sleep); in rcu_tasks_kthread()
665 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) in synchronize_rcu_tasks_generic() argument
669 "synchronize_%s() called too soon", rtp->name)) in synchronize_rcu_tasks_generic()
673 if (READ_ONCE(rtp->kthread_ptr)) { in synchronize_rcu_tasks_generic()
674 wait_rcu_gp_state(rtp->wait_state, rtp->call_func); in synchronize_rcu_tasks_generic()
677 rcu_tasks_one_gp(rtp, true); in synchronize_rcu_tasks_generic()
681 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) in rcu_spawn_tasks_kthread_generic() argument
685 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
686 …%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) in rcu_spawn_tasks_kthread_generic()
722 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) in show_rcu_tasks_generic_gp_kthread() argument
730 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in show_rcu_tasks_generic_gp_kthread()
742 rtp->kname, in show_rcu_tasks_generic_gp_kthread()
743 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), in show_rcu_tasks_generic_gp_kthread()
744 jiffies - data_race(rtp->gp_jiffies), in show_rcu_tasks_generic_gp_kthread()
745 data_race(rcu_seq_current(&rtp->tasks_gp_seq)), in show_rcu_tasks_generic_gp_kthread()
746 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), in show_rcu_tasks_generic_gp_kthread()
747 ".k"[!!data_race(rtp->kthread_ptr)], in show_rcu_tasks_generic_gp_kthread()
751 rtp->lazy_jiffies, in show_rcu_tasks_generic_gp_kthread()
756 static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt, in rcu_tasks_torture_stats_print_generic() argument
765 tt, tf, tst, data_race(rtp->tasks_gp_seq), in rcu_tasks_torture_stats_print_generic()
766 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies), in rcu_tasks_torture_stats_print_generic()
767 data_race(rtp->gp_state), tasks_gp_state_getname(rtp)); in rcu_tasks_torture_stats_print_generic()
769 data_race(rtp->percpu_enqueue_shift), in rcu_tasks_torture_stats_print_generic()
770 data_race(rtp->percpu_enqueue_lim), in rcu_tasks_torture_stats_print_generic()
771 data_race(rtp->percpu_dequeue_lim), in rcu_tasks_torture_stats_print_generic()
772 data_race(rtp->percpu_dequeue_gpseq)); in rcu_tasks_torture_stats_print_generic()
777 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_torture_stats_print_generic()
792 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start), in rcu_tasks_torture_stats_print_generic()
793 atomic_read(&rtp->barrier_q_count)); in rcu_tasks_torture_stats_print_generic()
812 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) in rcu_tasks_wait_gp() argument
824 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); in rcu_tasks_wait_gp()
825 rtp->pregp_func(&holdouts); in rcu_tasks_wait_gp()
833 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); in rcu_tasks_wait_gp()
834 if (rtp->pertask_func) { in rcu_tasks_wait_gp()
837 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
841 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); in rcu_tasks_wait_gp()
842 rtp->postscan_func(&holdouts); in rcu_tasks_wait_gp()
854 fract = rtp->init_fract; in rcu_tasks_wait_gp()
863 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); in rcu_tasks_wait_gp()
883 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); in rcu_tasks_wait_gp()
884 rtp->holdouts_func(&holdouts, needreport, &firstreport); in rcu_tasks_wait_gp()
892 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); in rcu_tasks_wait_gp()
896 set_tasks_gp_state(rtp, RTGS_POST_GP); in rcu_tasks_wait_gp()
897 rtp->postgp_func(rtp); in rcu_tasks_wait_gp()
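
rcu_tasks_wait_gp() is a generic driver: each RCU Tasks flavor plugs in per-phase hooks (pregp_func, the optional pertask_func, postscan_func, holdouts_func, postgp_func), and the driver walks them through the RTGS_* states shown above. A stripped-down sketch of that function-pointer structure (the struct, names, and trivial flavor are illustrative; the kernel's hooks take task and list arguments rather than ints):

#include <stdio.h>

struct flavor_hooks {
	void (*pregp)(void);
	void (*pertask)(int task_id);	/* called for every task during the scan */
	void (*postscan)(void);
	int  (*holdouts)(void);		/* returns how many tasks still block the GP */
	void (*postgp)(void);
};

static void generic_wait_gp(const struct flavor_hooks *hooks, int nr_tasks)
{
	hooks->pregp();				/* RTGS_PRE_WAIT_GP        */
	for (int t = 0; t < nr_tasks; t++)	/* RTGS_SCAN_TASKLIST      */
		hooks->pertask(t);
	hooks->postscan();			/* RTGS_POST_SCAN_TASKLIST */
	while (hooks->holdouts() > 0)		/* RTGS_SCAN_HOLDOUTS      */
		;				/* (real code sleeps between scans) */
	hooks->postgp();			/* RTGS_POST_GP            */
}

/* Trivial flavor: no holdouts, just trace the phases. */
static void pregp(void)    { printf("pregp\n"); }
static void pertask(int t) { printf("scan task %d\n", t); }
static void postscan(void) { printf("postscan\n"); }
static int  holdouts(void) { return 0; }
static void postgp(void)   { printf("postgp\n"); }

int main(void)
{
	const struct flavor_hooks demo = { pregp, pertask, postscan, holdouts, postgp };

	generic_wait_gp(&demo, 3);
	return 0;
}
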
1137 static void rcu_tasks_postgp(struct rcu_tasks *rtp) in rcu_tasks_postgp() argument
1354 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) in rcu_tasks_rude_wait_gp() argument
1356 rtp->n_ipis += cpumask_weight(cpu_online_mask); in rcu_tasks_rude_wait_gp()
1971 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) in rcu_tasks_trace_postgp() argument