--- tree.c (23da2ad64dbe9f3fab10af90484fe41e144337b1)
+++ tree.c (7f66f099de4dc4b1a66a3f94e6db16409924a6f8)
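In the unified view below, '-' lines appear only in tree.c at 23da2ad64dbe, '+' lines only in tree.c at 7f66f099de4d, and unprefixed lines are common to both. The two revisions differ in three related areas: the 23da2ad6 side carries an enable_rcu_lazy module parameter gating lazy call_rcu() queueing, per-rcu_node kthread workers for expedited grace periods, and an expanded rcutree_affinity_setting() that manages both boost and expedited kthreads; the 7f66f099 side instead uses rcu_boost_kthread_setaffinity(), a global expedited worker pair behind CONFIG_RCU_EXP_KTHREAD, and a boost-only kthread mutex.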
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  *
  * Copyright IBM Corporation, 2008
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *          Manfred Spraul <manfred@colorfullife.com>
--- 131 unchanged lines hidden ---
  * It might later prove better for people registering RCU callbacks during
  * early boot to take responsibility for these callbacks, but one step at
  * a time.
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
                               unsigned long gps, unsigned long flags);
-static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
 static bool rcu_init_invoked(void);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
--- 2591 unchanged lines hidden ---
         __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
     } else {
         __call_rcu_core(rdp, head, flags);
         local_irq_restore(flags);
     }
 }
 
 #ifdef CONFIG_RCU_LAZY
-static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
-module_param(enable_rcu_lazy, bool, 0444);
-
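A note on the parameter above: the 0444 mode makes enable_rcu_lazy visible but read-only at runtime (under /sys/module/rcutree/parameters/), so it acts as a boot-time switch, set as rcutree.enable_rcu_lazy=0 or =1 on the kernel command line. Its default tracks CONFIG_RCU_LAZY_DEFAULT_OFF, so a kernel built with lazy RCU support can still default to hurried callbacks.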
 /**
  * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
  * flush all lazy callbacks (including the new one) to the main ->cblist while
  * doing so.
  *
  * @head: structure to be used for queueing the RCU updates.
  * @func: actual callback function to be invoked after the grace period
  *
--- 9 unchanged lines hidden ---
  * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
  * ordering and other functionality.
  */
 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
 {
     __call_rcu_common(head, func, false);
 }
 EXPORT_SYMBOL_GPL(call_rcu_hurry);
-#else
-#define enable_rcu_lazy false
 #endif
 
 /**
  * call_rcu() - Queue an RCU callback for invocation after a grace period.
  * By default the callbacks are 'lazy' and are kept hidden from the main
  * ->cblist to prevent starting of grace periods too soon.
  * If you desire grace periods to start very soon, use call_rcu_hurry().
  *
--- 32 unchanged lines hidden ---
  * if CPU A and CPU B are the same CPU (but again only if the system has
  * more than one CPU).
  *
  * Implementation of these memory-ordering guarantees is described here:
  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-    __call_rcu_common(head, func, enable_rcu_lazy);
+    __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /* Maximum number of jiffies to wait before draining a batch. */
 #define KFREE_DRAIN_JIFFIES (5 * HZ)
 #define KFREE_N_BATCHES 2
 #define FREE_N_CHANNELS 2
 
--- 1551 unchanged lines hidden ---
     rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
     rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
     rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
     rdp->last_sched_clock = jiffies;
     rdp->cpu = cpu;
     rcu_boot_init_nocb_percpu_data(rdp);
 }
 
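For readers unfamiliar with the API whose laziness the call_rcu() hunk above toggles: call_rcu() queues a callback that runs only after every pre-existing RCU reader has finished, and the lazy flag merely delays how eagerly a grace period is started; the "#define enable_rcu_lazy false" stub in the !CONFIG_RCU_LAZY branch lets the same call site compile in both configurations. A minimal, self-contained usage sketch (the struct and all names are illustrative, not from tree.c), following the pattern in Documentation/RCU/whatisRCU.rst:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* A hypothetical RCU-protected object. */
struct foo {
    int data;
    struct rcu_head rh;
};

static struct foo __rcu *foo_slot;
static DEFINE_SPINLOCK(foo_lock);

/* Runs after a grace period, once no reader can still see the old object. */
static void foo_reclaim(struct rcu_head *rhp)
{
    struct foo *fp = container_of(rhp, struct foo, rh);

    kfree(fp);
}

/* Publish a new version; readers see old or new, never a torn mix. */
static void foo_update(struct foo *newfp)
{
    struct foo *oldfp;

    spin_lock(&foo_lock);
    oldfp = rcu_replace_pointer(foo_slot, newfp,
                                lockdep_is_held(&foo_lock));
    spin_unlock(&foo_lock);
    if (oldfp)
        call_rcu(&oldfp->rh, foo_reclaim);  /* may be queued lazily */
}

A caller that needs the grace period to start promptly, for example when reclaiming memory under pressure, would use call_rcu_hurry() instead; that is exactly the distinction the third argument of __call_rcu_common() encodes.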
-struct kthread_worker *rcu_exp_gp_kworker;
-
-static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
-{
-    struct kthread_worker *kworker;
-    const char *name = "rcu_exp_par_gp_kthread_worker/%d";
-    struct sched_param param = { .sched_priority = kthread_prio };
-    int rnp_index = rnp - rcu_get_root();
-
-    if (rnp->exp_kworker)
-        return;
-
-    kworker = kthread_create_worker(0, name, rnp_index);
-    if (IS_ERR_OR_NULL(kworker)) {
-        pr_err("Failed to create par gp kworker on %d/%d\n",
-               rnp->grplo, rnp->grphi);
-        return;
-    }
-    WRITE_ONCE(rnp->exp_kworker, kworker);
-
-    if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
-        sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
-}
-
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
-    struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
-
-    if (!kworker)
-        return NULL;
-
-    return kworker->task;
-}
-
-static void __init rcu_start_exp_gp_kworker(void)
-{
-    const char *name = "rcu_exp_gp_kthread_worker";
-    struct sched_param param = { .sched_priority = kthread_prio };
-
-    rcu_exp_gp_kworker = kthread_create_worker(0, name);
-    if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
-        pr_err("Failed to create %s!\n", name);
-        rcu_exp_gp_kworker = NULL;
-        return;
-    }
-
-    if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
-        sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
-}
-
-static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
-{
-    if (rcu_scheduler_fully_active) {
-        mutex_lock(&rnp->kthread_mutex);
-        rcu_spawn_one_boost_kthread(rnp);
-        rcu_spawn_exp_par_gp_kworker(rnp);
-        mutex_unlock(&rnp->kthread_mutex);
-    }
-}
-
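The block above uses the kthread_worker facility rather than a workqueue so that each node's expedited work runs on a dedicated kthread that can be promoted to SCHED_FIFO at kthread_prio, and rcu_exp_par_gp_task() exposes that task to the affinity code further down. As a rough, self-contained sketch of the facility itself (the demo names are invented, not part of tree.c):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
    pr_info("demo: running on the dedicated worker kthread\n");
}

static int __init demo_init(void)
{
    /* One kthread, created up front; it sleeps until work is queued. */
    demo_worker = kthread_create_worker(0, "demo_worker");
    if (IS_ERR(demo_worker))
        return PTR_ERR(demo_worker);

    kthread_init_work(&demo_work, demo_work_fn);
    kthread_queue_work(demo_worker, &demo_work);
    kthread_flush_work(&demo_work);     /* wait for completion */

    kthread_destroy_worker(demo_worker);
    demo_worker = NULL;
    return 0;
}

Making the workers per-rcu_node (named by rnp - rcu_get_root()) lets expedited grace-period initialization fan out across leaf nodes in parallel while each worker stays close to its node's CPUs.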
 /*
  * Invoked early in the CPU-online process, when pretty much all services
  * are available.  The incoming CPU is not present.
  *
  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
  * offline event can be happening at a given time.  Note also that we can
  * accept some slop in the rsp->gp_seq access due to the fact that this
  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
--- 32 unchanged lines hidden ---
     rdp->gp_seq_needed = rdp->gp_seq;
     rdp->cpu_no_qs.b.norm = true;
     rdp->core_needs_qs = false;
     rdp->rcu_iw_pending = false;
     rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
     rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
     trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
     raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-    rcu_spawn_rnp_kthreads(rnp);
+    rcu_spawn_one_boost_kthread(rnp);
     rcu_spawn_cpu_nocb_kthread(cpu);
     WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
 
     return 0;
 }
 
 /*
- * Update kthreads affinity during CPU-hotplug changes.
- *
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
+ * Update RCU priority boost kthread affinity for CPU-hotplug changes.
  */
-static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
+static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
 {
-    cpumask_var_t cm;
-    unsigned long mask;
-    struct rcu_data *rdp;
-    struct rcu_node *rnp;
-    struct task_struct *task_boost, *task_exp;
+    struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
-    rdp = per_cpu_ptr(&rcu_data, cpu);
-    rnp = rdp->mynode;
-
-    task_boost = rcu_boost_task(rnp);
-    task_exp = rcu_exp_par_gp_task(rnp);
-
-    /*
-     * If CPU is the boot one, those tasks are created later from early
-     * initcall since kthreadd must be created first.
-     */
-    if (!task_boost && !task_exp)
-        return;
-
-    if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
-        return;
-
-    mutex_lock(&rnp->kthread_mutex);
-    mask = rcu_rnp_online_cpus(rnp);
-    for_each_leaf_node_possible_cpu(rnp, cpu)
-        if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
-            cpu != outgoingcpu)
-            cpumask_set_cpu(cpu, cm);
-    cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-    if (cpumask_empty(cm)) {
-        cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
-        if (outgoingcpu >= 0)
-            cpumask_clear_cpu(outgoingcpu, cm);
-    }
-
-    if (task_exp)
-        set_cpus_allowed_ptr(task_exp, cm);
-
-    if (task_boost)
-        set_cpus_allowed_ptr(task_boost, cm);
-
-    mutex_unlock(&rnp->kthread_mutex);
-
-    free_cpumask_var(cm);
+    rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
 }
 
 /*
  * Has the specified (known valid) CPU ever been fully online?
  */
 bool rcu_cpu_beenfullyonline(int cpu)
 {
     struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
--- 257 unchanged lines hidden ---
         rcu_async_relax();
         break;
     default:
         break;
     }
     return NOTIFY_OK;
 }
 
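Back on the longer rcutree_affinity_setting() above: the mask computation works in two steps. It first gathers the leaf node's still-online CPUs minus the outgoing one and intersects them with the HK_TYPE_RCU housekeeping mask; if nothing survives (say, the last CPU of the node is going offline), it falls back to all housekeeping CPUs, again excluding the outgoing one. Either way the boost kthread and the node's expedited kworker land on housekeeping CPUs, keeping RCU's helper threads off CPUs isolated via nohz_full or isolcpus, while ->kthread_mutex serializes concurrent hotplug-driven updates.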
+#ifdef CONFIG_RCU_EXP_KTHREAD
+struct kthread_worker *rcu_exp_gp_kworker;
+struct kthread_worker *rcu_exp_par_gp_kworker;
+
+static void __init rcu_start_exp_gp_kworkers(void)
+{
+    const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
+    const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
+    struct sched_param param = { .sched_priority = kthread_prio };
+
+    rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
+    if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
+        pr_err("Failed to create %s!\n", gp_kworker_name);
+        return;
+    }
+
+    rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
+    if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
+        pr_err("Failed to create %s!\n", par_gp_kworker_name);
+        kthread_destroy_worker(rcu_exp_gp_kworker);
+        return;
+    }
+
+    sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
+    sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
+                               &param);
+}
+
+static inline void rcu_alloc_par_gp_wq(void)
+{
+}
+#else /* !CONFIG_RCU_EXP_KTHREAD */
+struct workqueue_struct *rcu_par_gp_wq;
+
+static void __init rcu_start_exp_gp_kworkers(void)
+{
+}
+
+static inline void rcu_alloc_par_gp_wq(void)
+{
+    rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
+    WARN_ON(!rcu_par_gp_wq);
+}
+#endif /* CONFIG_RCU_EXP_KTHREAD */
+
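This is the 7f66f099 counterpart of the per-node workers seen earlier: with CONFIG_RCU_EXP_KTHREAD, one global worker pair (both raised to SCHED_FIFO) services all expedited grace periods; without it, the parallel phase falls back to an ordinary rcu_par_gp workqueue, which provides a WQ_MEM_RECLAIM rescuer but no real-time priority. Note the all-or-nothing error handling: if the second worker cannot be spawned, the first is torn down again via kthread_destroy_worker() so the code never runs with half of the pair.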
 /*
  * Spawn the kthreads that handle RCU's grace periods.
  */
 static int __init rcu_spawn_gp_kthread(void)
 {
     unsigned long flags;
     struct rcu_node *rnp;
     struct sched_param sp;
--- 18 unchanged lines hidden ---
     wake_up_process(t);
     /* This is a pre-SMP initcall, we expect a single CPU */
     WARN_ON(num_online_cpus() > 1);
     /*
      * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
      * due to rcu_scheduler_fully_active.
      */
     rcu_spawn_cpu_nocb_kthread(smp_processor_id());
-    rcu_spawn_rnp_kthreads(rdp->mynode);
+    rcu_spawn_one_boost_kthread(rdp->mynode);
     rcu_spawn_core_kthreads();
     /* Create kthread worker for expedited GPs */
-    rcu_start_exp_gp_kworker();
+    rcu_start_exp_gp_kworkers();
     return 0;
 }
 early_initcall(rcu_spawn_gp_kthread);
 
 /*
  * This function is invoked towards the end of the scheduler's
  * initialization process.  Before this is called, the idle task might
  * contain synchronous grace-period primitives (during which time, this idle
--- 86 unchanged lines hidden ---
         rnp->level = i;
         INIT_LIST_HEAD(&rnp->blkd_tasks);
         rcu_init_one_nocb(rnp);
         init_waitqueue_head(&rnp->exp_wq[0]);
         init_waitqueue_head(&rnp->exp_wq[1]);
         init_waitqueue_head(&rnp->exp_wq[2]);
         init_waitqueue_head(&rnp->exp_wq[3]);
         spin_lock_init(&rnp->exp_lock);
-        mutex_init(&rnp->kthread_mutex);
+        mutex_init(&rnp->boost_kthread_mutex);
         raw_spin_lock_init(&rnp->exp_poll_lock);
         rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
         INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
         }
     }
 
     init_swait_queue_head(&rcu_state.gp_wq);
     init_swait_queue_head(&rcu_state.expedited_wq);
--- 220 unchanged lines hidden ---
     WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
     rcutree_prepare_cpu(cpu);
     rcutree_report_cpu_starting(cpu);
     rcutree_online_cpu(cpu);
 
     /* Create workqueue for Tree SRCU and for expedited GPs. */
     rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
     WARN_ON(!rcu_gp_wq);
+    rcu_alloc_par_gp_wq();
 
     /* Fill in default value for rcutree.qovld boot parameter. */
     /* -After- the rcu_node ->lock fields are initialized! */
     if (qovld < 0)
         qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
     else
         qovld_calc = qovld;
 
     // Kick-start in case any polled grace periods started early.
     (void)start_poll_synchronize_rcu_expedited();
 
     rcu_test_sync_prims();
 }
 
 #include "tree_stall.h"
 #include "tree_exp.h"
 #include "tree_nocb.h"
 #include "tree_plugin.h"