xref: /linux/kernel/rcu/tree_plugin.h (revision 2db4df0c09eeb209726261f43fc556360b38ec99)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  * Internal non-public definitions that provide either classic
5  * or preemptible semantics.
6  *
7  * Copyright Red Hat, 2009
8  * Copyright IBM Corporation, 2009
9  *
10  * Author: Ingo Molnar <mingo@elte.hu>
11  *	   Paul E. McKenney <paulmck@linux.ibm.com>
12  */
13 
14 #include "../locking/rtmutex_common.h"
15 
16 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
17 {
18 	/*
19 	 * In order to read the offloaded state of an rdp in a safe
20 	 * and stable way and prevent its value from changing under
21 	 * us, we must hold either the barrier mutex, the cpu
22 	 * hotplug lock (read or write), or the nocb lock. Local
23 	 * non-preemptible reads are also safe. NOCB kthreads and
24 	 * timers have their own means of synchronization against the
25 	 * offloaded state updaters.
26 	 */
27 	RCU_NOCB_LOCKDEP_WARN(
28 		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
29 		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
30 		  lockdep_is_held(&rdp->nocb_lock) ||
31 		  lockdep_is_held(&rcu_state.nocb_mutex) ||
32 		  ((!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) || softirq_count()) &&
33 		   rdp == this_cpu_ptr(&rcu_data)) ||
34 		  rcu_current_is_nocb_kthread(rdp)),
35 		"Unsafe read of RCU_NOCB offloaded state"
36 	);
37 
38 	return rcu_segcblist_is_offloaded(&rdp->cblist);
39 }
40 
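/*
 * Illustrative sketch, not part of this file: one way to satisfy the
 * "local non-preemptible read" case checked above is to sample the
 * current CPU's own rcu_data with preemption disabled, for example:
 *
 *	preempt_disable();
 *	offloaded = rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
 *	preempt_enable();
 *
 * Other callers must instead hold one of the locks listed in the
 * lockdep expression above.
 */
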
41 /*
42  * Check the RCU kernel configuration parameters and print informative
43  * messages about anything out of the ordinary.
44  */
45 static void __init rcu_bootup_announce_oddness(void)
46 {
47 	if (IS_ENABLED(CONFIG_RCU_TRACE))
48 		pr_info("\tRCU event tracing is enabled.\n");
49 	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
50 	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
51 		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
52 			RCU_FANOUT);
53 	if (rcu_fanout_exact)
54 		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
55 	if (IS_ENABLED(CONFIG_PROVE_RCU))
56 		pr_info("\tRCU lockdep checking is enabled.\n");
57 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
58 		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
59 	if (RCU_NUM_LVLS >= 4)
60 		pr_info("\tFour (or more)-level hierarchy is enabled.\n");
61 	if (RCU_FANOUT_LEAF != 16)
62 		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
63 			RCU_FANOUT_LEAF);
64 	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
65 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
66 			rcu_fanout_leaf);
67 	if (nr_cpu_ids != NR_CPUS)
68 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
69 #ifdef CONFIG_RCU_BOOST
70 	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
71 		kthread_prio, CONFIG_RCU_BOOST_DELAY);
72 #endif
73 	if (blimit != DEFAULT_RCU_BLIMIT)
74 		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
75 	if (qhimark != DEFAULT_RCU_QHIMARK)
76 		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
77 	if (qlowmark != DEFAULT_RCU_QLOMARK)
78 		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
79 	if (qovld != DEFAULT_RCU_QOVLD)
80 		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
81 	if (jiffies_till_first_fqs != ULONG_MAX)
82 		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
83 	if (jiffies_till_next_fqs != ULONG_MAX)
84 		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
85 	if (jiffies_till_sched_qs != ULONG_MAX)
86 		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
87 	if (rcu_kick_kthreads)
88 		pr_info("\tKick kthreads if too-long grace period.\n");
89 	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
90 		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
91 	if (gp_preinit_delay)
92 		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
93 	if (gp_init_delay)
94 		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
95 	if (gp_cleanup_delay)
96 		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
97 	if (nohz_full_patience_delay < 0) {
98 		pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
99 		nohz_full_patience_delay = 0;
100 	} else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
101 		pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
102 		nohz_full_patience_delay = 5 * MSEC_PER_SEC;
103 	} else if (nohz_full_patience_delay) {
104 		pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
105 	}
106 	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
107 	if (!use_softirq)
108 		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
109 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
110 		pr_info("\tRCU debug extended QS entry/exit.\n");
111 	rcupdate_announce_bootup_oddness();
112 }
113 
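/*
 * Illustrative example, not part of this file: boot-command-line
 * settings such as
 *
 *	rcutree.blimit=100 rcutree.qhimark=20000 rcutree.rcu_fanout_leaf=32
 *
 * are what cause the corresponding "Boot-time adjustment ..." messages
 * above to be printed during boot.
 */
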
114 #ifdef CONFIG_PREEMPT_RCU
115 
116 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
117 static void rcu_read_unlock_special(struct task_struct *t);
118 
119 /*
120  * Tell them what RCU they are running.
121  */
122 static void __init rcu_bootup_announce(void)
123 {
124 	pr_info("Preemptible hierarchical RCU implementation.\n");
125 	rcu_bootup_announce_oddness();
126 }
127 
128 /* Flags for rcu_preempt_ctxt_queue() decision table. */
129 #define RCU_GP_TASKS	0x8
130 #define RCU_EXP_TASKS	0x4
131 #define RCU_GP_BLKD	0x2
132 #define RCU_EXP_BLKD	0x1
133 
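/*
 * Worked example (illustrative): blkd_state == 0x9 corresponds to
 * RCU_GP_TASKS | RCU_EXP_BLKD, meaning that a normal grace period is
 * already waiting on at least one queued task and that the newly
 * blocked task is blocking the current expedited grace period.  Per
 * the decision table below, such a task is queued at the tail of
 * ->blkd_tasks.
 */
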
134 /*
135  * Queues a task preempted within an RCU-preempt read-side critical
136  * section into the appropriate location within the ->blkd_tasks list,
137  * depending on the states of any ongoing normal and expedited grace
138  * periods.  The ->gp_tasks pointer indicates which element the normal
139  * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
140  * indicates which element the expedited grace period is waiting on (again,
141  * NULL if none).  If a grace period is waiting on a given element in the
142  * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
143  * adding a task to the tail of the list blocks any grace period that is
144  * already waiting on one of the elements.  In contrast, adding a task
145  * to the head of the list won't block any grace period that is already
146  * waiting on one of the elements.
147  *
148  * This queuing is imprecise, and can sometimes make an ongoing grace
149  * period wait for a task that is not strictly speaking blocking it.
150  * Given the choice, we needlessly block a normal grace period rather than
151  * blocking an expedited grace period.
152  *
153  * Note that an endless sequence of expedited grace periods still cannot
154  * indefinitely postpone a normal grace period.  Eventually, all of the
155  * fixed number of preempted tasks blocking the normal grace period that are
156  * not also blocking the expedited grace period will resume and complete
157  * their RCU read-side critical sections.  At that point, the ->gp_tasks
158  * pointer will equal the ->exp_tasks pointer, at which point the end of
159  * the corresponding expedited grace period will also be the end of the
160  * normal grace period.
161  */
162 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
163 	__releases(rnp->lock) /* But leaves interrupts disabled. */
164 {
165 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
166 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
167 			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
168 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
169 	struct task_struct *t = current;
170 
171 	raw_lockdep_assert_held_rcu_node(rnp);
172 	WARN_ON_ONCE(rdp->mynode != rnp);
173 	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
174 	/* RCU better not be waiting on newly onlined CPUs! */
175 	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
176 		     rdp->grpmask);
177 
178 	/*
179 	 * Decide where to queue the newly blocked task.  In theory,
180 	 * this could be an if-statement.  In practice, when I tried
181 	 * that, it was quite messy.
182 	 */
183 	switch (blkd_state) {
184 	case 0:
185 	case                RCU_EXP_TASKS:
186 	case                RCU_EXP_TASKS | RCU_GP_BLKD:
187 	case RCU_GP_TASKS:
188 	case RCU_GP_TASKS | RCU_EXP_TASKS:
189 
190 		/*
191 		 * Blocking neither GP, or first task blocking the normal
192 		 * GP but not blocking the already-waiting expedited GP.
193 		 * Queue at the head of the list to avoid unnecessarily
194 		 * blocking the already-waiting GPs.
195 		 */
196 		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
197 		break;
198 
199 	case                                              RCU_EXP_BLKD:
200 	case                                RCU_GP_BLKD:
201 	case                                RCU_GP_BLKD | RCU_EXP_BLKD:
202 	case RCU_GP_TASKS |                               RCU_EXP_BLKD:
203 	case RCU_GP_TASKS |                 RCU_GP_BLKD | RCU_EXP_BLKD:
204 	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
205 
206 		/*
207 		 * First task arriving that blocks either GP, or first task
208 		 * arriving that blocks the expedited GP (with the normal
209 		 * GP already waiting), or a task arriving that blocks
210 		 * both GPs with both GPs already waiting.  Queue at the
211 		 * tail of the list to avoid any GP waiting on any of the
212 		 * already queued tasks that are not blocking it.
213 		 */
214 		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
215 		break;
216 
217 	case                RCU_EXP_TASKS |               RCU_EXP_BLKD:
218 	case                RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
219 	case RCU_GP_TASKS | RCU_EXP_TASKS |               RCU_EXP_BLKD:
220 
221 		/*
222 		 * Second or subsequent task blocking the expedited GP.
223 		 * The task either does not block the normal GP, or is the
224 		 * first task blocking the normal GP.  Queue just after
225 		 * the first task blocking the expedited GP.
226 		 */
227 		list_add(&t->rcu_node_entry, rnp->exp_tasks);
228 		break;
229 
230 	case RCU_GP_TASKS |                 RCU_GP_BLKD:
231 	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD:
232 
233 		/*
234 		 * Second or subsequent task blocking the normal GP.
235 		 * The task does not block the expedited GP. Queue just
236 		 * after the first task blocking the normal GP.
237 		 */
238 		list_add(&t->rcu_node_entry, rnp->gp_tasks);
239 		break;
240 
241 	default:
242 
243 		/* Yet another exercise in excessive paranoia. */
244 		WARN_ON_ONCE(1);
245 		break;
246 	}
247 
248 	/*
249 	 * We have now queued the task.  If it was the first one to
250 	 * block either grace period, update the ->gp_tasks and/or
251 	 * ->exp_tasks pointers, respectively, to reference the newly
252 	 * blocked tasks.
253 	 */
254 	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
255 		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
256 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
257 	}
258 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
259 		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
260 	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
261 		     !(rnp->qsmask & rdp->grpmask));
262 	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
263 		     !(rnp->expmask & rdp->grpmask));
264 	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
265 
266 	/*
267 	 * Report the quiescent state for the expedited GP.  This expedited
268 	 * GP should not be able to end until we report, so there should be
269 	 * no need to check for a subsequent expedited GP.  (Though we are
270 	 * still in a quiescent state in any case.)
271 	 *
272 	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
273 	 */
274 	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
275 		rcu_report_exp_rdp(rdp);
276 	else
277 		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
278 	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
279 }
280 
281 /*
282  * Record a preemptible-RCU quiescent state for the specified CPU.
283  * Note that this does not necessarily mean that the task currently running
284  * on the CPU is in a quiescent state:  Instead, it means that the current
285  * grace period need not wait on any RCU read-side critical section that
286  * starts later on this CPU.  It also means that if the current task is
287  * in an RCU read-side critical section, it has already added itself to
288  * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
289  * current task, there might be any number of other tasks blocked while
290  * in an RCU read-side critical section.
291  *
292  * Unlike non-preemptible-RCU, quiescent state reports for expedited
293  * grace periods are handled separately via deferred quiescent states
294  * and context switch events.
295  *
296  * Callers to this function must disable preemption.
297  */
298 static void rcu_qs(void)
299 {
300 	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
301 	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
302 		trace_rcu_grace_period(TPS("rcu_preempt"),
303 				       __this_cpu_read(rcu_data.gp_seq),
304 				       TPS("cpuqs"));
305 		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
306 		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
307 		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
308 	}
309 }
310 
311 /*
312  * We have entered the scheduler, and the current task might soon be
313  * context-switched away from.  If this task is in an RCU read-side
314  * critical section, we will no longer be able to rely on the CPU to
315  * record that fact, so we enqueue the task on the blkd_tasks list.
316  * The task will dequeue itself when it exits the outermost enclosing
317  * RCU read-side critical section.  Therefore, the current grace period
318  * cannot be permitted to complete until the blkd_tasks list entries
319  * predating the current grace period drain, in other words, until
320  * rnp->gp_tasks becomes NULL.
321  *
322  * Caller must disable interrupts.
323  */
324 void rcu_note_context_switch(bool preempt)
325 {
326 	struct task_struct *t = current;
327 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
328 	struct rcu_node *rnp;
329 
330 	trace_rcu_utilization(TPS("Start context switch"));
331 	lockdep_assert_irqs_disabled();
332 	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
333 	if (rcu_preempt_depth() > 0 &&
334 	    !t->rcu_read_unlock_special.b.blocked) {
335 
336 		/* Possibly blocking in an RCU read-side critical section. */
337 		rnp = rdp->mynode;
338 		raw_spin_lock_rcu_node(rnp);
339 		t->rcu_read_unlock_special.b.blocked = true;
340 		t->rcu_blocked_node = rnp;
341 
342 		/*
343 		 * Verify the CPU's sanity, trace the preemption, and
344 		 * then queue the task as required based on the states
345 		 * of any ongoing and expedited grace periods.
346 		 */
347 		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
348 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
349 		trace_rcu_preempt_task(rcu_state.name,
350 				       t->pid,
351 				       (rnp->qsmask & rdp->grpmask)
352 				       ? rnp->gp_seq
353 				       : rcu_seq_snap(&rnp->gp_seq));
354 		rcu_preempt_ctxt_queue(rnp, rdp);
355 	} else {
356 		rcu_preempt_deferred_qs(t);
357 	}
358 
359 	/*
360 	 * Either we were not in an RCU read-side critical section to
361 	 * begin with, or we have now recorded that critical section
362 	 * globally.  Either way, we can now note a quiescent state
363 	 * for this CPU.  Again, if we were in an RCU read-side critical
364 	 * section, and if that critical section was blocking the current
365 	 * grace period, then the fact that the task has been enqueued
366 	 * means that we continue to block the current grace period.
367 	 */
368 	rcu_qs();
369 	if (rdp->cpu_no_qs.b.exp)
370 		rcu_report_exp_rdp(rdp);
371 	rcu_tasks_qs(current, preempt);
372 	trace_rcu_utilization(TPS("End context switch"));
373 }
374 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
375 
376 /*
377  * Check for preempted RCU readers blocking the current grace period
378  * for the specified rcu_node structure.  If the caller needs a reliable
379  * answer, it must hold the rcu_node's ->lock.
380  */
381 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
382 {
383 	return READ_ONCE(rnp->gp_tasks) != NULL;
384 }
385 
386 /* limit value for ->rcu_read_lock_nesting. */
387 #define RCU_NEST_PMAX (INT_MAX / 2)
388 
389 static void rcu_preempt_read_enter(void)
390 {
391 	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
392 }
393 
394 static int rcu_preempt_read_exit(void)
395 {
396 	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
397 
398 	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
399 	return ret;
400 }
401 
402 static void rcu_preempt_depth_set(int val)
403 {
404 	WRITE_ONCE(current->rcu_read_lock_nesting, val);
405 }
406 
407 /*
408  * Preemptible RCU implementation for rcu_read_lock().
409  * Just increment ->rcu_read_lock_nesting, shared state will be updated
410  * if we block.
411  */
412 void __rcu_read_lock(void)
413 {
414 	rcu_preempt_read_enter();
415 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
416 		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
417 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
418 		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
419 	barrier();  /* critical section after entry code. */
420 }
421 EXPORT_SYMBOL_GPL(__rcu_read_lock);
422 
423 /*
424  * Preemptible RCU implementation for rcu_read_unlock().
425  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
426  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
427  * invoke rcu_read_unlock_special() to clean up after a context switch
428  * in an RCU read-side critical section and other special cases.
429  */
430 void __rcu_read_unlock(void)
431 {
432 	struct task_struct *t = current;
433 
434 	barrier();  // critical section before exit code.
435 	if (rcu_preempt_read_exit() == 0) {
436 		barrier();  // critical-section exit before .s check.
437 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
438 			rcu_read_unlock_special(t);
439 	}
440 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
441 		int rrln = rcu_preempt_depth();
442 
443 		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
444 	}
445 }
446 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
447 
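/*
 * Illustrative sketch, not part of this file: in CONFIG_PREEMPT_RCU=y
 * kernels the public reader API maps onto the two functions above, so
 * a typical read-side critical section looks like:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * where "gp" and "do_something_with()" are placeholders.  If the task
 * is preempted between the two calls, rcu_note_context_switch() and
 * rcu_preempt_ctxt_queue() above queue it on the leaf rcu_node's
 * ->blkd_tasks list.
 */
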
448 /*
449  * Advance a ->blkd_tasks-list pointer to the next entry, returning
450  * NULL instead if at the end of the list.
451  */
452 static struct list_head *rcu_next_node_entry(struct task_struct *t,
453 					     struct rcu_node *rnp)
454 {
455 	struct list_head *np;
456 
457 	np = t->rcu_node_entry.next;
458 	if (np == &rnp->blkd_tasks)
459 		np = NULL;
460 	return np;
461 }
462 
463 /*
464  * Return true if the specified rcu_node structure has tasks that were
465  * preempted within an RCU read-side critical section.
466  */
467 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
468 {
469 	return !list_empty(&rnp->blkd_tasks);
470 }
471 
472 /*
473  * Report deferred quiescent states.  The deferral time can
474  * be quite short, for example, in the case of the call from
475  * rcu_read_unlock_special().
476  */
477 static notrace void
478 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
479 {
480 	bool empty_exp;
481 	bool empty_norm;
482 	bool empty_exp_now;
483 	struct list_head *np;
484 	bool drop_boost_mutex = false;
485 	struct rcu_data *rdp;
486 	struct rcu_node *rnp;
487 	union rcu_special special;
488 
489 	rdp = this_cpu_ptr(&rcu_data);
490 	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
491 		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
492 
493 	/*
494 	 * If RCU core is waiting for this CPU to exit its critical section,
495 	 * report the fact that it has exited.  Because irqs are disabled,
496 	 * t->rcu_read_unlock_special cannot change.
497 	 */
498 	special = t->rcu_read_unlock_special;
499 	if (!special.s && !rdp->cpu_no_qs.b.exp) {
500 		local_irq_restore(flags);
501 		return;
502 	}
503 	t->rcu_read_unlock_special.s = 0;
504 	if (special.b.need_qs) {
505 		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
506 			rdp->cpu_no_qs.b.norm = false;
507 			rcu_report_qs_rdp(rdp);
508 			udelay(rcu_unlock_delay);
509 		} else {
510 			rcu_qs();
511 		}
512 	}
513 
514 	/*
515 	 * Respond to a request by an expedited grace period for a
516 	 * quiescent state from this CPU.  Note that requests from
517 	 * tasks are handled when removing the task from the
518 	 * blocked-tasks list below.
519 	 */
520 	if (rdp->cpu_no_qs.b.exp)
521 		rcu_report_exp_rdp(rdp);
522 
523 	/* Clean up if blocked during RCU read-side critical section. */
524 	if (special.b.blocked) {
525 
526 		/*
527 		 * Remove this task from the list it blocked on.  The task
528 		 * now remains queued on the rcu_node corresponding to the
529 		 * CPU it first blocked on, so there is no longer any need
530 		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
531 		 */
532 		rnp = t->rcu_blocked_node;
533 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
534 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
535 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
536 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
537 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
538 			     (!empty_norm || rnp->qsmask));
539 		empty_exp = sync_rcu_exp_done(rnp);
540 		np = rcu_next_node_entry(t, rnp);
541 		list_del_init(&t->rcu_node_entry);
542 		t->rcu_blocked_node = NULL;
543 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
544 						rnp->gp_seq, t->pid);
545 		if (&t->rcu_node_entry == rnp->gp_tasks)
546 			WRITE_ONCE(rnp->gp_tasks, np);
547 		if (&t->rcu_node_entry == rnp->exp_tasks)
548 			WRITE_ONCE(rnp->exp_tasks, np);
549 		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
550 			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
551 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
552 			if (&t->rcu_node_entry == rnp->boost_tasks)
553 				WRITE_ONCE(rnp->boost_tasks, np);
554 		}
555 
556 		/*
557 		 * If this was the last task on the current list, and if
558 		 * we aren't waiting on any CPUs, report the quiescent state.
559 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
560 		 * so we must take a snapshot of the expedited state.
561 		 */
562 		empty_exp_now = sync_rcu_exp_done(rnp);
563 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
564 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
565 							 rnp->gp_seq,
566 							 0, rnp->qsmask,
567 							 rnp->level,
568 							 rnp->grplo,
569 							 rnp->grphi,
570 							 !!rnp->gp_tasks);
571 			rcu_report_unblock_qs_rnp(rnp, flags);
572 		} else {
573 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
574 		}
575 
576 		/*
577 		 * If this was the last task on the expedited lists,
578 		 * then we need to report up the rcu_node hierarchy.
579 		 */
580 		if (!empty_exp && empty_exp_now)
581 			rcu_report_exp_rnp(rnp, true);
582 
583 		/* Unboost if we were boosted. */
584 		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
585 			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
586 	} else {
587 		local_irq_restore(flags);
588 	}
589 }
590 
591 /*
592  * Is a deferred quiescent-state pending, and are we also not in
593  * an RCU read-side critical section?  It is the caller's responsibility
594  * to ensure it is otherwise safe to report any deferred quiescent
595  * states.  The reason for this is that it is safe to report a
596  * quiescent state during context switch even though preemption
597  * is disabled.  This function cannot be expected to understand these
598  * nuances, so the caller must handle them.
599  */
600 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
601 {
602 	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
603 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
604 	       rcu_preempt_depth() == 0;
605 }
606 
607 /*
608  * Report a deferred quiescent state if needed and safe to do so.
609  * As with rcu_preempt_need_deferred_qs(), "safe" involves only
610  * not being in an RCU read-side critical section.  The caller must
611  * evaluate safety in terms of interrupt, softirq, and preemption
612  * disabling.
613  */
614 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
615 {
616 	unsigned long flags;
617 
618 	if (!rcu_preempt_need_deferred_qs(t))
619 		return;
620 	local_irq_save(flags);
621 	rcu_preempt_deferred_qs_irqrestore(t, flags);
622 }
623 
624 /*
625  * Minimal handler to give the scheduler a chance to re-evaluate.
626  */
627 static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
628 {
629 	unsigned long flags;
630 	struct rcu_data *rdp;
631 
632 	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
633 	local_irq_save(flags);
634 
635 	/*
636 	 * If the IRQ work handler happens to run in the middle of RCU read-side
637 	 * critical section, it could be ineffective in getting the scheduler's
638 	 * attention to report a deferred quiescent state (the whole point of the
639 	 * IRQ work). For this reason, requeue the IRQ work.
640 	 *
641 	 * Basically, we want to avoid following situation:
642 	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
643 	 * 2. CPU enters new rcu_read_lock()
644 	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
645 	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
646 	 * 5. Deferred QS reporting does not happen.
647 	 */
648 	if (rcu_preempt_depth() > 0)
649 		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
650 
651 	local_irq_restore(flags);
652 }
653 
654 /*
655  * Check if expedited grace period processing during unlock is needed.
656  *
657  * This function determines whether expedited handling is required based on:
658  * 1. Task blocking an expedited grace period (based on a heuristic, could be
659  *    false-positive, see below.)
660  * 2. CPU participating in an expedited grace period
661  * 3. Strict grace period mode requiring expedited handling
662  * 4. RCU priority deboosting needs when interrupts were disabled
663  *
664  * @t: The task being checked
665  * @rdp: The per-CPU RCU data
666  * @rnp: The RCU node for this CPU
667  * @irqs_were_disabled: Whether interrupts were disabled before rcu_read_unlock()
668  *
669  * Returns true if expedited processing of the rcu_read_unlock() is needed.
670  */
671 static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
672 				      struct rcu_data *rdp,
673 				      struct rcu_node *rnp,
674 				      bool irqs_were_disabled)
675 {
676 	/*
677 	 * Check if this task is blocking an expedited grace period. If the
678 	 * task was preempted within an RCU read-side critical section and is
679 	 * on the expedited grace period blockers list (exp_tasks), we need
680 	 * expedited handling to unblock the expedited GP. This is not an exact
681  * check because 't' might not be on the exp_tasks list at all - it's
682 	 * just a fast heuristic that can be false-positive sometimes.
683 	 */
684 	if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
685 		return true;
686 
687 	/*
688 	 * Check if this CPU is participating in an expedited grace period.
689 	 * The expmask bitmap tracks which CPUs need to check in for the
690 	 * current expedited GP. If our CPU's bit is set, we need expedited
691 	 * handling to help complete the expedited GP.
692 	 */
693 	if (rdp->grpmask & READ_ONCE(rnp->expmask))
694 		return true;
695 
696 	/*
697 	 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, all grace periods
698 	 * are treated as short for testing purposes even if that means
699 	 * disturbing the system more. Check if either:
700 	 * - This CPU has not yet reported a quiescent state, or
701 	 * - This task was preempted within an RCU critical section
702 	 * In either case, require expedited handling for strict GP mode.
703 	 */
704 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
705 	    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node))
706 		return true;
707 
708 	/*
709 	 * RCU priority boosting case: If a task is subject to RCU priority
710 	 * boosting and exits an RCU read-side critical section with interrupts
711 	 * disabled, we need expedited handling to ensure timely deboosting.
712 	 * Without this, a low-priority task could incorrectly run at high
713 	 * real-time priority for an extended period degrading real-time
714 	 * responsiveness. This applies to all CONFIG_RCU_BOOST=y kernels,
715 	 * not just to PREEMPT_RT.
716 	 */
717 	if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node)
718 		return true;
719 
720 	return false;
721 }
722 
723 /*
724  * Handle special cases during rcu_read_unlock(), such as needing to
725  * notify RCU core processing or task having blocked during the RCU
726  * read-side critical section.
727  */
728 static void rcu_read_unlock_special(struct task_struct *t)
729 {
730 	unsigned long flags;
731 	bool irqs_were_disabled;
732 	bool preempt_bh_were_disabled =
733 			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
734 
735 	/* NMI handlers cannot block and cannot safely manipulate state. */
736 	if (in_nmi())
737 		return;
738 
739 	local_irq_save(flags);
740 	irqs_were_disabled = irqs_disabled_flags(flags);
741 	if (preempt_bh_were_disabled || irqs_were_disabled) {
742 		bool needs_exp; // Expedited handling needed.
743 		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
744 		struct rcu_node *rnp = rdp->mynode;
745 
746 		needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);
747 
748 		// Need to defer quiescent state until everything is enabled.
749 		if (use_softirq && (in_hardirq() || (needs_exp && !irqs_were_disabled))) {
750 			// Using softirq, safe to awaken, and either the
751 			// wakeup is free or there is either an expedited
752 			// GP in flight or a potential need to deboost.
753 			raise_softirq_irqoff(RCU_SOFTIRQ);
754 		} else {
755 			// Enabling BH or preempt does reschedule, so...
756 			// Also if no expediting and no possible deboosting,
757 			// slow is OK.  Plus nohz_full CPUs eventually get
758 			// tick enabled.
759 			set_tsk_need_resched(current);
760 			set_preempt_need_resched();
761 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
762 			    needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
763 			    cpu_online(rdp->cpu)) {
764 				// Get scheduler to re-evaluate and call hooks.
765 				// If !IRQ_WORK, FQS scan will eventually IPI.
766 				rdp->defer_qs_iw =
767 					IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler);
768 				rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
769 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
770 			}
771 		}
772 		local_irq_restore(flags);
773 		return;
774 	}
775 	rcu_preempt_deferred_qs_irqrestore(t, flags);
776 }
777 
778 /*
779  * Check that the list of blocked tasks for the newly completed grace
780  * period is in fact empty.  It is a serious bug to complete a grace
781  * period that still has RCU readers blocked!  This function must be
782  * invoked -before- updating this rnp's ->gp_seq.
783  *
784  * Also, if there are blocked tasks on the list, they automatically
785  * block the newly created grace period, so set up ->gp_tasks accordingly.
786  */
787 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
788 {
789 	struct task_struct *t;
790 
791 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
792 	raw_lockdep_assert_held_rcu_node(rnp);
793 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
794 		dump_blkd_tasks(rnp, 10);
795 	if (rcu_preempt_has_tasks(rnp) &&
796 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
797 		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
798 		t = container_of(rnp->gp_tasks, struct task_struct,
799 				 rcu_node_entry);
800 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
801 						rnp->gp_seq, t->pid);
802 	}
803 	WARN_ON_ONCE(rnp->qsmask);
804 }
805 
806 /*
807  * Check for a quiescent state from the current CPU, including voluntary
808  * context switches for Tasks RCU.  When a task blocks, the task is
809  * recorded in the corresponding CPU's rcu_node structure, which is checked
810  * elsewhere, hence this function need only check for quiescent states
811  * related to the current CPU, not to those related to tasks.
812  */
813 static void rcu_flavor_sched_clock_irq(int user)
814 {
815 	struct task_struct *t = current;
816 
817 	lockdep_assert_irqs_disabled();
818 	if (rcu_preempt_depth() > 0 ||
819 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
820 		/* No QS, force context switch if deferred. */
821 		if (rcu_preempt_need_deferred_qs(t)) {
822 			set_tsk_need_resched(t);
823 			set_preempt_need_resched();
824 		}
825 	} else if (rcu_preempt_need_deferred_qs(t)) {
826 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
827 		return;
828 	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
829 		rcu_qs(); /* Report immediate QS. */
830 		return;
831 	}
832 
833 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
834 	if (rcu_preempt_depth() > 0 &&
835 	    __this_cpu_read(rcu_data.core_needs_qs) &&
836 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
837 	    !t->rcu_read_unlock_special.b.need_qs &&
838 	    time_after(jiffies, rcu_state.gp_start + HZ))
839 		t->rcu_read_unlock_special.b.need_qs = true;
840 }
841 
842 /*
843  * Check for a task exiting while in a preemptible-RCU read-side
844  * critical section, clean up if so.  No need to issue warnings, as
845  * debug_check_no_locks_held() already does this if lockdep is enabled.
846  * Besides, if this function does anything other than just immediately
847  * return, there was a bug of some sort.  Spewing warnings from this
848  * function is like as not to simply obscure important prior warnings.
849  */
850 void exit_rcu(void)
851 {
852 	struct task_struct *t = current;
853 
854 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
855 		rcu_preempt_depth_set(1);
856 		barrier();
857 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
858 	} else if (unlikely(rcu_preempt_depth())) {
859 		rcu_preempt_depth_set(1);
860 	} else {
861 		return;
862 	}
863 	__rcu_read_unlock();
864 	rcu_preempt_deferred_qs(current);
865 }
866 
867 /*
868  * Dump the blocked-tasks state, but limit the list dump to the
869  * specified number of elements.
870  */
871 static void
872 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
873 {
874 	int cpu;
875 	int i;
876 	struct list_head *lhp;
877 	struct rcu_data *rdp;
878 	struct rcu_node *rnp1;
879 
880 	raw_lockdep_assert_held_rcu_node(rnp);
881 	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
882 		__func__, rnp->grplo, rnp->grphi, rnp->level,
883 		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
884 	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
885 		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
886 			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
887 	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
888 		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
889 		READ_ONCE(rnp->exp_tasks));
890 	pr_info("%s: ->blkd_tasks", __func__);
891 	i = 0;
892 	list_for_each(lhp, &rnp->blkd_tasks) {
893 		pr_cont(" %p", lhp);
894 		if (++i >= ncheck)
895 			break;
896 	}
897 	pr_cont("\n");
898 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
899 		rdp = per_cpu_ptr(&rcu_data, cpu);
900 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
901 			cpu, ".o"[rcu_rdp_cpu_online(rdp)],
902 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
903 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
904 	}
905 }
906 
907 #else /* #ifdef CONFIG_PREEMPT_RCU */
908 
909 /*
910  * If strict grace periods are enabled, and if the calling
911  * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
912  * report that quiescent state and, if requested, spin for a bit.
913  */
914 void rcu_read_unlock_strict(void)
915 {
916 	struct rcu_data *rdp;
917 
918 	if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread)
919 		return;
920 
921 	/*
922 	 * rcu_report_qs_rdp() can only be invoked with a stable rdp and
923 	 * from the local CPU.
924 	 *
925 	 * The in_atomic_preempt_off() check ensures that we come here holding
926 	 * the last preempt_count (which will get dropped once we return to
927 	 * __rcu_read_unlock()).
928 	 */
929 	rdp = this_cpu_ptr(&rcu_data);
930 	rdp->cpu_no_qs.b.norm = false;
931 	rcu_report_qs_rdp(rdp);
932 	udelay(rcu_unlock_delay);
933 }
934 EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
935 
936 /*
937  * Tell them what RCU they are running.
938  */
939 static void __init rcu_bootup_announce(void)
940 {
941 	pr_info("Hierarchical RCU implementation.\n");
942 	rcu_bootup_announce_oddness();
943 }
944 
945 /*
946  * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
947  * how many quiescent states passed, just if there was at least one since
948  * the start of the grace period, this just sets a flag.  The caller must
949  * have disabled preemption.
950  */
951 static void rcu_qs(void)
952 {
953 	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
954 	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
955 		return;
956 	trace_rcu_grace_period(TPS("rcu_sched"),
957 			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
958 	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
959 	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
960 		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
961 }
962 
963 /*
964  * Register an urgently needed quiescent state.  If there is an
965  * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
966  * dyntick-idle quiescent state visible to other CPUs, which will in
967  * some cases serve for expedited as well as normal grace periods.
968  * Either way, register a lightweight quiescent state.
969  */
970 void rcu_all_qs(void)
971 {
972 	unsigned long flags;
973 
974 	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
975 		return;
976 	preempt_disable();  // For CONFIG_PREEMPT_COUNT=y kernels
977 	/* Load rcu_urgent_qs before other flags. */
978 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
979 		preempt_enable();
980 		return;
981 	}
982 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
983 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
984 		local_irq_save(flags);
985 		rcu_momentary_eqs();
986 		local_irq_restore(flags);
987 	}
988 	rcu_qs();
989 	preempt_enable();
990 }
991 EXPORT_SYMBOL_GPL(rcu_all_qs);
992 
993 /*
994  * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
995  */
996 void rcu_note_context_switch(bool preempt)
997 {
998 	trace_rcu_utilization(TPS("Start context switch"));
999 	rcu_qs();
1000 	/* Load rcu_urgent_qs before other flags. */
1001 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
1002 		goto out;
1003 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
1004 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
1005 		rcu_momentary_eqs();
1006 out:
1007 	rcu_tasks_qs(current, preempt);
1008 	trace_rcu_utilization(TPS("End context switch"));
1009 }
1010 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
1011 
1012 /*
1013  * Because preemptible RCU does not exist, there are never any preempted
1014  * RCU readers.
1015  */
1016 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
1017 {
1018 	return 0;
1019 }
1020 
1021 /*
1022  * Because there is no preemptible RCU, there can be no readers blocked.
1023  */
1024 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
1025 {
1026 	return false;
1027 }
1028 
1029 /*
1030  * Because there is no preemptible RCU, there can be no deferred quiescent
1031  * states.
1032  */
1033 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
1034 {
1035 	return false;
1036 }
1037 
1038 // Except that we do need to respond to a request by an expedited
1039 // grace period for a quiescent state from this CPU.  Note that in
1040 // non-preemptible kernels, there can be no context switches within RCU
1041 // read-side critical sections, which in turn means that the leaf rcu_node
1042 // structure's blocked-tasks list is always empty.  is therefore no need to
1043 // structure's blocked-tasks list is always empty.  There is therefore no need to
1044 // and this function is only called from such a quiescent state.
1045 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
1046 {
1047 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1048 
1049 	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
1050 		rcu_report_exp_rdp(rdp);
1051 }
1052 
1053 /*
1054  * Because there is no preemptible RCU, there can be no readers blocked,
1055  * so there is no need to check for blocked tasks.  So check only for
1056  * bogus qsmask values.
1057  */
1058 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1059 {
1060 	WARN_ON_ONCE(rnp->qsmask);
1061 }
1062 
1063 /*
1064  * Check to see if this CPU is in a non-context-switch quiescent state,
1065  * namely user mode and idle loop.
1066  */
1067 static void rcu_flavor_sched_clock_irq(int user)
1068 {
1069 	if (user || rcu_is_cpu_rrupt_from_idle() ||
1070 	     (IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
1071 	      (preempt_count() == HARDIRQ_OFFSET))) {
1072 
1073 		/*
1074 		 * Get here if this CPU took its interrupt from user
1075 		 * mode, from the idle loop without this being a nested
1076 		 * interrupt, or while not holding the task preempt count
1077 		 * (with PREEMPT_COUNT=y). In this case, the CPU is in a
1078 		 * quiescent state, so note it.
1079 		 *
1080 		 * No memory barrier is required here because rcu_qs()
1081 		 * references only CPU-local variables that other CPUs
1082 		 * neither access nor modify, at least not while the
1083 		 * corresponding CPU is online.
1084 		 */
1085 		rcu_qs();
1086 	}
1087 }
1088 
1089 /*
1090  * Because preemptible RCU does not exist, tasks cannot possibly exit
1091  * while in preemptible RCU read-side critical sections.
1092  */
1093 void exit_rcu(void)
1094 {
1095 }
1096 
1097 /*
1098  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
1099  */
1100 static void
1101 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
1102 {
1103 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
1104 }
1105 
1106 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
1107 
1108 /*
1109  * If boosting, set rcuc kthreads to realtime priority.
1110  */
1111 static void rcu_cpu_kthread_setup(unsigned int cpu)
1112 {
1113 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1114 #ifdef CONFIG_RCU_BOOST
1115 	struct sched_param sp;
1116 
1117 	sp.sched_priority = kthread_prio;
1118 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1119 #endif /* #ifdef CONFIG_RCU_BOOST */
1120 
1121 	WRITE_ONCE(rdp->rcuc_activity, jiffies);
1122 }
1123 
1124 static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
1125 {
1126 #ifdef CONFIG_RCU_NOCB_CPU
1127 	return rdp->nocb_cb_kthread == current;
1128 #else
1129 	return false;
1130 #endif
1131 }
1132 
1133 /*
1134  * Is the current CPU running the RCU-callbacks kthread?
1135  * Caller must have preemption disabled.
1136  */
1137 static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
1138 {
1139 	return rdp->rcu_cpu_kthread_task == current ||
1140 			rcu_is_callbacks_nocb_kthread(rdp);
1141 }
1142 
1143 #ifdef CONFIG_RCU_BOOST
1144 
1145 /*
1146  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1147  * or ->boost_tasks, advancing the pointer to the next task in the
1148  * ->blkd_tasks list.
1149  *
1150  * Note that irqs must be enabled: boosting the task can block.
1151  * Returns 1 if there are more tasks needing to be boosted.
1152  */
1153 static int rcu_boost(struct rcu_node *rnp)
1154 {
1155 	unsigned long flags;
1156 	struct task_struct *t;
1157 	struct list_head *tb;
1158 
1159 	if (READ_ONCE(rnp->exp_tasks) == NULL &&
1160 	    READ_ONCE(rnp->boost_tasks) == NULL)
1161 		return 0;  /* Nothing left to boost. */
1162 
1163 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1164 
1165 	/*
1166 	 * Recheck under the lock: all tasks in need of boosting
1167 	 * might exit their RCU read-side critical sections on their own.
1168 	 */
1169 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1170 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1171 		return 0;
1172 	}
1173 
1174 	/*
1175 	 * Preferentially boost tasks blocking expedited grace periods.
1176 	 * This cannot starve the normal grace periods because a second
1177 	 * expedited grace period must boost all blocked tasks, including
1178 	 * those blocking the pre-existing normal grace period.
1179 	 */
1180 	if (rnp->exp_tasks != NULL)
1181 		tb = rnp->exp_tasks;
1182 	else
1183 		tb = rnp->boost_tasks;
1184 
1185 	/*
1186 	 * We boost task t by manufacturing an rt_mutex that appears to
1187 	 * be held by task t.  We leave a pointer to that rt_mutex where
1188 	 * task t can find it, and task t will release the mutex when it
1189 	 * exits its outermost RCU read-side critical section.  Then
1190 	 * simply acquiring this artificial rt_mutex will boost task
1191 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1192 	 *
1193 	 * Note that task t must acquire rnp->lock to remove itself from
1194 	 * the ->blkd_tasks list, which it will do from exit() if from
1195 	 * nowhere else.  We therefore are guaranteed that task t will
1196 	 * stay around at least until we drop rnp->lock.  Note that
1197 	 * rnp->lock also resolves races between our priority boosting
1198 	 * and task t's exiting its outermost RCU read-side critical
1199 	 * section.
1200 	 */
1201 	t = container_of(tb, struct task_struct, rcu_node_entry);
1202 	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1203 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1204 	/* Lock only for side effect: boosts task t's priority. */
1205 	rt_mutex_lock(&rnp->boost_mtx);
1206 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1207 	rnp->n_boosts++;
1208 
1209 	return READ_ONCE(rnp->exp_tasks) != NULL ||
1210 	       READ_ONCE(rnp->boost_tasks) != NULL;
1211 }
1212 
1213 /*
1214  * Priority-boosting kthread, one per leaf rcu_node.
1215  */
1216 static int rcu_boost_kthread(void *arg)
1217 {
1218 	struct rcu_node *rnp = (struct rcu_node *)arg;
1219 	int spincnt = 0;
1220 	int more2boost;
1221 
1222 	trace_rcu_utilization(TPS("Start boost kthread@init"));
1223 	for (;;) {
1224 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
1225 		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1226 		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
1227 			 READ_ONCE(rnp->exp_tasks));
1228 		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1229 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
1230 		more2boost = rcu_boost(rnp);
1231 		if (more2boost)
1232 			spincnt++;
1233 		else
1234 			spincnt = 0;
1235 		if (spincnt > 10) {
1236 			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
1237 			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1238 			schedule_timeout_idle(2);
1239 			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1240 			spincnt = 0;
1241 		}
1242 	}
1243 	/* NOTREACHED */
1244 	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1245 	return 0;
1246 }
1247 
1248 /*
1249  * Check to see if it is time to start boosting RCU readers that are
1250  * blocking the current grace period, and, if so, tell the per-rcu_node
1251  * kthread to start boosting them.  If there is an expedited grace
1252  * period in progress, it is always time to boost.
1253  *
1254  * The caller must hold rnp->lock, which this function releases.
1255  * The ->boost_kthread_task is immortal, so we don't need to worry
1256  * about it going away.
1257  */
1258 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1259 	__releases(rnp->lock)
1260 {
1261 	raw_lockdep_assert_held_rcu_node(rnp);
1262 	if (!rnp->boost_kthread_task ||
1263 	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
1264 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1265 		return;
1266 	}
1267 	if (rnp->exp_tasks != NULL ||
1268 	    (rnp->gp_tasks != NULL &&
1269 	     rnp->boost_tasks == NULL &&
1270 	     rnp->qsmask == 0 &&
1271 	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
1272 	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
1273 		if (rnp->exp_tasks == NULL)
1274 			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
1275 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1276 		rcu_wake_cond(rnp->boost_kthread_task,
1277 			      READ_ONCE(rnp->boost_kthread_status));
1278 	} else {
1279 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1280 	}
1281 }
1282 
1283 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1284 
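/*
 * Worked example (illustrative): with the Kconfig default of
 * CONFIG_RCU_BOOST_DELAY=500 and HZ=1000, this evaluates to
 * DIV_ROUND_UP(500 * 1000, 1000) = 500 jiffies, i.e. priority
 * boosting is not attempted until a grace period is roughly half a
 * second old.
 */
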
1285 /*
1286  * Do priority-boost accounting for the start of a new grace period.
1287  */
1288 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1289 {
1290 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1291 }
1292 
1293 /*
1294  * Create an RCU-boost kthread for the specified node if one does not
1295  * already exist.  We only create this kthread for preemptible RCU.
1296  */
1297 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1298 {
1299 	unsigned long flags;
1300 	int rnp_index = rnp - rcu_get_root();
1301 	struct sched_param sp;
1302 	struct task_struct *t;
1303 
1304 	if (rnp->boost_kthread_task)
1305 		return;
1306 
1307 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1308 			   "rcub/%d", rnp_index);
1309 	if (WARN_ON_ONCE(IS_ERR(t)))
1310 		return;
1311 
1312 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1313 	rnp->boost_kthread_task = t;
1314 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1315 
1316 	sp.sched_priority = kthread_prio;
1317 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1318 	rcu_thread_affine_rnp(t, rnp);
1319 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1320 }
1321 
1322 #else /* #ifdef CONFIG_RCU_BOOST */
1323 
1324 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1325 	__releases(rnp->lock)
1326 {
1327 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1328 }
1329 
1330 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1331 {
1332 }
1333 
1334 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1335 {
1336 }
1337 
1338 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1339 
1340 /*
1341  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
1342  * grace-period kthread will do force_quiescent_state() processing?
1343  * The idea is to avoid waking up RCU core processing on such a
1344  * CPU unless the grace period has extended for too long.
1345  *
1346  * This code relies on the fact that all NO_HZ_FULL CPUs are also
1347  * RCU_NOCB_CPU CPUs.
1348  */
1349 static bool rcu_nohz_full_cpu(void)
1350 {
1351 #ifdef CONFIG_NO_HZ_FULL
1352 	if (tick_nohz_full_cpu(smp_processor_id()) &&
1353 	    (!rcu_gp_in_progress() ||
1354 	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
1355 		return true;
1356 #endif /* #ifdef CONFIG_NO_HZ_FULL */
1357 	return false;
1358 }
1359 
1360 /*
1361  * Bind the RCU grace-period kthreads to the housekeeping CPU.
1362  */
1363 static void rcu_bind_gp_kthread(void)
1364 {
1365 	if (!tick_nohz_full_enabled())
1366 		return;
1367 	housekeeping_affine(current, HK_TYPE_RCU);
1368 }
1369