/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/console.h>
#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
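
/*
 * Illustrative sketch, not part of this file: the snap/done pair above
 * is the usual sequence-counter idiom.  A hypothetical in-file caller
 * wanting to piggyback on any expedited grace period that fully elapses
 * after this point might do:
 *
 *	unsigned long s = rcu_exp_gp_seq_snap();
 *
 *	do {
 *		schedule_timeout_idle(1);	// Hypothetical polling loop.
 *	} while (!rcu_exp_gp_seq_done(s));
 *	// A full expedited GP has now elapsed since the snapshot.
 *
 * The real callers below instead sleep on wait queues and check
 * sync_exp_work_done(), which adds tracing and a full memory barrier.
 */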

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited GP
		 * until such time as the ->expmask bits are cleared.
		 */
		if (rcu_is_leaf_node(rnp) && rcu_preempt_has_tasks(rnp))
			WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake)
				swake_up_one(&rcu_state.expedited_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure, which is acquired by the caller.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, unsigned long flags,
				    unsigned long mask_in, bool wake)
				    __releases(rnp->lock)
{
	int cpu;
	unsigned long mask;
	struct rcu_data *rdp;

	raw_lockdep_assert_held_rcu_node(rnp);
	if (!(rnp->expmask & mask_in)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	mask = mask_in & rnp->expmask;
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp = rdp->mynode;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp);
	rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/*
		 * Order GP completion with preceding accesses. Order also GP
		 * completion with post GP update side accesses. Pairs with
		 * rcu_seq_end().
		 */
		smp_mb();
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falling through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
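
/*
 * Illustrative sketch, not part of this file: exp_funnel_lock() is meant
 * to be used in the snapshot/funnel/do-or-wait pattern that
 * synchronize_rcu_expedited() follows later in this file:
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;  // Some other task's expedited GP covered us.
 *	// Otherwise, ->exp_mutex is held and this task must drive the
 *	// GP itself, releasing ->exp_mutex once sync_exp_work_done(s).
 *
 * The funnel bounds contention on ->exp_mutex: at most one task per
 * rcu_node structure escalates toward the root at any given time, and
 * all later arrivals sleep on the relevant ->exp_wq entry instead.
 */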

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			/*
			 * Full ordering between the remote CPU's post-idle
			 * accesses and the updater's accesses prior to the
			 * current GP (and also the started GP sequence number)
			 * is enforced by the rcu_seq_start() implicit barrier,
			 * relayed by kworker locking and even further by
			 * smp_mb__after_unlock_lock() barriers chained all the
			 * way through the rnp locking tree from
			 * sync_exp_reset_tree() up to the current leaf rnp
			 * locking.
			 *
			 * Ordering between the remote CPU's pre-idle accesses
			 * and the post-grace-period updater's accesses is
			 * enforced by the acquire semantics below.
			 */
			snap = ct_rcu_watching_cpu_acquire(cpu);
			if (rcu_watching_snap_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_watching_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_watching_snap_stopped_since(rdp, rdp->exp_watching_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rcu_report_exp_cpu_mult(rnp, flags, mask_ofl_test, false);
	}
}

static void rcu_exp_sel_wait_wake(unsigned long s);

static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_exp_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_gp_kworker);
}

static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
{
	return !!READ_ONCE(rnp->exp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rnp->exp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_exp_par_worker_started(rnp) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* Workqueues should not be signaled. */
	return false;
}

/*
 * Print out an expedited RCU CPU stall warning message.
 */
static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigned long j)
{
	int cpu;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	if (READ_ONCE(csd_lock_suppress_rcu_stall) && csd_lock_is_stuck()) {
		pr_err("INFO: %s detected expedited stalls, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
		return;
	}
	pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", rcu_state.name);
	ndetected = 0;
	rcu_for_each_leaf_node(rnp) {
		ndetected += rcu_print_task_exp_stall(rnp);
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp;

			mask = leaf_node_cpu_bit(rnp, cpu);
			if (!(READ_ONCE(rnp->expmask) & mask))
				continue;
			ndetected++;
			rdp = per_cpu_ptr(&rcu_data, cpu);
			pr_cont(" %d-%c%c%c%c", cpu,
				"O."[!!cpu_online(cpu)],
				"o."[!!(rdp->grpmask & rnp->expmaskinit)],
				"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
				"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
		}
	}
	pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
		j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),
		".T"[!!data_race(rnp_root->exp_tasks)]);
	if (!ndetected) {
		// This is invoked from the grace-period worker, so
		// a new grace period cannot have started.  And if this
		// worker were stalled, we would not get here.  ;-)
		pr_err("INFO: Expedited stall ended before state dump start\n");
	} else {
		pr_err("blocking rcu_node structures (internal RCU debug):");
		rcu_for_each_node_breadth_first(rnp) {
			if (rnp == rnp_root)
				continue; /* printed unconditionally */
			if (sync_rcu_exp_done_unlocked(rnp))
				continue;
			pr_cont(" l=%u:%d-%d:%#lx/%c",
				rnp->level, rnp->grplo, rnp->grphi, data_race(rnp->expmask),
				".T"[!!data_race(rnp->exp_tasks)]);
		}
		pr_cont("\n");
	}
	rcu_for_each_leaf_node(rnp) {
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			mask = leaf_node_cpu_bit(rnp, cpu);
			if (!(READ_ONCE(rnp->expmask) & mask))
				continue;
			dump_cpu_task(cpu);
		}
		rcu_exp_print_detail_task_stall_rnp(rnp);
	}
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long flags;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		unsigned long j;

		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;

		nbcon_cpu_emergency_enter();

		j = jiffies;
		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		synchronize_rcu_expedited_stall(jiffies_start, j);
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;

		nbcon_cpu_emergency_exit();

		panic_on_rcu_stall();
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	lockdep_assert_irqs_disabled();
	ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp));
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_need_resched_current();
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, WARN if the CPU is unexpectedly already looking for a
	 * QS or has already reported one.
	 */
	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
	if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
			 READ_ONCE(rdp->cpu_no_qs.b.exp)))
		return;

	/*
	 * Second, the common case of not being in an RCU read-side
	 * critical section.  If preemption and softirqs are also enabled,
	 * or if this is an interrupt from idle, immediately report the
	 * quiescent state; otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle())
			rcu_report_exp_rdp(rdp);
		else
			rcu_exp_need_qs();
		return;
	}

	/*
	 * Third, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Fourth and finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		synchronize_rcu_normal();
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely((rcu_scheduler_active == RCU_SCHEDULER_INIT) || !rcu_exp_worker_started())) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
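
/*
 * Illustrative usage sketch, not part of this file: a hypothetical
 * update path might pay the IPI cost of synchronize_rcu_expedited() to
 * get low-latency reclamation, as in the following, where cur_cfg,
 * my_config, and my_replace_config() are all made-up names:
 *
 *	static struct my_config __rcu *cur_cfg;
 *
 *	void my_replace_config(struct my_config *newcfg)
 *	{
 *		struct my_config *old;
 *
 *		old = rcu_replace_pointer(cur_cfg, newcfg, true);
 *		synchronize_rcu_expedited();  // Pre-existing readers done.
 *		kfree(old);
 *	}
 *
 * As the header comment above says, batched updates should instead
 * share a single synchronize_rcu() call.
 */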

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
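
/*
 * Illustrative sketch, not part of this file: the returned cookie lets
 * a caller overlap an expedited grace period with other work, where
 * do_other_work() and free_old_state() are hypothetical helpers:
 *
 *	unsigned long cookie = start_poll_synchronize_rcu_expedited();
 *
 *	do_other_work();
 *	if (!poll_state_synchronize_rcu(cookie))
 *		synchronize_rcu_expedited();	// Or poll again later.
 *	free_old_state();
 */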

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
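
/*
 * Illustrative sketch, not part of this file:
 * cond_synchronize_rcu_expedited() collapses the poll-then-wait step of
 * the previous example, again with hypothetical helpers:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_other_work();
 *	cond_synchronize_rcu_expedited(cookie);	// Waits only if needed.
 *	free_old_state();
 */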

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);
1119