xref: /linux/kernel/rcu/tree_exp.h (revision d58db3f3a00af00fce5f914c9d1a946ef7feecb6)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * RCU expedited grace periods
4  *
5  * Copyright IBM Corporation, 2016
6  *
7  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8  */
9 
10 #include <linux/console.h>
11 #include <linux/lockdep.h>
12 
13 static void rcu_exp_handler(void *unused);
14 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
15 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
16 
17 /*
18  * Record the start of an expedited grace period.
19  */
20 static void rcu_exp_gp_seq_start(void)
21 {
22 	rcu_seq_start(&rcu_state.expedited_sequence);
23 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
24 }
25 
26 /*
27  * Return the value that the expedited-grace-period counter will have
28  * at the end of the current grace period.
29  */
30 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
31 {
32 	return rcu_seq_endval(&rcu_state.expedited_sequence);
33 }
34 
35 /*
36  * Record the end of an expedited grace period.
37  */
38 static void rcu_exp_gp_seq_end(void)
39 {
40 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
41 	rcu_seq_end(&rcu_state.expedited_sequence);
42 	smp_mb(); /* Ensure that consecutive grace periods serialize. */
43 }
44 
45 /*
46  * Take a snapshot of the expedited-grace-period counter, which is the
47  * earliest value that will indicate that a full grace period has
48  * elapsed since the current time.
49  */
50 static unsigned long rcu_exp_gp_seq_snap(void)
51 {
52 	unsigned long s;
53 
54 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
55 	s = rcu_seq_snap(&rcu_state.expedited_sequence);
56 	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
57 	return s;
58 }
59 
60 /*
61  * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
62  * if a full expedited grace period has elapsed since that snapshot
63  * was taken.
64  */
65 static bool rcu_exp_gp_seq_done(unsigned long s)
66 {
67 	return rcu_seq_done(&rcu_state.expedited_sequence, s);
68 }
69 
70 /*
71  * Reset the ->expmaskinit values in the rcu_node tree to reflect any
72  * recent CPU-online activity.  Note that these masks are not cleared
73  * when CPUs go offline, so they reflect the union of all CPUs that have
74  * ever been online.  This means that this function normally takes its
75  * no-work-to-do fastpath.
76  */
77 static void sync_exp_reset_tree_hotplug(void)
78 {
79 	bool done;
80 	unsigned long flags;
81 	unsigned long mask;
82 	unsigned long oldmask;
83 	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
84 	struct rcu_node *rnp;
85 	struct rcu_node *rnp_up;
86 
87 	/* If no new CPUs onlined since last time, nothing to do. */
88 	if (likely(ncpus == rcu_state.ncpus_snap))
89 		return;
90 	rcu_state.ncpus_snap = ncpus;
91 
92 	/*
93 	 * Each pass through the following loop propagates newly onlined
94 	 * CPUs for the current rcu_node structure up the rcu_node tree.
95 	 */
96 	rcu_for_each_leaf_node(rnp) {
97 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
98 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
99 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
100 			continue;  /* No new CPUs, nothing to do. */
101 		}
102 
103 		/* Update this node's mask, track old value for propagation. */
104 		oldmask = rnp->expmaskinit;
105 		rnp->expmaskinit = rnp->expmaskinitnext;
106 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
107 
108 		/* If was already nonzero, nothing to propagate. */
109 		if (oldmask)
110 			continue;
111 
112 		/* Propagate the new CPU up the tree. */
113 		mask = rnp->grpmask;
114 		rnp_up = rnp->parent;
115 		done = false;
116 		while (rnp_up) {
117 			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
118 			if (rnp_up->expmaskinit)
119 				done = true;
120 			rnp_up->expmaskinit |= mask;
121 			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
122 			if (done)
123 				break;
124 			mask = rnp_up->grpmask;
125 			rnp_up = rnp_up->parent;
126 		}
127 	}
128 }
129 
130 /*
131  * Reset the ->expmask values in the rcu_node tree in preparation for
132  * a new expedited grace period.
133  */
134 static void __maybe_unused sync_exp_reset_tree(void)
135 {
136 	unsigned long flags;
137 	struct rcu_node *rnp;
138 
139 	sync_exp_reset_tree_hotplug();
140 	rcu_for_each_node_breadth_first(rnp) {
141 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
142 		WARN_ON_ONCE(rnp->expmask);
143 		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
144 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
145 	}
146 }
147 
148 /*
149  * Return non-zero if there is no RCU expedited grace period in progress
150  * for the specified rcu_node structure, in other words, if all CPUs and
151  * tasks covered by the specified rcu_node structure have done their bit
152  * for the current expedited grace period.
153  */
154 static bool sync_rcu_exp_done(struct rcu_node *rnp)
155 {
156 	raw_lockdep_assert_held_rcu_node(rnp);
157 	return READ_ONCE(rnp->exp_tasks) == NULL &&
158 	       READ_ONCE(rnp->expmask) == 0;
159 }
160 
161 /*
162  * Like sync_rcu_exp_done(), but where the caller does not hold the
163  * rcu_node's ->lock.
164  */
165 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
166 {
167 	unsigned long flags;
168 	bool ret;
169 
170 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
171 	ret = sync_rcu_exp_done(rnp);
172 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
173 
174 	return ret;
175 }
176 
177 /*
178  * Report the exit from RCU read-side critical section for the last task
179  * that queued itself during or before the current expedited preemptible-RCU
180  * grace period.  This event is reported either to the rcu_node structure on
181  * which the task was queued or to one of that rcu_node structure's ancestors,
182  * recursively up the tree.  (Calm down, calm down, we do the recursion
183  * iteratively!)
184  */
185 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
186 				 bool wake, unsigned long flags)
187 	__releases(rnp->lock)
188 {
189 	unsigned long mask;
190 
191 	raw_lockdep_assert_held_rcu_node(rnp);
192 	for (;;) {
193 		if (!sync_rcu_exp_done(rnp)) {
194 			if (!rnp->expmask)
195 				rcu_initiate_boost(rnp, flags);
196 			else
197 				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
198 			break;
199 		}
200 		if (rnp->parent == NULL) {
201 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
202 			if (wake)
203 				swake_up_one_online(&rcu_state.expedited_wq);
204 
205 			break;
206 		}
207 		mask = rnp->grpmask;
208 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
209 		rnp = rnp->parent;
210 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
211 		WARN_ON_ONCE(!(rnp->expmask & mask));
212 		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
213 	}
214 }
215 
216 /*
217  * Report expedited quiescent state for specified node.  This is a
218  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
219  */
220 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
221 {
222 	unsigned long flags;
223 
224 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
225 	__rcu_report_exp_rnp(rnp, wake, flags);
226 }
227 
228 /*
229  * Report expedited quiescent state for multiple CPUs, all covered by the
230  * specified leaf rcu_node structure.
231  */
232 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
233 				    unsigned long mask, bool wake)
234 {
235 	int cpu;
236 	unsigned long flags;
237 	struct rcu_data *rdp;
238 
239 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
240 	if (!(rnp->expmask & mask)) {
241 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
242 		return;
243 	}
244 	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
245 	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
246 		rdp = per_cpu_ptr(&rcu_data, cpu);
247 		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
248 			continue;
249 		rdp->rcu_forced_tick_exp = false;
250 		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
251 	}
252 	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
253 }
254 
255 /*
256  * Report expedited quiescent state for specified rcu_data (CPU).
257  */
258 static void rcu_report_exp_rdp(struct rcu_data *rdp)
259 {
260 	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
261 	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
262 }
263 
264 /* Common code for work-done checking. */
265 static bool sync_exp_work_done(unsigned long s)
266 {
267 	if (rcu_exp_gp_seq_done(s)) {
268 		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
269 		/*
270 		 * Order GP completion with preceding accesses. Order also GP
271 		 * completion with post GP update side accesses. Pairs with
272 		 * rcu_seq_end().
273 		 */
274 		smp_mb();
275 		return true;
276 	}
277 	return false;
278 }
279 
280 /*
281  * Funnel-lock acquisition for expedited grace periods.  Returns true
282  * if some other task completed an expedited grace period that this task
283  * can piggy-back on, and with no mutex held.  Otherwise, returns false
284  * with the mutex held, indicating that the caller must actually do the
285  * expedited grace period.
286  */
287 static bool exp_funnel_lock(unsigned long s)
288 {
289 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
290 	struct rcu_node *rnp = rdp->mynode;
291 	struct rcu_node *rnp_root = rcu_get_root();
292 
293 	/* Low-contention fastpath. */
294 	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
295 	    (rnp == rnp_root ||
296 	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
297 	    mutex_trylock(&rcu_state.exp_mutex))
298 		goto fastpath;
299 
300 	/*
301 	 * Each pass through the following loop works its way up
302 	 * the rcu_node tree, returning if others have done the work or
303 	 * otherwise falls through to acquire ->exp_mutex.  The mapping
304 	 * from CPU to rcu_node structure can be inexact, as it is just
305 	 * promoting locality and is not strictly needed for correctness.
306 	 */
307 	for (; rnp != NULL; rnp = rnp->parent) {
308 		if (sync_exp_work_done(s))
309 			return true;
310 
311 		/* Work not done, either wait here or go up. */
312 		spin_lock(&rnp->exp_lock);
313 		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
314 
315 			/* Someone else doing GP, so wait for them. */
316 			spin_unlock(&rnp->exp_lock);
317 			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
318 						  rnp->grplo, rnp->grphi,
319 						  TPS("wait"));
320 			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
321 				   sync_exp_work_done(s));
322 			return true;
323 		}
324 		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
325 		spin_unlock(&rnp->exp_lock);
326 		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
327 					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
328 	}
329 	mutex_lock(&rcu_state.exp_mutex);
330 fastpath:
331 	if (sync_exp_work_done(s)) {
332 		mutex_unlock(&rcu_state.exp_mutex);
333 		return true;
334 	}
335 	rcu_exp_gp_seq_start();
336 	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
337 	return false;
338 }
339 
340 /*
341  * Select the CPUs within the specified rcu_node that the upcoming
342  * expedited grace period needs to wait for.
343  */
344 static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
345 {
346 	int cpu;
347 	unsigned long flags;
348 	unsigned long mask_ofl_test;
349 	unsigned long mask_ofl_ipi;
350 	int ret;
351 	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
352 
353 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
354 
355 	/* Each pass checks a CPU for identity, offline, and idle. */
356 	mask_ofl_test = 0;
357 	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
358 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
359 		unsigned long mask = rdp->grpmask;
360 		int snap;
361 
362 		if (raw_smp_processor_id() == cpu ||
363 		    !(rnp->qsmaskinitnext & mask)) {
364 			mask_ofl_test |= mask;
365 		} else {
366 			/*
367 			 * Full ordering between remote CPU's post idle accesses
368 			 * and updater's accesses prior to current GP (and also
369 			 * the started GP sequence number) is enforced by
370 			 * rcu_seq_start() implicit barrier, relayed by kworkers
371 			 * locking and even further by smp_mb__after_unlock_lock()
372 			 * barriers chained all the way throughout the rnp locking
373 			 * tree since sync_exp_reset_tree() and up to the current
374 			 * leaf rnp locking.
375 			 *
376 			 * Ordering between remote CPU's pre idle accesses and
377 			 * post grace period updater's accesses is enforced by the
378 			 * below acquire semantic.
379 			 */
380 			snap = ct_dynticks_cpu_acquire(cpu);
381 			if (rcu_dynticks_in_eqs(snap))
382 				mask_ofl_test |= mask;
383 			else
384 				rdp->exp_dynticks_snap = snap;
385 		}
386 	}
387 	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
388 
389 	/*
390 	 * Need to wait for any blocked tasks as well.	Note that
391 	 * additional blocking tasks will also block the expedited GP
392 	 * until such time as the ->expmask bits are cleared.
393 	 */
394 	if (rcu_preempt_has_tasks(rnp))
395 		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
396 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
397 
398 	/* IPI the remaining CPUs for expedited quiescent state. */
399 	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
400 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
401 		unsigned long mask = rdp->grpmask;
402 
403 retry_ipi:
404 		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
405 			mask_ofl_test |= mask;
406 			continue;
407 		}
408 		if (get_cpu() == cpu) {
409 			mask_ofl_test |= mask;
410 			put_cpu();
411 			continue;
412 		}
413 		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
414 		put_cpu();
415 		/* The CPU will report the QS in response to the IPI. */
416 		if (!ret)
417 			continue;
418 
419 		/* Failed, raced with CPU hotplug operation. */
420 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
421 		if ((rnp->qsmaskinitnext & mask) &&
422 		    (rnp->expmask & mask)) {
423 			/* Online, so delay for a bit and try again. */
424 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
425 			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
426 			schedule_timeout_idle(1);
427 			goto retry_ipi;
428 		}
429 		/* CPU really is offline, so we must report its QS. */
430 		if (rnp->expmask & mask)
431 			mask_ofl_test |= mask;
432 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
433 	}
434 	/* Report quiescent states for those that went offline. */
435 	if (mask_ofl_test)
436 		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
437 }
438 
439 static void rcu_exp_sel_wait_wake(unsigned long s);
440 
441 static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
442 {
443 	struct rcu_exp_work *rewp =
444 		container_of(wp, struct rcu_exp_work, rew_work);
445 
446 	__sync_rcu_exp_select_node_cpus(rewp);
447 }
448 
449 static inline bool rcu_exp_worker_started(void)
450 {
451 	return !!READ_ONCE(rcu_exp_gp_kworker);
452 }
453 
454 static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
455 {
456 	return !!READ_ONCE(rnp->exp_kworker);
457 }
458 
459 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
460 {
461 	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
462 	/*
463 	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
464 	 * another work item on the same kthread worker can result in
465 	 * deadlock.
466 	 */
467 	kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
468 }
469 
470 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
471 {
472 	kthread_flush_work(&rnp->rew.rew_work);
473 }
474 
475 /*
476  * Work-queue handler to drive an expedited grace period forward.
477  */
478 static void wait_rcu_exp_gp(struct kthread_work *wp)
479 {
480 	struct rcu_exp_work *rewp;
481 
482 	rewp = container_of(wp, struct rcu_exp_work, rew_work);
483 	rcu_exp_sel_wait_wake(rewp->rew_s);
484 }
485 
486 static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
487 {
488 	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
489 	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
490 }
491 
492 /*
493  * Select the nodes that the upcoming expedited grace period needs
494  * to wait for.
495  */
496 static void sync_rcu_exp_select_cpus(void)
497 {
498 	struct rcu_node *rnp;
499 
500 	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
501 	sync_exp_reset_tree();
502 	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
503 
504 	/* Schedule work for each leaf rcu_node structure. */
505 	rcu_for_each_leaf_node(rnp) {
506 		rnp->exp_need_flush = false;
507 		if (!READ_ONCE(rnp->expmask))
508 			continue; /* Avoid early boot non-existent wq. */
509 		if (!rcu_exp_par_worker_started(rnp) ||
510 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
511 		    rcu_is_last_leaf_node(rnp)) {
512 			/* No worker started yet or last leaf, do direct call. */
513 			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
514 			continue;
515 		}
516 		sync_rcu_exp_select_cpus_queue_work(rnp);
517 		rnp->exp_need_flush = true;
518 	}
519 
520 	/* Wait for jobs (if any) to complete. */
521 	rcu_for_each_leaf_node(rnp)
522 		if (rnp->exp_need_flush)
523 			sync_rcu_exp_select_cpus_flush_work(rnp);
524 }
525 
526 /*
527  * Wait for the expedited grace period to elapse, within time limit.
528  * If the time limit is exceeded without the grace period elapsing,
529  * return false, otherwise return true.
530  */
531 static bool synchronize_rcu_expedited_wait_once(long tlimit)
532 {
533 	int t;
534 	struct rcu_node *rnp_root = rcu_get_root();
535 
536 	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
537 					  sync_rcu_exp_done_unlocked(rnp_root),
538 					  tlimit);
539 	// Workqueues should not be signaled.
540 	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
541 		return true;
542 	WARN_ON(t < 0);  /* workqueues should not be signaled. */
543 	return false;
544 }
545 
546 /*
547  * Wait for the expedited grace period to elapse, issuing any needed
548  * RCU CPU stall warnings along the way.
549  */
550 static void synchronize_rcu_expedited_wait(void)
551 {
552 	int cpu;
553 	unsigned long j;
554 	unsigned long jiffies_stall;
555 	unsigned long jiffies_start;
556 	unsigned long mask;
557 	int ndetected;
558 	struct rcu_data *rdp;
559 	struct rcu_node *rnp;
560 	struct rcu_node *rnp_root = rcu_get_root();
561 	unsigned long flags;
562 
563 	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
564 	jiffies_stall = rcu_exp_jiffies_till_stall_check();
565 	jiffies_start = jiffies;
566 	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
567 		if (synchronize_rcu_expedited_wait_once(1))
568 			return;
569 		rcu_for_each_leaf_node(rnp) {
570 			raw_spin_lock_irqsave_rcu_node(rnp, flags);
571 			mask = READ_ONCE(rnp->expmask);
572 			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
573 				rdp = per_cpu_ptr(&rcu_data, cpu);
574 				if (rdp->rcu_forced_tick_exp)
575 					continue;
576 				rdp->rcu_forced_tick_exp = true;
577 				if (cpu_online(cpu))
578 					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
579 			}
580 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
581 		}
582 		j = READ_ONCE(jiffies_till_first_fqs);
583 		if (synchronize_rcu_expedited_wait_once(j + HZ))
584 			return;
585 	}
586 
587 	for (;;) {
588 		unsigned long j;
589 
590 		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
591 			return;
592 		if (rcu_stall_is_suppressed())
593 			continue;
594 
595 		nbcon_cpu_emergency_enter();
596 
597 		j = jiffies;
598 		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
599 		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
600 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
601 		       rcu_state.name);
602 		ndetected = 0;
603 		rcu_for_each_leaf_node(rnp) {
604 			ndetected += rcu_print_task_exp_stall(rnp);
605 			for_each_leaf_node_possible_cpu(rnp, cpu) {
606 				struct rcu_data *rdp;
607 
608 				mask = leaf_node_cpu_bit(rnp, cpu);
609 				if (!(READ_ONCE(rnp->expmask) & mask))
610 					continue;
611 				ndetected++;
612 				rdp = per_cpu_ptr(&rcu_data, cpu);
613 				pr_cont(" %d-%c%c%c%c", cpu,
614 					"O."[!!cpu_online(cpu)],
615 					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
616 					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
617 					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
618 			}
619 		}
620 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
621 			j - jiffies_start, rcu_state.expedited_sequence,
622 			data_race(rnp_root->expmask),
623 			".T"[!!data_race(rnp_root->exp_tasks)]);
624 		if (ndetected) {
625 			pr_err("blocking rcu_node structures (internal RCU debug):");
626 			rcu_for_each_node_breadth_first(rnp) {
627 				if (rnp == rnp_root)
628 					continue; /* printed unconditionally */
629 				if (sync_rcu_exp_done_unlocked(rnp))
630 					continue;
631 				pr_cont(" l=%u:%d-%d:%#lx/%c",
632 					rnp->level, rnp->grplo, rnp->grphi,
633 					data_race(rnp->expmask),
634 					".T"[!!data_race(rnp->exp_tasks)]);
635 			}
636 			pr_cont("\n");
637 		}
638 		rcu_for_each_leaf_node(rnp) {
639 			for_each_leaf_node_possible_cpu(rnp, cpu) {
640 				mask = leaf_node_cpu_bit(rnp, cpu);
641 				if (!(READ_ONCE(rnp->expmask) & mask))
642 					continue;
643 				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
644 				dump_cpu_task(cpu);
645 				preempt_enable();
646 			}
647 			rcu_exp_print_detail_task_stall_rnp(rnp);
648 		}
649 		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
650 
651 		nbcon_cpu_emergency_exit();
652 
653 		panic_on_rcu_stall();
654 	}
655 }
656 
657 /*
658  * Wait for the current expedited grace period to complete, and then
659  * wake up everyone who piggybacked on the just-completed expedited
660  * grace period.  Also update all the ->exp_seq_rq counters as needed
661  * in order to avoid counter-wrap problems.
662  */
663 static void rcu_exp_wait_wake(unsigned long s)
664 {
665 	struct rcu_node *rnp;
666 
667 	synchronize_rcu_expedited_wait();
668 
669 	// Switch over to wakeup mode, allowing the next GP to proceed.
670 	// End the previous grace period only after acquiring the mutex
671 	// to ensure that only one GP runs concurrently with wakeups.
672 	mutex_lock(&rcu_state.exp_wake_mutex);
673 	rcu_exp_gp_seq_end();
674 	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
675 
676 	rcu_for_each_node_breadth_first(rnp) {
677 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
678 			spin_lock(&rnp->exp_lock);
679 			/* Recheck, avoid hang in case someone just arrived. */
680 			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
681 				WRITE_ONCE(rnp->exp_seq_rq, s);
682 			spin_unlock(&rnp->exp_lock);
683 		}
684 		smp_mb(); /* All above changes before wakeup. */
685 		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
686 	}
687 	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
688 	mutex_unlock(&rcu_state.exp_wake_mutex);
689 }
690 
691 /*
692  * Common code to drive an expedited grace period forward, used by
693  * workqueues and mid-boot-time tasks.
694  */
695 static void rcu_exp_sel_wait_wake(unsigned long s)
696 {
697 	/* Initialize the rcu_node tree in preparation for the wait. */
698 	sync_rcu_exp_select_cpus();
699 
700 	/* Wait and clean up, including waking everyone. */
701 	rcu_exp_wait_wake(s);
702 }
703 
704 #ifdef CONFIG_PREEMPT_RCU
705 
706 /*
707  * Remote handler for smp_call_function_single().  If there is an
708  * RCU read-side critical section in effect, request that the
709  * next rcu_read_unlock() record the quiescent state up the
710  * ->expmask fields in the rcu_node tree.  Otherwise, immediately
711  * report the quiescent state.
712  */
713 static void rcu_exp_handler(void *unused)
714 {
715 	int depth = rcu_preempt_depth();
716 	unsigned long flags;
717 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
718 	struct rcu_node *rnp = rdp->mynode;
719 	struct task_struct *t = current;
720 
721 	/*
722 	 * First, the common case of not being in an RCU read-side
723 	 * critical section.  If also enabled or idle, immediately
724 	 * report the quiescent state, otherwise defer.
725 	 */
726 	if (!depth) {
727 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
728 		    rcu_is_cpu_rrupt_from_idle()) {
729 			rcu_report_exp_rdp(rdp);
730 		} else {
731 			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
732 			set_tsk_need_resched(t);
733 			set_preempt_need_resched();
734 		}
735 		return;
736 	}
737 
738 	/*
739 	 * Second, the less-common case of being in an RCU read-side
740 	 * critical section.  In this case we can count on a future
741 	 * rcu_read_unlock().  However, this rcu_read_unlock() might
742 	 * execute on some other CPU, but in that case there will be
743 	 * a future context switch.  Either way, if the expedited
744 	 * grace period is still waiting on this CPU, set ->deferred_qs
745 	 * so that the eventual quiescent state will be reported.
746 	 * Note that there is a large group of race conditions that
747 	 * can have caused this quiescent state to already have been
748 	 * reported, so we really do need to check ->expmask.
749 	 */
750 	if (depth > 0) {
751 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
752 		if (rnp->expmask & rdp->grpmask) {
753 			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
754 			t->rcu_read_unlock_special.b.exp_hint = true;
755 		}
756 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
757 		return;
758 	}
759 
760 	// Finally, negative nesting depth should not happen.
761 	WARN_ON_ONCE(1);
762 }
763 
764 /* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
765 static void sync_sched_exp_online_cleanup(int cpu)
766 {
767 }
768 
769 /*
770  * Scan the current list of tasks blocked within RCU read-side critical
771  * sections, printing out the tid of each that is blocking the current
772  * expedited grace period.
773  */
774 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
775 {
776 	unsigned long flags;
777 	int ndetected = 0;
778 	struct task_struct *t;
779 
780 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
781 	if (!rnp->exp_tasks) {
782 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
783 		return 0;
784 	}
785 	t = list_entry(rnp->exp_tasks->prev,
786 		       struct task_struct, rcu_node_entry);
787 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
788 		pr_cont(" P%d", t->pid);
789 		ndetected++;
790 	}
791 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
792 	return ndetected;
793 }
794 
795 /*
796  * Scan the current list of tasks blocked within RCU read-side critical
797  * sections, dumping the stack of each that is blocking the current
798  * expedited grace period.
799  */
800 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
801 {
802 	unsigned long flags;
803 	struct task_struct *t;
804 
805 	if (!rcu_exp_stall_task_details)
806 		return;
807 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
808 	if (!READ_ONCE(rnp->exp_tasks)) {
809 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
810 		return;
811 	}
812 	t = list_entry(rnp->exp_tasks->prev,
813 		       struct task_struct, rcu_node_entry);
814 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
815 		/*
816 		 * We could be printing a lot while holding a spinlock.
817 		 * Avoid triggering hard lockup.
818 		 */
819 		touch_nmi_watchdog();
820 		sched_show_task(t);
821 	}
822 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
823 }
824 
825 #else /* #ifdef CONFIG_PREEMPT_RCU */
826 
827 /* Request an expedited quiescent state. */
828 static void rcu_exp_need_qs(void)
829 {
830 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
831 	/* Store .exp before .rcu_urgent_qs. */
832 	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
833 	set_tsk_need_resched(current);
834 	set_preempt_need_resched();
835 }
836 
837 /* Invoked on each online non-idle CPU for expedited quiescent state. */
838 static void rcu_exp_handler(void *unused)
839 {
840 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
841 	struct rcu_node *rnp = rdp->mynode;
842 	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
843 
844 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
845 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
846 		return;
847 	if (rcu_is_cpu_rrupt_from_idle() ||
848 	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
849 		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
850 		return;
851 	}
852 	rcu_exp_need_qs();
853 }
854 
855 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
856 static void sync_sched_exp_online_cleanup(int cpu)
857 {
858 	unsigned long flags;
859 	int my_cpu;
860 	struct rcu_data *rdp;
861 	int ret;
862 	struct rcu_node *rnp;
863 
864 	rdp = per_cpu_ptr(&rcu_data, cpu);
865 	rnp = rdp->mynode;
866 	my_cpu = get_cpu();
867 	/* Quiescent state either not needed or already requested, leave. */
868 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
869 	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
870 		put_cpu();
871 		return;
872 	}
873 	/* Quiescent state needed on current CPU, so set it up locally. */
874 	if (my_cpu == cpu) {
875 		local_irq_save(flags);
876 		rcu_exp_need_qs();
877 		local_irq_restore(flags);
878 		put_cpu();
879 		return;
880 	}
881 	/* Quiescent state needed on some other CPU, send IPI. */
882 	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
883 	put_cpu();
884 	WARN_ON_ONCE(ret);
885 }
886 
887 /*
888  * Because preemptible RCU does not exist, we never have to check for
889  * tasks blocked within RCU read-side critical sections that are
890  * blocking the current expedited grace period.
891  */
892 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
893 {
894 	return 0;
895 }
896 
897 /*
898  * Because preemptible RCU does not exist, we never have to print out
899  * tasks blocked within RCU read-side critical sections that are blocking
900  * the current expedited grace period.
901  */
902 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
903 {
904 }
905 
906 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
907 
908 /**
909  * synchronize_rcu_expedited - Brute-force RCU grace period
910  *
911  * Wait for an RCU grace period, but expedite it.  The basic idea is to
912  * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
913  * the CPU is in an RCU critical section, and if so, it sets a flag that
914  * causes the outermost rcu_read_unlock() to report the quiescent state
915  * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
916  * other hand, if the CPU is not in an RCU read-side critical section,
917  * the IPI handler reports the quiescent state immediately.
918  *
919  * Although this is a great improvement over previous expedited
920  * implementations, it is still unfriendly to real-time workloads, so is
921  * thus not recommended for any sort of common-case code.  In fact, if
922  * you are using synchronize_rcu_expedited() in a loop, please restructure
923  * your code to batch your updates, and then use a single synchronize_rcu()
924  * instead.
925  *
926  * This has the same semantics as (but is more brutal than) synchronize_rcu().
927  */
928 void synchronize_rcu_expedited(void)
929 {
930 	unsigned long flags;
931 	struct rcu_exp_work rew;
932 	struct rcu_node *rnp;
933 	unsigned long s;
934 
935 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
936 			 lock_is_held(&rcu_lock_map) ||
937 			 lock_is_held(&rcu_sched_lock_map),
938 			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
939 
940 	/* Is the state is such that the call is a grace period? */
941 	if (rcu_blocking_is_gp()) {
942 		// Note well that this code runs with !PREEMPT && !SMP.
943 		// In addition, all code that advances grace periods runs
944 		// at process level.  Therefore, this expedited GP overlaps
945 		// with other expedited GPs only by being fully nested within
946 		// them, which allows reuse of ->gp_seq_polled_exp_snap.
947 		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
948 		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
949 
950 		local_irq_save(flags);
951 		WARN_ON_ONCE(num_online_cpus() > 1);
952 		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
953 		local_irq_restore(flags);
954 		return;  // Context allows vacuous grace periods.
955 	}
956 
957 	/* If expedited grace periods are prohibited, fall back to normal. */
958 	if (rcu_gp_is_normal()) {
959 		synchronize_rcu_normal();
960 		return;
961 	}
962 
963 	/* Take a snapshot of the sequence number.  */
964 	s = rcu_exp_gp_seq_snap();
965 	if (exp_funnel_lock(s))
966 		return;  /* Someone else did our work for us. */
967 
968 	/* Ensure that load happens before action based on it. */
969 	if (unlikely((rcu_scheduler_active == RCU_SCHEDULER_INIT) || !rcu_exp_worker_started())) {
970 		/* Direct call during scheduler init and early_initcalls(). */
971 		rcu_exp_sel_wait_wake(s);
972 	} else {
973 		/* Marshall arguments & schedule the expedited grace period. */
974 		rew.rew_s = s;
975 		synchronize_rcu_expedited_queue_work(&rew);
976 	}
977 
978 	/* Wait for expedited grace period to complete. */
979 	rnp = rcu_get_root();
980 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
981 		   sync_exp_work_done(s));
982 
983 	/* Let the next expedited grace period start. */
984 	mutex_unlock(&rcu_state.exp_mutex);
985 }
986 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
987 
988 /*
989  * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
990  * RCU grace periods that it needs.
991  */
992 static void sync_rcu_do_polled_gp(struct work_struct *wp)
993 {
994 	unsigned long flags;
995 	int i = 0;
996 	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
997 	unsigned long s;
998 
999 	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1000 	s = rnp->exp_seq_poll_rq;
1001 	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
1002 	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1003 	if (s == RCU_GET_STATE_COMPLETED)
1004 		return;
1005 	while (!poll_state_synchronize_rcu(s)) {
1006 		synchronize_rcu_expedited();
1007 		if (i == 10 || i == 20)
1008 			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
1009 		i++;
1010 	}
1011 	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1012 	s = rnp->exp_seq_poll_rq;
1013 	if (poll_state_synchronize_rcu(s))
1014 		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
1015 	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1016 }
1017 
1018 /**
1019  * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
1020  *
1021  * Returns a cookie to pass to a call to cond_synchronize_rcu(),
1022  * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
1023  * allowing them to determine whether or not any sort of grace period has
1024  * elapsed in the meantime.  If the needed expedited grace period is not
1025  * already slated to start, initiates that grace period.
1026  */
1027 unsigned long start_poll_synchronize_rcu_expedited(void)
1028 {
1029 	unsigned long flags;
1030 	struct rcu_data *rdp;
1031 	struct rcu_node *rnp;
1032 	unsigned long s;
1033 
1034 	s = get_state_synchronize_rcu();
1035 	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
1036 	rnp = rdp->mynode;
1037 	if (rcu_init_invoked())
1038 		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1039 	if (!poll_state_synchronize_rcu(s)) {
1040 		if (rcu_init_invoked()) {
1041 			rnp->exp_seq_poll_rq = s;
1042 			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
1043 		}
1044 	}
1045 	if (rcu_init_invoked())
1046 		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1047 
1048 	return s;
1049 }
1050 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
1051 
1052 /**
1053  * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
1054  * @rgosp: Place to put snapshot of grace-period state
1055  *
1056  * Places the normal and expedited grace-period states in rgosp.  This
1057  * state value can be passed to a later call to cond_synchronize_rcu_full()
1058  * or poll_state_synchronize_rcu_full() to determine whether or not a
1059  * grace period (whether normal or expedited) has elapsed in the meantime.
1060  * If the needed expedited grace period is not already slated to start,
1061  * initiates that grace period.
1062  */
1063 void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
1064 {
1065 	get_state_synchronize_rcu_full(rgosp);
1066 	(void)start_poll_synchronize_rcu_expedited();
1067 }
1068 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);
1069 
1070 /**
1071  * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
1072  *
1073  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
1074  *
1075  * If any type of full RCU grace period has elapsed since the earlier
1076  * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
1077  * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
1078  * invoke synchronize_rcu_expedited() to wait for a full grace period.
1079  *
1080  * Yes, this function does not take counter wrap into account.
1081  * But counter wrap is harmless.  If the counter wraps, we have waited for
1082  * more than 2 billion grace periods (and way more on a 64-bit system!),
1083  * so waiting for a couple of additional grace periods should be just fine.
1084  *
1085  * This function provides the same memory-ordering guarantees that
1086  * would be provided by a synchronize_rcu() that was invoked at the call
1087  * to the function that provided @oldstate and that returned at the end
1088  * of this function.
1089  */
1090 void cond_synchronize_rcu_expedited(unsigned long oldstate)
1091 {
1092 	if (!poll_state_synchronize_rcu(oldstate))
1093 		synchronize_rcu_expedited();
1094 }
1095 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
1096 
1097 /**
1098  * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
1099  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
1100  *
1101  * If a full RCU grace period has elapsed since the call to
1102  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
1103  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
1104  * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
1105  * to wait for a full grace period.
1106  *
1107  * Yes, this function does not take counter wrap into account.
1108  * But counter wrap is harmless.  If the counter wraps, we have waited for
1109  * more than 2 billion grace periods (and way more on a 64-bit system!),
1110  * so waiting for a couple of additional grace periods should be just fine.
1111  *
1112  * This function provides the same memory-ordering guarantees that
1113  * would be provided by a synchronize_rcu() that was invoked at the call
1114  * to the function that provided @rgosp and that returned at the end of
1115  * this function.
1116  */
1117 void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
1118 {
1119 	if (!poll_state_synchronize_rcu_full(rgosp))
1120 		synchronize_rcu_expedited();
1121 }
1122 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);
1123