xref: /linux/kernel/rcu/tree_exp.h (revision f79e4d5f92a129a1159c973735007d4ddc8541f3)
/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/lockdep.h>

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
{
	return rcu_seq_endval(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
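	/*
	 * rcu_seq_snap() returns the counter value that will be reached
	 * once a full expedited grace period beginning after this call
	 * has completed, so the snapshot can later be handed to
	 * rcu_exp_gp_seq_done() to detect that completion.
	 */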
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, so it acquires and releases the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
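			/*
			 * Not yet done.  If ->expmask is zero, only blocked
			 * tasks are holding up this expedited grace period,
			 * so initiate RCU priority boosting if need be;
			 * rcu_initiate_boost() releases rnp->lock.
			 */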
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falling through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
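			/*
			 * Each rcu_node structure has four wait queues, so
			 * waiters for distinct recent expedited grace periods
			 * do not share a queue.  The two low-order bits of
			 * the grace-period count select the queue for
			 * sequence number s.
			 */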
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(rsp, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
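	/*
	 * Force a context switch on this CPU so that the scheduler's
	 * RCU hooks see the .exp flag set above and report the expedited
	 * quiescent state on our behalf.
	 */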
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	smp_call_func_t func;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
	struct rcu_state *rsp = rewp->rew_rsp;

	func = rewp->rew_func;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
		struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdtp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.	Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
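		/*
		 * If the CPU has been in an extended quiescent state at any
		 * point since the ->dynticks snapshot was taken, it has
		 * already passed through a quiescent state, so no IPI is
		 * needed.
		 */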
		if (rcu_dynticks_in_eqs_since(rdp->dynticks,
					      rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, func, rsp, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
	sync_exp_reset_tree(rsp);
	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rsp, rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		rnp->rew.rew_func = func;
		rnp->rew.rew_rsp = rsp;
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
			/* No workqueues yet. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
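		/*
		 * Run the scan on the leaf's lowest-numbered CPU (->grplo)
		 * so that the work executes near the CPUs it examines.
		 */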
		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rsp, rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
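				/*
				 * Per-CPU flags: "O" if the CPU is offline,
				 * "o" if its bit is clear in ->expmaskinit,
				 * "N" if its bit is clear in ->expmaskinitnext,
				 * and "." otherwise.
				 */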
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
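		/*
		 * Wake up any tasks waiting on this rcu_node for the
		 * just-completed grace period; the low-order bits of the
		 * grace-period count select the same wait queue that the
		 * waiters chose when they went to sleep.
		 */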
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
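		/* wait_rcu_gp() waits for a normal grace period using this flavor's call_rcu() function. */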
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(rsp, s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report the quiescent state.  Unless this RCU
	 * read-side critical section has already blocked, in which case it
	 * is already set up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
804