/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};

void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL(rcu_barrier_bh);

void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL(rcu_barrier_sched);

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * The callers disable irqs to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state. And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state. Use "+" instead of "||" to defeat short-circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it. This function must
 * be called from hardirq context. It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}

static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse. But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh? (Due to Josh Triplett.)
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period. But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}
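
/*
 * Illustrative usage sketch (not part of the original file): a minimal,
 * hypothetical example of how a caller might pair call_rcu_sched() with
 * an rcu_head embedded in its own structure to defer freeing past a
 * grace period, and how synchronize_sched() can be used to wait
 * synchronously instead. The names example_node, example_free_cb(),
 * example_delete_async(), and example_delete_sync() are assumptions
 * made purely for illustration; they do not exist in the kernel.
 * Guarded by #if 0 so it has no effect on the built object.
 */
#if 0
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct example_node {
	int data;
	struct rcu_head rh;	/* Callback handle passed to call_rcu_sched(). */
};

/* Runs from RCU_SOFTIRQ context once a grace period has elapsed. */
static void example_free_cb(struct rcu_head *rh)
{
	struct example_node *np = container_of(rh, struct example_node, rh);

	kfree(np);
}

/* Asynchronous deletion: unlink, then defer the kfree() past a grace period. */
static void example_delete_async(struct example_node *np)
{
	/* ... remove np from the reader-visible data structure here ... */
	call_rcu_sched(&np->rh, example_free_cb);
}

/* Synchronous deletion: wait for pre-existing readers, then free directly. */
static void example_delete_sync(struct example_node *np)
{
	/* ... remove np from the reader-visible data structure here ... */
	synchronize_sched();	/* On this UP build, only a lockdep misuse check. */
	kfree(np);
}
#endif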