// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
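
/*
 * Usage sketch (hypothetical code, not part of this file): a caller that
 * wants readers on the slow path from the very beginning can pair
 * rcu_sync_init() with rcu_sync_enter_start() before any reader can see
 * the structure, and later re-enable the fast path with a matching
 * rcu_sync_exit():
 *
 *	static struct rcu_sync demo_sync;
 *
 *	static void demo_setup(void)
 *	{
 *		rcu_sync_init(&demo_sync, RCU_SYNC);
 *		rcu_sync_enter_start(&demo_sync);	// no readers yet, so no GP wait
 *	}
 *
 *	static void demo_settle(void)
 *	{
 *		rcu_sync_exit(&demo_sync);	// matches the enter_start() above
 *	}
 *
 * While the enter_start() is in effect, intervening rcu_sync_enter()/
 * rcu_sync_exit() pairs are NO-OPs, as noted in the kernel-doc above.
 */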

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	WARN_ON_ONCE(need_wait && need_sync);
	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from an rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync(). The callback will get 'dropped'.
		 */
		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	}
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write side critical section.
		 * Let 'em rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
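
/*
 * For reference, the state machine driven by rcu_sync_enter(),
 * rcu_sync_exit() and rcu_sync_func() above:
 *
 *	gp_state:
 *	  GP_IDLE    -> GP_PENDING	rcu_sync_enter(), first updater
 *	  GP_PENDING -> GP_PASSED	rcu_sync_enter(), after sync() returns
 *	  GP_PASSED  -> GP_IDLE		rcu_sync_func(), gp_count == 0 and no replay
 *
 *	cb_state:
 *	  CB_IDLE    -> CB_PENDING	rcu_sync_exit() drops gp_count to 0, call_rcu() queued
 *	  CB_PENDING -> CB_REPLAY	rcu_sync_exit() while the callback is still queued
 *	  CB_REPLAY  -> CB_PENDING	rcu_sync_func() requeues itself for another GP
 *	  CB_PENDING -> CB_IDLE		rcu_sync_func(); gp_state also goes GP_IDLE unless a
 *					new rcu_sync_enter() raised gp_count in the meantime
 *
 * rcu_sync_dtor() below downgrades CB_REPLAY to CB_PENDING and then uses
 * the flavor's wait() (rcu_barrier() here) to wait out the queued callback.
 */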

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	WARN_ON_ONCE(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
	}
}
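
/*
 * End-to-end usage sketch (hypothetical helpers, not part of this file):
 * this is the pattern used by callers such as percpu-rwsem.  The reader
 * checks rcu_sync_is_idle() under the read-side primitive of the RCU
 * flavor the rcu_sync was initialized with (which is what
 * rcu_sync_lockdep_assert() verifies), and the updater brackets its
 * update with rcu_sync_enter()/rcu_sync_exit():
 *
 *	static struct rcu_sync demo_sync;	// initialized as in the sketch above
 *
 *	static void demo_read(void)
 *	{
 *		rcu_read_lock();
 *		if (likely(rcu_sync_is_idle(&demo_sync))) {
 *			// fast path: no updater is active or pending
 *		} else {
 *			// slow path: use heavier shared state that the
 *			// updater can observe and wait for
 *		}
 *		rcu_read_unlock();
 *	}
 *
 *	static void demo_update(void)
 *	{
 *		rcu_sync_enter(&demo_sync);	// readers now take the slow path
 *		// ... perform the update ...
 *		rcu_sync_exit(&demo_sync);	// fast path returns after a GP
 *	}
 */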