/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
 * not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
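
/*
 * Illustrative sketch, not compiled into this file: a hypothetical
 * LinuxKPI consumer embeds an opaque "struct rcu_head" in its own
 * object and passes it to call_rcu(). The assertion above guarantees
 * that this storage is large enough to be reused as the private
 * "struct callback_head" declared in this file. All names below
 * ("struct foo", foo_free_cb, foo_retire) are made up for the example.
 */
#if 0
struct foo {
	int value;
	struct rcu_head rcu;		/* opaque storage reused by LinuxKPI */
};

static void
foo_free_cb(struct rcu_head *head)
{
	struct foo *p = container_of(head, struct foo, rcu);

	kfree(p);
}

static void
foo_retire(struct foo *p)
{
	/* linux_call_rcu() below overlays a callback_head onto &p->rcu */
	call_rcu(&p->rcu, foo_free_cb);
}
#endif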

/*
 * Verify that "epoch_record" is at beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];

		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move current callbacks into own queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* synchronize */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}
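
/*
 * Illustrative sketch, not compiled into this file: the dispatch loop
 * above treats small "function pointers" as byte offsets. That is the
 * convention used by the LinuxKPI kfree_rcu() macro, which passes
 * offsetof(type, rcu_head_member) in place of a real callback so the
 * cleaner can recover the enclosing allocation and kfree() it. The
 * structure and function names below are made up for the example.
 */
#if 0
struct bar {
	int value;
	struct rcu_head rcu;
};

static void
bar_retire(struct bar *p)
{
	/*
	 * Roughly equivalent to:
	 * call_rcu(&p->rcu, (rcu_callback_t)offsetof(struct bar, rcu))
	 */
	kfree_rcu(p, rcu);
}
#endif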

void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record, NULL);
	ts->rcu_recurse[type]++;
	if (ts->rcu_recurse[type] == 1)
		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record, NULL);
	ts->rcu_recurse[type]--;
	if (ts->rcu_recurse[type] == 0)
		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}
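
/*
 * Illustrative sketch, not compiled into this file: a regular RCU
 * read-side critical section as seen by a hypothetical LinuxKPI
 * consumer. Between rcu_read_lock() and rcu_read_unlock() the thread
 * is pinned to its CPU and must not sleep. All names below are made
 * up for the example.
 */
#if 0
struct baz {
	int value;
};

static struct baz *baz_ptr;	/* published with rcu_assign_pointer() */

static int
baz_read_value(void)
{
	struct baz *p;
	int value = -1;

	rcu_read_lock();
	p = rcu_dereference(baz_ptr);
	if (p != NULL)
		value = p->value;
	rcu_read_unlock();

	return (value);
}
#endif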

static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH);
			/*
			 * It is important that the thread lock is
			 * dropped while yielding, to allow other
			 * threads to acquire the lock pointed to by
			 * TDQ_LOCKPTR(td). Currently mi_switch() will
			 * unlock the thread lock before returning.
			 * Else a deadlock-like situation might
			 * happen.
			 */
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that the code gets run. The
		 * thread priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}

int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}
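
/*
 * Illustrative sketch, not compiled into this file: sleepable RCU as
 * used by a hypothetical LinuxKPI consumer. The SRCU wrappers above
 * map onto RCU_TYPE_SLEEPABLE, so the read section may block, unlike
 * rcu_read_lock(). All names below are made up for the example.
 */
#if 0
static struct srcu_struct qux_srcu;

static void
qux_setup(void)
{
	init_srcu_struct(&qux_srcu);
}

static void
qux_reader(void)
{
	int idx;

	idx = srcu_read_lock(&qux_srcu);
	/* ... read shared state; sleeping is allowed here ... */
	srcu_read_unlock(&qux_srcu, idx);
}

static void
qux_writer(void)
{
	/* ... unpublish an object ... */
	synchronize_srcu(&qux_srcu);
	/* ... now it is safe to free it ... */
}
#endif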