/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2021 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
 * not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
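
/*
 * Illustrative sketch (not part of this file, just restating what the
 * assertion above guarantees): client code only ever sees "struct
 * rcu_head", while this file privately reuses that storage, e.g.:
 *
 *	struct rcu_head *context = ...;
 *	struct callback_head *rcu = (struct callback_head *)context;
 *
 * which is exactly the cast linux_call_rcu() below performs.
 */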

/*
 * Verify that "rcu_section[0]" has the same size as
 * "ck_epoch_section_t". This has been done to avoid having to add
 * special compile flags for including ck_epoch.h to all clients of
 * the LinuxKPI.
 */
CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) ==
    sizeof(ck_epoch_section_t));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];

		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move the current set of callbacks onto a local queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* synchronize (the RCU type index is derived from the head pointer) */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}
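
/*
 * Illustrative sketch of the offset encoding consumed above: a
 * kfree_rcu()-style caller passes the offset of the embedded
 * "struct rcu_head" disguised as the callback function pointer.
 * With a hypothetical client structure
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 * the deferred free boils down to
 *
 *	call_rcu(&fp->rcu, (rcu_callback_t)offsetof(struct foo, rcu));
 *
 * which lets the dispatch loop recover the start of the allocation
 * by subtracting the offset before calling kfree().
 */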

void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] != INT_MAX);

	if (++(ts->rcu_recurse[type]) != 1)
		return;

	/*
	 * Pin the thread to the current CPU so that the unlock code
	 * gets the same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] > 0);

	if (--(ts->rcu_recurse[type]) != 0)
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}
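
/*
 * Reader-side usage sketch (the pointer "gbl_foo" and "struct foo"
 * are hypothetical); the section may nest, but must not sleep for
 * the non-sleepable RCU types:
 *
 *	struct foo *fp;
 *
 *	rcu_read_lock();
 *	fp = rcu_dereference(gbl_foo);
 *	if (fp != NULL)
 *		(void)fp->data;		// use the protected object
 *	rcu_read_unlock();
 */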

static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH);
			/*
			 * It is important that the thread lock is
			 * dropped while yielding, to allow other
			 * threads to acquire the lock pointed to by
			 * TDQ_LOCKPTR(td). Currently mi_switch() will
			 * unlock the thread lock before returning.
			 * Otherwise a deadlock-like situation might
			 * happen.
			 */
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that code gets run. The thread
		 * priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save the current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get the thread back to its initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}

int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}
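
/*
 * Updater-side usage sketch (the pointers "gbl_foo" and "new_fp" are
 * hypothetical), following the classic unpublish/wait/reclaim RCU
 * pattern:
 *
 *	struct foo *old;
 *
 *	old = gbl_foo;
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	synchronize_rcu();		// wait out all readers of "old"
 *	kfree(old);
 *
 * The sleepable SRCU variant wrapped above works the same way, except
 * that the read section is entered and exited with the key returned
 * by srcu_read_lock():
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&srcu);
 *	...				// sleeping is allowed here
 *	srcu_read_unlock(&srcu, idx);
 */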