1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* 3 * Read-Copy Update definitions shared among RCU implementations. 4 * 5 * Copyright IBM Corporation, 2011 6 * 7 * Author: Paul E. McKenney <paulmck@linux.ibm.com> 8 */ 9 10 #ifndef __LINUX_RCU_H 11 #define __LINUX_RCU_H 12 13 #include <linux/slab.h> 14 #include <trace/events/rcu.h> 15 16 /* 17 * Grace-period counter management. 18 * 19 * The two least significant bits contain the control flags. 20 * The most significant bits contain the grace-period sequence counter. 21 * 22 * When both control flags are zero, no grace period is in progress. 23 * When either bit is non-zero, a grace period has started and is in 24 * progress. When the grace period completes, the control flags are reset 25 * to 0 and the grace-period sequence counter is incremented. 26 * 27 * However some specific RCU usages make use of custom values. 28 * 29 * SRCU special control values: 30 * 31 * SRCU_SNP_INIT_SEQ : Invalid/init value set when SRCU node 32 * is initialized. 33 * 34 * SRCU_STATE_IDLE : No SRCU gp is in progress 35 * 36 * SRCU_STATE_SCAN1 : State set by rcu_seq_start(). Indicates 37 * we are scanning the readers on the slot 38 * defined as inactive (there might well 39 * be pending readers that will use that 40 * index, but their number is bounded). 41 * 42 * SRCU_STATE_SCAN2 : State set manually via rcu_seq_set_state() 43 * Indicates we are flipping the readers 44 * index and then scanning the readers on the 45 * slot newly designated as inactive (again, 46 * the number of pending readers that will use 47 * this inactive index is bounded). 48 * 49 * RCU polled GP special control value: 50 * 51 * RCU_GET_STATE_COMPLETED : State value indicating an already-completed 52 * polled GP has completed. This value covers 53 * both the state and the counter of the 54 * grace-period sequence number. 55 */ 56 57 /* Low-order bit definition for polled grace-period APIs. */ 58 #define RCU_GET_STATE_COMPLETED 0x1 59 60 extern int sysctl_sched_rt_runtime; 61 62 /* 63 * Return the counter portion of a sequence number previously returned 64 * by rcu_seq_snap() or rcu_seq_current(). 65 */ 66 static inline unsigned long rcu_seq_ctr(unsigned long s) 67 { 68 return s >> RCU_SEQ_CTR_SHIFT; 69 } 70 71 /* 72 * Return the state portion of a sequence number previously returned 73 * by rcu_seq_snap() or rcu_seq_current(). 74 */ 75 static inline int rcu_seq_state(unsigned long s) 76 { 77 return s & RCU_SEQ_STATE_MASK; 78 } 79 80 /* 81 * Set the state portion of the pointed-to sequence number. 82 * The caller is responsible for preventing conflicting updates. 83 */ 84 static inline void rcu_seq_set_state(unsigned long *sp, int newstate) 85 { 86 WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK); 87 WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate); 88 } 89 90 /* Adjust sequence number for start of update-side operation. */ 91 static inline void rcu_seq_start(unsigned long *sp) 92 { 93 WRITE_ONCE(*sp, *sp + 1); 94 smp_mb(); /* Ensure update-side operation after counter increment. */ 95 WARN_ON_ONCE(rcu_seq_state(*sp) != 1); 96 } 97 98 /* Compute the end-of-grace-period value for the specified sequence number. */ 99 static inline unsigned long rcu_seq_endval(unsigned long *sp) 100 { 101 return (*sp | RCU_SEQ_STATE_MASK) + 1; 102 } 103 104 /* Adjust sequence number for end of update-side operation. */ 105 static inline void rcu_seq_end(unsigned long *sp) 106 { 107 smp_mb(); /* Ensure update-side operation before counter increment. */ 108 WARN_ON_ONCE(!rcu_seq_state(*sp)); 109 WRITE_ONCE(*sp, rcu_seq_endval(sp)); 110 } 111 112 /* 113 * rcu_seq_snap - Take a snapshot of the update side's sequence number. 114 * 115 * This function returns the earliest value of the grace-period sequence number 116 * that will indicate that a full grace period has elapsed since the current 117 * time. Once the grace-period sequence number has reached this value, it will 118 * be safe to invoke all callbacks that have been registered prior to the 119 * current time. This value is the current grace-period number plus two to the 120 * power of the number of low-order bits reserved for state, then rounded up to 121 * the next value in which the state bits are all zero. 122 */ 123 static inline unsigned long rcu_seq_snap(unsigned long *sp) 124 { 125 unsigned long s; 126 127 s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK; 128 smp_mb(); /* Above access must not bleed into critical section. */ 129 return s; 130 } 131 132 /* Return the current value the update side's sequence number, no ordering. */ 133 static inline unsigned long rcu_seq_current(unsigned long *sp) 134 { 135 return READ_ONCE(*sp); 136 } 137 138 /* 139 * Given a snapshot from rcu_seq_snap(), determine whether or not the 140 * corresponding update-side operation has started. 141 */ 142 static inline bool rcu_seq_started(unsigned long *sp, unsigned long s) 143 { 144 return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp)); 145 } 146 147 /* 148 * Given a snapshot from rcu_seq_snap(), determine whether or not a 149 * full update-side operation has occurred. 150 */ 151 static inline bool rcu_seq_done(unsigned long *sp, unsigned long s) 152 { 153 return ULONG_CMP_GE(READ_ONCE(*sp), s); 154 } 155 156 /* 157 * Given a snapshot from rcu_seq_snap(), determine whether or not a 158 * full update-side operation has occurred, but do not allow the 159 * (ULONG_MAX / 2) safety-factor/guard-band. 160 */ 161 static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s) 162 { 163 unsigned long cur_s = READ_ONCE(*sp); 164 165 return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1)); 166 } 167 168 /* 169 * Has a grace period completed since the time the old gp_seq was collected? 170 */ 171 static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new) 172 { 173 return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK); 174 } 175 176 /* 177 * Has a grace period started since the time the old gp_seq was collected? 178 */ 179 static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new) 180 { 181 return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK, 182 new); 183 } 184 185 /* 186 * Roughly how many full grace periods have elapsed between the collection 187 * of the two specified grace periods? 188 */ 189 static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old) 190 { 191 unsigned long rnd_diff; 192 193 if (old == new) 194 return 0; 195 /* 196 * Compute the number of grace periods (still shifted up), plus 197 * one if either of new and old is not an exact grace period. 198 */ 199 rnd_diff = (new & ~RCU_SEQ_STATE_MASK) - 200 ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) + 201 ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK)); 202 if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff)) 203 return 1; /* Definitely no grace period has elapsed. */ 204 return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2; 205 } 206 207 /* 208 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally 209 * by call_rcu() and rcu callback execution, and are therefore not part 210 * of the RCU API. These are in rcupdate.h because they are used by all 211 * RCU implementations. 212 */ 213 214 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 215 # define STATE_RCU_HEAD_READY 0 216 # define STATE_RCU_HEAD_QUEUED 1 217 218 extern const struct debug_obj_descr rcuhead_debug_descr; 219 220 static inline int debug_rcu_head_queue(struct rcu_head *head) 221 { 222 int r1; 223 224 r1 = debug_object_activate(head, &rcuhead_debug_descr); 225 debug_object_active_state(head, &rcuhead_debug_descr, 226 STATE_RCU_HEAD_READY, 227 STATE_RCU_HEAD_QUEUED); 228 return r1; 229 } 230 231 static inline void debug_rcu_head_unqueue(struct rcu_head *head) 232 { 233 debug_object_active_state(head, &rcuhead_debug_descr, 234 STATE_RCU_HEAD_QUEUED, 235 STATE_RCU_HEAD_READY); 236 debug_object_deactivate(head, &rcuhead_debug_descr); 237 } 238 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 239 static inline int debug_rcu_head_queue(struct rcu_head *head) 240 { 241 return 0; 242 } 243 244 static inline void debug_rcu_head_unqueue(struct rcu_head *head) 245 { 246 } 247 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 248 249 static inline void debug_rcu_head_callback(struct rcu_head *rhp) 250 { 251 if (unlikely(!rhp->func)) 252 kmem_dump_obj(rhp); 253 } 254 255 static inline bool rcu_barrier_cb_is_done(struct rcu_head *rhp) 256 { 257 return rhp->next == rhp; 258 } 259 260 extern int rcu_cpu_stall_suppress_at_boot; 261 262 static inline bool rcu_stall_is_suppressed_at_boot(void) 263 { 264 return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended(); 265 } 266 267 extern int rcu_cpu_stall_notifiers; 268 269 #ifdef CONFIG_RCU_STALL_COMMON 270 271 extern int rcu_cpu_stall_ftrace_dump; 272 extern int rcu_cpu_stall_suppress; 273 extern int rcu_cpu_stall_timeout; 274 extern int rcu_exp_cpu_stall_timeout; 275 extern int rcu_cpu_stall_cputime; 276 extern bool rcu_exp_stall_task_details __read_mostly; 277 int rcu_jiffies_till_stall_check(void); 278 int rcu_exp_jiffies_till_stall_check(void); 279 280 static inline bool rcu_stall_is_suppressed(void) 281 { 282 return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress; 283 } 284 285 #define rcu_ftrace_dump_stall_suppress() \ 286 do { \ 287 if (!rcu_cpu_stall_suppress) \ 288 rcu_cpu_stall_suppress = 3; \ 289 } while (0) 290 291 #define rcu_ftrace_dump_stall_unsuppress() \ 292 do { \ 293 if (rcu_cpu_stall_suppress == 3) \ 294 rcu_cpu_stall_suppress = 0; \ 295 } while (0) 296 297 #else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */ 298 299 static inline bool rcu_stall_is_suppressed(void) 300 { 301 return rcu_stall_is_suppressed_at_boot(); 302 } 303 #define rcu_ftrace_dump_stall_suppress() 304 #define rcu_ftrace_dump_stall_unsuppress() 305 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 306 307 /* 308 * Strings used in tracepoints need to be exported via the 309 * tracing system such that tools like perf and trace-cmd can 310 * translate the string address pointers to actual text. 311 */ 312 #define TPS(x) tracepoint_string(x) 313 314 /* 315 * Dump the ftrace buffer, but only one time per callsite per boot. 316 */ 317 #define rcu_ftrace_dump(oops_dump_mode) \ 318 do { \ 319 static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \ 320 \ 321 if (!atomic_read(&___rfd_beenhere) && \ 322 !atomic_xchg(&___rfd_beenhere, 1)) { \ 323 tracing_off(); \ 324 rcu_ftrace_dump_stall_suppress(); \ 325 ftrace_dump(oops_dump_mode); \ 326 rcu_ftrace_dump_stall_unsuppress(); \ 327 } \ 328 } while (0) 329 330 void rcu_early_boot_tests(void); 331 void rcu_test_sync_prims(void); 332 333 /* 334 * This function really isn't for public consumption, but RCU is special in 335 * that context switches can allow the state machine to make progress. 336 */ 337 extern void resched_cpu(int cpu); 338 339 #if !defined(CONFIG_TINY_RCU) 340 341 #include <linux/rcu_node_tree.h> 342 343 extern int rcu_num_lvls; 344 extern int num_rcu_lvl[]; 345 extern int rcu_num_nodes; 346 static bool rcu_fanout_exact; 347 static int rcu_fanout_leaf; 348 349 /* 350 * Compute the per-level fanout, either using the exact fanout specified 351 * or balancing the tree, depending on the rcu_fanout_exact boot parameter. 352 */ 353 static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) 354 { 355 int i; 356 357 for (i = 0; i < RCU_NUM_LVLS; i++) 358 levelspread[i] = INT_MIN; 359 if (rcu_fanout_exact) { 360 levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; 361 for (i = rcu_num_lvls - 2; i >= 0; i--) 362 levelspread[i] = RCU_FANOUT; 363 } else { 364 int ccur; 365 int cprv; 366 367 cprv = nr_cpu_ids; 368 for (i = rcu_num_lvls - 1; i >= 0; i--) { 369 ccur = levelcnt[i]; 370 levelspread[i] = (cprv + ccur - 1) / ccur; 371 cprv = ccur; 372 } 373 } 374 } 375 376 extern void rcu_init_geometry(void); 377 378 /* Returns a pointer to the first leaf rcu_node structure. */ 379 #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1]) 380 381 /* Is this rcu_node a leaf? */ 382 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1) 383 384 /* Is this rcu_node the last leaf? */ 385 #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1]) 386 387 /* 388 * Do a full breadth-first scan of the {s,}rcu_node structures for the 389 * specified state structure (for SRCU) or the only rcu_state structure 390 * (for RCU). 391 */ 392 #define _rcu_for_each_node_breadth_first(sp, rnp) \ 393 for ((rnp) = &(sp)->node[0]; \ 394 (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++) 395 #define rcu_for_each_node_breadth_first(rnp) \ 396 _rcu_for_each_node_breadth_first(&rcu_state, rnp) 397 #define srcu_for_each_node_breadth_first(ssp, rnp) \ 398 _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp) 399 400 /* 401 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure. 402 * Note that if there is a singleton rcu_node tree with but one rcu_node 403 * structure, this loop -will- visit the rcu_node structure. It is still 404 * a leaf node, even if it is also the root node. 405 */ 406 #define rcu_for_each_leaf_node(rnp) \ 407 for ((rnp) = rcu_first_leaf_node(); \ 408 (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++) 409 410 /* 411 * Iterate over all possible CPUs in a leaf RCU node. 412 */ 413 #define for_each_leaf_node_possible_cpu(rnp, cpu) \ 414 for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \ 415 (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \ 416 (cpu) <= rnp->grphi; \ 417 (cpu) = cpumask_next((cpu), cpu_possible_mask)) 418 419 /* 420 * Iterate over all CPUs in a leaf RCU node's specified mask. 421 */ 422 #define rcu_find_next_bit(rnp, cpu, mask) \ 423 ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu))) 424 #define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \ 425 for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \ 426 (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \ 427 (cpu) <= rnp->grphi; \ 428 (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask))) 429 430 #endif /* !defined(CONFIG_TINY_RCU) */ 431 432 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC) 433 434 /* 435 * Wrappers for the rcu_node::lock acquire and release. 436 * 437 * Because the rcu_nodes form a tree, the tree traversal locking will observe 438 * different lock values, this in turn means that an UNLOCK of one level 439 * followed by a LOCK of another level does not imply a full memory barrier; 440 * and most importantly transitivity is lost. 441 * 442 * In order to restore full ordering between tree levels, augment the regular 443 * lock acquire functions with smp_mb__after_unlock_lock(). 444 * 445 * As ->lock of struct rcu_node is a __private field, therefore one should use 446 * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock. 447 */ 448 #define raw_spin_lock_rcu_node(p) \ 449 do { \ 450 raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \ 451 smp_mb__after_unlock_lock(); \ 452 } while (0) 453 454 #define raw_spin_unlock_rcu_node(p) \ 455 do { \ 456 lockdep_assert_irqs_disabled(); \ 457 raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \ 458 } while (0) 459 460 #define raw_spin_lock_irq_rcu_node(p) \ 461 do { \ 462 raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \ 463 smp_mb__after_unlock_lock(); \ 464 } while (0) 465 466 #define raw_spin_unlock_irq_rcu_node(p) \ 467 do { \ 468 lockdep_assert_irqs_disabled(); \ 469 raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \ 470 } while (0) 471 472 #define raw_spin_lock_irqsave_rcu_node(p, flags) \ 473 do { \ 474 raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \ 475 smp_mb__after_unlock_lock(); \ 476 } while (0) 477 478 #define raw_spin_unlock_irqrestore_rcu_node(p, flags) \ 479 do { \ 480 lockdep_assert_irqs_disabled(); \ 481 raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \ 482 } while (0) 483 484 #define raw_spin_trylock_rcu_node(p) \ 485 ({ \ 486 bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \ 487 \ 488 if (___locked) \ 489 smp_mb__after_unlock_lock(); \ 490 ___locked; \ 491 }) 492 493 #define raw_lockdep_assert_held_rcu_node(p) \ 494 lockdep_assert_held(&ACCESS_PRIVATE(p, lock)) 495 496 #endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC) 497 498 #ifdef CONFIG_TINY_RCU 499 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ 500 static inline bool rcu_gp_is_normal(void) { return true; } 501 static inline bool rcu_gp_is_expedited(void) { return false; } 502 static inline bool rcu_async_should_hurry(void) { return false; } 503 static inline void rcu_expedite_gp(void) { } 504 static inline void rcu_unexpedite_gp(void) { } 505 static inline void rcu_async_hurry(void) { } 506 static inline void rcu_async_relax(void) { } 507 static inline bool rcu_cpu_online(int cpu) { return true; } 508 #else /* #ifdef CONFIG_TINY_RCU */ 509 bool rcu_gp_is_normal(void); /* Internal RCU use. */ 510 bool rcu_gp_is_expedited(void); /* Internal RCU use. */ 511 bool rcu_async_should_hurry(void); /* Internal RCU use. */ 512 void rcu_expedite_gp(void); 513 void rcu_unexpedite_gp(void); 514 void rcu_async_hurry(void); 515 void rcu_async_relax(void); 516 void rcupdate_announce_bootup_oddness(void); 517 bool rcu_cpu_online(int cpu); 518 #ifdef CONFIG_TASKS_RCU_GENERIC 519 void show_rcu_tasks_gp_kthreads(void); 520 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ 521 static inline void show_rcu_tasks_gp_kthreads(void) {} 522 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ 523 #endif /* #else #ifdef CONFIG_TINY_RCU */ 524 525 #ifdef CONFIG_TASKS_RCU 526 struct task_struct *get_rcu_tasks_gp_kthread(void); 527 void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq); 528 #endif // # ifdef CONFIG_TASKS_RCU 529 530 #ifdef CONFIG_TASKS_RUDE_RCU 531 struct task_struct *get_rcu_tasks_rude_gp_kthread(void); 532 void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq); 533 #endif // # ifdef CONFIG_TASKS_RUDE_RCU 534 535 #ifdef CONFIG_TASKS_TRACE_RCU 536 void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq); 537 #endif 538 539 #ifdef CONFIG_TASKS_RCU_GENERIC 540 void tasks_cblist_init_generic(void); 541 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ 542 static inline void tasks_cblist_init_generic(void) { } 543 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ 544 545 #define RCU_SCHEDULER_INACTIVE 0 546 #define RCU_SCHEDULER_INIT 1 547 #define RCU_SCHEDULER_RUNNING 2 548 549 enum rcutorture_type { 550 RCU_FLAVOR, 551 RCU_TASKS_FLAVOR, 552 RCU_TASKS_RUDE_FLAVOR, 553 RCU_TASKS_TRACING_FLAVOR, 554 RCU_TRIVIAL_FLAVOR, 555 SRCU_FLAVOR, 556 INVALID_RCU_FLAVOR 557 }; 558 559 #if defined(CONFIG_RCU_LAZY) 560 unsigned long rcu_get_jiffies_lazy_flush(void); 561 void rcu_set_jiffies_lazy_flush(unsigned long j); 562 #else 563 static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; } 564 static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { } 565 #endif 566 567 #if defined(CONFIG_TREE_RCU) 568 void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq); 569 void do_trace_rcu_torture_read(const char *rcutorturename, 570 struct rcu_head *rhp, 571 unsigned long secs, 572 unsigned long c_old, 573 unsigned long c); 574 void rcu_gp_set_torture_wait(int duration); 575 #else 576 static inline void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq) 577 { 578 *flags = 0; 579 *gp_seq = 0; 580 } 581 #ifdef CONFIG_RCU_TRACE 582 void do_trace_rcu_torture_read(const char *rcutorturename, 583 struct rcu_head *rhp, 584 unsigned long secs, 585 unsigned long c_old, 586 unsigned long c); 587 #else 588 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ 589 do { } while (0) 590 #endif 591 static inline void rcu_gp_set_torture_wait(int duration) { } 592 #endif 593 594 #ifdef CONFIG_TINY_SRCU 595 596 static inline void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags, 597 unsigned long *gp_seq) 598 { 599 *flags = 0; 600 *gp_seq = sp->srcu_idx; 601 } 602 603 #elif defined(CONFIG_TREE_SRCU) 604 605 void srcutorture_get_gp_data(struct srcu_struct *sp, int *flags, 606 unsigned long *gp_seq); 607 608 #endif 609 610 #ifdef CONFIG_TINY_RCU 611 static inline bool rcu_watching_zero_in_eqs(int cpu, int *vp) { return false; } 612 static inline unsigned long rcu_get_gp_seq(void) { return 0; } 613 static inline unsigned long rcu_exp_batches_completed(void) { return 0; } 614 static inline unsigned long 615 srcu_batches_completed(struct srcu_struct *sp) { return 0; } 616 static inline void rcu_force_quiescent_state(void) { } 617 static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; } 618 static inline void show_rcu_gp_kthreads(void) { } 619 static inline int rcu_get_gp_kthreads_prio(void) { return 0; } 620 static inline void rcu_fwd_progress_check(unsigned long j) { } 621 static inline void rcu_gp_slow_register(atomic_t *rgssp) { } 622 static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { } 623 #else /* #ifdef CONFIG_TINY_RCU */ 624 bool rcu_watching_zero_in_eqs(int cpu, int *vp); 625 unsigned long rcu_get_gp_seq(void); 626 unsigned long rcu_exp_batches_completed(void); 627 unsigned long srcu_batches_completed(struct srcu_struct *sp); 628 bool rcu_check_boost_fail(unsigned long gp_state, int *cpup); 629 void show_rcu_gp_kthreads(void); 630 int rcu_get_gp_kthreads_prio(void); 631 void rcu_fwd_progress_check(unsigned long j); 632 void rcu_force_quiescent_state(void); 633 extern struct workqueue_struct *rcu_gp_wq; 634 extern struct kthread_worker *rcu_exp_gp_kworker; 635 void rcu_gp_slow_register(atomic_t *rgssp); 636 void rcu_gp_slow_unregister(atomic_t *rgssp); 637 #endif /* #else #ifdef CONFIG_TINY_RCU */ 638 639 #ifdef CONFIG_RCU_NOCB_CPU 640 void rcu_bind_current_to_nocb(void); 641 #else 642 static inline void rcu_bind_current_to_nocb(void) { } 643 #endif 644 645 #if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU) 646 void show_rcu_tasks_classic_gp_kthread(void); 647 #else 648 static inline void show_rcu_tasks_classic_gp_kthread(void) {} 649 #endif 650 #if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU) 651 void show_rcu_tasks_rude_gp_kthread(void); 652 #else 653 static inline void show_rcu_tasks_rude_gp_kthread(void) {} 654 #endif 655 #if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU) 656 void show_rcu_tasks_trace_gp_kthread(void); 657 #else 658 static inline void show_rcu_tasks_trace_gp_kthread(void) {} 659 #endif 660 661 #ifdef CONFIG_TINY_RCU 662 static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; } 663 #else 664 bool rcu_cpu_beenfullyonline(int cpu); 665 #endif 666 667 #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) 668 int rcu_stall_notifier_call_chain(unsigned long val, void *v); 669 #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) 670 static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; } 671 #endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) 672 673 #endif /* __LINUX_RCU_H */ 674