/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)						\
	do {								\
		static struct lock_class_key __key;			\
		__seqcount_init((s), #s, &__key);			\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }

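/*
 * Example (illustrative only; "foo_seq" and "foo_init" are hypothetical
 * names, not part of this API): a plain seqcount_t can be initialized
 * either statically with SEQCNT_ZERO() or at runtime with seqcount_init().
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	static void foo_init(void)
 *	{
 *		seqcount_init(&foo_seq);
 *	}
 *
 * Writers of data protected by a plain seqcount_t must provide their own
 * serialization and keep preemption disabled; see write_seqcount_begin().
 */
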
/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated with the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
	do {								\
		seqcount_##lockname##_t *____s = (s);			\
		seqcount_init(&____s->seqcount);			\
		__SEQ_LOCK(____s->lock = (_lock));			\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)

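/*
 * Example (illustrative; "struct foo" and its members are hypothetical):
 * associating a spinlock_t with a seqcount_spinlock_t at runtime, so that
 * lockdep can verify the writer serialization lock is held.
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 */
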
/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockbase:		prefix for associated lock/unlock
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
static __always_inline seqcount_t *					\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline const seqcount_t *				\
__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s)	\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline unsigned						\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{									\
	unsigned seq = smp_load_acquire(&s->seqcount.sequence);	\
									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return seq;						\
									\
	if (preemptible && unlikely(seq & 1)) {				\
		__SEQ_LOCK(lockbase##_lock(s->lock));			\
		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
									\
		/*							\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.			\
		 */							\
		seq = smp_load_acquire(&s->seqcount.sequence);		\
	}								\
									\
	return seq;							\
}									\
									\
static __always_inline bool						\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return preemptible;					\
									\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
	return false;							\
}									\
									\
static __always_inline void						\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)	\
{									\
	__SEQ_LOCK(lockdep_assert_held(s->lock));			\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return smp_load_acquire(&s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock,	raw_spinlock_t,	false,		raw_spin)
SEQCOUNT_LOCKNAME(spinlock,	spinlock_t,	__SEQ_RT,	spin)
SEQCOUNT_LOCKNAME(rwlock,	rwlock_t,	__SEQ_RT,	read)
SEQCOUNT_LOCKNAME(mutex,	struct mutex,	true,		mutex)
#undef SEQCOUNT_LOCKNAME

/*
 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
	.seqcount	= SEQCNT_ZERO(seq_name.seqcount),		\
	__SEQ_LOCK(.lock = (assoc_lock))				\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)

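/*
 * Example (illustrative; "foo_lock" and "foo_seq" are hypothetical names):
 * statically defining a seqcount_spinlock_t together with its associated
 * writer serialization lock.
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_spinlock_t foo_seq =
 *		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 */
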
#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqprop_##prop,			\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)(s)
#define seqprop_const_ptr(s)		__seqprop(s, const_ptr)(s)
#define seqprop_sequence(s)		__seqprop(s, sequence)(s)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)(s)
#define seqprop_assert(s)		__seqprop(s, assert)(s)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)					\
({									\
	unsigned __seq;							\
									\
	while (unlikely((__seq = seqprop_sequence(s)) & 1))		\
		cpu_relax();						\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)	__read_seqcount_begin(s)

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)						\
({									\
	seqcount_lockdep_reader_access(seqprop_const_ptr(s));		\
	raw_read_seqcount_begin(s);					\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)						\
({									\
	unsigned __seq = seqprop_sequence(s);				\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_seqcount_try_begin() - begin a seqcount_t read critical section
 *                            w/o lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count to be passed to read_seqcount_retry()
 *
 * Similar to raw_seqcount_begin(), except it enables eliding the critical
 * section entirely if odd, instead of doing the speculation knowing it will
 * fail.
 *
 * Useful when counter stabilization is more or less equivalent to taking
 * the lock and there is a slowpath that does that.
 *
 * If true, start will be set to the (even) sequence count read.
 *
 * Return: true when a read critical section is started.
 */
#define raw_seqcount_try_begin(s, start)				\
({									\
	start = raw_read_seqcount(s);					\
	!(start & 1);							\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)						\
({									\
	/*								\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.					\
	 */								\
	raw_read_seqcount(s) & ~1;					\
})

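/*
 * Example (a minimal sketch; "foo" and its members are hypothetical):
 * raw_seqcount_try_begin() lets the caller skip the lockless fast path
 * entirely when a writer is active and fall back to a locking slowpath,
 * instead of speculating on data that is known to be unstable.
 *
 *	unsigned seq;
 *	int val;
 *
 *	if (raw_seqcount_try_begin(&foo->seq, seq)) {
 *		val = foo->value;
 *		if (!read_seqcount_retry(&foo->seq, seq))
 *			return val;
 *	}
 *
 *	// Slowpath: serialize against the writer instead of retrying.
 *	spin_lock(&foo->lock);
 *	val = foo->value;
 *	spin_unlock(&foo->lock);
 *	return val;
 */
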
/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)					\
	do___read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)					\
do {									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)					\
do {									\
	do_raw_write_seqcount_end(seqprop_ptr(s));			\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

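/*
 * Example (a minimal sketch; "foo" is a hypothetical structure): a write
 * side update using the raw variants, for callers that provide their own
 * serialization and do not want lockdep involved here.
 *
 *	// Writer serialization is the caller's responsibility.
 *	raw_write_seqcount_begin(&foo->seq);
 *	foo->a = new_a;
 *	foo->b = new_b;
 *	raw_write_seqcount_end(&foo->seq);
 */
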
/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
	do_raw_write_seqcount_begin(s);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)						\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)						\
do {									\
	do_write_seqcount_end(seqprop_ptr(s));				\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither writes before nor after the barrier are enclosed in a seq-writer
 * critical section that would ensure readers are aware of ongoing writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)					\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)					\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

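/*
 * Example (a minimal sketch; "struct foo" and its members are
 * hypothetical): the typical pairing for a seqcount_spinlock_t. The writer
 * serializes via the associated lock, which also lets lockdep verify the
 * write side, while readers retry locklessly.
 *
 *	void foo_update(struct foo *f, int a, int b)
 *	{
 *		spin_lock(&f->lock);
 *		write_seqcount_begin(&f->seq);
 *		f->a = a;
 *		f->b = b;
 *		write_seqcount_end(&f->seq);
 *		spin_unlock(&f->lock);
 *	}
 *
 *	void foo_read(struct foo *f, int *a, int *b)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&f->seq);
 *			*a = f->a;
 *			*b = f->b;
 *		} while (read_seqcount_retry(&f->seq, seq));
 *	}
 */
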
/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {					\
	.seqcount	= SEQCNT_ZERO(seq_name.seqcount),		\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with raw_read_seqcount_latch_retry().
 */
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See write_seqcount_latch() for details and a full reader/writer usage
 * example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with read_seqcount_latch_retry().
 */
static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
{
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
	return raw_read_seqcount_latch(s);
}

/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	smp_rmb();
	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}

/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return raw_read_seqcount_latch_retry(s, start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 */
static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/**
 * write_seqcount_latch_begin() - redirect latch readers to odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		write_seqcount_latch_begin(&latch->seq);
 *		modify(latch->data[0], ...);
 *		write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *		write_seqcount_latch_end(&latch->seq);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// This includes needed smp_rmb()
 *		} while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence, once it resumes it might
 *	observe the new entry.
 *
 * NOTE2:
 *
 *	When data is a dynamic data structure; one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
	kcsan_nestable_atomic_begin();
	raw_write_seqcount_latch(s);
}

/**
 * write_seqcount_latch() - redirect latch readers to even copy
 * @s: Pointer to seqcount_latch_t
 */
static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
{
	raw_write_seqcount_latch(s);
}

/**
 * write_seqcount_latch_end() - end a seqcount_latch_t write section
 * @s:		Pointer to seqcount_latch_t
 *
 * Marks the end of a seqcount_latch_t writer section, after all copies of the
 * latch-protected data have been updated.
 */
static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
{
	kcsan_nestable_atomic_end();
}

#define __SEQLOCK_UNLOCKED(lockname)					\
	{								\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)						\
	do {								\
		spin_lock_init(&(sl)->lock);				\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
	__acquires_shared(sl) __no_context_analysis
{
	return read_seqcount_begin(&sl->seqcount);
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
	__releases_shared(sl) __no_context_analysis
{
	return read_seqcount_retry(&sl->seqcount, start);
}

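/*
 * Example (a minimal sketch; "foo_lock" and the copied fields are
 * hypothetical): a lockless seqlock_t reader retry loop.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		// Make private copies of the protected data.
 *		a = foo_a;
 *		b = foo_b;
 *	} while (read_seqretry(&foo_lock, seq));
 */
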
/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
	__acquires(sl) __no_context_analysis
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
	__releases(sl) __no_context_analysis
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}

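/*
 * Example (a minimal sketch; "foo_lock" and the written fields are
 * hypothetical): the seqlock_t write side pairing with the reader loop
 * shown after read_seqretry() above.
 *
 *	write_seqlock(&foo_lock);
 *	foo_a = new_a;
 *	foo_b = new_b;
 *	write_sequnlock(&foo_lock);
 */
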
/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
	__acquires(sl) __no_context_analysis
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
	__releases(sl) __no_context_analysis
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
	__acquires(sl) __no_context_analysis
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
	__releases(sl) __no_context_analysis
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
	__acquires(sl) __no_context_analysis
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
	__releases(sl) __no_context_analysis
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
	__acquires_shared(sl) __no_context_analysis
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
	__releases_shared(sl) __no_context_analysis
{
	spin_unlock(&sl->lock);
}

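/*
 * Example (a minimal sketch; "foo_lock" is hypothetical): a locking
 * reader. It cannot starve and needs no retry loop, at the cost of
 * excluding writers and other locking readers.
 *
 *	read_seqlock_excl(&foo_lock);
 *	// Read the protected data; no retry is needed.
 *	read_sequnlock_excl(&foo_lock);
 */
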
/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *			    softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
	__acquires_shared(sl) __no_context_analysis
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *			      reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
	__releases_shared(sl) __no_context_analysis
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *			     reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
	__acquires_shared(sl) __no_context_analysis
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *			       locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
	__releases_shared(sl) __no_context_analysis
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
	__acquires_shared(sl) __no_context_analysis
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *				 locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *				      locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
	__releases_shared(sl) __no_context_analysis
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq : Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless readers starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs
 * to be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
	__acquires_shared(lock) __no_context_analysis
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
	__releases_shared(lock) __no_context_analysis
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
	__no_context_analysis
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

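/*
 * Example (a minimal sketch following the template referred to above, see
 * Documentation/locking/seqlock.rst; "foo_lock" is a hypothetical name).
 * The caller starts with an even @seq so a lockless pass is tried first.
 *
 *	int seq = 0;
 *
 *	do {
 *		read_seqbegin_or_lock(&foo_lock, &seq);
 *		// Read the protected data.
 *	} while (need_seqretry(&foo_lock, seq));
 *	done_seqretry(&foo_lock, seq);
 */
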
/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
	__acquires_shared(lock) __no_context_analysis
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *				non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *	   reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
	__no_context_analysis
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}

enum ss_state {
	ss_done = 0,
	ss_lock,
	ss_lock_irqsave,
	ss_lockless,
};

struct ss_tmp {
	enum ss_state	state;
	unsigned long	data;
	spinlock_t	*lock;
	spinlock_t	*lock_irqsave;
};

static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
	__no_context_analysis
{
	if (sst->lock)
		spin_unlock(sst->lock);
	if (sst->lock_irqsave)
		spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
}

extern void __scoped_seqlock_invalid_target(void);

#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000) || defined(CONFIG_KASAN)
/*
 * For some reason some GCC-8 architectures (nios2, alpha) have trouble
 * determining that the ss_done state is impossible in __scoped_seqlock_next()
 * below.
 *
 * Similarly KASAN is known to confuse compilers enough to break this. But we
 * don't care about code quality for KASAN builds anyway.
 */
static inline void __scoped_seqlock_bug(void) { }
#else
/*
 * Canary for compiler optimization -- if the compiler doesn't realize this is
 * an impossible state, it very likely generates sub-optimal code here.
 */
extern void __scoped_seqlock_bug(void);
#endif

static __always_inline void
__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
	__no_context_analysis
{
	switch (sst->state) {
	case ss_done:
		__scoped_seqlock_bug();
		return;

	case ss_lock:
	case ss_lock_irqsave:
		sst->state = ss_done;
		return;

	case ss_lockless:
		if (!read_seqretry(lock, sst->data)) {
			sst->state = ss_done;
			return;
		}
		break;
	}

	switch (target) {
	case ss_done:
		__scoped_seqlock_invalid_target();
		return;

	case ss_lock:
		sst->lock = &lock->lock;
		spin_lock(sst->lock);
		sst->state = ss_lock;
		return;

	case ss_lock_irqsave:
		sst->lock_irqsave = &lock->lock;
		spin_lock_irqsave(sst->lock_irqsave, sst->data);
		sst->state = ss_lock_irqsave;
		return;

	case ss_lockless:
		sst->data = read_seqbegin(lock);
		return;
	}
}

/*
 * Context analysis no-op helper to release seqlock at the end of the for-scope;
 * the alias analysis of the compiler will recognize that the pointer @s is an
 * alias to @_seqlock passed to read_seqbegin(_seqlock) below.
 */
static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
	__releases_shared(*((seqlock_t **)s)) __no_context_analysis {}

#define __scoped_seqlock_read(_seqlock, _target, _s)			\
	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =	\
	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) },	\
	     *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
	     (struct ss_tmp *)_seqlock;					\
	     _s.state != ss_done;					\
	     __scoped_seqlock_next(&_s, _seqlock, _target))

/**
 * scoped_seqlock_read() - execute the read-side critical section
 *			   without manual sequence counter handling
 *			   or calls to other helpers
 * @_seqlock: pointer to seqlock_t protecting the data
 * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
 *           indicating the type of critical read section
 *
 * Example::
 *
 *	scoped_seqlock_read (&lock, ss_lock) {
 *		// read-side critical section
 *	}
 *
 * Starts with a lockless pass first. If it fails, restarts the critical
 * section with the lock held.
 */
#define scoped_seqlock_read(_seqlock, _target)				\
	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))

DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)

#endif /* __LINUX_SEQLOCK_H */