
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */
#include <linux/kcsan-checks.h>
/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic.
 */
	/* __seqcount_init(): make sure we are not reinitializing a held lock */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
/**
 * seqcount_init() - runtime initializer for seqcount_t
 */
	/* seqcount_lockdep_reader_access() */
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 */
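/*
 * Example (a minimal sketch; "stats_seq" and "struct foo" are made-up
 * names, not from this header): static vs. runtime initialization::
 *
 *	static seqcount_t stats_seq = SEQCNT_ZERO(stats_seq);
 *
 *	struct foo {
 *		seqcount_t seq;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		seqcount_init(&f->seq);
 *	}
 */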
/*
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 */
/**
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock; the association enables lockdep to validate
 * that the write side critical section is properly serialized.
 */
/**
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 */
	seqcount_init(&____s->seqcount);			\
	__SEQ_LOCK(____s->lock = (_lock));			\
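/*
 * Example (a minimal sketch; "struct foo" is a made-up name): associating
 * a seqcount with the spinlock that serializes its writers::
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		seqcount_spinlock_init(&f->seq, &f->lock);
 *	}
 */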
/**
 * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
 */
	return &s->seqcount;					\

	return &s->seqcount;					\

	unsigned seq = smp_load_acquire(&s->seqcount.sequence);	\

	__SEQ_LOCK(lockbase##_lock(s->lock));			\
	__SEQ_LOCK(lockbase##_unlock(s->lock));			\
	/*							\
	 * Re-read the sequence counter since the (possibly	\
	 * preempting) writer made progress.			\
	 */							\
	seq = smp_load_acquire(&s->seqcount.sequence);		\

	__SEQ_LOCK(lockdep_assert_held(s->lock));		\
	return smp_load_acquire(&s->sequence);	/* __seqprop_sequence() */
/**
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 */
/**
 * __read_seqcount_begin() - begin a seqcount_t read section
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * read_seqcount_begin() - begin a seqcount_t read critical section
 */
/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without checking or masking the sequence counter LSB;
 * calling code is responsible for handling that.
 */
/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the retry check at the end of the read section instead of
 * stabilizing the count at the beginning.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 */
/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 *
 * Return: true if a read section retry is required, else false
 */
	return unlikely(READ_ONCE(s->sequence) != start);
/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 *
 * Return: true if a read section retry is required, else false
 */
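/*
 * Example (a minimal sketch; "f" and its fields are made-up): the
 * canonical lockless read loop over a seqcount_t::
 *
 *	unsigned int seq;
 *	u64 a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&f->seq);
 *		a = f->val_a;
 *		b = f->val_b;
 *	} while (read_seqcount_retry(&f->seq, seq));
 */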
/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 */
	s->sequence++;	/* do_raw_write_seqcount_begin() */
/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 */
	s->sequence++;	/* do_raw_write_seqcount_end() */
/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep @subclass value
 *
 * See Documentation/locking/lockdep-design.rst
 */
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);	/* do_write_seqcount_begin_nested() */
/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible.
 */
/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
	seqcount_release(&s->dep_map, _RET_IP_);	/* do_write_seqcount_end() */
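/*
 * Example (a minimal sketch, continuing the made-up "f" structure): a
 * properly serialized write side section::
 *
 *	spin_lock(&f->lock);		// external writer serialization
 *	write_seqcount_begin(&f->seq);
 *	f->val_a = new_a;
 *	f->val_b = new_b;
 *	write_seqcount_end(&f->seq);
 *	spin_unlock(&f->lock);
 */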
/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 *
 * This provides an ordering guarantee instead of the usual consistency
 * guarantee; it is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, and b) to document which writes are meant to propagate to the
 * reader critical section. This is necessary because
 * neither writes before nor after the barrier are enclosed in a seq-writer
 * critical section that would ensure readers are aware of ongoing writes.
 */
	/* do_raw_write_seqcount_barrier() */
	s->sequence++;
	smp_wmb();
	s->sequence++;
/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
	s->sequence += 2;	/* do_write_seqcount_invalidate() */
/*
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 */
/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 *
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 */
	/* Due to the dependent load, a full smp_rmb() is not needed. */
	return READ_ONCE(s->seqcount.sequence);
/**
 * read_seqcount_latch() - pick even/odd latch data copy
 *
 * See write_seqcount_latch() for details and a full reader/writer usage
 * example.
 */
/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 *
 * Return: true if a read section retry is required, else false
 */
	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 *
 * Return: true if a read section retry is required, else false
 */
/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 */
	s->seqcount.sequence++;
/**
 * write_seqcount_latch_begin() - redirect latch readers to odd copy
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where lockless data structures rely on atomic modifications to ensure
 * queries observe either the old or the new state, the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage: we must maintain two copies of the entire data structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	write_seqcount_latch_begin(&latch->seq);
 *	modify(latch->data[0], ...);
 *	write_seqcount_latch(&latch->seq);
 *	modify(latch->data[1], ...);
 *	write_seqcount_latch_end(&latch->seq);
 *
 * The query will have a form like::
 *
 *	do {
 *		seq = read_seqcount_latch(&latch->seq);
 *		idx = seq & 0x01;
 *		entry = data_query(latch->data[idx], ...);
 *	} while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 * NOTE:
 *
 * The non-requirement for atomic modifications does _NOT_ include
 * the publishing of new entries in the case where data is a dynamic
 * data structure.
 *
 * When data is a dynamic data structure, one should use regular RCU
 * patterns to manage the lifetimes of the objects within.
 */
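/*
 * Concrete sketch of the forms above ("latched_u64" is a made-up type,
 * not part of this header): a latched 64-bit value that NMI-context
 * readers can query safely::
 *
 *	struct latched_u64 {
 *		seqcount_latch_t	seq;
 *		u64			val[2];
 *	};
 *
 *	static void latched_u64_update(struct latched_u64 *l, u64 v)
 *	{
 *		write_seqcount_latch_begin(&l->seq);
 *		l->val[0] = v;
 *		write_seqcount_latch(&l->seq);
 *		l->val[1] = v;
 *		write_seqcount_latch_end(&l->seq);
 *	}
 *
 *	static u64 latched_u64_read(struct latched_u64 *l)
 *	{
 *		unsigned int seq;
 *		u64 v;
 *
 *		do {
 *			seq = read_seqcount_latch(&l->seq);
 *			v = READ_ONCE(l->val[seq & 0x01]);
 *		} while (read_seqcount_latch_retry(&l->seq, seq));
 *
 *		return v;
 *	}
 */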
/**
 * write_seqcount_latch() - redirect latch readers to even copy
 *
 * write_seqcount_latch_end() - end a seqcount_latch_t write section
 *
 * Marks the end of a seqcount_latch_t writer section, after all copies of the
 * latch-protected data have been updated.
 */
/**
 * seqlock_init() - dynamic initializer for seqlock_t
 */
	spin_lock_init(&(sl)->lock);				\
	seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 */
/**
 * read_seqbegin() - start a seqlock_t read side critical section
 */
	return read_seqcount_begin(&sl->seqcount);
/**
 * read_seqretry() - end a seqlock_t read side section
 *
 * read_seqretry closes the read side critical section of the given
 * seqlock_t.
 *
 * Return: true if a read section retry is required, else false
 */
	return read_seqcount_retry(&sl->seqcount, start);
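/*
 * Example (a minimal sketch; "sl" and the copied data are made-up): a
 * seqlock_t lockless reader::
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&sl);
 *		// copy the protected data
 *	} while (read_seqretry(&sl, seq));
 */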
/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of the generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */
/**
 * write_seqlock() - start a seqlock_t write side critical section
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants instead.
 */
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
/**
 * write_sequnlock() - end a seqlock_t write side critical section
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of the given seqlock_t.
 */
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
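/*
 * Example (a minimal sketch; "sl" and the updated data are made-up): the
 * matching seqlock_t writer, fully serialized by the embedded lock::
 *
 *	write_seqlock(&sl);
 *	// update the protected data
 *	write_sequnlock(&sl);
 */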
/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq context.
 */
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
	/* __write_seqlock_irqsave() */
	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section opened with write_seqlock_irqsave().
 */
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
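/*
 * Example (a minimal sketch; "sl" is made-up): the _irqsave writer pair,
 * with @flags holding the caller's saved local interrupt state::
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&sl, flags);
 *	// update the protected data
 *	write_sequnlock_irqrestore(&sl, flags);
 */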
/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 */
	spin_lock(&sl->lock);
/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 */
	spin_unlock(&sl->lock);
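/*
 * Example (a minimal sketch; "sl" is made-up): a locking reader needs no
 * retry loop, at the cost of excluding writers and other locking readers::
 *
 *	read_seqlock_excl(&sl);
 *	// read the protected data, lock held
 *	read_sequnlock_excl(&sl);
 */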
/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
	spin_lock_bh(&sl->lock);
/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 */
	spin_unlock_bh(&sl->lock);
/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
	spin_lock_irq(&sl->lock);
/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 */
	spin_unlock_irq(&sl->lock);
	spin_lock_irqsave(&sl->lock, flags);	/* __read_seqlock_excl_irqsave() */
/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 */
	spin_unlock_irqrestore(&sl->lock, flags);
/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @seq: Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial fails, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid lockless readers starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(); if a retry is required, it
 * must be passed back as the @seq parameter of the next iteration.
 */
/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 *
 * Return: true if a read section retry is required, false otherwise
 */
/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
1140 * a non-interruptible locking reader
1152 * 1. The saved local interrupts state in case of a locking reader, to
1156 * overloaded as a return parameter. Check read_seqbegin_or_lock().
/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 */