/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_SEQLOCK_H__
#define _LINUXKPI_LINUX_SEQLOCK_H__

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/seqc.h>

struct lock_class_key;

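/*
 * Three seqcount flavors are provided below.  The plain seqcount_t is
 * dispatched to the sleepable seqc(9) write routines by the _Generic()
 * macros further down, while the mutex- and ww_mutex-backed flavors use
 * the plain seqc(9) write routines.
 */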
struct seqcount {
	seqc_t		seqc;
};
typedef struct seqcount seqcount_t;

struct seqlock {
	struct mtx	seql_lock;
	struct seqcount	seql_count;
};
typedef struct seqlock seqlock_t;

struct seqcount_mutex {
	seqc_t		seqc;
};
typedef struct seqcount_mutex seqcount_mutex_t;
typedef struct seqcount_mutex seqcount_ww_mutex_t;

static inline void
__seqcount_init(struct seqcount *seqcount, const char *name __unused,
    struct lock_class_key *key __unused)
{
	seqcount->seqc = 0;
}
#define	seqcount_init(seqcount)	__seqcount_init(seqcount, NULL, NULL)

static inline void
seqcount_mutex_init(struct seqcount_mutex *seqcount, void *mutex __unused)
{
	seqcount->seqc = 0;
}

#define	seqcount_ww_mutex_init(seqcount, ww_mutex) \
	seqcount_mutex_init((seqcount), (ww_mutex))

#define	write_seqcount_begin(s) \
	_Generic(*(s), \
	    struct seqcount: seqc_sleepable_write_begin, \
	    struct seqcount_mutex: seqc_write_begin \
	)(&(s)->seqc)

#define	write_seqcount_end(s) \
	_Generic(*(s), \
	    struct seqcount: seqc_sleepable_write_end, \
	    struct seqcount_mutex: seqc_write_end \
	)(&(s)->seqc)
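
/*
 * Illustrative writer-side usage; a sketch only, with a hypothetical
 * mutex-protected structure.  A write section must be entered with the
 * backing lock held:
 *
 *	struct foo {
 *		struct mtx		lock;
 *		seqcount_mutex_t	seqc;
 *		int			a, b;
 *	};
 *
 *	mtx_lock(&foo->lock);
 *	write_seqcount_begin(&foo->seqc);
 *	foo->a = 1;
 *	foo->b = 2;
 *	write_seqcount_end(&foo->seqc);
 *	mtx_unlock(&foo->lock);
 */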

static inline void
lkpi_write_seqcount_invalidate(seqc_t *seqcp)
{
	atomic_thread_fence_rel();
	*seqcp += SEQC_MOD * 2;
}
#define	write_seqcount_invalidate(s)	lkpi_write_seqcount_invalidate(&(s)->seqc)

#define	read_seqcount_begin(s)	seqc_read(&(s)->seqc)
#define	raw_read_seqcount(s)	seqc_read_any(&(s)->seqc)

static inline seqc_t
lkpi_seqprop_sequence(const seqc_t *seqcp)
{
	return (atomic_load_int(__DECONST(seqc_t *, seqcp)));
}
#define	seqprop_sequence(s)	lkpi_seqprop_sequence(&(s)->seqc)
/*
 * XXX: Are branch predictions (__predict_*) from inline functions still
 * not honored by clang?
 */
#define	__read_seqcount_retry(seqcount, gen) \
	(!seqc_consistent_no_fence(&(seqcount)->seqc, gen))
#define	read_seqcount_retry(seqcount, gen) \
	(!seqc_consistent(&(seqcount)->seqc, gen))
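
/*
 * Illustrative lockless reader; a sketch only, reading the hypothetical
 * struct foo from the writer example above.  The copy is retried until a
 * generation is observed that no writer invalidated mid-read:
 *
 *	unsigned gen;
 *	int a, b;
 *
 *	do {
 *		gen = read_seqcount_begin(&foo->seqc);
 *		a = foo->a;
 *		b = foo->b;
 *	} while (read_seqcount_retry(&foo->seqc, gen));
 */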

static inline void
seqlock_init(struct seqlock *seqlock)
{
	/*
	 * Don't enroll to witness(4) to avoid orphaned references after the
	 * struct seqlock has been freed.  No seqlock destructor exists, so
	 * we can't expect automatic mtx_destroy() execution before free().
	 */
	mtx_init(&seqlock->seql_lock, "seqlock", NULL, MTX_DEF|MTX_NOWITNESS);
	seqcount_init(&seqlock->seql_count);
}

static inline void
lkpi_write_seqlock(struct seqlock *seqlock, const bool irqsave)
{
	mtx_lock(&seqlock->seql_lock);
	if (irqsave)
		critical_enter();
	write_seqcount_begin(&seqlock->seql_count);
}

static inline void
write_seqlock(struct seqlock *seqlock)
{
	lkpi_write_seqlock(seqlock, false);
}

static inline void
lkpi_write_sequnlock(struct seqlock *seqlock, const bool irqsave)
{
	write_seqcount_end(&seqlock->seql_count);
	if (irqsave)
		critical_exit();
	mtx_unlock(&seqlock->seql_lock);
}

static inline void
write_sequnlock(struct seqlock *seqlock)
{
	lkpi_write_sequnlock(seqlock, false);
}

/*
 * Disable preemption when the consumer wants to disable interrupts.  This
 * ensures that the caller won't be starved if it is preempted by a
 * higher-priority reader, but assumes that the caller won't perform any
 * blocking operations while holding the write lock; probably a safe
 * assumption.
 */
#define	write_seqlock_irqsave(seqlock, flags) do { \
	(flags) = 0; \
	lkpi_write_seqlock(seqlock, true); \
} while (0)

static inline void
write_sequnlock_irqrestore(struct seqlock *seqlock,
    unsigned long flags __unused)
{
	lkpi_write_sequnlock(seqlock, true);
}
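
/*
 * Illustrative seqlock writer; a sketch only.  The flags argument is
 * accepted for Linux API compatibility but carries no state here, as
 * write_seqlock_irqsave() always sets it to zero:
 *
 *	seqlock_t lock;			initialized with seqlock_init(&lock)
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&lock, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&lock, flags);
 */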

static inline unsigned
read_seqbegin(const struct seqlock *seqlock)
{
	return (read_seqcount_begin(&seqlock->seql_count));
}

#define	read_seqretry(seqlock, gen) \
	read_seqcount_retry(&(seqlock)->seql_count, gen)
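
/*
 * Illustrative seqlock reader; a sketch only, paired with the writer
 * example above.  Readers never block; they retry whenever a write
 * section overlapped the read:
 *
 *	unsigned gen;
 *
 *	do {
 *		gen = read_seqbegin(&lock);
 *		... copy the protected data ...
 *	} while (read_seqretry(&lock, gen));
 */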

#endif	/* _LINUXKPI_LINUX_SEQLOCK_H__ */