xref: /freebsd/sys/compat/linuxkpi/common/include/linux/seqlock.h (revision 657729a89dd578d8cfc70d6616f5c65a48a8b33a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_SEQLOCK_H__
#define	_LINUXKPI_LINUX_SEQLOCK_H__

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/seqc.h>

#include <linux/mutex.h>

struct lock_class_key;

struct seqcount {
	seqc_t		seqc;
};
typedef struct seqcount seqcount_t;

struct seqlock {
	struct mtx	seql_lock;
	struct seqcount	seql_count;
};
typedef struct seqlock seqlock_t;

struct seqcount_mutex {
	struct mutex	*seqm_lock;
	struct seqcount	 seqm_count;
};
typedef struct seqcount_mutex seqcount_mutex_t;
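
/*
 * Three flavors mirror the Linux API: a bare seqcount_t whose writers
 * supply their own serialization, a seqlock_t that bundles the counter
 * with a native mtx(9) for writer exclusion, and a seqcount_mutex_t
 * that is associated with a caller-owned LinuxKPI struct mutex.
 */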

static inline void
__seqcount_init(struct seqcount *seqcount, const char *name __unused,
    struct lock_class_key *key __unused)
{
	seqcount->seqc = 0;
}
#define	seqcount_init(seqcount)	__seqcount_init(seqcount, NULL, NULL)

static inline void
seqcount_mutex_init(struct seqcount_mutex *seqcount, struct mutex *mutex)
{
	seqcount->seqm_lock = mutex;
	seqcount_init(&seqcount->seqm_count);
}

#define	write_seqcount_begin(s)						\
    _Generic(*(s),							\
	struct seqcount:	lkpi_write_seqcount_begin,		\
	struct seqcount_mutex:	lkpi_write_seqcount_mutex_begin		\
    )(s)

static inline void
lkpi_write_seqcount_begin(struct seqcount *seqcount)
{
	seqc_sleepable_write_begin(&seqcount->seqc);
}

static inline void
lkpi_write_seqcount_mutex_begin(struct seqcount_mutex *seqcount)
{
	mutex_lock(seqcount->seqm_lock);
	lkpi_write_seqcount_begin(&seqcount->seqm_count);
}

#define	write_seqcount_end(s)						\
    _Generic(*(s),							\
	struct seqcount:	lkpi_write_seqcount_end,		\
	struct seqcount_mutex:	lkpi_write_seqcount_mutex_end		\
    )(s)

static inline void
lkpi_write_seqcount_end(struct seqcount *seqcount)
{
	seqc_sleepable_write_end(&seqcount->seqc);
}

static inline void
lkpi_write_seqcount_mutex_end(struct seqcount_mutex *seqcount)
{
	lkpi_write_seqcount_end(&seqcount->seqm_count);
	mutex_unlock(seqcount->seqm_lock);
}
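
/*
 * The write_seqcount_begin()/write_seqcount_end() macros dispatch on
 * the operand type with C11 _Generic, so one spelling serializes
 * writers of both seqcount_t and seqcount_mutex_t.  A minimal writer
 * sketch; the stats structure and stats_mtx mutex are hypothetical:
 *
 *	static struct mutex stats_mtx;
 *	static seqcount_mutex_t stats_seq;
 *	static struct { uint64_t rx, tx; } stats;
 *
 *	mutex_init(&stats_mtx);
 *	seqcount_mutex_init(&stats_seq, &stats_mtx);
 *
 *	write_seqcount_begin(&stats_seq);	(acquires stats_mtx)
 *	stats.rx++;
 *	write_seqcount_end(&stats_seq);		(releases stats_mtx)
 */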

#define	read_seqcount_begin(s)						\
    _Generic(*(s),							\
	struct seqcount:	lkpi_read_seqcount_begin,		\
	struct seqcount_mutex:	lkpi_read_seqcount_mutex_begin		\
    )(s)

static inline unsigned
lkpi_read_seqcount_begin(const struct seqcount *seqcount)
{
	return (seqc_read(&seqcount->seqc));
}

static inline unsigned
lkpi_read_seqcount_mutex_begin(const struct seqcount_mutex *seqcount)
{
	return (lkpi_read_seqcount_begin(&seqcount->seqm_count));
}

static inline unsigned
raw_read_seqcount(const struct seqcount *seqcount)
{
	return (seqc_read_any(&seqcount->seqc));
}
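
/*
 * Unlike read_seqcount_begin(), which spins until no write is in
 * progress (seqc_read()), raw_read_seqcount() returns the current
 * generation unconditionally (seqc_read_any()) and so may hand back
 * an odd, in-flight value.
 */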

/*
 * XXX: Are branch predictions from inline functions still not honored
 * by clang?
 */
#define	__read_seqcount_retry(seqcount, gen)	\
	(!seqc_consistent_no_fence(&(seqcount)->seqc, gen))
#define	read_seqcount_retry(s, old)					\
    _Generic(*(s),							\
	struct seqcount:	lkpi_read_seqcount_retry,		\
	struct seqcount_mutex:	lkpi_read_seqcount_mutex_retry		\
    )(s, old)

static inline int
lkpi_read_seqcount_retry(
    const struct seqcount *seqcount, unsigned int old)
{
	return (!seqc_consistent(&seqcount->seqc, old));
}

static inline int
lkpi_read_seqcount_mutex_retry(
    const struct seqcount_mutex *seqcount, unsigned int old)
{
	return (!seqc_consistent(&seqcount->seqm_count.seqc, old));
}
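
/*
 * Readers are lockless: sample the generation, copy the protected
 * data, and retry if a writer ran concurrently.  A minimal reader
 * sketch against the hypothetical stats example above:
 *
 *	unsigned seq;
 *	uint64_t rx;
 *
 *	do {
 *		seq = read_seqcount_begin(&stats_seq);
 *		rx = stats.rx;
 *	} while (read_seqcount_retry(&stats_seq, seq));
 */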

static inline void
seqlock_init(struct seqlock *seqlock)
{
	/*
	 * Don't enroll in witness(4) to avoid orphaned references after
	 * the struct seqlock has been freed.  No seqlock destructor
	 * exists, so we can't expect mtx_destroy() to run automatically
	 * before free().
	 */
	mtx_init(&seqlock->seql_lock, "seqlock", NULL, MTX_DEF|MTX_NOWITNESS);
	seqcount_init(&seqlock->seql_count);
}

static inline void
lkpi_write_seqlock(struct seqlock *seqlock, const bool irqsave)
{
	mtx_lock(&seqlock->seql_lock);
	if (irqsave)
		critical_enter();
	write_seqcount_begin(&seqlock->seql_count);
}

static inline void
write_seqlock(struct seqlock *seqlock)
{
	lkpi_write_seqlock(seqlock, false);
}

static inline void
lkpi_write_sequnlock(struct seqlock *seqlock, const bool irqsave)
{
	write_seqcount_end(&seqlock->seql_count);
	if (irqsave)
		critical_exit();
	mtx_unlock(&seqlock->seql_lock);
}

static inline void
write_sequnlock(struct seqlock *seqlock)
{
	lkpi_write_sequnlock(seqlock, false);
}
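
/*
 * A seqlock_t writer needs no external lock; the embedded mutex
 * provides exclusion.  Sketch (the time_lock name is illustrative):
 *
 *	static seqlock_t time_lock;	(set up with seqlock_init())
 *
 *	write_seqlock(&time_lock);
 *	... update the protected data ...
 *	write_sequnlock(&time_lock);
 */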

/*
 * Disable preemption when the consumer wants to disable interrupts.  This
 * ensures that the caller won't be starved if it is preempted by a
 * higher-priority reader, but assumes that the caller won't perform any
 * blocking operations while holding the write lock; probably a safe
 * assumption.
 */
#define	write_seqlock_irqsave(seqlock, flags)	do {	\
	(flags) = 0;					\
	lkpi_write_seqlock(seqlock, true);		\
} while (0)

static inline void
write_sequnlock_irqrestore(struct seqlock *seqlock,
    unsigned long flags __unused)
{
	lkpi_write_sequnlock(seqlock, true);
}
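
/*
 * The flags argument exists only for Linux API compatibility: no
 * interrupt state is saved here, and a critical section stands in for
 * local_irq_save().  Sketch:
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&time_lock, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&time_lock, flags);
 */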

static inline unsigned
read_seqbegin(const struct seqlock *seqlock)
{
	return (read_seqcount_begin(&seqlock->seql_count));
}

#define	read_seqretry(seqlock, gen)	\
	read_seqcount_retry(&(seqlock)->seql_count, gen)
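
/*
 * Readers of a seqlock_t never touch the embedded mutex; they retry on
 * the counter exactly like bare seqcount readers:
 *
 *	unsigned gen;
 *
 *	do {
 *		gen = read_seqbegin(&time_lock);
 *		... copy the protected data ...
 *	} while (read_seqretry(&time_lock, gen));
 */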

#endif	/* _LINUXKPI_LINUX_SEQLOCK_H__ */