/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_SEQLOCK_H__
#define	_LINUXKPI_LINUX_SEQLOCK_H__

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/seqc.h>

struct lock_class_key;

struct seqcount {
	seqc_t		seqc;
};
typedef struct seqcount seqcount_t;

struct seqlock {
	struct mtx	seql_lock;
	struct seqcount	seql_count;
};
typedef struct seqlock seqlock_t;

struct seqcount_mutex {
	seqc_t		seqc;
};
typedef struct seqcount_mutex seqcount_mutex_t;

static inline void
__seqcount_init(struct seqcount *seqcount, const char *name __unused,
    struct lock_class_key *key __unused)
{
	seqcount->seqc = 0;
}
#define	seqcount_init(seqcount)	__seqcount_init(seqcount, NULL, NULL)

static inline void
seqcount_mutex_init(struct seqcount_mutex *seqcount, void *mutex __unused)
{
	seqcount->seqc = 0;
}
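
/*
 * Hypothetical initialization sketch; the variable names and the mutex
 * below are illustrative, not part of this header.  A seqcount_mutex is
 * associated with the mutex that serializes its writers, although this
 * implementation ignores the mutex argument:
 *
 *	static struct seqcount state_seqc;
 *	static struct seqcount_mutex state_mseqc;
 *
 *	seqcount_init(&state_seqc);
 *	seqcount_mutex_init(&state_mseqc, &some_mutex);
 */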

#define	write_seqcount_begin(s)						\
    _Generic(*(s),							\
	struct seqcount:	seqc_sleepable_write_begin,		\
	struct seqcount_mutex:	seqc_write_begin			\
    )(&(s)->seqc)

#define	write_seqcount_end(s)						\
    _Generic(*(s),							\
	struct seqcount:	seqc_sleepable_write_end,		\
	struct seqcount_mutex:	seqc_write_end				\
    )(&(s)->seqc)
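
/*
 * Hypothetical writer-side sketch; the lock and data fields below are
 * illustrative.  Writers must serialize externally (e.g. with a mutex)
 * before bumping the sequence:
 *
 *	mtx_lock(&state_lock);
 *	write_seqcount_begin(&state_seqc);
 *	state_x = new_x;
 *	state_y = new_y;
 *	write_seqcount_end(&state_seqc);
 *	mtx_unlock(&state_lock);
 */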

#define	read_seqcount_begin(s)	seqc_read(&(s)->seqc)
#define	raw_read_seqcount(s)	seqc_read_any(&(s)->seqc)

/*
 * XXX: Does clang still fail to honor branch-prediction hints
 * (__predict_*()) when they come from inline functions?
 */
#define	__read_seqcount_retry(seqcount, gen)	\
	(!seqc_consistent_no_fence(&(seqcount)->seqc, gen))
#define	read_seqcount_retry(seqcount, gen)	\
	(!seqc_consistent(&(seqcount)->seqc, gen))
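
/*
 * Hypothetical reader-side sketch; the protected fields are illustrative.
 * Retry the lockless read until the generation number is unchanged, i.e.
 * no writer raced with us:
 *
 *	unsigned gen;
 *
 *	do {
 *		gen = read_seqcount_begin(&state_seqc);
 *		snap_x = state_x;
 *		snap_y = state_y;
 *	} while (read_seqcount_retry(&state_seqc, gen));
 */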

static inline void
seqlock_init(struct seqlock *seqlock)
{
	/*
	 * Don't enroll into witness(4) to avoid orphaned references after
	 * struct seqlock has been freed.  No seqlock destructor exists, so
	 * we can't expect mtx_destroy() to run automatically before free().
	 */
	mtx_init(&seqlock->seql_lock, "seqlock", NULL, MTX_DEF|MTX_NOWITNESS);
	seqcount_init(&seqlock->seql_count);
}

static inline void
lkpi_write_seqlock(struct seqlock *seqlock, const bool irqsave)
{
	mtx_lock(&seqlock->seql_lock);
	if (irqsave)
		critical_enter();
	write_seqcount_begin(&seqlock->seql_count);
}

static inline void
write_seqlock(struct seqlock *seqlock)
{
	lkpi_write_seqlock(seqlock, false);
}

static inline void
lkpi_write_sequnlock(struct seqlock *seqlock, const bool irqsave)
{
	write_seqcount_end(&seqlock->seql_count);
	if (irqsave)
		critical_exit();
	mtx_unlock(&seqlock->seql_lock);
}

static inline void
write_sequnlock(struct seqlock *seqlock)
{
	lkpi_write_sequnlock(seqlock, false);
}
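
/*
 * Hypothetical writer usage sketch; the updated field is illustrative.
 * Unlike a bare seqcount, the seqlock provides its own writer
 * serialization, so no external lock is needed:
 *
 *	write_seqlock(&state_lock);
 *	state_x = new_x;
 *	write_sequnlock(&state_lock);
 */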

/*
 * Disable preemption when the consumer wants to disable interrupts.  This
 * ensures that the caller won't be starved if it is preempted by a
 * higher-priority reader, but assumes that the caller won't perform any
 * blocking operations while holding the write lock; probably a safe
 * assumption.
 */
#define	write_seqlock_irqsave(seqlock, flags)	do {	\
	(flags) = 0;					\
	lkpi_write_seqlock(seqlock, true);		\
} while (0)

static inline void
write_sequnlock_irqrestore(struct seqlock *seqlock,
    unsigned long flags __unused)
{
	lkpi_write_sequnlock(seqlock, true);
}
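
/*
 * Hypothetical irqsave usage sketch; the lock and field are illustrative.
 * Linux callers pass a flags variable, which this implementation only
 * zeroes and otherwise ignores; the critical section inside
 * lkpi_write_seqlock() substitutes for interrupt disabling:
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&state_lock, flags);
 *	state_x = new_x;
 *	write_sequnlock_irqrestore(&state_lock, flags);
 */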

static inline unsigned
read_seqbegin(const struct seqlock *seqlock)
{
	return (read_seqcount_begin(&seqlock->seql_count));
}

#define	read_seqretry(seqlock, gen)	\
	read_seqcount_retry(&(seqlock)->seql_count, gen)
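
/*
 * Hypothetical seqlock reader sketch; the field is illustrative.  Readers
 * never block writers and simply retry if a write raced with them:
 *
 *	unsigned gen;
 *
 *	do {
 *		gen = read_seqbegin(&state_lock);
 *		snap_x = state_x;
 *	} while (read_seqretry(&state_lock, gen));
 */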

#endif	/* _LINUXKPI_LINUX_SEQLOCK_H__ */