xref: /freebsd/sys/compat/linuxkpi/common/include/linux/seqlock.h (revision 271171e0d97b88ba2a7c3bf750c9672b484c1c13)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in
13  *    the documentation and/or other materials provided with the
14  *    distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #ifndef _LINUXKPI_LINUX_SEQLOCK_H__
30 #define	_LINUXKPI_LINUX_SEQLOCK_H__
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/seqc.h>
37 
38 struct lock_class_key;
39 
/*
 * Emulation of Linux seqcount_t, backed by a FreeBSD seqc(9) sequence
 * counter.  Readers sample the counter, read the protected data, then
 * check the counter again to detect a concurrent write.
 */
struct seqcount {
	seqc_t		seqc;
};
typedef struct seqcount seqcount_t;
44 
/*
 * Emulation of Linux seqlock_t: a mutex serializing writers, paired
 * with a sequence counter that lets lock-free readers detect and retry
 * around concurrent updates.
 */
struct seqlock {
	struct mtx	seql_lock;	/* serializes writers */
	struct seqcount	seql_count;	/* read-retry generation counter */
};
typedef struct seqlock seqlock_t;
50 
/*
 * Initialize a seqcount to generation zero (no write in progress).
 * The name and lockdep class key accepted by the Linux API have no
 * FreeBSD counterpart and are ignored.
 */
static inline void
__seqcount_init(struct seqcount *seqcount, const char *name __unused,
    struct lock_class_key *key __unused)
{
	seqcount->seqc = 0;
}
#define	seqcount_init(seqcount)	__seqcount_init(seqcount, NULL, NULL)
58 
/*
 * Enter a write section on the counter.  The sleepable seqc(9) write
 * variant is used, as linuxkpi consumers may sleep while the write
 * section is open.  Callers must serialize writers externally.
 */
static inline void
write_seqcount_begin(struct seqcount *seqcount)
{
	seqc_sleepable_write_begin(&seqcount->seqc);
}
64 
/*
 * Close a write section opened by write_seqcount_begin(), publishing
 * the update so that in-flight readers will retry.
 */
static inline void
write_seqcount_end(struct seqcount *seqcount)
{
	seqc_sleepable_write_end(&seqcount->seqc);
}
70 
/*
 * Return true if the reader must retry because the counter moved since
 * `gen` was sampled.  The "__" variant maps to the no-fence seqc(9)
 * check, so ordering of the preceding data reads is the caller's
 * responsibility.
 *
 * These stay macros rather than inline functions:
 * XXX: Are predicts from inline functions still not honored by clang?
 */
#define	__read_seqcount_retry(seqcount, gen)	\
	(!seqc_consistent_no_fence(&(seqcount)->seqc, gen))
#define	read_seqcount_retry(seqcount, gen)	\
	(!seqc_consistent(&(seqcount)->seqc, gen))
78 
/*
 * Open a read section: sample the counter and return the generation to
 * be passed to read_seqcount_retry().  seqc_read() waits out any write
 * in progress before returning — see seqc(9).
 */
static inline unsigned
read_seqcount_begin(const struct seqcount *seqcount)
{
	return (seqc_read(&seqcount->seqc));
}
84 
/*
 * Sample the counter without waiting for writers: the "_any" seqc(9)
 * variant may return a generation taken while a write is in progress,
 * in which case a later retry check will fail — see seqc(9).
 */
static inline unsigned
raw_read_seqcount(const struct seqcount *seqcount)
{
	return (seqc_read_any(&seqcount->seqc));
}
90 
/*
 * Initialize a seqlock: a plain (non-witness) mutex for writers plus a
 * zeroed sequence counter for readers.
 */
static inline void
seqlock_init(struct seqlock *seqlock)
{
	/*
	 * Don't enroll to witness(4) to avoid orphaned references after
	 * struct seqlock has been freed.  No seqlock destructor exists, so
	 * we can't expect automatic mtx_destroy() execution before free().
	 */
	mtx_init(&seqlock->seql_lock, "seqlock", NULL, MTX_DEF|MTX_NOWITNESS);
	seqcount_init(&seqlock->seql_count);
}
102 
/*
 * Common writer-side acquire: take the writer mutex, then open the
 * write section.  When `irqsave` is set (the *_irqsave flavor, see the
 * comment above write_seqlock_irqsave()), preemption is additionally
 * disabled for the duration of the write section.
 */
static inline void
lkpi_write_seqlock(struct seqlock *seqlock, const bool irqsave)
{
	mtx_lock(&seqlock->seql_lock);
	if (irqsave)
		critical_enter();
	write_seqcount_begin(&seqlock->seql_count);
}
111 
/*
 * Linux write_seqlock(): acquire the writer side without the
 * interrupt-save semantics.
 */
static inline void
write_seqlock(struct seqlock *seqlock)
{
	lkpi_write_seqlock(seqlock, false);
}
117 
/*
 * Common writer-side release, mirroring lkpi_write_seqlock() in exact
 * reverse order: close the write section, leave the critical section if
 * one was entered, then drop the writer mutex.  `irqsave` must match
 * the value passed at acquire time.
 */
static inline void
lkpi_write_sequnlock(struct seqlock *seqlock, const bool irqsave)
{
	write_seqcount_end(&seqlock->seql_count);
	if (irqsave)
		critical_exit();
	mtx_unlock(&seqlock->seql_lock);
}
126 
/*
 * Linux write_sequnlock(): release the writer side acquired with
 * write_seqlock().
 */
static inline void
write_sequnlock(struct seqlock *seqlock)
{
	lkpi_write_sequnlock(seqlock, false);
}
132 
/*
 * Disable preemption when the consumer wants to disable interrupts.  This
 * ensures that the caller won't be starved if it is preempted by a
 * higher-priority reader, but assumes that the caller won't perform any
 * blocking operations while holding the write lock; probably a safe
 * assumption.
 *
 * `flags` carries no state on FreeBSD; it is zeroed only so that the
 * variable is initialized for the later *_irqrestore() call.
 */
#define	write_seqlock_irqsave(seqlock, flags)	do {	\
	(flags) = 0;					\
	lkpi_write_seqlock(seqlock, true);		\
} while (0)
144 
/*
 * Counterpart of write_seqlock_irqsave().  The saved `flags` value is
 * meaningless here and is ignored.
 */
static inline void
write_sequnlock_irqrestore(struct seqlock *seqlock,
    unsigned long flags __unused)
{
	lkpi_write_sequnlock(seqlock, true);
}
151 
/*
 * Linux read_seqbegin(): open a lockless read section on a seqlock and
 * return the generation to be checked with read_seqretry().
 */
static inline unsigned
read_seqbegin(const struct seqlock *seqlock)
{
	return (read_seqcount_begin(&seqlock->seql_count));
}
157 
/*
 * Linux read_seqretry(): true if the section opened by read_seqbegin()
 * raced with a writer and must be re-executed.
 */
#define	read_seqretry(seqlock, gen)	\
	read_seqcount_retry(&(seqlock)->seql_count, gen)
160 
161 #endif	/* _LINUXKPI_LINUX_SEQLOCK_H__ */
162