// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}

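/*
 * Generate a common set of tests for a spinlock-like lock class: a struct
 * with members guarded by the lock, an init helper, lock/unlock sections
 * (including the _irq, _bh and _irqsave/_irqrestore variants), trylock,
 * a lockdep assertion, and guard() scopes. @op is applied to the guarded
 * members and selects read-modify-write vs. read-only accesses.
 */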
#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++; /* the pointer itself is not guarded, only the pointee */	\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

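/* @op arguments for TEST_SPINLOCK_COMMON: read-modify-write vs. read-only access. */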
#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

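/* Reader side of the rwlock: only read-only accesses to the guarded data. */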
TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

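/*
 * Mutex tests: plain and io lock sections, interruptible/killable/trylock
 * and atomic_dec_and_mutex_lock acquisitions, lockdep assertion, and both
 * guard() and scoped_cond_guard() scopes.
 */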
struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}

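/*
 * Seqlock tests: the reader uses the read_seqbegin()/read_seqretry() retry
 * loop, the writer takes write_seqlock() and its _irq/_bh/_irqsave variants,
 * and scoped_seqlock_read() covers the scoped reader form.
 */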
struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

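/*
 * Bit spinlock tests: the counter is guarded by the fake __bitlock(3, &bits)
 * token; see the note in test_bit_spin_lock() below.
 */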
struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note: the analysis seems to have false negatives here, because it
	 * won't precisely recognize which bit of the fake __bitlock() token
	 * is held.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}