xref: /linux/lib/test_context-analysis.c (revision f0b7ce22d71810c8c11abcd912fbd6f57c2e9677)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Compile-only tests for common patterns that should not generate false
4  * positive errors when compiled with Clang's context analysis.
5  */
6 
7 #include <linux/bit_spinlock.h>
8 #include <linux/build_bug.h>
9 #include <linux/mutex.h>
10 #include <linux/rcupdate.h>
11 #include <linux/seqlock.h>
12 #include <linux/spinlock.h>
13 #include <linux/srcu.h>
14 
/*
 * Test that helper macros work as expected: context_unsafe() must behave as a
 * transparent pass-through for expressions and statements, without altering
 * the value or swallowing separators.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
25 
/*
 * Instantiate compile-only tests for a spinlock-like lock:
 *
 * @class:        token used to derive struct/function names and guard() classes
 * @type:         the lock type (e.g. spinlock_t)
 * @type_init:    initializer function/macro for @type
 * @type_lock:    lock function; _irq/_bh/_irqsave variants are derived via ##
 * @type_unlock:  unlock function; _irq/_bh/_irqrestore variants are derived via ##
 * @type_trylock: conditional-acquire function returning non-zero on success
 * @op:           operation applied to the __guarded_by() members while held
 *
 * Covers: guarded-member access under plain/_irq/_bh/_irqsave acquisition,
 * trylock-protected access, lockdep assertion as capability proof, and the
 * guard() scoped-lock classes.
 */
#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)			\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}
76 
/*
 * Operations applied to the __guarded_by() members in the tests above:
 * TEST_OP_RW performs a read-modify-write access, TEST_OP_RO a read-only
 * access. The full expansion is parenthesized so the macros also compose
 * safely when used inside a larger expression.
 */
#define TEST_OP_RW(x) ((x)++)
#define TEST_OP_RO(x) ((void)(x))
79 
/* Common patterns with raw_spinlock_t, including writes to guarded members. */
TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
/* Trylock variants of raw_spinlock_t not covered by TEST_SPINLOCK_COMMON. */
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	/* Conditional scoped guard: body only runs with the lock held. */
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}
103 
/* Common patterns with spinlock_t, including writes to guarded members. */
TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
/* Trylock variants of spinlock_t not covered by TEST_SPINLOCK_COMMON. */
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	/* Conditional scoped guard: body only runs with the lock held. */
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}
127 
/* rwlock_t writer side: exclusive acquisition permits read-modify-write. */
TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
/* Writer trylock variant not covered by TEST_SPINLOCK_COMMON. */
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}
144 
/* rwlock_t reader side: shared acquisition permits read-only access only. */
TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);
152 
/* Test data with a counter guarded by a mutex. */
struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};
157 
/* Initialization may access guarded members without holding the mutex. */
static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}
163 
/* Unconditional mutex acquisition variants around guarded access. */
static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}
173 
/*
 * Conditional mutex acquisition: interruptible/killable return 0 on success,
 * trylock and atomic_dec_and_mutex_lock() return non-zero on success.
 */
static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}
193 
/* lockdep_assert_held() must satisfy the analysis as proof of acquisition. */
static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}
199 
/* Scoped guard(mutex) holds the mutex for the rest of the function. */
static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}
205 
/* Conditional scoped guards: the body only runs with the mutex held. */
static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}
215 
/* Test data with a counter guarded by a seqlock. */
struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};
220 
/* Initialization may access guarded members without holding the seqlock. */
static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}
226 
/* Lockless seqlock read: retry loop around read_seqbegin()/read_seqretry(). */
static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}
236 
/* Exclusive seqlock write sections: plain, _irq, _bh and _irqsave variants. */
static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}
257 
/* Scoped lockless seqlock reader permits read access within the scope. */
static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}
264 
/* Test data with a counter guarded by bit 3 of @bits via a bit spinlock. */
struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};
269 
/* bit_spin_lock/unlock, __bit_spin_unlock and trylock around guarded access. */
static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note, the analysis seems to have false negatives, because it won't
	 * precisely recognize the bit of the fake __bitlock() token.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}
289 
/*
 * Test that we can mark a variable guarded by RCU, and we can dereference and
 * write to the pointer with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};
297 
/* rcu_dereference() under each of the three RCU read-side critical sections. */
static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}
312 
/* Scoped guard(rcu) provides the RCU read-side critical section. */
static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}
318 
/* Updater-side primitives may write the __rcu_guarded pointer without RCU held. */
static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}
325 
/* Callees requiring the respective RCU flavor held (shared) by the caller. */
static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }
329 
/* Each RCU lock flavor satisfies the matching __must_hold_shared() callee. */
static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}
344 
/* Nested (reentrant) RCU read-side critical sections of all three flavors. */
static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}
361 
/* lockdep RCU assertions must satisfy the analysis as proof of RCU held. */
static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}
373 
/* Test data with an RCU-guarded pointer protected by sleepable RCU (SRCU). */
struct test_srcu_data {
	struct srcu_struct srcu;
	long __rcu_guarded *data;
};
378 
/* srcu_dereference() inside an srcu_read_lock()/unlock() section. */
static void __used test_srcu(struct test_srcu_data *d)
{
	init_srcu_struct(&d->srcu);

	int idx = srcu_read_lock(&d->srcu);
	long *data = srcu_dereference(d->data, &d->srcu);
	(void)data;
	srcu_read_unlock(&d->srcu, idx);

	rcu_assign_pointer(d->data, NULL);
}
390 
/* Scoped SRCU guards (plain, fast, fast_notrace) permit srcu_dereference(). */
static void __used test_srcu_guard(struct test_srcu_data *d)
{
	{ guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
}
397