// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
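
/*
 * A minimal sketch (hypothetical, not part of the upstream tests) of the
 * typical use of context_unsafe(): accessing guarded data where the
 * required context is known to be held, but in a way the analysis cannot
 * prove. The struct and function names below are made up for illustration.
 */
struct example_unsafe_data {
	spinlock_t lock;
	int counter __guarded_by(&lock);
};

static void __used example_context_unsafe(struct example_unsafe_data *d)
{
	/* Assume the caller guarantees that d->lock is held. */
	context_unsafe(d->counter++);
}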

#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))
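
/*
 * For reference, a hand-written sketch of the core of what one
 * TEST_SPINLOCK_COMMON() instantiation expands to (hypothetical names, to
 * avoid clashing with the real instantiations below). It also illustrates
 * the two annotations: __guarded_by() protects the member itself, while
 * __pt_guarded_by() protects only the pointee, so the pointer value may be
 * changed without holding the lock.
 */
struct example_spinlock_data {
	spinlock_t lock;
	int counter __guarded_by(&lock);
	int *pointer __pt_guarded_by(&lock);
};

static void __used example_spinlock(struct example_spinlock_data *d)
{
	d->pointer++;			/* ok: the pointer itself is not guarded */
	spin_lock(&d->lock);
	d->counter++;			/* ok: lock is held */
	(*d->pointer)++;		/* ok: lock is held */
	spin_unlock(&d->lock);
}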

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}
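
/*
 * A sketch (not in the upstream tests) of scoped_guard(), the unconditional
 * scoped variant from <linux/cleanup.h>; the analysis should accept it the
 * same way as guard() above.
 */
static void __used example_mutex_scoped_guard(struct test_mutex_data *d)
{
	scoped_guard(mutex, &d->mtx) {
		d->counter++;
	}
}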

struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

struct test_rwsem_data {
	struct rw_semaphore sem;
	int counter __guarded_by(&sem);
};

static void __used test_rwsem_init(struct test_rwsem_data *d)
{
	init_rwsem(&d->sem);
	d->counter = 0;
}

static void __used test_rwsem_reader(struct test_rwsem_data *d)
{
	down_read(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_read_trylock(&d->sem)) {
		(void)d->counter;
		up_read(&d->sem);
	}
}

static void __used test_rwsem_writer(struct test_rwsem_data *d)
{
	down_write(&d->sem);
	d->counter++;
	up_write(&d->sem);

	down_write(&d->sem);
	d->counter++;
	downgrade_write(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_write_trylock(&d->sem)) {
		d->counter++;
		up_write(&d->sem);
	}
}

static void __used test_rwsem_assert(struct test_rwsem_data *d)
{
	rwsem_assert_held_nolockdep(&d->sem);
	d->counter++;
}

static void __used test_rwsem_guard(struct test_rwsem_data *d)
{
	{ guard(rwsem_read)(&d->sem); (void)d->counter; }
	{ guard(rwsem_write)(&d->sem); d->counter++; }
}

static void __used test_rwsem_cond_guard(struct test_rwsem_data *d)
{
	scoped_cond_guard(rwsem_read_try, return, &d->sem) {
		(void)d->counter;
	}
	scoped_cond_guard(rwsem_write_try, return, &d->sem) {
		d->counter++;
	}
}

struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note: the analysis likely has false negatives here, because it
	 * cannot precisely recognize which bit the fake __bitlock() token
	 * refers to.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}

/*
 * Test that we can mark a variable as guarded by RCU, and that we can
 * dereference and update the pointer with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};

static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}

static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}

static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}
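
/*
 * A sketch (hypothetical, not in the upstream tests) of the classic
 * update-side pattern for an __rcu_guarded pointer: publish a new value
 * with rcu_replace_pointer() and wait for a grace period before the old
 * value may be reclaimed.
 */
static void __used example_rcu_replace(struct test_rcu_data *d, long *new)
{
	long *old = rcu_replace_pointer(d->data, new, true);

	synchronize_rcu();
	(void)old;	/* the old value may now be freed */
}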

static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }
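
/*
 * A sketch (hypothetical) of the exclusive counterpart for ordinary locks:
 * __must_hold() documents that the caller must already hold the lock,
 * analogous to __must_hold_shared() for RCU above.
 */
static void __used wants_mutex_held(struct test_mutex_data *d) __must_hold(&d->mtx)
{
	d->counter++;
}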

static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}

static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}

static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}

struct test_srcu_data {
	struct srcu_struct srcu;
	long __rcu_guarded *data;
};

static void __used test_srcu(struct test_srcu_data *d)
{
	init_srcu_struct(&d->srcu);

	int idx = srcu_read_lock(&d->srcu);
	long *data = srcu_dereference(d->data, &d->srcu);
	(void)data;
	srcu_read_unlock(&d->srcu, idx);

	rcu_assign_pointer(d->data, NULL);
}

static void __used test_srcu_guard(struct test_srcu_data *d)
{
	{ guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
}
461