// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/local_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colons */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}

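/*
 * TEST_SPINLOCK_COMMON() generates, for one spinlock-like lock class, a struct
 * with members guarded by the lock, plus tests for initialization, the plain,
 * _irq, _bh and _irqsave lock/unlock variants, trylock, lockdep assertions,
 * and the corresponding guard() scopes.
 */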
#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

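/*
 * Access operation passed to TEST_SPINLOCK_COMMON(): exclusively held locks
 * permit writes to the guarded data, whereas shared (reader) locks only
 * permit reads.
 */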
#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
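/* Trylock variants of raw_spinlock_t that are not covered by the common macro. */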
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
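/* Trylock variants of spinlock_t that are not covered by the common macro. */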
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

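/*
 * rwlock_t is instantiated twice: the writer side may modify the guarded data
 * (TEST_OP_RW), while the reader side below only reads it (TEST_OP_RO).
 */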
TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

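/*
 * Test that a mutex-guarded counter can be written under all mutex acquisition
 * variants, including the interruptible, killable, trylock and
 * atomic_dec_and_mutex_lock() forms, as well as under lockdep assertions and
 * guard() scopes.
 */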
struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}

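/*
 * Test seqlock_t: readers use the read_seqbegin()/read_seqretry() retry loop
 * or scoped_seqlock_read(), writers use the write_seqlock() variants.
 */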
struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

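/*
 * Test rw_semaphore: exclusive (writer) acquisition allows modification,
 * shared (reader) acquisition only allows reads, and downgrade_write()
 * transitions from exclusive to shared.
 */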
struct test_rwsem_data {
	struct rw_semaphore sem;
	int counter __guarded_by(&sem);
};

static void __used test_rwsem_init(struct test_rwsem_data *d)
{
	init_rwsem(&d->sem);
	d->counter = 0;
}

static void __used test_rwsem_reader(struct test_rwsem_data *d)
{
	down_read(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_read_trylock(&d->sem)) {
		(void)d->counter;
		up_read(&d->sem);
	}
}

static void __used test_rwsem_writer(struct test_rwsem_data *d)
{
	down_write(&d->sem);
	d->counter++;
	up_write(&d->sem);

	down_write(&d->sem);
	d->counter++;
	downgrade_write(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_write_trylock(&d->sem)) {
		d->counter++;
		up_write(&d->sem);
	}
}

static void __used test_rwsem_assert(struct test_rwsem_data *d)
{
	rwsem_assert_held_nolockdep(&d->sem);
	d->counter++;
}

static void __used test_rwsem_guard(struct test_rwsem_data *d)
{
	{ guard(rwsem_read)(&d->sem); (void)d->counter; }
	{ guard(rwsem_write)(&d->sem); d->counter++; }
}

static void __used test_rwsem_cond_guard(struct test_rwsem_data *d)
{
	scoped_cond_guard(rwsem_read_try, return, &d->sem) {
		(void)d->counter;
	}
	scoped_cond_guard(rwsem_write_try, return, &d->sem) {
		d->counter++;
	}
}

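/*
 * Test a counter guarded by a bit spinlock, i.e. by bit 3 of the 'bits' word,
 * expressed via the __bitlock() token.
 */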
struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note, the analysis seems to have false negatives, because it won't
	 * precisely recognize the bit of the fake __bitlock() token.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}

/*
 * Test that we can mark a variable guarded by RCU, and we can dereference and
 * write to the pointer with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};

static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}

static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}

static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}

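/*
 * Functions annotated with __must_hold_shared() require the respective RCU
 * read-side context to be held by the caller.
 */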
static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }

static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}

static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}

static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}

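/*
 * Test SRCU: srcu_read_lock() returns an index that must be passed back to
 * srcu_read_unlock(), and srcu_dereference() names the srcu_struct that
 * protects the pointer.
 */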
struct test_srcu_data {
	struct srcu_struct srcu;
	long __rcu_guarded *data;
};

static void __used test_srcu(struct test_srcu_data *d)
{
	init_srcu_struct(&d->srcu);

	int idx = srcu_read_lock(&d->srcu);
	long *data = srcu_dereference(d->data, &d->srcu);
	(void)data;
	srcu_read_unlock(&d->srcu, idx);

	rcu_assign_pointer(d->data, NULL);
}

static void __used test_srcu_guard(struct test_srcu_data *d)
{
	{ guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
}

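/*
 * Test local_lock_t guarding a per-CPU counter; the counter is accessed with
 * this_cpu_add() while the per-CPU lock is held.
 */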
struct test_local_lock_data {
	local_lock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __used test_local_lock_init(struct test_local_lock_data *d)
{
	local_lock_init(&d->lock);
	d->counter = 0;
}

static void __used test_local_lock(void)
{
	unsigned long flags;

	local_lock(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock(&test_local_lock_data.lock);

	local_lock_irq(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irq(&test_local_lock_data.lock);

	local_lock_irqsave(&test_local_lock_data.lock, flags);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irqrestore(&test_local_lock_data.lock, flags);

	local_lock_nested_bh(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_nested_bh(&test_local_lock_data.lock);
}

static void __used test_local_lock_guard(void)
{
	{ guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
}

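/*
 * Test local_trylock_t: it supports the plain local_lock()/local_unlock()
 * operations as well as local_trylock().
 */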
struct test_local_trylock_data {
	local_trylock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static void __used test_local_trylock_init(struct test_local_trylock_data *d)
{
	local_trylock_init(&d->lock);
	d->counter = 0;
}

static void __used test_local_trylock(void)
{
	local_lock(&test_local_trylock_data.lock);
	this_cpu_add(test_local_trylock_data.counter, 1);
	local_unlock(&test_local_trylock_data.lock);

	if (local_trylock(&test_local_trylock_data.lock)) {
		this_cpu_add(test_local_trylock_data.counter, 1);
		local_unlock(&test_local_trylock_data.lock);
	}
}
534