// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/local_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/ww_mutex.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
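
/*
 * A minimal sketch (illustrative only; struct test_unsafe_data is made up
 * for this example): context_unsafe() wraps a deliberate unprotected access
 * to a guarded variable, e.g. a racy diagnostic read, so that the analysis
 * does not flag it.
 */
struct test_unsafe_data {
	spinlock_t lock;
	int counter __guarded_by(&lock);
};

static void __used test_context_unsafe_access(struct test_unsafe_data *d)
{
	/* Deliberately lockless read; the analysis warning is suppressed. */
	(void)context_unsafe(d->counter);
}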

#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		guard(type_init)(&d->lock);							\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))
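
/*
 * TEST_OP_RW writes to the guarded expression and therefore requires the
 * lock to be held exclusively; TEST_OP_RO only reads, which is also allowed
 * while holding a shared (reader) lock such as read_lock below.
 */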

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spinlock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	data_race(d->counter++); /* no warning */

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spinlock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);
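
/*
 * A minimal sketch (illustrative, assuming __must_hold_shared() accepts
 * lock expressions the same way it is used with RCU below): annotate a
 * helper that requires the reader lock to be held by the caller.
 */
static void test_read_lock_helper(struct test_read_lock_data *d)
	__must_hold_shared(&d->lock)
{
	(void)d->counter;
}

static void __used test_read_lock_caller(struct test_read_lock_data *d)
{
	read_lock(&d->lock);
	test_read_lock_helper(d);
	read_unlock(&d->lock);
}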

struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	guard(mutex_init)(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}
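
/*
 * A minimal sketch (illustrative): scoped_guard() from <linux/cleanup.h>
 * should behave like guard() above, but with an explicit statement scope.
 */
static void __used test_mutex_scoped_guard(struct test_mutex_data *d)
{
	scoped_guard(mutex, &d->mtx)
		d->counter++;
}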

struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	guard(seqlock_init)(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

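	/*
	 * Standard lockless read-side retry loop: re-read if a writer
	 * interleaved with this read section.
	 */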
	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

struct test_rwsem_data {
	struct rw_semaphore sem;
	int counter __guarded_by(&sem);
};

static void __used test_rwsem_init(struct test_rwsem_data *d)
{
	guard(rwsem_init)(&d->sem);
	d->counter = 0;
}

static void __used test_rwsem_reader(struct test_rwsem_data *d)
{
	down_read(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_read_trylock(&d->sem)) {
		(void)d->counter;
		up_read(&d->sem);
	}
}

static void __used test_rwsem_writer(struct test_rwsem_data *d)
{
	down_write(&d->sem);
	d->counter++;
	up_write(&d->sem);

	down_write(&d->sem);
	d->counter++;
	downgrade_write(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_write_trylock(&d->sem)) {
		d->counter++;
		up_write(&d->sem);
	}
}

static void __used test_rwsem_assert(struct test_rwsem_data *d)
{
	rwsem_assert_held_nolockdep(&d->sem);
	d->counter++;
}

static void __used test_rwsem_guard(struct test_rwsem_data *d)
{
	{ guard(rwsem_read)(&d->sem); (void)d->counter; }
	{ guard(rwsem_write)(&d->sem); d->counter++; }
}

static void __used test_rwsem_cond_guard(struct test_rwsem_data *d)
{
	scoped_cond_guard(rwsem_read_try, return, &d->sem) {
		(void)d->counter;
	}
	scoped_cond_guard(rwsem_write_try, return, &d->sem) {
		d->counter++;
	}
}
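
/*
 * A minimal sketch (illustrative): the write-specific assertion variant;
 * unlike rwsem_assert_held_nolockdep() above, this one asserts exclusive
 * (writer) ownership before the write.
 */
static void __used test_rwsem_assert_write(struct test_rwsem_data *d)
{
	rwsem_assert_held_write_nolockdep(&d->sem);
	d->counter++;
}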

struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note: the analysis seems to have false negatives here, because it
	 * cannot precisely match the bit argument against the fake
	 * __bitlock() token.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}

/*
 * Test that we can mark a pointer as RCU-guarded, and that it can be
 * dereferenced and updated with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};

static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}

static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}

static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}
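
/*
 * A minimal sketch (illustrative; test_rcu_update_lock is made up for this
 * example): update-side dereference under a lock with the standard
 * rcu_dereference_protected() primitive and a lockdep condition.
 */
static DEFINE_SPINLOCK(test_rcu_update_lock);

static void __used test_rcu_guarded_protected(struct test_rcu_data *d)
{
	spin_lock(&test_rcu_update_lock);
	(void)rcu_dereference_protected(d->data,
					lockdep_is_held(&test_rcu_update_lock));
	rcu_assign_pointer(d->data, NULL);
	spin_unlock(&test_rcu_update_lock);
}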

static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }

static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}

static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}

static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}

struct test_srcu_data {
	struct srcu_struct srcu;
	long __rcu_guarded *data;
};

static void __used test_srcu(struct test_srcu_data *d)
{
	init_srcu_struct(&d->srcu);

	int idx = srcu_read_lock(&d->srcu);
	long *data = srcu_dereference(d->data, &d->srcu);
	(void)data;
	srcu_read_unlock(&d->srcu, idx);

	rcu_assign_pointer(d->data, NULL);
}

static void __used test_srcu_guard(struct test_srcu_data *d)
{
	{ guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
}
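
/*
 * A minimal sketch (illustrative): srcu_dereference_check() with an explicit
 * lockdep condition, mirroring test_srcu() above.
 */
static void __used test_srcu_check(struct test_srcu_data *d)
{
	int idx = srcu_read_lock(&d->srcu);

	(void)srcu_dereference_check(d->data, &d->srcu,
				     srcu_read_lock_held(&d->srcu));
	srcu_read_unlock(&d->srcu, idx);
}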

struct test_local_lock_data {
	local_lock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __used test_local_lock_init(struct test_local_lock_data *d)
{
	guard(local_lock_init)(&d->lock);
	d->counter = 0;
}

static void __used test_local_lock(void)
{
	unsigned long flags;

	local_lock(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock(&test_local_lock_data.lock);

	local_lock_irq(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irq(&test_local_lock_data.lock);

	local_lock_irqsave(&test_local_lock_data.lock, flags);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irqrestore(&test_local_lock_data.lock, flags);

	local_lock_nested_bh(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_nested_bh(&test_local_lock_data.lock);
}

static void __used test_local_lock_guard(void)
{
	{ guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
}

struct test_local_trylock_data {
	local_trylock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static void __used test_local_trylock_init(struct test_local_trylock_data *d)
{
	guard(local_trylock_init)(&d->lock);
	d->counter = 0;
}

static void __used test_local_trylock(void)
{
	local_lock(&test_local_trylock_data.lock);
	this_cpu_add(test_local_trylock_data.counter, 1);
	local_unlock(&test_local_trylock_data.lock);

	if (local_trylock(&test_local_trylock_data.lock)) {
		this_cpu_add(test_local_trylock_data.counter, 1);
		local_unlock(&test_local_trylock_data.lock);
	}
}

static DEFINE_WD_CLASS(ww_class);

struct test_ww_mutex_data {
	struct ww_mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_ww_mutex_lock_noctx(struct test_ww_mutex_data *d)
{
	if (!ww_mutex_lock(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (!ww_mutex_lock_interruptible(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (ww_mutex_trylock(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	ww_mutex_lock_slow(&d->mtx, NULL);
	d->counter++;
	ww_mutex_unlock(&d->mtx);

	ww_mutex_destroy(&d->mtx);
}

static void __used test_ww_mutex_lock_ctx(struct test_ww_mutex_data *d)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &ww_class);

	if (!ww_mutex_lock(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (!ww_mutex_lock_interruptible(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (ww_mutex_trylock(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	ww_mutex_lock_slow(&d->mtx, &ctx);
	d->counter++;
	ww_mutex_unlock(&d->mtx);

	ww_acquire_done(&ctx);
	ww_acquire_fini(&ctx);

	ww_mutex_destroy(&d->mtx);
}
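
/*
 * A minimal sketch (illustrative) of the documented wait/wound backoff
 * pattern (see Documentation/locking/ww-mutex-design.rst) for acquiring two
 * ww_mutexes; a complete implementation would retry on repeated -EDEADLK
 * rather than give up.
 */
static void __used test_ww_mutex_backoff(struct test_ww_mutex_data *d1,
					 struct test_ww_mutex_data *d2)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);

	err = ww_mutex_lock(&d1->mtx, &ctx);
	if (!err) {
		err = ww_mutex_lock(&d2->mtx, &ctx);
		if (err == -EDEADLK) {
			/* Back off, then sleep-wait for the contended lock. */
			ww_mutex_unlock(&d1->mtx);
			ww_mutex_lock_slow(&d2->mtx, &ctx);
			err = ww_mutex_lock(&d1->mtx, &ctx);
			if (err)
				ww_mutex_unlock(&d2->mtx);
		}
	}
	if (!err) {
		d1->counter++;
		d2->counter++;
		ww_mutex_unlock(&d2->mtx);
		ww_mutex_unlock(&d1->mtx);
	}

	ww_acquire_done(&ctx);
	ww_acquire_fini(&ctx);
}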
599