// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/local_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/ww_mutex.h>
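
/*
 * Most tests below follow the same pattern: a data structure whose members
 * are annotated with __guarded_by() or __pt_guarded_by(), plus functions
 * exercising the lock/unlock, trylock, assert, and guard()-based variants of
 * the primitive under test. The functions are marked __used but never
 * called; the file only needs to compile cleanly under the analysis.
 */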

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}

#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))
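
/*
 * Each TEST_SPINLOCK_COMMON() instantiation below generates the data struct
 * and test functions for one lock class. For example, the raw_spinlock
 * instantiation roughly expands to:
 *
 *	struct test_raw_spinlock_data {
 *		raw_spinlock_t lock;
 *		int counter __guarded_by(&lock);
 *		int *pointer __pt_guarded_by(&lock);
 *	};
 *
 *	static void __used test_raw_spinlock(struct test_raw_spinlock_data *d)
 *	{
 *		...
 *		raw_spin_lock(&d->lock);
 *		d->counter++;
 *		raw_spin_unlock(&d->lock);
 *		...
 *	}
 *
 * plus the _init(), _trylock(), _assert() and _guard() variants. TEST_OP_RW
 * writes to the guarded member, whereas TEST_OP_RO only reads it; the
 * read_lock instantiation uses TEST_OP_RO since a read lock only permits
 * shared access.
 */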

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}

struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}
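
/*
 * Reader retry loop: read_seqbegin()/read_seqretry() bracket the read-side
 * critical section, so the read of the __guarded_by(&sl) counter inside it
 * is expected to compile without a warning from the analysis.
 */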

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

struct test_rwsem_data {
	struct rw_semaphore sem;
	int counter __guarded_by(&sem);
};

static void __used test_rwsem_init(struct test_rwsem_data *d)
{
	init_rwsem(&d->sem);
	d->counter = 0;
}

static void __used test_rwsem_reader(struct test_rwsem_data *d)
{
	down_read(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_read_trylock(&d->sem)) {
		(void)d->counter;
		up_read(&d->sem);
	}
}

static void __used test_rwsem_writer(struct test_rwsem_data *d)
{
	down_write(&d->sem);
	d->counter++;
	up_write(&d->sem);

	down_write(&d->sem);
	d->counter++;
	downgrade_write(&d->sem);
	(void)d->counter;
	up_read(&d->sem);

	if (down_write_trylock(&d->sem)) {
		d->counter++;
		up_write(&d->sem);
	}
}

static void __used test_rwsem_assert(struct test_rwsem_data *d)
{
	rwsem_assert_held_nolockdep(&d->sem);
	d->counter++;
}

static void __used test_rwsem_guard(struct test_rwsem_data *d)
{
	{ guard(rwsem_read)(&d->sem); (void)d->counter; }
	{ guard(rwsem_write)(&d->sem); d->counter++; }
}

static void __used test_rwsem_cond_guard(struct test_rwsem_data *d)
{
	scoped_cond_guard(rwsem_read_try, return, &d->sem) {
		(void)d->counter;
	}
	scoped_cond_guard(rwsem_write_try, return, &d->sem) {
		d->counter++;
	}
}

struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note: the analysis seems to have false negatives here, because it
	 * does not precisely recognize which bit the fake __bitlock() token
	 * refers to.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}
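
/*
 * Hypothetical illustration of the limitation noted above: because the
 * analysis cannot tell which bit __bitlock(3, &bits) refers to, a mismatched
 * sequence such as bit_spin_lock(4, &d->bits); d->counter++; would most
 * likely also compile without a warning, even though it takes the wrong bit
 * lock.
 */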

/*
 * Test that we can mark a variable guarded by RCU, and we can dereference and
 * write to the pointer with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};

static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}

static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}

static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}
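
/*
 * The helpers below require their callers to be in the corresponding RCU
 * read-side critical section, expressed via __must_hold_shared(); the tests
 * that follow call them under the matching lock, guard, and lockdep-assert
 * forms.
 */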

static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }

static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}

static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}

static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}

struct test_srcu_data {
	struct srcu_struct srcu;
	long __rcu_guarded *data;
};
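
/*
 * srcu_read_lock() returns an index that must be passed back to
 * srcu_read_unlock() on the same srcu_struct; srcu_dereference() names the
 * srcu_struct explicitly, which presumably lets the analysis tie the access
 * to the SRCU read-side section being held.
 */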

static void __used test_srcu(struct test_srcu_data *d)
{
	init_srcu_struct(&d->srcu);

	int idx = srcu_read_lock(&d->srcu);
	long *data = srcu_dereference(d->data, &d->srcu);
	(void)data;
	srcu_read_unlock(&d->srcu, idx);

	rcu_assign_pointer(d->data, NULL);
}

static void __used test_srcu_guard(struct test_srcu_data *d)
{
	{ guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
	{ guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
}

struct test_local_lock_data {
	local_lock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __used test_local_lock_init(struct test_local_lock_data *d)
{
	local_lock_init(&d->lock);
	d->counter = 0;
}
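
/*
 * Local locks protect per-CPU data: the tests below take the lock of the
 * static per-CPU instance and update its counter via this_cpu_add().
 */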

static void __used test_local_lock(void)
{
	unsigned long flags;

	local_lock(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock(&test_local_lock_data.lock);

	local_lock_irq(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irq(&test_local_lock_data.lock);

	local_lock_irqsave(&test_local_lock_data.lock, flags);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_irqrestore(&test_local_lock_data.lock, flags);

	local_lock_nested_bh(&test_local_lock_data.lock);
	this_cpu_add(test_local_lock_data.counter, 1);
	local_unlock_nested_bh(&test_local_lock_data.lock);
}

static void __used test_local_lock_guard(void)
{
	{ guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
	{ guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); }
}

struct test_local_trylock_data {
	local_trylock_t lock;
	int counter __guarded_by(&lock);
};

static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static void __used test_local_trylock_init(struct test_local_trylock_data *d)
{
	local_trylock_init(&d->lock);
	d->counter = 0;
}

static void __used test_local_trylock(void)
{
	local_lock(&test_local_trylock_data.lock);
	this_cpu_add(test_local_trylock_data.counter, 1);
	local_unlock(&test_local_trylock_data.lock);

	if (local_trylock(&test_local_trylock_data.lock)) {
		this_cpu_add(test_local_trylock_data.counter, 1);
		local_unlock(&test_local_trylock_data.lock);
	}
}

static DEFINE_WD_CLASS(ww_class);

struct test_ww_mutex_data {
	struct ww_mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_ww_mutex_init(struct test_ww_mutex_data *d)
{
	ww_mutex_init(&d->mtx, &ww_class);
	d->counter = 0;
}

static void __used test_ww_mutex_lock_noctx(struct test_ww_mutex_data *d)
{
	if (!ww_mutex_lock(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (!ww_mutex_lock_interruptible(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (ww_mutex_trylock(&d->mtx, NULL)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	ww_mutex_lock_slow(&d->mtx, NULL);
	d->counter++;
	ww_mutex_unlock(&d->mtx);

	ww_mutex_destroy(&d->mtx);
}
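
/*
 * Same as above, but with an acquire context: ww_acquire_init(),
 * ww_acquire_done() and ww_acquire_fini() bracket the acquisitions, as used
 * when taking multiple ww_mutexes as part of a single transaction.
 */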

static void __used test_ww_mutex_lock_ctx(struct test_ww_mutex_data *d)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &ww_class);

	if (!ww_mutex_lock(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (!ww_mutex_lock_interruptible(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	if (ww_mutex_trylock(&d->mtx, &ctx)) {
		d->counter++;
		ww_mutex_unlock(&d->mtx);
	}

	ww_mutex_lock_slow(&d->mtx, &ctx);
	d->counter++;
	ww_mutex_unlock(&d->mtx);

	ww_acquire_done(&ctx);
	ww_acquire_fini(&ctx);

	ww_mutex_destroy(&d->mtx);
}
603