xref: /linux/lib/test_context-analysis.c (revision 370f0a345a70fe36d0185abf87c7ee8e70572e06)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Compile-only tests for common patterns that should not generate false
4  * positive errors when compiled with Clang's context analysis.
5  */
6 
7 #include <linux/build_bug.h>
8 #include <linux/mutex.h>
9 #include <linux/spinlock.h>
10 
11 /*
12  * Test that helper macros work as expected.
13  */
/*
 * Verify that context_unsafe() is a transparent expression wrapper: it must
 * preserve the wrapped expression's value, accept statement sequences and
 * comma expressions without mangling them, and work on void statements.
 * All checks are compile-time (BUILD_BUG_ON); nothing runs at boot.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
21 
/*
 * TEST_SPINLOCK_COMMON() - instantiate the common spinlock-family test suite
 * for one lock class.
 *
 * Generates a struct with a @type lock guarding an int (counter) and a
 * pointed-to int (pointer), plus functions exercising the patterns the
 * analysis must accept without false positives:
 *  - init: initialization then unlocked write (new object, not yet shared)
 *  - plain lock/unlock and the derived _irq, _bh and _irqsave/_irqrestore
 *    variants (derived by token-pasting onto @type_lock/@type_unlock)
 *  - trylock guarded by its success return value
 *  - lockdep_assert_held() as proof of lock ownership
 *  - scope-based guard() in plain, _irq and _irqsave flavours
 *
 * @class:        token used to name the generated struct/functions and as the
 *                guard() class name
 * @type:         lock type, e.g. spinlock_t
 * @type_init:    lock initializer, e.g. spin_lock_init
 * @type_lock:    acquire function; base name for the _irq/_bh/_irqsave forms
 * @type_unlock:  release function; base name for the matching unlock forms
 * @type_trylock: conditional acquire, non-zero return means lock is held
 * @op:           access applied to the guarded members — TEST_OP_RW for
 *                exclusive locks, TEST_OP_RO for shared (reader) locks
 *
 * Note: in test_##class(), the bare `d->pointer++` before locking is
 * intentional — __pt_guarded_by() guards only the pointee (*pointer), not
 * the pointer value itself.
 */
#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)			\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}
72 
/* Write access to a guarded member — needs the lock held exclusively. */
#define TEST_OP_RW(x) (x)++
/* Read-only access — also legal under a shared (reader) lock. */
#define TEST_OP_RO(x) ((void)(x))
75 
/* raw_spinlock_t: exclusive lock, so guarded members may be written. */
TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
/*
 * Trylock variants specific to raw_spinlock_t that the common macro does not
 * cover: _irq and _irqsave conditional acquisition, and the conditional
 * scope-based guard. Each guarded write happens only on the success path.
 */
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	/* scoped_cond_guard() runs the body only if the trylock succeeded. */
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}
99 
/* spinlock_t: exclusive lock, so guarded members may be written. */
TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
/*
 * Trylock variants specific to spinlock_t not covered by the common macro:
 * _irq and _irqsave conditional acquisition, and the conditional scope-based
 * guard. Guarded writes occur only on the success path.
 */
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	/* scoped_cond_guard() runs the body only if the trylock succeeded. */
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}
123 
/* rwlock_t writer side: exclusive, so guarded members may be written. */
TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
/*
 * Writer-side trylock variant not covered by the common macro:
 * write_trylock_irqsave() guarding the write via its success return.
 */
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}
140 
/*
 * rwlock_t reader side: shared acquisition, so only read-only access to the
 * guarded members is legal — hence TEST_OP_RO rather than TEST_OP_RW.
 */
TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);
148 
/* Test fixture for the mutex patterns below. */
struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);	/* any access requires mtx held */
};
153 
/*
 * Initialization pattern: writing the guarded member right after mutex_init()
 * (object not yet shared) must not be flagged.
 */
static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}
159 
/*
 * Unconditional acquisition: both mutex_lock() and mutex_lock_io() must be
 * recognized as acquiring mtx, permitting the guarded writes in between.
 */
static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}
169 
/*
 * Conditional acquisition: each variant holds the mutex only on its success
 * path, so guarded writes inside the if-bodies must be accepted. Note the
 * inverted sense of the interruptible/killable variants (0 means acquired)
 * versus mutex_trylock()/atomic_dec_and_mutex_lock() (non-zero means
 * acquired).
 */
static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	/* Acquires mtx only if it decremented *a to zero. */
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}
189 
/*
 * lockdep_assert_held() must count as proof that mtx is held, allowing the
 * guarded write without an acquisition visible in this function.
 */
static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}
195 
/*
 * Scope-based guard(): mtx is held from the guard() statement to the end of
 * the function scope, covering the guarded write.
 */
static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}
201 
/*
 * Conditional scope-based guards: the body runs (and mtx is held) only when
 * the trylock / interruptible lock succeeded; on failure the fail statement
 * (return) is executed instead.
 */
static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}
211