// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colon */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
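
/*
 * Illustrative sketch (not part of the original tests): context_unsafe() is
 * typically used to wrap a single access whose protection the analysis cannot
 * see. The struct, lock and function names below are made up for illustration
 * only.
 */
struct test_unsafe_example_data {
	spinlock_t lock;
	int counter __guarded_by(&lock);
};

static void __used test_unsafe_example(struct test_unsafe_example_data *d)
{
	/* Suppress the analysis for this single access only. */
	context_unsafe(d->counter++);
}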

#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)		\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}
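
/*
 * Illustrative sketch (not part of the original tests): the scoped_guard()
 * form from <linux/cleanup.h> should be treated the same way as guard()
 * above; the function name is made up for illustration only.
 */
static void __used test_mutex_scoped_guard(struct test_mutex_data *d)
{
	scoped_guard(mutex, &d->mtx) {
		d->counter++;
	}
}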

struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}

struct test_bit_spinlock_data {
	unsigned long bits;
	int counter __guarded_by(__bitlock(3, &bits));
};

static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
{
	/*
	 * Note, the analysis may produce false negatives here, because it does
	 * not precisely track which bit the fake __bitlock() token refers to.
	 */
	bit_spin_lock(3, &d->bits);
	d->counter++;
	bit_spin_unlock(3, &d->bits);

	bit_spin_lock(3, &d->bits);
	d->counter++;
	__bit_spin_unlock(3, &d->bits);

	if (bit_spin_trylock(3, &d->bits)) {
		d->counter++;
		bit_spin_unlock(3, &d->bits);
	}
}
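
/*
 * Illustrative sketch of the limitation noted above (not part of the original
 * tests): because the bit number in the fake __bitlock() token is not tracked
 * precisely, taking a different bit than the one named in __guarded_by() is
 * likely not diagnosed. The bit numbers below are arbitrary.
 */
static void __used test_bit_spin_lock_wrong_bit(struct test_bit_spinlock_data *d)
{
	bit_spin_lock(5, &d->bits);	/* counter is guarded by bit 3 */
	d->counter++;			/* probably not flagged, see above */
	bit_spin_unlock(5, &d->bits);
}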

/*
 * Test that we can mark a variable as guarded by RCU, and that we can
 * dereference and update the pointer with RCU's primitives.
 */
struct test_rcu_data {
	long __rcu_guarded *data;
};

static void __used test_rcu_guarded_reader(struct test_rcu_data *d)
{
	rcu_read_lock();
	(void)rcu_dereference(d->data);
	rcu_read_unlock();

	rcu_read_lock_bh();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	(void)rcu_dereference(d->data);
	rcu_read_unlock_sched();
}

static void __used test_rcu_guard(struct test_rcu_data *d)
{
	guard(rcu)();
	(void)rcu_dereference(d->data);
}
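
/*
 * Illustrative sketch (not part of the original tests): the scoped form of
 * the RCU guard should behave the same way; the function name is made up for
 * illustration only.
 */
static void __used test_rcu_scoped_guard(struct test_rcu_data *d)
{
	scoped_guard(rcu) {
		(void)rcu_dereference(d->data);
	}
}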

static void __used test_rcu_guarded_updater(struct test_rcu_data *d)
{
	rcu_assign_pointer(d->data, NULL);
	RCU_INIT_POINTER(d->data, NULL);
	(void)unrcu_pointer(d->data);
}

static void wants_rcu_held(void)	__must_hold_shared(RCU)       { }
static void wants_rcu_held_bh(void)	__must_hold_shared(RCU_BH)    { }
static void wants_rcu_held_sched(void)	__must_hold_shared(RCU_SCHED) { }

static void __used test_rcu_lock_variants(void)
{
	rcu_read_lock();
	wants_rcu_held();
	rcu_read_unlock();

	rcu_read_lock_bh();
	wants_rcu_held_bh();
	rcu_read_unlock_bh();

	rcu_read_lock_sched();
	wants_rcu_held_sched();
	rcu_read_unlock_sched();
}

static void __used test_rcu_lock_reentrant(void)
{
	rcu_read_lock();
	rcu_read_lock();
	rcu_read_lock_bh();
	rcu_read_lock_bh();
	rcu_read_lock_sched();
	rcu_read_lock_sched();

	rcu_read_unlock_sched();
	rcu_read_unlock_sched();
	rcu_read_unlock_bh();
	rcu_read_unlock_bh();
	rcu_read_unlock();
	rcu_read_unlock();
}

static void __used test_rcu_assert_variants(void)
{
	lockdep_assert_in_rcu_read_lock();
	wants_rcu_held();

	lockdep_assert_in_rcu_read_lock_bh();
	wants_rcu_held_bh();

	lockdep_assert_in_rcu_read_lock_sched();
	wants_rcu_held_sched();
}
372