// SPDX-License-Identifier: GPL-2.0-only
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
 */

#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

/*
 * Test that helper macros work as expected.
 */
static void __used test_common_helpers(void)
{
	BUILD_BUG_ON(context_unsafe(3) != 3); /* plain expression */
	BUILD_BUG_ON(context_unsafe((void)2; 3) != 3); /* does not swallow semi-colons */
	BUILD_BUG_ON(context_unsafe((void)2, 3) != 3); /* does not swallow commas */
	context_unsafe(do { } while (0)); /* works with void statements */
}
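
/*
 * Illustrative sketch, not part of the original test set: context_unsafe()
 * is also expected to suppress the analysis for a deliberately lockless
 * access to a __guarded_by() member. The struct and function names below
 * are hypothetical, added here only for illustration.
 */
struct test_unsafe_data {
	spinlock_t lock;
	int counter __guarded_by(&lock);
};

static void __used test_context_unsafe_access(struct test_unsafe_data *d)
{
	/* Intentionally lockless read, explicitly opted out of the analysis. */
	(void)context_unsafe(READ_ONCE(d->counter));
}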
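/*
 * Instantiate a common battery of tests for a spinlock-like type: init,
 * lock/unlock in the plain, _irq, _bh, and _irqsave/_irqrestore variants,
 * trylock, a lockdep assertion, and guard() scopes.
 */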
#define TEST_SPINLOCK_COMMON(class, type, type_init, type_lock, type_unlock, type_trylock, op)	\
	struct test_##class##_data {								\
		type lock;									\
		int counter __guarded_by(&lock);						\
		int *pointer __pt_guarded_by(&lock);						\
	};											\
	static void __used test_##class##_init(struct test_##class##_data *d)			\
	{											\
		type_init(&d->lock);								\
		d->counter = 0;									\
	}											\
	static void __used test_##class(struct test_##class##_data *d)				\
	{											\
		unsigned long flags;								\
		d->pointer++;									\
		type_lock(&d->lock);								\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock(&d->lock);								\
		type_lock##_irq(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irq(&d->lock);							\
		type_lock##_bh(&d->lock);							\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_bh(&d->lock);							\
		type_lock##_irqsave(&d->lock, flags);						\
		op(d->counter);									\
		op(*d->pointer);								\
		type_unlock##_irqrestore(&d->lock, flags);					\
	}											\
	static void __used test_##class##_trylock(struct test_##class##_data *d)		\
	{											\
		if (type_trylock(&d->lock)) {							\
			op(d->counter);								\
			type_unlock(&d->lock);							\
		}										\
	}											\
	static void __used test_##class##_assert(struct test_##class##_data *d)			\
	{											\
		lockdep_assert_held(&d->lock);							\
		op(d->counter);									\
	}											\
	static void __used test_##class##_guard(struct test_##class##_data *d)			\
	{											\
		{ guard(class)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irq)(&d->lock);		op(d->counter); }			\
		{ guard(class##_irqsave)(&d->lock);	op(d->counter); }			\
	}

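/*
 * TEST_OP_RW writes the guarded data, so the lock must be held exclusively;
 * TEST_OP_RO only reads it, which is all that a reader lock permits.
 */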
#define TEST_OP_RW(x) (x)++
#define TEST_OP_RO(x) ((void)(x))

TEST_SPINLOCK_COMMON(raw_spinlock,
		     raw_spinlock_t,
		     raw_spin_lock_init,
		     raw_spin_lock,
		     raw_spin_unlock,
		     raw_spin_trylock,
		     TEST_OP_RW);
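/*
 * Trylock variants not covered by TEST_SPINLOCK_COMMON: the _irq and
 * _irqsave forms, plus the scoped conditional guard.
 */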
static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data *d)
{
	unsigned long flags;

	if (raw_spin_trylock_irq(&d->lock)) {
		d->counter++;
		raw_spin_unlock_irq(&d->lock);
	}
	if (raw_spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(raw_spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(spinlock,
		     spinlock_t,
		     spin_lock_init,
		     spin_lock,
		     spin_unlock,
		     spin_trylock,
		     TEST_OP_RW);
static void __used test_spinlock_trylock_extra(struct test_spinlock_data *d)
{
	unsigned long flags;

	if (spin_trylock_irq(&d->lock)) {
		d->counter++;
		spin_unlock_irq(&d->lock);
	}
	if (spin_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
	scoped_cond_guard(spinlock_try, return, &d->lock) {
		d->counter++;
	}
}

TEST_SPINLOCK_COMMON(write_lock,
		     rwlock_t,
		     rwlock_init,
		     write_lock,
		     write_unlock,
		     write_trylock,
		     TEST_OP_RW);
static void __used test_write_trylock_extra(struct test_write_lock_data *d)
{
	unsigned long flags;

	if (write_trylock_irqsave(&d->lock, flags)) {
		d->counter++;
		write_unlock_irqrestore(&d->lock, flags);
	}
}

TEST_SPINLOCK_COMMON(read_lock,
		     rwlock_t,
		     rwlock_init,
		     read_lock,
		     read_unlock,
		     read_trylock,
		     TEST_OP_RO);

struct test_mutex_data {
	struct mutex mtx;
	int counter __guarded_by(&mtx);
};

static void __used test_mutex_init(struct test_mutex_data *d)
{
	mutex_init(&d->mtx);
	d->counter = 0;
}

static void __used test_mutex_lock(struct test_mutex_data *d)
{
	mutex_lock(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
	mutex_lock_io(&d->mtx);
	d->counter++;
	mutex_unlock(&d->mtx);
}

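/*
 * Conditional acquisition: the guarded access is only valid on the success
 * path of each trylock-style primitive.
 */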
static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
{
	if (!mutex_lock_interruptible(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (!mutex_lock_killable(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (mutex_trylock(&d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
	if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
		d->counter++;
		mutex_unlock(&d->mtx);
	}
}

static void __used test_mutex_assert(struct test_mutex_data *d)
{
	lockdep_assert_held(&d->mtx);
	d->counter++;
}

static void __used test_mutex_guard(struct test_mutex_data *d)
{
	guard(mutex)(&d->mtx);
	d->counter++;
}

static void __used test_mutex_cond_guard(struct test_mutex_data *d)
{
	scoped_cond_guard(mutex_try, return, &d->mtx) {
		d->counter++;
	}
	scoped_cond_guard(mutex_intr, return, &d->mtx) {
		d->counter++;
	}
}

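/*
 * Seqlock tests: the reader uses the read_seqbegin()/read_seqretry() retry
 * loop; the writer takes the sequence lock in all IRQ/BH variants.
 */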
struct test_seqlock_data {
	seqlock_t sl;
	int counter __guarded_by(&sl);
};

static void __used test_seqlock_init(struct test_seqlock_data *d)
{
	seqlock_init(&d->sl);
	d->counter = 0;
}

static void __used test_seqlock_reader(struct test_seqlock_data *d)
{
	unsigned int seq;

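	/* Retry the read section if a writer raced with it. */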
	do {
		seq = read_seqbegin(&d->sl);
		(void)d->counter;
	} while (read_seqretry(&d->sl, seq));
}

static void __used test_seqlock_writer(struct test_seqlock_data *d)
{
	unsigned long flags;

	write_seqlock(&d->sl);
	d->counter++;
	write_sequnlock(&d->sl);

	write_seqlock_irq(&d->sl);
	d->counter++;
	write_sequnlock_irq(&d->sl);

	write_seqlock_bh(&d->sl);
	d->counter++;
	write_sequnlock_bh(&d->sl);

	write_seqlock_irqsave(&d->sl, flags);
	d->counter++;
	write_sequnlock_irqrestore(&d->sl, flags);
}

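/* Scoped read section; ss_lockless requests the lockless (retry) mode. */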
static void __used test_seqlock_scoped(struct test_seqlock_data *d)
{
	scoped_seqlock_read (&d->sl, ss_lockless) {
		(void)d->counter;
	}
}