xref: /linux/include/linux/cleanup.h (revision f4915933947c71f08ed1c5a6c9b4fdbe735e18cf)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_CLEANUP_H
3 #define _LINUX_CLEANUP_H
4 
5 #include <linux/compiler.h>
6 
7 /**
8  * DOC: scope-based cleanup helpers
9  *
10  * The "goto error" pattern is notorious for introducing subtle resource
11  * leaks. It is tedious and error prone to add new resource acquisition
12  * constraints into code paths that already have several unwind
13  * conditions. The "cleanup" helpers enable the compiler to help with
14  * this tedium and can aid in maintaining LIFO (last in first out)
15  * unwind ordering to avoid unintentional leaks.
16  *
17  * As drivers make up the majority of the kernel code base, here is an
18  * example of using these helpers to clean up PCI drivers. The target of
19  * the cleanups are occasions where a goto is used to unwind a device
20  * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
21  * before returning.
22  *
23  * The DEFINE_FREE() macro can arrange for PCI device references to be
24  * dropped when the associated variable goes out of scope::
25  *
26  *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
27  *	...
28  *	struct pci_dev *dev __free(pci_dev_put) =
29  *		pci_get_slot(parent, PCI_DEVFN(0, 0));
30  *
31  * The above will automatically call pci_dev_put() if @dev is non-NULL
32  * when @dev goes out of scope (automatic variable scope). If a function
33  * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
34  * freeing it) on success, it can do::
35  *
36  *	return no_free_ptr(dev);
37  *
38  * ...or::
39  *
40  *	return_ptr(dev);
41  *
42  * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
43  * dropped when the scope where guard() is invoked ends::
44  *
45  *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
46  *	...
47  *	guard(pci_dev)(dev);
48  *
49  * The lifetime of the lock obtained by the guard() helper follows the
50  * scope of automatic variable declaration. Take the following example::
51  *
52  *	func(...)
53  *	{
54  *		if (...) {
55  *			...
56  *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
57  *			...
58  *		} // <- implied pci_dev_unlock() triggered here
59  *	}
60  *
61  * Observe the lock is held for the remainder of the "if ()" block not
62  * the remainder of "func()".
63  *
64  * Now, when a function uses both __free() and guard(), or multiple
65  * instances of __free(), the LIFO order of variable definition order
66  * matters. GCC documentation says:
67  *
68  * "When multiple variables in the same scope have cleanup attributes,
69  * at exit from the scope their associated cleanup functions are run in
70  * reverse order of definition (last defined, first cleanup)."
71  *
72  * When the unwind order matters it requires that variables be defined
73  * mid-function scope rather than at the top of the function.  Take the
74  * following example and notice the bug highlighted by "!!"::
75  *
76  *	LIST_HEAD(list);
77  *	DEFINE_MUTEX(lock);
78  *
79  *	struct object {
80  *	        struct list_head node;
81  *	};
82  *
83  *	static struct object *alloc_add(void)
84  *	{
85  *	        struct object *obj;
86  *
87  *	        lockdep_assert_held(&lock);
88  *	        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
89  *	        if (obj) {
90  *	                INIT_LIST_HEAD(&obj->node);
91  *	                list_add(&obj->node, &list);
92  *	        }
93  *	        return obj;
94  *	}
95  *
96  *	static void remove_free(struct object *obj)
97  *	{
98  *	        lockdep_assert_held(&lock);
99  *	        list_del(&obj->node);
100  *	        kfree(obj);
101  *	}
102  *
103  *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
104  *	static int init(void)
105  *	{
106  *	        struct object *obj __free(remove_free) = NULL;
107  *	        int err;
108  *
109  *	        guard(mutex)(&lock);
110  *	        obj = alloc_add();
111  *
112  *	        if (!obj)
113  *	                return -ENOMEM;
114  *
115  *	        err = other_init(obj);
116  *	        if (err)
117  *	                return err; // remove_free() called without the lock!!
118  *
119  *	        no_free_ptr(obj);
120  *	        return 0;
121  *	}
122  *
123  * That bug is fixed by changing init() to call guard() and define +
124  * initialize @obj in this order::
125  *
126  *	guard(mutex)(&lock);
127  *	struct object *obj __free(remove_free) = alloc_add();
128  *
129  * Given that the "__free(...) = NULL" pattern for variables defined at
130  * the top of the function poses this potential interdependency problem
131  * the recommendation is to always define and assign variables in one
132  * statement and not group variable definitions at the top of the
133  * function when __free() is used.
134  *
135  * Lastly, given that the benefit of cleanup helpers is removal of
136  * "goto", and that the "goto" statement can jump between scopes, the
137  * expectation is that usage of "goto" and cleanup helpers is never
138  * mixed in the same function. I.e. for a given routine, convert all
139  * resources that need a "goto" cleanup to scope-based cleanup, or
140  * convert none of them.
141  */
142 
143 /*
144  * DEFINE_FREE(name, type, free):
145  *	simple helper macro that defines the required wrapper for a __free()
146  *	based cleanup function. @free is an expression using '_T' to access the
147  *	variable. @free should typically include a NULL test before calling a
148  *	function, see the example below.
149  *
150  * __free(name):
151  *	variable attribute to add a scoped based cleanup to the variable.
152  *
153  * no_free_ptr(var):
154  *	like a non-atomic xchg(var, NULL), such that the cleanup function will
155  *	be inhibited -- provided it sanely deals with a NULL value.
156  *
157  *	NOTE: this has __must_check semantics so that it is harder to accidentally
158  *	leak the resource.
159  *
160  * return_ptr(p):
161  *	returns p while inhibiting the __free().
162  *
163  * Ex.
164  *
165  * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
166  *
167  * void *alloc_obj(...)
168  * {
169  *	struct obj *p __free(kfree) = kmalloc(...);
170  *	if (!p)
171  *		return NULL;
172  *
173  *	if (!init_obj(p))
174  *		return NULL;
175  *
176  *	return_ptr(p);
177  * }
178  *
179  * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
180  * kfree() is fine to be called with a NULL value. This is on purpose. This way
181  * the compiler sees the end of our alloc_obj() function as:
182  *
183  *	tmp = p;
184  *	p = NULL;
185  *	if (p)
186  *		kfree(p);
187  *	return tmp;
188  *
189  * And through the magic of value-propagation and dead-code-elimination, it
190  * eliminates the actual cleanup call and compiles into:
191  *
192  *	return p;
193  *
194  * Without the NULL test it turns into a mess and the compiler can't help us.
195  */
196 
/*
 * Generate the cleanup callback __free_##_name() that __free() below hangs
 * off a variable: it loads the variable's current value into '_T' and runs
 * the caller-supplied @_free expression on it.
 */
197 #define DEFINE_FREE(_name, _type, _free) \
198 	static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
199 
/* Attach the DEFINE_FREE()-generated destructor to a variable declaration. */
200 #define __free(_name)	__cleanup(__free_##_name)
201 
/*
 * Statement expression that reads @p, overwrites it with @nullvalue and
 * yields the old value; @p is evaluated only once (via the &(p) temporary),
 * so side effects in the argument are not duplicated.
 */
202 #define __get_and_null(p, nullvalue)   \
203 	({                                  \
204 		__auto_type __ptr = &(p);   \
205 		__auto_type __val = *__ptr; \
206 		*__ptr = nullvalue;         \
207 		__val;                      \
208 	})
209 
210 static inline __must_check
__must_check_fn(const volatile void * val)211 const volatile void * __must_check_fn(const volatile void *val)
212 { return val; }
213 
/*
 * Fetch @p while NULLing it out, so the pending __free() callback sees NULL
 * and does nothing; the __must_check_fn() round-trip makes it hard for the
 * caller to accidentally drop the returned pointer on the floor.
 */
214 #define no_free_ptr(p) \
215 	((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))
216 
/* Return @p from the current function with its __free() cleanup disarmed. */
217 #define return_ptr(p)	return no_free_ptr(p)
218 
219 /*
220  * Only for situations where an allocation is handed in to another function
221  * and consumed by that function on success.
222  *
223  *	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
224  *
225  *	setup(f);
226  *	if (some_condition)
227  *		return -EINVAL;
228  *	....
229  *	ret = bar(f);
230  *	if (!ret)
231  *		retain_ptr(f);
232  *	return ret;
233  */
/*
 * Like no_free_ptr() but without the __must_check wrapper: the old value is
 * deliberately discarded because ownership has already been handed to a
 * consuming function (see the example above).
 */
234 #define retain_ptr(p)				\
235 	__get_and_null(p, NULL)
236 
237 /*
238  * DEFINE_CLASS(name, type, exit, init, init_args...):
239  *	helper to define the destructor and constructor for a type.
240  *	@exit is an expression using '_T' -- similar to FREE above.
241  *	@init is an expression in @init_args resulting in @type
242  *
243  * EXTEND_CLASS(name, ext, init, init_args...):
244  *	extends class @name to @name@ext with the new constructor
245  *
246  * CLASS(name, var)(args...):
247  *	declare the variable @var as an instance of the named class
248  *
249  * Ex.
250  *
251  * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
252  *
253  *	CLASS(fdget, f)(fd);
254  *	if (fd_empty(f))
255  *		return -EBADF;
256  *
257  *	// use 'f' without concern
258  */
259 
/*
 * Emit the typedef, destructor and constructor for a scope-managed class:
 * the destructor copies the instance into '_T' and runs @_exit on it; the
 * constructor evaluates @_init (which may reference @_init_args) into a
 * value of @_type.
 */
260 #define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
261 typedef _type class_##_name##_t;					\
262 static inline void class_##_name##_destructor(_type *p)			\
263 { _type _T = *p; _exit; }						\
264 static inline _type class_##_name##_constructor(_init_args)		\
265 { _type t = _init; return t; }
266 
/*
 * Derive class @name@ext from @name: same underlying type, destructor
 * forwards to the base destructor, only the constructor is replaced.
 */
267 #define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
268 typedef class_##_name##_t class_##_name##ext##_t;			\
269 static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
270 { class_##_name##_destructor(p); }					\
271 static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
272 { class_##_name##_t t = _init; return t; }
273 
/*
 * Declare @var as an instance of class @_name with the destructor attached
 * via __cleanup(); expands such that the constructor's argument list is
 * supplied by the caller: CLASS(name, var)(args...).
 */
274 #define CLASS(_name, var)						\
275 	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
276 		class_##_name##_constructor
277 
278 
279 /*
280  * DEFINE_GUARD(name, type, lock, unlock):
281  *	trivial wrapper around DEFINE_CLASS() above specifically
282  *	for locks.
283  *
284  * DEFINE_GUARD_COND(name, ext, condlock)
285  *	wrapper around EXTEND_CLASS above to add conditional lock
286  *	variants to a base class, eg. mutex_trylock() or
287  *	mutex_lock_interruptible().
288  *
289  * guard(name):
290  *	an anonymous instance of the (guard) class, not recommended for
291  *	conditional locks.
292  *
293  * scoped_guard (name, args...) { }:
294  *	similar to CLASS(name, scope)(args), except the variable (with the
295  *	explicit name 'scope') is declared in a for-loop such that its scope is
296  *	bound to the next (compound) statement.
297  *
298  *	for conditional locks the loop body is skipped when the lock is not
299  *	acquired.
300  *
301  * scoped_cond_guard (name, fail, args...) { }:
302  *      similar to scoped_guard(), except it executes the @fail statement
303  *      when the lock acquire fails.
304  *
305  *      Only for conditional locks.
306  */
307 
/* Per-class flag recording whether this guard's lock attempt can fail. */
308 #define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)	\
309 static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
310 
/*
 * Emit class_##_name##_lock_ptr(): yields the guard's lock cookie @_exp cast
 * to void *; a non-NULL result is what the scoped_guard() loops below treat
 * as "lock is held".
 */
311 #define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
312 	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
313 	{ return (void *)(__force unsigned long)*(_exp); }
314 
/* Mark class @_name as an unconditional guard and emit its lock_ptr() helper. */
315 #define DEFINE_CLASS_IS_GUARD(_name) \
316 	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
317 	__DEFINE_GUARD_LOCK_PTR(_name, _T)
318 
/* As above, but for guards whose lock acquisition may fail. */
319 #define DEFINE_CLASS_IS_COND_GUARD(_name) \
320 	__DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
321 	__DEFINE_GUARD_LOCK_PTR(_name, _T)
322 
/*
 * Lock-guard class: the constructor runs @_lock and stores _T as the cookie;
 * the destructor runs @_unlock only when the cookie is non-NULL.
 */
323 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
324 	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
325 	DEFINE_CLASS_IS_GUARD(_name)
326 
/*
 * Conditional variant: the extended constructor NULLs the cookie when
 * @_condlock does not take the lock, so the base destructor then skips the
 * unlock; lock_ptr() simply forwards to the base class helper.
 */
327 #define DEFINE_GUARD_COND(_name, _ext, _condlock) \
328 	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
329 	EXTEND_CLASS(_name, _ext, \
330 		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
331 		     class_##_name##_t _T) \
332 	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
333 	{ return class_##_name##_lock_ptr(_T); }
334 
/* Anonymous guard instance; the lock is released at end of enclosing scope. */
335 #define guard(_name) \
336 	CLASS(_name, __UNIQUE_ID(guard))
337 
/* Internal shorthands used by the scoped_guard() implementations below. */
338 #define __guard_ptr(_name) class_##_name##_lock_ptr
339 #define __is_cond_ptr(_name) class_##_name##_is_conditional
340 
341 /*
342  * Helper macro for scoped_guard().
343  *
344  * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
345  * compiler would be sure that for the unconditional locks the body of the
346  * loop (caller-provided code glued to the else clause) could not be skipped.
347  * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
348  * hard to deduce (even if could be proven true for unconditional locks).
349  */
/*
 * Single-pass for-loop: declares the guard 'scope', executes the body (glued
 * onto the trailing else) when the loop condition holds, then the increment
 * expression's "goto _label" jumps into the dead if(0) branch whose break
 * terminates the loop -- so 'scope' is destructed exactly once.
 */
350 #define __scoped_guard(_name, _label, args...)				\
351 	for (CLASS(_name, scope)(args);					\
352 	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
353 	     ({ goto _label; }))					\
354 		if (0) {						\
355 _label:									\
356 			break;						\
357 		} else
358 
/* Public wrapper: generates a unique label name per expansion. */
359 #define scoped_guard(_name, args...)	\
360 	__scoped_guard(_name, __UNIQUE_ID(label), args)
361 
/*
 * As __scoped_guard(), but when the lock was not acquired it executes @_fail
 * before breaking out; only valid for conditional guard classes, which the
 * BUILD_BUG_ON() enforces at compile time.
 */
362 #define __scoped_cond_guard(_name, _fail, _label, args...)		\
363 	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
364 		if (!__guard_ptr(_name)(&scope)) {			\
365 			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
366 			_fail;						\
367 _label:									\
368 			break;						\
369 		} else
370 
/* Public wrapper supplying a unique label per expansion. */
371 #define scoped_cond_guard(_name, _fail, args...)	\
372 	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
373 
374 /*
375  * Additional helper macros for generating lock guards with types, either for
376  * locks that don't have a native type (eg. RCU, preempt) or those that need a
377  * 'fat' pointer (eg. spin_lock_irqsave).
378  *
379  * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
380  * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
381  * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
382  *
383  * will result in the following type:
384  *
385  *   typedef struct {
386  *	type *lock;		// 'type := void' for the _0 variant
387  *	__VA_ARGS__;
388  *   } class_##name##_t;
389  *
390  * As above, both _lock and _unlock are statements, except this time '_T' will
391  * be a pointer to the above struct.
392  */
393 
/*
 * Emit the 'fat' guard struct (lock pointer plus optional extra state from
 * __VA_ARGS__), a destructor that runs @_unlock only when the lock pointer
 * is non-NULL, and the matching lock_ptr() helper.
 */
394 #define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
395 typedef struct {							\
396 	_type *lock;							\
397 	__VA_ARGS__;							\
398 } class_##_name##_t;							\
399 									\
400 static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
401 {									\
402 	if (_T->lock) { _unlock; }					\
403 }									\
404 									\
405 __DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
406 
/*
 * Constructor for the one-argument variant: records the lock object in
 * .lock, then runs @_lock with '_T' pointing at the partially-built guard.
 */
407 #define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
408 static inline class_##_name##_t class_##_name##_constructor(_type *l)	\
409 {									\
410 	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
411 	_lock;								\
412 	return _t;							\
413 }
414 
/*
 * Constructor for the zero-argument variant (RCU, preempt, ...): there is no
 * lock object, so .lock is set to the dummy (void *)1 to mark "held".
 */
415 #define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
416 static inline class_##_name##_t class_##_name##_constructor(void)	\
417 {									\
418 	class_##_name##_t _t = { .lock = (void*)1 },			\
419 			 *_T __maybe_unused = &_t;			\
420 	_lock;								\
421 	return _t;							\
422 }
423 
/* Full one-argument lock guard: conditional flag, unlock side, lock side. */
424 #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
425 __DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
426 __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
427 __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
428 
/* Full zero-argument lock guard; the struct's lock member is typed void *. */
429 #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
430 __DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
431 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
432 __DEFINE_LOCK_GUARD_0(_name, _lock)
433 
/*
 * Conditional extension of a DEFINE_LOCK_GUARD_1() class: the constructor
 * NULLs .lock when @_condlock fails, so the destructor skips the unlock;
 * lock_ptr() forwards to the base class helper.
 */
434 #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock)		\
435 	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
436 	EXTEND_CLASS(_name, _ext,					\
437 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
438 		        if (_T->lock && !(_condlock)) _T->lock = NULL;	\
439 			_t; }),						\
440 		     typeof_member(class_##_name##_t, lock) l)		\
441 	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
442 	{ return class_##_name##_lock_ptr(_T); }
443 
444 
445 #endif /* _LINUX_CLEANUP_H */
446