/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLEANUP_H
#define _LINUX_CLEANUP_H

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/args.h>

/**
 * DOC: scope-based cleanup helpers
 *
 * The "goto error" pattern is notorious for introducing subtle resource
 * leaks. It is tedious and error prone to add new resource acquisition
 * constraints into code paths that already have several unwind
 * conditions. The "cleanup" helpers enable the compiler to help with
 * this tedium and can aid in maintaining LIFO (last in first out)
 * unwind ordering to avoid unintentional leaks.
 *
 * As drivers make up the majority of the kernel code base, here is an
 * example of using these helpers to clean up PCI drivers. The targets
 * of the cleanups are occasions where a goto is used to unwind a device
 * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
 * before returning.
 *
 * The DEFINE_FREE() macro can arrange for PCI device references to be
 * dropped when the associated variable goes out of scope::
 *
 *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
 *	...
 *	struct pci_dev *dev __free(pci_dev_put) =
 *		pci_get_slot(parent, PCI_DEVFN(0, 0));
 *
 * The above will automatically call pci_dev_put() if @dev is non-NULL
 * when @dev goes out of scope (automatic variable scope). If a function
 * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
 * freeing it) on success, it can do::
 *
 *	return no_free_ptr(dev);
 *
 * ...or::
 *
 *	return_ptr(dev);
 *
 * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
 * dropped when the scope where guard() is invoked ends::
 *
 *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
 *	...
 *	guard(pci_dev)(dev);
 *
 * The lifetime of the lock obtained by the guard() helper follows the
 * scope of automatic variable declaration. Take the following example::
 *
 *	func(...)
 *	{
 *		if (...) {
 *			...
 *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
 *			...
 *		} // <- implied pci_dev_unlock() triggered here
 *	}
 *
 * Observe that the lock is held for the remainder of the "if ()" block,
 * not the remainder of "func()".
 *
 * The ACQUIRE() macro can be used in all places that guard() can be
 * used and additionally supports conditional locks::
 *
 *	DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
 *	...
 *	ACQUIRE(pci_dev_try, lock)(dev);
 *	rc = ACQUIRE_ERR(pci_dev_try, &lock);
 *	if (rc)
 *		return rc;
 *	// @lock is held
 *
 * Now, when a function uses both __free() and guard()/ACQUIRE(), or
 * multiple instances of __free(), the LIFO order of variable definition
 * matters. GCC documentation says:
 *
 * "When multiple variables in the same scope have cleanup attributes,
 * at exit from the scope their associated cleanup functions are run in
 * reverse order of definition (last defined, first cleanup)."
 *
 * When the unwind order matters it requires that variables be defined
 * mid-function scope rather than at the top of the function. Take the
 * following example and notice the bug highlighted by "!!"::
 *
 *	LIST_HEAD(list);
 *	DEFINE_MUTEX(lock);
 *
 *	struct object {
 *	        struct list_head node;
 *	};
 *
 *	static struct object *alloc_add(void)
 *	{
 *	        struct object *obj;
 *
 *	        lockdep_assert_held(&lock);
 *	        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	        if (obj) {
 *	                INIT_LIST_HEAD(&obj->node);
 *	                list_add(&obj->node, &list);
 *	        }
 *	        return obj;
 *	}
 *
 *	static void remove_free(struct object *obj)
 *	{
 *	        lockdep_assert_held(&lock);
 *	        list_del(&obj->node);
 *	        kfree(obj);
 *	}
 *
 *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
 *	static int init(void)
 *	{
 *	        struct object *obj __free(remove_free) = NULL;
 *	        int err;
 *
 *	        guard(mutex)(&lock);
 *	        obj = alloc_add();
 *
 *	        if (!obj)
 *	                return -ENOMEM;
 *
 *	        err = other_init(obj);
 *	        if (err)
 *	                return err; // remove_free() called without the lock!!
 *
 *	        no_free_ptr(obj);
 *	        return 0;
 *	}
 *
 * That bug is fixed by changing init() to call guard() and define +
 * initialize @obj in this order::
 *
 *	guard(mutex)(&lock);
 *	struct object *obj __free(remove_free) = alloc_add();
 *
 * Given that the "__free(...) = NULL" pattern for variables defined at
 * the top of the function poses this potential interdependency problem,
 * the recommendation is to always define and assign variables in one
 * statement and not group variable definitions at the top of the
 * function when __free() is used.
 *
 * Lastly, given that the benefit of cleanup helpers is removal of
 * "goto", and that the "goto" statement can jump between scopes, the
 * expectation is that usage of "goto" and cleanup helpers is never
 * mixed in the same function. I.e. for a given routine, convert all
 * resources that need a "goto" cleanup to scope-based cleanup, or
 * convert none of them.
 */

/*
 * DEFINE_FREE(name, type, free):
 *	simple helper macro that defines the required wrapper for a __free()
 *	based cleanup function. @free is an expression using '_T' to access the
 *	variable. @free should typically include a NULL test before calling a
 *	function, see the example below.
 *
 * __free(name):
 *	variable attribute to add a scope-based cleanup to the variable.
 *
 * no_free_ptr(var):
 *	like a non-atomic xchg(var, NULL), such that the cleanup function will
 *	be inhibited -- provided it sanely deals with a NULL value.
 *
 *	NOTE: this has __must_check semantics so that it is harder to accidentally
 *	leak the resource.
 *
 * return_ptr(p):
 *	returns p while inhibiting the __free().
 *
 * Ex.
 *
 * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
 *
 * void *alloc_obj(...)
 * {
 *	struct obj *p __free(kfree) = kmalloc(...);
 *	if (!p)
 *		return NULL;
 *
 *	if (!init_obj(p))
 *		return NULL;
 *
 *	return_ptr(p);
 * }
 *
 * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
 * kfree() is fine to be called with a NULL value. This is on purpose. This way
 * the compiler sees the end of our alloc_obj() function as:
 *
 *	tmp = p;
 *	p = NULL;
 *	if (p)
 *		kfree(p);
 *	return tmp;
 *
 * And through the magic of value-propagation and dead-code-elimination, it
 * eliminates the actual cleanup call and compiles into:
 *
 *	return p;
 *
 * Without the NULL test it turns into a mess and the compiler can't help us.
 */

#define DEFINE_FREE(_name, _type, _free) \
	static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }

#define __free(_name)	__cleanup(__free_##_name)

#define __get_and_null(p, nullvalue)   \
	({                                  \
		__auto_type __ptr = &(p);   \
		__auto_type __val = *__ptr; \
		*__ptr = nullvalue;         \
		__val;                      \
	})

static __always_inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }

#define no_free_ptr(p) \
	((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))

#define return_ptr(p)	return no_free_ptr(p)

/*
 * Only for situations where an allocation is handed in to another function
 * and consumed by that function on success.
 *
 *	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	setup(f);
 *	if (some_condition)
 *		return -EINVAL;
 *	....
 *	ret = bar(f);
 *	if (!ret)
 *		retain_and_null_ptr(f);
 *	return ret;
 *
 * After retain_and_null_ptr(f) the variable f is NULL and cannot be
 * dereferenced anymore.
 */
#define retain_and_null_ptr(p)		((void)__get_and_null(p, NULL))

/*
 * DEFINE_CLASS(name, type, exit, init, init_args...):
 *	helper to define the destructor and constructor for a type.
 *	@exit is an expression using '_T' -- similar to FREE above.
 *	@init is an expression in @init_args resulting in @type
 *
 * EXTEND_CLASS(name, ext, init, init_args...):
 *	extends class @name to @name@ext with the new constructor
 *
 * CLASS(name, var)(args...):
 *	declare the variable @var as an instance of the named class
 *
 * CLASS_INIT(name, var, init_expr):
 *	declare the variable @var as an instance of the named class with a
 *	custom initialization expression.
 *
 * Ex.
 *
 * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
 *
 *	CLASS(fdget, f)(fd);
 *	if (fd_empty(f))
 *		return -EBADF;
 *
 *	// use 'f' without concern
 */

#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
typedef _type class_##_name##_t;					\
static __always_inline void class_##_name##_destructor(_type *p)	\
{ _type _T = *p; _exit; }						\
static __always_inline _type class_##_name##_constructor(_init_args)	\
{ _type t = _init; return t; }

#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
typedef class_##_name##_t class_##_name##ext##_t;			\
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); }					\
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
{ class_##_name##_t t = _init; return t; }

#define CLASS(_name, var)						\
	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
		class_##_name##_constructor

#define CLASS_INIT(_name, _var, _init_expr)                             \
        class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr)
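
/*
 * Ex. (illustrative sketch, reusing the fdget class from the example
 * above; the initializer expression bypasses the class constructor and
 * is assigned verbatim)
 *
 *	CLASS_INIT(fdget, f, fdget(fd));
 *	if (fd_empty(f))
 *		return -EBADF;
 */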

#define __scoped_class(_name, var, _label, args...)        \
	for (CLASS(_name, var)(args); ; ({ goto _label; })) \
		if (0) {                                   \
_label:                                                    \
			break;                             \
		} else

#define scoped_class(_name, var, args...) \
	__scoped_class(_name, var, __UNIQUE_ID(label), args)
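
/*
 * Ex. (illustrative sketch, again reusing the fdget class; the instance
 * 'f' is scoped to the following compound statement and destroyed when
 * that block is left)
 *
 *	scoped_class(fdget, f, fd) {
 *		if (fd_empty(f))
 *			return -EBADF;
 *		// use 'f' here
 *	}
 */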

/*
 * DEFINE_GUARD(name, type, lock, unlock):
 *	trivial wrapper around DEFINE_CLASS() above specifically
 *	for locks.
 *
 * DEFINE_GUARD_COND(name, ext, condlock[, cond]):
 *	wrapper around EXTEND_CLASS above to add conditional lock
 *	variants to a base class, eg. mutex_trylock() or
 *	mutex_lock_interruptible().
 *
 * guard(name):
 *	an anonymous instance of the (guard) class, not recommended for
 *	conditional locks.
 *
 * scoped_guard (name, args...) { }:
 *	similar to CLASS(name, scope)(args), except the variable (with the
 *	explicit name 'scope') is declared in a for-loop such that its scope is
 *	bound to the next (compound) statement.
 *
 *	For conditional locks the loop body is skipped when the lock is not
 *	acquired.
 *
 * scoped_cond_guard (name, fail, args...) { }:
 *	similar to scoped_guard(), except the @fail statement is executed
 *	when the lock cannot be acquired.
 *
 *	Only for conditional locks.
 *
 * ACQUIRE(name, var):
 *	a named instance of the (guard) class, suitable for conditional
 *	locks when paired with ACQUIRE_ERR().
 *
 * ACQUIRE_ERR(name, &var):
 *	a helper that is effectively a PTR_ERR() conversion of the guard
 *	pointer. Returns 0 when the lock was acquired and a negative
 *	error code otherwise.
 */

#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)	\
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond

#define DEFINE_CLASS_IS_UNCONDITIONAL(_name)		\
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false);	\
	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{ return (void *)1; }

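/*
 * Evaluates true when @_ptr is NULL or lies in the IS_ERR() range. The
 * '- 1' wraps NULL (0) around to ULONG_MAX, so a single unsigned compare
 * catches both NULL and ERR_PTR() values.
 */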
#define __GUARD_IS_ERR(_ptr)                                       \
	({                                                         \
		unsigned long _rc = (__force unsigned long)(_ptr); \
		unlikely((_rc - 1) >= -MAX_ERRNO - 1);             \
	})

#define __DEFINE_GUARD_LOCK_PTR(_name, _exp)                                \
	static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{                                                                   \
		void *_ptr = (void *)(__force unsigned long)*(_exp);        \
		if (IS_ERR(_ptr)) {                                         \
			_ptr = NULL;                                        \
		}                                                           \
		return _ptr;                                                \
	}                                                                   \
	static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
	{                                                                   \
		long _rc = (__force unsigned long)*(_exp);                  \
		if (!_rc) {                                                 \
			_rc = -EBUSY;                                       \
		}                                                           \
		if (!IS_ERR_VALUE(_rc)) {                                   \
			_rc = 0;                                            \
		}                                                           \
		return _rc;                                                 \
	}

#define DEFINE_CLASS_IS_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_CLASS_IS_COND_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
	DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
	DEFINE_CLASS_IS_GUARD(_name)

#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
	EXTEND_CLASS(_name, _ext, \
		     ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
		     class_##_name##_t _T) \
	static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); } \
	static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

/*
 * Default binary condition; success on 'true'.
 */
#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
	DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)

#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
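
/*
 * Ex. (illustrative sketch; linux/mutex.h defines guard classes along
 * these lines)
 *
 *	DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
 *	DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
 *	DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
 *
 * The 3-argument form succeeds when the lock expression returns non-zero;
 * the 4-argument form evaluates the explicit condition against '_RET',
 * the value returned by the lock expression.
 */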

#define guard(_name) \
	CLASS(_name, __UNIQUE_ID(guard))

#define __guard_ptr(_name) class_##_name##_lock_ptr
#define __guard_err(_name) class_##_name##_lock_err
#define __is_cond_ptr(_name) class_##_name##_is_conditional

#define ACQUIRE(_name, _var)     CLASS(_name, _var)
#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)
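
/*
 * Ex. (illustrative sketch, assuming the mutex_intr guard variant from
 * the previous example; 'my_mutex' is a placeholder)
 *
 *	int err;
 *
 *	ACQUIRE(mutex_intr, lock)(&my_mutex);
 *	err = ACQUIRE_ERR(mutex_intr, &lock);
 *	if (err)
 *		return err;
 *	// &my_mutex is held until 'lock' goes out of scope
 */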

/*
 * Helper macro for scoped_guard().
 *
 * Note that the "!__is_cond_ptr(_name)" part of the condition lets the
 * compiler prove that, for unconditional locks, the body of the loop
 * (caller-provided code glued to the else clause) can never be skipped.
 * That is needed because the other part - "__guard_ptr(_name)(&scope)" -
 * is too hard to deduce (even if it could be proven true for
 * unconditional locks).
 */
#define __scoped_guard(_name, _label, args...)				\
	for (CLASS(_name, scope)(args);					\
	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
	     ({ goto _label; }))					\
		if (0) {						\
_label:									\
			break;						\
		} else

#define scoped_guard(_name, args...)	\
	__scoped_guard(_name, __UNIQUE_ID(label), args)
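
/*
 * Ex. (illustrative sketch, assuming the mutex guard classes sketched
 * above)
 *
 *	scoped_guard (mutex, &lock) {
 *		// &lock is held here, dropped at the closing brace
 *	}
 *
 *	scoped_guard (mutex_try, &lock) {
 *		// only entered when mutex_trylock() succeeded
 *	}
 */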

#define __scoped_cond_guard(_name, _fail, _label, args...)		\
	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
		if (!__guard_ptr(_name)(&scope)) {			\
			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
			_fail;						\
_label:									\
			break;						\
		} else

#define scoped_cond_guard(_name, _fail, args...)	\
	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
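
/*
 * Ex. (illustrative sketch, again assuming the mutex_intr variant)
 *
 *	scoped_cond_guard (mutex_intr, return -EINTR, &lock) {
 *		// only entered when the lock was acquired
 *	}
 */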

/*
 * Additional helper macros for generating lock guards with types, either for
 * locks that don't have a native type (eg. RCU, preempt) or those that need a
 * 'fat' pointer (eg. spin_lock_irqsave).
 *
 * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock[, cond])
 *
 * will result in the following type:
 *
 *   typedef struct {
 *	type *lock;		// 'type := void' for the _0 variant
 *	__VA_ARGS__;
 *   } class_##name##_t;
 *
 * As above, both _lock and _unlock are statements, except this time '_T' will
 * be a pointer to the above struct.
 */

#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
typedef struct {							\
	_type *lock;							\
	__VA_ARGS__;							\
} class_##_name##_t;							\
									\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
{									\
	if (!__GUARD_IS_ERR(_T->lock)) { _unlock; }			\
}									\
									\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)

#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
{									\
	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
	_lock;								\
	return _t;							\
}

#define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
{									\
	class_##_name##_t _t = { .lock = (void*)1 },			\
			 *_T __maybe_unused = &_t;			\
	_lock;								\
	return _t;							\
}

#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)

#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_0(_name, _lock)
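
/*
 * Ex. (illustrative sketches; linux/preempt.h and linux/spinlock.h
 * define guards along these lines)
 *
 *	DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
 *
 *	DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
 *			    spin_lock_irqsave(_T->lock, _T->flags),
 *			    spin_unlock_irqrestore(_T->lock, _T->flags),
 *			    unsigned long flags)
 *
 * The trailing 'unsigned long flags' lands in the class struct, making
 * it the 'fat' pointer that carries the saved interrupt state alongside
 * the lock.
 */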

#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond)		\
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
	EXTEND_CLASS(_name, _ext,					\
		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
		        int _RET = (_lock);                             \
		        if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
			_t; }),						\
		     typeof_member(class_##_name##_t, lock) l)		\
	static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); } \
	static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
	{ return class_##_name##_lock_err(_T); }

#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
	DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)

#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
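
/*
 * Ex. (illustrative sketch; linux/spinlock.h extends the spinlock guard
 * along these lines)
 *
 *	DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
 *	...
 *	scoped_guard (spinlock_try, &lock) {
 *		// only entered when spin_trylock() succeeded
 *	}
 */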

#endif /* _LINUX_CLEANUP_H */