/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H

#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>

DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
			 randomize_kstack_offset);
DECLARE_PER_CPU(u32, kstack_offset);
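
/*
 * The static key lets the feature be toggled with near-zero overhead
 * when disabled; its default state follows
 * CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT and can be overridden at boot
 * via the "randomize_kstack_offset=" parameter. kstack_offset holds
 * each CPU's pending offset bits for the next syscall.
 */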

/*
 * Do not use this anywhere else in the kernel. This is used here because
 * it provides an arch-agnostic way to grow the stack with correct
 * alignment. Also, since the allocation size is explicitly masked to at
 * most 10 bits, stack-clash style attacks are unlikely. For more details
 * see "VLAs" in Documentation/process/deprecated.rst
 */
void *__builtin_alloca(size_t size);
/*
 * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
 * "VLA" from being unbounded (see above). 10 bits leaves enough room for
 * per-arch offset masks to reduce entropy (by removing higher bits, since
 * high entropy may overly constrain usable stack space), and for
 * compiler/arch-specific stack alignment to remove the lower bits.
 */
#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)
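
/*
 * Worked example (illustrative arithmetic, not a statement about any
 * particular architecture): the mask keeps offsets in the 0-1023 byte
 * range, and if the compiler keeps the stack 16-byte aligned, the
 * alloca() below is rounded up to a multiple of 16, leaving
 * 1024 / 16 = 64 distinct offsets, i.e. about 6 bits of effective
 * entropy:
 *
 *	KSTACK_OFFSET_MAX(0x12345678) == 0x278 (632 bytes requested,
 *	rounded up to 640 under 16-byte stack alignment)
 */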

/*
 * These macros must be used during syscall entry while interrupts and
 * preemption are disabled, and after the user registers have been stored
 * to the stack.
 */
#define add_random_kstack_offset() do {					\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep allocation even after "ptr" loses scope. */	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	}								\
} while (0)
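
/*
 * Sketch of intended use at syscall entry (hypothetical arch code for
 * illustration only; the function name and surrounding details are not
 * from this file). The offset is consumed before the syscall handler
 * runs, so the handler's stack frame starts at a randomized depth:
 *
 *	noinstr void arch_do_syscall(struct pt_regs *regs)
 *	{
 *		add_random_kstack_offset();
 *		...dispatch to the syscall handler...
 *	}
 */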

#define choose_random_kstack_offset(rand) do {				\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		offset ^= (rand);					\
		raw_cpu_write(kstack_offset, offset);			\
	}								\
} while (0)
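
/*
 * Sketch of re-seeding for the *next* syscall (illustrative;
 * cycle_counter_read() is a hypothetical stand-in for whatever cheap
 * per-syscall entropy an architecture has available, e.g. a timestamp
 * counter read on the syscall exit path):
 *
 *	choose_random_kstack_offset(cycle_counter_read());
 *
 * XOR-ing into the previous per-cpu value lets the stored offset
 * accumulate entropy across syscalls rather than directly exposing a
 * single read of the entropy source.
 */
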
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset()		do { } while (0)
#define choose_random_kstack_offset(rand)	do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */

#endif