xref: /linux/include/linux/randomize_kstack.h (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef _LINUX_RANDOMIZE_KSTACK_H
3 #define _LINUX_RANDOMIZE_KSTACK_H
4 
5 #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
6 #include <linux/kernel.h>
7 #include <linux/jump_label.h>
8 #include <linux/percpu-defs.h>
9 #include <linux/prandom.h>
10 
/*
 * Static key gating kstack offset randomization. Its default
 * enabled/disabled state follows CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT;
 * the key itself is defined (and presumably toggled, e.g. via a boot
 * parameter) in the corresponding .c file — not visible here.
 */
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
			 randomize_kstack_offset);
13 
14 /*
15  * Do not use this anywhere else in the kernel. This is used here because
16  * it provides an arch-agnostic way to grow the stack with correct
17  * alignment. Also, since this use is being explicitly masked to a max of
18  * 10 bits, stack-clash style attacks are unlikely. For more details see
19  * "VLAs" in Documentation/process/deprecated.rst
20  *
21  * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
22  * only with Clang and not GCC). Initializing the unused area on each syscall
23  * entry is expensive, and generating an implicit call to memset() may also be
24  * problematic (such as in noinstr functions). Therefore, if the compiler
25  * supports it (which it should if it initializes allocas), always use the
26  * "uninitialized" variant of the builtin.
27  */
/* Skip implicit zero-init of the padding: it is never read (see above). */
#if __has_builtin(__builtin_alloca_uninitialized)
#define __kstack_alloca __builtin_alloca_uninitialized
#else
#define __kstack_alloca __builtin_alloca
#endif
33 
34 /*
35  * Use, at most, 6 bits of entropy (on 64-bit; 8 on 32-bit). This cap is
36  * to keep the "VLA" from being unbounded (see above). Additionally clear
37  * the bottom 4 bits (on 64-bit systems, 2 for 32-bit), since stack
38  * alignment will always be at least word size. This makes the compiler
39  * code gen better when it is applying the actual per-arch alignment to
40  * the final offset. The resulting randomness is reasonable without overly
41  * constraining usable stack space.
42  */
#ifdef CONFIG_64BIT
/* Keep bits 4-9: 6 bits of entropy, offsets 0..1008 in 16-byte steps. */
#define KSTACK_OFFSET_MAX(x)	((x) & 0b1111110000)
#else
/* Keep bits 2-9: 8 bits of entropy, offsets 0..1020 in 4-byte steps. */
#define KSTACK_OFFSET_MAX(x)	((x) & 0b1111111100)
#endif
48 
49 DECLARE_PER_CPU(struct rnd_state, kstack_rnd_state);
50 
51 static __always_inline u32 get_kstack_offset(void)
52 {
53 	struct rnd_state *state;
54 	u32 rnd;
55 
56 	state = &get_cpu_var(kstack_rnd_state);
57 	rnd = prandom_u32_state(state);
58 	put_cpu_var(kstack_rnd_state);
59 
60 	return rnd;
61 }
62 
/**
 * add_random_kstack_offset - Increase stack utilization by a random offset.
 *
 * Grows the current stack frame by a bounded per-call random amount (the
 * PRNG draw masked through KSTACK_OFFSET_MAX), so the kernel stack depth
 * at which the syscall body runs varies from call to call. When the
 * randomize_kstack_offset static branch is off, the whole body is
 * patched out.
 *
 * This should be used in the syscall entry path after user registers have been
 * stored to the stack. Preemption may be enabled. For testing the resulting
 * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
 */
#define add_random_kstack_offset() do {					\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = get_kstack_offset();			\
		u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep allocation even after "ptr" loses scope. */	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	}								\
} while (0)
79 
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
/* Feature compiled out: the entry-path hook becomes a no-op. */
#define add_random_kstack_offset()		do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
83 
84 #endif
85