1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
260a5317fSTejun Heo /*
360a5317fSTejun Heo * GCC stack protector support.
460a5317fSTejun Heo *
560a5317fSTejun Heo * Stack protector works by putting predefined pattern at the start of
660a5317fSTejun Heo * the stack frame and verifying that it hasn't been overwritten when
760a5317fSTejun Heo * returning from the function. The pattern is called stack canary
83fb0fdb3SAndy Lutomirski * and unfortunately gcc historically required it to be at a fixed offset
93fb0fdb3SAndy Lutomirski * from the percpu segment base. On x86_64, the offset is 40 bytes.
1060a5317fSTejun Heo *
113fb0fdb3SAndy Lutomirski * The same segment is shared by percpu area and stack canary. On
123fb0fdb3SAndy Lutomirski * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
133fb0fdb3SAndy Lutomirski * base of percpu area. The first occupant of the percpu area is always
14c4342633SIngo Molnar * fixed_percpu_data which contains stack_canary at the appropriate
153fb0fdb3SAndy Lutomirski * offset. On x86_32, the stack canary is just a regular percpu
163fb0fdb3SAndy Lutomirski * variable.
1760a5317fSTejun Heo *
183fb0fdb3SAndy Lutomirski * Putting percpu data in %fs on 32-bit is a minor optimization compared to
193fb0fdb3SAndy Lutomirski * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
203fb0fdb3SAndy Lutomirski * to load 0 into %fs on exit to usermode, whereas with percpu data in
213fb0fdb3SAndy Lutomirski * %gs, we are likely to load a non-null %gs on return to user mode.
2260a5317fSTejun Heo *
233fb0fdb3SAndy Lutomirski * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
243fb0fdb3SAndy Lutomirski * support, we can remove some of this complexity.
2560a5317fSTejun Heo */
2660a5317fSTejun Heo
27b2b062b8SIngo Molnar #ifndef _ASM_STACKPROTECTOR_H
28b2b062b8SIngo Molnar #define _ASM_STACKPROTECTOR_H 1
29b2b062b8SIngo Molnar
30050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
3176397f72STejun Heo
32b2b062b8SIngo Molnar #include <asm/tsc.h>
33947e76cdSBrian Gerst #include <asm/processor.h>
3476397f72STejun Heo #include <asm/percpu.h>
3560a5317fSTejun Heo #include <asm/desc.h>
36952f07ecSIngo Molnar
37952f07ecSIngo Molnar #include <linux/sched.h>
38b2b062b8SIngo Molnar
39b2b062b8SIngo Molnar /*
40b2b062b8SIngo Molnar * Initialize the stackprotector canary value.
41b2b062b8SIngo Molnar *
42a9a3ed1eSBorislav Petkov * NOTE: this must only be called from functions that never return
43b2b062b8SIngo Molnar * and it must always be inlined.
44a9a3ed1eSBorislav Petkov *
45a9a3ed1eSBorislav Petkov * In addition, it should be called from a compilation unit for which
46a9a3ed1eSBorislav Petkov * stack protector is disabled. Alternatively, the caller should not end
47a9a3ed1eSBorislav Petkov * with a function call which gets tail-call optimized as that would
48a9a3ed1eSBorislav Petkov * lead to checking a modified canary value.
49b2b062b8SIngo Molnar */
static __always_inline void boot_init_stack_canary(void)
{
	/* Fresh random canary for the boot CPU. */
	unsigned long canary = get_random_canary();

#ifdef CONFIG_X86_64
	/*
	 * 64-bit GCC hard-codes the canary location as %gs:40, so
	 * stack_canary must stay at offset 40 within fixed_percpu_data
	 * (see the comment at the top of this file).
	 */
	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
#endif

	/* Record the canary in the task struct as well. */
	current->stack_canary = canary;
#ifdef CONFIG_X86_64
	/* Publish it in the percpu slot the compiler-emitted checks read. */
	this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
	/* On 32-bit the canary is just an ordinary percpu variable. */
	this_cpu_write(__stack_chk_guard, canary);
#endif
}
6560a5317fSTejun Heo
/*
 * Copy @idle's stack canary into @cpu's percpu canary slot
 * (fixed_percpu_data.stack_canary on 64-bit, __stack_chk_guard on 32-bit)
 * before that CPU starts running the idle task.
 */
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
#ifdef CONFIG_X86_64
	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
#else
	per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
#endif
}
7460a5317fSTejun Heo
75050e9baaSLinus Torvalds #else /* STACKPROTECTOR */
7660a5317fSTejun Heo
7760a5317fSTejun Heo /* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
7860a5317fSTejun Heo
/* No-op stub for !CONFIG_STACKPROTECTOR builds: no canary to install. */
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{ }
82050e9baaSLinus Torvalds #endif /* STACKPROTECTOR */
8376397f72STejun Heo #endif /* _ASM_STACKPROTECTOR_H */
84