/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Supervisor Mode Access Prevention support
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: H. Peter Anvin <hpa@linux.intel.com>
 */

#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H

#include <asm/nops.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#ifdef __ASSEMBLER__

#define ASM_CLAC \
	ALTERNATIVE "", "clac", X86_FEATURE_SMAP

#define ASM_STAC \
	ALTERNATIVE "", "stac", X86_FEATURE_SMAP
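
/*
 * Sketch of typical use in assembly (illustrative only; example_entry
 * is a hypothetical label): an entry path closes any open userspace
 * access window as one of its first actions:
 *
 *	SYM_FUNC_START(example_entry)
 *		ASM_CLAC
 *		...
 *	SYM_FUNC_END(example_entry)
 */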

#else /* __ASSEMBLER__ */

/*
 * The CLAC/STAC instructions toggle the enforcement of
 * X86_FEATURE_SMAP along with X86_FEATURE_LASS.
 *
 * SMAP enforcement is based on the _PAGE_BIT_USER bit in the page
 * tables. The kernel is not allowed to touch pages with that bit set
 * unless the AC bit is set.
 *
 * Use stac()/clac() when accessing userspace (_PAGE_USER) mappings,
 * regardless of location.
 *
 * Note: a barrier is implicit in alternative().
 */

static __always_inline void clac(void)
{
	alternative("", "clac", X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
	alternative("", "stac", X86_FEATURE_SMAP);
}
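
/*
 * Sketch of the usual pairing (illustrative only; real code normally
 * goes through the uaccess helpers rather than calling these directly):
 *
 *	stac();		// AC=1: user (_PAGE_USER) accesses allowed
 *	...access user memory...
 *	clac();		// AC=0: SMAP enforcement restored
 */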

/*
 * LASS enforcement is based on bit 63 of the virtual address. The
 * kernel is not allowed to touch memory in the lower half of the
 * virtual address space.
 *
 * Use lass_stac()/lass_clac() to toggle the AC bit for kernel data
 * accesses (!_PAGE_USER) that are blocked by LASS, but not by SMAP.
 *
 * Even with the AC bit set, LASS will continue to block instruction
 * fetches from the user half of the address space. To allow those,
 * clear CR4.LASS to disable the LASS mechanism entirely.
 *
 * Note: a barrier is implicit in alternative().
 */

static __always_inline void lass_clac(void)
{
	alternative("", "clac", X86_FEATURE_LASS);
}

static __always_inline void lass_stac(void)
{
	alternative("", "stac", X86_FEATURE_LASS);
}
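
/*
 * Sketch (illustrative only): wrapping a kernel data access through a
 * lower-half (!_PAGE_USER) mapping that LASS would otherwise block:
 *
 *	lass_stac();	// AC=1: suspend LASS data-access checks
 *	...touch the lower-half kernel mapping...
 *	lass_clac();	// AC=0: LASS enforcement restored
 */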

static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile ("# smap_save\n\t"
		      ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
				  "", "pushf; pop %0; clac",
				  X86_FEATURE_SMAP)
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile ("# smap_restore\n\t"
		      ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
				  "", "push %0; popf",
				  X86_FEATURE_SMAP)
		      : : "g" (flags) : "memory", "cc");
}
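
/*
 * Sketch of the save/restore pattern (illustrative only): for code
 * that may be entered with either AC state and must preserve it across
 * a region where SMAP has to be enforced:
 *
 *	unsigned long flags = smap_save();	// record EFLAGS.AC, then CLAC
 *	...region that must run with AC clear...
 *	smap_restore(flags);			// restore the saved AC state
 */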

/* These macros can be used in asm() statements */
#define ASM_CLAC \
	ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
	ALTERNATIVE("", "stac", X86_FEATURE_SMAP)

#define ASM_CLAC_UNSAFE \
	ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "clac", X86_FEATURE_SMAP)
#define ASM_STAC_UNSAFE \
	ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "stac", X86_FEATURE_SMAP)
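
/*
 * Sketch of an asm() statement using these macros (illustrative only;
 * "val" and "uaddr" are hypothetical operands):
 *
 *	asm volatile(ASM_STAC "\n\t"
 *		     "movl %1, %0\n\t"
 *		     ASM_CLAC
 *		     : "=r" (val) : "m" (*uaddr));
 */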

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_SMAP_H */