/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Supervisor Mode Access Prevention support
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: H. Peter Anvin <hpa@linux.intel.com>
 */

#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H

#include <asm/nops.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/*
 * "Raw" instruction opcodes: CLAC and STAC are spelled out as bytes
 * so the header builds even with assemblers that lack the mnemonics.
 */
#define __ASM_CLAC	".byte 0x0f,0x01,0xca"
#define __ASM_STAC	".byte 0x0f,0x01,0xcb"

#ifdef __ASSEMBLY__

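/*
 * With ALTERNATIVE the CLAC/STAC sites assemble to NOP padding; at
 * boot the alternatives machinery patches the real instruction in on
 * CPUs that have X86_FEATURE_SMAP.
 */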
#define ASM_CLAC \
	ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP

#define ASM_STAC \
	ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP

#else /* __ASSEMBLY__ */

static __always_inline void clac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}

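/*
 * A sketch of the usual pairing (the real user-copy helpers live in
 * <asm/uaccess.h>; "uval" and "uptr" are hypothetical):
 *
 *	stac();			// set AC: open the user-access window
 *	uval = *uptr;		// touch user memory
 *	clac();			// clear AC: close the window again
 */

/*
 * smap_save() returns the current EFLAGS, so the caller can later put
 * the AC bit back exactly as it was, and then executes CLAC to close
 * any open user-access window; smap_restore() reloads the saved flags.
 */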
static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile ("# smap_save\n\t"
		      ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC "\n\t",
				  X86_FEATURE_SMAP)
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile ("# smap_restore\n\t"
		      ALTERNATIVE("", "push %0; popf\n\t",
				  X86_FEATURE_SMAP)
		      : : "g" (flags) : "memory", "cc");
}

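/*
 * A sketch of the save/restore pattern, for code that must call out
 * of a user-access region (do_something_unsafe() is a hypothetical
 * stand-in):
 *
 *	unsigned long flags = smap_save();	// EFLAGS saved, AC cleared
 *	do_something_unsafe();			// runs with SMAP enforced
 *	smap_restore(flags);			// AC back to its prior state
 */
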
/* These macros can be used in asm() statements */
#define ASM_CLAC \
	ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
	ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
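
/*
 * Example use inside an asm() statement (a sketch; "val" and "uaddr"
 * are hypothetical, and a real user access would also need exception
 * table handling):
 *
 *	asm volatile(ASM_STAC "\n\t"
 *		     "movq %1, %0\n\t"
 *		     ASM_CLAC
 *		     : "=r" (val) : "m" (*uaddr));
 */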

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_SMAP_H */