/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}

extern void __copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
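/*
 * Encoding note (added for clarity): the instructions are emitted as raw
 * bytes so they assemble even with toolchains which do not know the
 * mnemonics. 0x0f,0xae and 0x0f,0xc7 are opcode groups; the ModRM byte
 * selects the instruction via its reg field and encodes the (%edi)/(%rdi)
 * memory operand via mod=00, rm=111. E.g. 0x27 = 00.100.111 -> /4 = XSAVE,
 * 0x37 -> /6 = XSAVEOPT, 0x2f -> /5 (XRSTOR in the 0xae group, XSAVES in
 * the 0xc7 group) and 0x1f -> /3 = XRSTORS. The REX.W prefix (0x48)
 * selects the 64-bit forms (XSAVE64 etc.) on x86-64.
 */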
/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We use it here as the address of
 * the instruction at which an exception might be raised.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}
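/*
 * Illustrative sketch (added for clarity; not an actual caller, and the
 * embedding struct layout is an assumption): the FPU core pairs the two
 * wrappers above roughly like
 *
 *	os_xsave(&fpu->state.xsave);
 *	...
 *	os_xrstor(&fpu->state.xsave, xfeatures_mask_all);
 *
 * Both operate on kernel buffers, so no stac()/clac() bracketing is
 * needed; os_xsave() warns when the save faults because faulting on a
 * kernel buffer is a kernel bug.
 */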
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format xsave area for backward compatibility
 * with old applications which don't understand the compacted format of the
 * xsave area.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	u64 mask = xfeatures_mask_uabi();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead
 * of an exception.
 */
static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif