/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
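/*
 * Illustrative only: XCOMP_BV_COMPACTED_FORMAT is bit 63 of xcomp_bv, so a
 * call such as
 *
 *	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
 *
 * leaves xcomp_bv = BIT_ULL(63) | fpstate->xfeatures on XSAVES capable
 * CPUs, marking the buffer as compacted format.
 */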

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};
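/*
 * Rough meaning of the copy modes (informal summary; the regset and signal
 * code are the authoritative users): FP copies only the legacy i387 state,
 * FX the FXSAVE layout, XSAVE the full uABI xstate layout.
 */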

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(void);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
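/*
 * Example (illustrative, not a new caller): look up the in-buffer address
 * of one component, e.g.
 *
 *	u32 *pkru = get_xsave_addr(&fpstate->regs.xsave, XFEATURE_PKRU);
 *
 * which returns NULL when the feature is not present in the buffer.
 */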

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
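/*
 * Decoding aid (informational): the 0x48 REX.W prefix on 64-bit selects the
 * 64-bit (XSAVE64 etc.) forms. The last byte is the ModRM byte, e.g. 0x27 is
 * mod=00, reg=/4, rm=111, i.e. the /4 opcode extension of 0f,ae (XSAVE) with
 * (%rdi)/(%edi) as the memory operand; 0x37 selects /6 (XSAVEOPT), 0x2f
 * selects /5 (XSAVES, XRSTOR) and 0x1f selects /3 (XRSTORS).
 */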

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
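/*
 * Usage example (taken from the wrappers further down, not a new interface):
 *
 *	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 *
 * The requested-feature bitmap is passed to the instructions in EDX:EAX,
 * hence the 64-bit mask is split into lmask/hmask and bound to the "a"/"d"
 * constraints.
 */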

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the
 * compacted format and supervisor states in addition to the modified
 * optimization which XSAVEOPT provides.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because XSAVEOPT
 * supports the modified optimization which plain XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which an exception might be raised.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
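/*
 * Illustrative call site (assumed, based on the fpstate usage elsewhere in
 * this header): saving the current task's register state would look like
 *
 *	os_xsave(current->thread.fpu.fpstate);
 *
 * which requests exactly the components enabled in fpstate->xfeatures.
 */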

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format for the user space xsave area to stay
 * backward compatible with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	u64 mask = current->thread.fpu.fpstate->user_xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

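	/* stac()/clac() temporarily permit user space accesses under SMAP */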
	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
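	/*
	 * The __force cast drops the __user annotation for the asm constraints;
	 * the actual user space access is still bracketed by stac()/clac().
	 */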
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif