xref: /linux/arch/x86/kernel/fpu/xstate.h (revision db8268df0983adc2bb1fb48c9e5f7bfbb5f617f3)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
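
/*
 * Illustrative usage sketch (an assumption for this note, not code taken
 * from this file): a freshly zeroed XSAVE buffer needs xcomp_bv seeded
 * before it can be restored with XRSTORS, e.g.:
 *
 *	memset(&fps->regs.xsave, 0, fps->size);
 *	xstate_init_xcomp_bv(&fps->regs.xsave, fps->xfeatures);
 *
 * where "fps" stands for a hypothetical struct fpstate pointer.
 */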

static inline u64 xstate_get_host_group_perm(void)
{
	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	return READ_ONCE(current->group_leader->thread.fpu.perm.__state_perm);
}

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
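
/*
 * Encoding note (explanatory addition, not in the upstream file): each
 * sequence above is the bare opcode plus a ModRM byte selecting the
 * required /reg form with (%rdi)/(%edi) as the memory operand. For
 * example, XSAVE is "0F AE /4" and ModRM 0x27 = 00.100.111b encodes
 * mod=0, reg=4, rm=7, i.e. (%rdi). On 64-bit kernels the leading 0x48
 * REX.W prefix turns these into the 64-bit forms (XSAVE64 etc.).
 */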

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the
 * compacted format and supervisor states in addition to the modified
 * optimization which XSAVEOPT already provides.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of
 * the original instruction which gets replaced. We need it here as the
 * address of the instruction at which an exception might be raised.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE or XSAVEOPT or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
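
/*
 * Illustrative caller sketch (an assumption for this note; the real call
 * sites live in the FPU core, not in this header): saving the current
 * task's registers into its fpstate boils down to
 *
 *	if (use_xsave())
 *		os_xsave(current->thread.fpu.fpstate);
 */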

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because XRSTOR/XRSTORS might be
 * tracking a different task's buffer.
 *
 * We don't use the compacted format for the xsave area, for backward
 * compatibility with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	u64 mask = current->thread.fpu.fpstate->user_xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}
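
/*
 * Illustrative caller sketch (an assumption for this note, not code taken
 * from this file): the signal frame setup path is expected to clear the
 * header before saving, since XSAVE* leaves the reserved header fields
 * untouched:
 *
 *	if (__clear_user(&buf->header, sizeof(buf->header)))
 *		return -EFAULT;
 *	err = xsave_to_user_sigframe(buf);
 */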

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * an exception.
 */
static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif /* __X86_KERNEL_FPU_XSTATE_H */