/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
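
/*
 * Illustrative sketch (not from the original source): preparing a zeroed
 * buffer for XRSTORS. XCOMP_BV_COMPACTED_FORMAT is bit 63 of xcomp_bv and
 * marks the buffer as compacted; bits 62:0 describe which components the
 * compacted layout contains:
 *
 *	memset(&fpstate->regs.xsave, 0, fpstate->size);
 *	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
 *
 * The fpstate fields above are plausible stand-ins for whatever buffer and
 * feature mask the caller actually operates on.
 */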

static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}
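
/*
 * Background note: state permissions are granted per process group, which
 * is why the group leader's fpu is consulted above. A hedged user space
 * sketch (there is no glibc wrapper, so a raw syscall is shown):
 *
 *	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA))
 *		perror("ARCH_REQ_XCOMP_PERM");
 *
 * XFEATURE_XTILEDATA serves here only as an example of a dynamically
 * enabled feature; the guest variant uses ARCH_REQ_XCOMP_GUEST_PERM.
 */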

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u64 xfeatures, u32 pkru_val,
				      enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;

	return fpu_kernel_cfg.independent_features;
}

/*
 * Update the value of the PKRU register that was already pushed onto the
 * signal frame.
 */
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
{
	u64 xstate_bv;
	int err;

	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
		return 0;

	/* Mark PKRU as in-use so that it is restored correctly. */
	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;

	err = __put_user(xstate_bv, &buf->header.xfeatures);
	if (err)
		return err;

	/* Update the PKRU value in the userspace xsave buffer. */
	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
}
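
/*
 * Setting the PKRU bit in the frame's xfeatures word above matters because
 * XRSTOR initializes every component whose XSTATE_BV bit is clear: without
 * it, sigreturn would reinit PKRU instead of restoring the written value.
 */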

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVEC		".byte " REX_PREFIX "0x0f,0xc7,0x27"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
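
/*
 * Decoding aid: 0x48 is REX.W (selecting the 64-bit forms). The last byte
 * is the ModRM byte: mod=00, rm=111 selects the (%rdi)/(%edi) memory
 * operand, and the reg field is the opcode extension, e.g. 0x27 = /4
 * (XSAVE under 0f ae, XSAVEC under 0f c7), 0x2f = /5, 0x37 = /6,
 * 0x1f = /3.
 */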

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
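
/*
 * The feature mask is passed in EDX:EAX ("a" (lmask), "d" (hmask)) as the
 * instructions require, and @err aliases EAX ("=a"): the xor clears it on
 * the success path, while the EX_TYPE_FAULT_MCE_SAFE fixup writes the trap
 * number into EAX when the instruction faults.
 */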

/*
 * If XSAVES is enabled, it replaces XSAVEC because it additionally supports
 * supervisor states.
 *
 * Otherwise, if XSAVEC is enabled, it replaces XSAVEOPT because it
 * additionally supports the compacted storage format.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because it supports
 * the modified optimization, which XSAVE does not.
 *
 * Use XSAVE as a fallback.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile("1: " ALTERNATIVE_3(XSAVE,			\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC,   X86_FEATURE_XSAVEC,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile("1: " ALTERNATIVE(XRSTOR,			\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
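
/*
 * XSTATE_XRESTORE() deliberately has no error output: the
 * EX_TYPE_FPU_RESTORE fixup handles a faulting restore by warning and
 * resetting the FPU to a known-good init state rather than reporting the
 * failure to the caller.
 */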

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
static inline void xfd_set_state(u64 xfd)
{
	wrmsrl(MSR_IA32_XFD, xfd);
	__this_cpu_write(xfd_state, xfd);
}

static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd)
			xfd_set_state(xfd);
	}
}

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_set_state(u64 xfd) { }

static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses either XSAVE, XSAVEOPT, XSAVEC or XSAVES depending on the CPU
 * features and command line options. The choice is permanent until the
 * next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures.  Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe.  The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible.  Only use for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}
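
/*
 * Worked example (illustrative): if the task has touched AMX state, the
 * XTILEDATA bit shows up in xfeatures_in_use() and the state is written;
 * otherwise it is masked out, its sigframe space is left uninitialized,
 * and its XSTATE_BV bit stays clear, which is why consumers must check
 * XSTATE_BV before reading optimized-out features. XTILEDATA stands in
 * here for any feature covered by XFEATURE_MASK_SIGFRAME_INITOPT.
 */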

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because the XRSTOR/XRSTORS
 * modified-state tracking might refer to a different application.
 *
 * We don't use the compacted format, for backward compatibility with old
 * applications which don't understand the compacted format of the xsave
 * area.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	if (!err)
		err = update_pkru_in_sigframe(buf, mask, pkru);

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead
 * of an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif