xref: /linux/arch/x86/kernel/fpu/xstate.h (revision e47a324d6f07c9ef252cfce1f14cfa5110cbed99)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/msr.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
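
/*
 * Usage sketch (illustrative, not part of this header): the init code
 * builds a zeroed xsave image and then stamps the compacted-format
 * marker, roughly:
 *
 *	memset(&fpstate->regs.xsave, 0, fpstate->size);
 *	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
 *
 * Without XCOMP_BV_COMPACTED_FORMAT in xcomp_bv, a later XRSTORS of
 * the buffer would raise #GP.
 */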

static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = x86_task_fpu(current->group_leader);
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}
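
/*
 * Permission-check sketch (illustrative): before exposing a dynamically
 * enabled feature, callers test it against the group permission mask,
 * e.g. with XFEATURE_MASK_XTILE_DATA standing in for such a feature:
 *
 *	if (!(xstate_get_host_group_perm() & XFEATURE_MASK_XTILE_DATA))
 *		return -EPERM;
 */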

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u64 xfeatures, u32 pkru_val,
				      enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;

	return fpu_kernel_cfg.independent_features;
}

static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask)
{
	u64 xfeatures;
	int err;

	/* Read the xfeatures value already saved in the user buffer */
	err  = __get_user(xfeatures, &xbuf->header.xfeatures);
	xfeatures |= mask;
	err |= __put_user(xfeatures, &xbuf->header.xfeatures);

	return err;
}

/*
 * Update the PKRU value that was already pushed onto the signal frame.
 */
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	int err;

	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
		return 0;

	/* Mark PKRU as in-use so that it is restored correctly. */
	err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU);
	if (err)
		return err;

	/* Update the PKRU value in the userspace xsave buffer. */
	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_SUFFIX	"64"
#else
#define REX_SUFFIX
#endif
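
/*
 * The "64" suffix selects the REX.W form of the instruction (e.g.
 * "xsave64"), which saves/restores FIP and FDP as full 64-bit values
 * on 64-bit kernels.
 */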

#define XSAVE		"xsave" REX_SUFFIX " %[xa]"
#define XSAVEOPT	"xsaveopt" REX_SUFFIX " %[xa]"
#define XSAVEC		"xsavec" REX_SUFFIX " %[xa]"
#define XSAVES		"xsaves" REX_SUFFIX " %[xa]"
#define XRSTOR		"xrstor" REX_SUFFIX " %[xa]"
#define XRSTORS		"xrstors" REX_SUFFIX " %[xa]"

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 *
 * The [xa] input parameter below is the struct xregs_state pointer,
 * bound to the asm symbolic name used in the XSAVE/XRSTOR mnemonic
 * strings above.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : [xa] "m" (*(st)), "a" (lmask), "d" (hmask)	\
		     : "memory")
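
/*
 * Expansion sketch (illustrative): XSTATE_OP(XRSTOR, st, lmask, hmask, err)
 * emits roughly
 *
 *	1: xrstor64 %[xa]	# EDX:EAX = hmask:lmask select the features
 *	   xor %eax, %eax	# reached only if no fault: err = 0
 *	2:
 *
 * A fault at 1: is fixed up to continue at 2: with the trap number in
 * @err, as described above.
 */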

/*
 * If XSAVES is enabled, it replaces XSAVEC because it additionally
 * supports supervisor states.
 *
 * Otherwise, if XSAVEC is enabled, it replaces XSAVEOPT because it
 * additionally supports the compacted storage format.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because it
 * supports the modified optimization which XSAVE lacks.
 *
 * Use XSAVE as a fallback.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile("1: " ALTERNATIVE_3(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC,   X86_FEATURE_XSAVEC,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n\t"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : [xa] "m" (*(st)), "a" (lmask), "d" (hmask)	\
		     : "memory")
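
/*
 * Unlike XSTATE_OP(), which reports the trap number, a fault in
 * XSTATE_XSAVE() is fixed up to -EFAULT in @err via EX_TYPE_EFAULT_REG.
 */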

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile("1: " ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : [xa] "m" (*(st)), "a" (lmask), "d" (hmask)	\
		     : "memory")
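
/*
 * No error is reported here: a fault during the restore goes through the
 * EX_TYPE_FPU_RESTORE fixup, which is expected to reset the FPU state
 * rather than return an error. os_xrstor_safe() below is the variant
 * that reports errors to the caller.
 */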

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
static inline void xfd_set_state(u64 xfd)
{
	wrmsrq(MSR_IA32_XFD, xfd);
	__this_cpu_write(xfd_state, xfd);
}

static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd)
			xfd_set_state(xfd);
	}
}
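
/*
 * The per-CPU xfd_state shadow avoids a WRMSR on every task switch:
 * xfd_update_state() writes MSR_IA32_XFD only when the incoming
 * fpstate's XFD value differs from the cached value.
 */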

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_set_state(u64 xfd) { }

static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses XSAVE, XSAVEOPT, XSAVEC or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures.  Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe.  The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible.  Only use it for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}
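
/*
 * Consumer-side sketch (illustrative): code parsing such a sigframe must
 * consult XSTATE_BV before trusting a feature's slot, e.g.:
 *
 *	valid = xsave->header.xfeatures & XFEATURE_MASK_XTILE_DATA;
 *
 * Features whose XSTATE_BV bits are clear were optimized away and must
 * be treated as being in their init state.
 */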

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format xsave area for backward compatibility
 * with old applications which don't understand the compacted format of
 * the xsave area.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = x86_task_fpu(current)->fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	if (!err)
		err = update_pkru_in_sigframe(buf, pkru);

	return err;
}
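
/*
 * Caller-side sketch (illustrative): the signal delivery path zeroes the
 * header before saving, because XSAVE* leaves the reserved header fields
 * untouched:
 *
 *	if (__clear_user(&buf->header, sizeof(buf->header)))
 *		return -EFAULT;
 *	err = xsave_to_user_sigframe(buf, pkru);
 */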

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(x86_task_fpu(current)->fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, returning an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif /* __X86_KERNEL_FPU_XSTATE_H */