/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
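
/*
 * Illustrative sketch only: a freshly zeroed xsave buffer needs xcomp_bv
 * set up before XRSTORS may consume it. A hypothetical caller (not part
 * of this header) would do roughly:
 *
 *	memset(&fpstate->regs.xsave, 0, fpstate->size);
 *	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
 */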

static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u64 xfeatures, u32 pkru_val,
				      enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;

	return fpu_kernel_cfg.independent_features;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVEC		".byte " REX_PREFIX "0x0f,0xc7,0x27"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * After this @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
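
/*
 * Illustrative sketch only: XSTATE_OP() is the raw building block; a
 * hypothetical direct use (the real callers are the wrappers below)
 * would look roughly like:
 *
 *	u64 mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XRSTOR, &fpstate->regs.xsave, lmask, hmask, err);
 *	if (err)
 *		...	err holds the trap number, e.g. 13 for #GP
 */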

/*
 * If XSAVES is enabled, it replaces XSAVEC because it additionally
 * supports supervisor states.
 *
 * Otherwise, if XSAVEC is enabled, it replaces XSAVEOPT because it
 * additionally supports the compacted storage format.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because it
 * supports the modified optimization, which plain XSAVE does not.
 *
 * Plain XSAVE is the fallback.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile("1: " ALTERNATIVE_3(XSAVE,			\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC, X86_FEATURE_XSAVEC,		\
				   XSAVES, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile("1: " ALTERNATIVE(XRSTOR,				\
				       XRSTORS, X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
static inline void xfd_set_state(u64 xfd)
{
	wrmsrl(MSR_IA32_XFD, xfd);
	__this_cpu_write(xfd_state, xfd);
}

static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd)
			xfd_set_state(xfd);
	}
}
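
/*
 * Illustrative sketch only: xfd_update_state() keeps a per-CPU shadow of
 * MSR_IA32_XFD so the MSR write is skipped when the value is already
 * current. A hypothetical context-switch caller:
 *
 *	static void example_switch_fpu(struct task_struct *next)
 *	{
 *		xfd_update_state(next->thread.fpu.fpstate);
 *	}
 */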

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_set_state(u64 xfd) { }

static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses XSAVE, XSAVEOPT, XSAVEC or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
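
/*
 * Illustrative pairing, sketch only: kernel-internal save/restore uses
 * os_xsave() together with os_xrstor() below on the same fpstate,
 * roughly:
 *
 *	os_xsave(fpstate);
 *	...
 *	os_xrstor(fpstate, fpstate->xfeatures);
 */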

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures. Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe. The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible. Only use it for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}
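
/*
 * Worked example, illustrative values only: with AMX tile data being a
 * dynamic feature covered by XFEATURE_MASK_SIGFRAME_INITOPT and currently
 * not in use (its XINUSE bit clear), the returned mask is
 *
 *	xfeatures_in_use() |
 *	(XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_SIGFRAME_INITOPT)
 *
 * with the tile bits dropped, so XSAVE leaves that sigframe area
 * unwritten and the corresponding XSTATE_BV bits end up clear.
 */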

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted format for the xsave area, for backward
 * compatibility with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from the user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
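
/*
 * Illustrative sketch only: the signal paths pair these two helpers, and
 * a fault while touching user memory comes back as a nonzero return
 * instead of an oops (hypothetical caller, simplified):
 *
 *	if (xsave_to_user_sigframe(buf))
 *		return false;	setup failed, caller forces SIGSEGV
 *	...
 *	if (xrstor_from_user_sigframe(buf, mask))
 *		return false;	sigreturn was handed a bad frame
 */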

/*
 * Restore xstate from a kernel space xsave area, returning an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif