xref: /linux/arch/s390/include/asm/fpu-insn.h (revision 906fd46a65383cd639e5eec72a047efc33045d86)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Support for Floating Point and Vector Instructions
4  *
5  */
6 
7 #ifndef __ASM_S390_FPU_INSN_H
8 #define __ASM_S390_FPU_INSN_H
9 
10 #include <asm/fpu-insn-asm.h>
11 
12 #ifndef __ASSEMBLY__
13 
14 #include <linux/instrumented.h>
15 #include <asm/asm-extable.h>
16 
17 asm(".include \"asm/fpu-insn-asm.h\"\n");
18 
19 /*
20  * Various small helper functions, which can and should be used within
21  * kernel fpu code sections. Each function represents only one floating
22  * point or vector instruction (except for helper functions which require
23  * exception handling).
24  *
25  * This allows to use floating point and vector instructions like C
26  * functions, which has the advantage that all supporting code, like
27  * e.g. loops, can be written in easy to read C code.
28  *
29  * Each of the helper functions provides support for code instrumentation,
30  * like e.g. KASAN. Therefore instrumentation is also covered automatically
31  * when using these functions.
32  *
33  * In order to ensure that code generated with the helper functions stays
34  * within kernel fpu sections, which are guarded with kernel_fpu_begin()
35  * and kernel_fpu_end() calls, each function has a mandatory "memory"
36  * barrier.
37  */
38 
/*
 * CONVERT FROM FIXED (CEFBR): load FPR @f1 with the short BFP value of the
 * 32-bit signed integer @val. @f1 must be a compile-time constant register
 * number ("I" immediate constraint).
 */
39 static __always_inline void fpu_cefbr(u8 f1, s32 val)
40 {
41 	asm volatile("cefbr	%[f1],%[val]\n"
42 		     :
43 		     : [f1] "I" (f1), [val] "d" (val)
44 		     : "memory");
45 }
46 
/*
 * CONVERT TO FIXED (CGEBR): convert the short BFP value in FPR @f2 to a
 * 64-bit integer using rounding mode @mode; register number and mode are
 * both immediates. Result is returned as unsigned long.
 */
47 static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
48 {
49 	unsigned long val;
50 
51 	asm volatile("cgebr	%[val],%[mode],%[f2]\n"
52 		     : [val] "=d" (val)
53 		     : [f2] "I" (f2), [mode] "I" (mode)
54 		     : "memory");
55 	return val;
56 }
57 
/*
 * DIVIDE (DEBR, short BFP): FPR @f1 = FPR @f1 / FPR @f2. Both register
 * numbers must be compile-time constants.
 */
58 static __always_inline void fpu_debr(u8 f1, u8 f2)
59 {
60 	asm volatile("debr	%[f1],%[f2]\n"
61 		     :
62 		     : [f1] "I" (f1), [f2] "I" (f2)
63 		     : "memory");
64 }
65 
/*
 * LOAD (LD): load floating point register @fpr with the 8 bytes at *@reg.
 * instrument_read() tells KASAN & friends about the memory access, since
 * the asm itself is invisible to instrumentation.
 */
66 static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
67 {
68 	instrument_read(reg, sizeof(*reg));
69 	asm volatile("ld	 %[fpr],%[reg]\n"
70 		     :
71 		     : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
72 		     : "memory");
73 }
74 
/*
 * LOAD FPR FROM GR (LDGR): copy the general register holding @val into
 * floating point register @f1.
 * NOTE(review): @val is only 32 bits while LDGR copies all 64 GR bits;
 * the FPR half originating from the GR's upper word is whatever extension
 * the compiler produced - confirm callers do not depend on it.
 */
75 static __always_inline void fpu_ldgr(u8 f1, u32 val)
76 {
77 	asm volatile("ldgr	%[f1],%[val]\n"
78 		     :
79 		     : [f1] "I" (f1), [val] "d" (val)
80 		     : "memory");
81 }
82 
/*
 * LOAD FPC (LFPC): load the floating point control register from *@fpc.
 * May raise a specification exception for invalid values; use
 * fpu_lfpc_safe() when @fpc may have been modified by user space.
 */
83 static __always_inline void fpu_lfpc(unsigned int *fpc)
84 {
85 	instrument_read(fpc, sizeof(*fpc));
86 	asm volatile("lfpc	%[fpc]"
87 		     :
88 		     : [fpc] "Q" (*fpc)
89 		     : "memory");
90 }
91 
92 /**
93  * fpu_lfpc_safe - Load floating point control register safely.
94  * @fpc: new value for floating point control register
95  *
96  * Load floating point control register. This may lead to an exception,
97  * since a saved value may have been modified by user space (ptrace,
98  * signal return, kvm registers) to an invalid value. In such a case
99  * set the floating point control register to zero.
100  */
101 static inline void fpu_lfpc_safe(unsigned int *fpc)
102 {
103 	u32 tmp;
104 
105 	instrument_read(fpc, sizeof(*fpc));
	/*
	 * If LFPC at label 0 raises an exception, the extable entry for
	 * label 1 redirects to the fixup at label 2, which resets the FPC
	 * to zero via SFPC and jumps back to the instruction after LFPC.
	 */
106 	asm volatile("\n"
107 		"0:	lfpc	%[fpc]\n"
108 		"1:	nopr	%%r7\n"
109 		".pushsection .fixup, \"ax\"\n"
110 		"2:	lghi	%[tmp],0\n"
111 		"	sfpc	%[tmp]\n"
112 		"	jg	1b\n"
113 		".popsection\n"
114 		EX_TABLE(1b, 2b)
115 		: [tmp] "=d" (tmp)
116 		: [fpc] "Q" (*fpc)
117 		: "memory");
118 }
119 
/*
 * STORE (STD): store floating point register @fpr to the 8 bytes at *@reg.
 * instrument_write() makes the store visible to instrumentation (KASAN).
 */
120 static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
121 {
122 	instrument_write(reg, sizeof(*reg));
123 	asm volatile("std	 %[fpr],%[reg]\n"
124 	     : [reg] "=Q" (reg->ui)
125 		     : [fpr] "I" (fpr)
126 		     : "memory");
127 }
128 
/*
 * SET FPC (SFPC): set the floating point control register from the
 * general register holding @fpc.
 */
129 static __always_inline void fpu_sfpc(unsigned int fpc)
130 {
131 	asm volatile("sfpc	%[fpc]"
132 		     :
133 		     : [fpc] "d" (fpc)
134 		     : "memory");
135 }
136 
/*
 * STORE FPC (STFPC): store the floating point control register to *@fpc.
 */
137 static __always_inline void fpu_stfpc(unsigned int *fpc)
138 {
139 	instrument_write(fpc, sizeof(*fpc));
140 	asm volatile("stfpc	%[fpc]"
141 		     : [fpc] "=Q" (*fpc)
142 		     :
143 		     : "memory");
144 }
145 
/*
 * VECTOR ADD (byte elements): v1 = v2 + v3. VAB is an assembler macro
 * from fpu-insn-asm.h; register numbers are immediates.
 */
146 static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
147 {
148 	asm volatile("VAB	%[v1],%[v2],%[v3]"
149 		     :
150 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
151 		     : "memory");
152 }
153 
/*
 * VECTOR CHECKSUM (VCKSM): accumulate a 32-bit checksum of v2 into v1,
 * with the initial carry/sum taken from v3.
 */
154 static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
155 {
156 	asm volatile("VCKSM	%[v1],%[v2],%[v3]"
157 		     :
158 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
159 		     : "memory");
160 }
161 
/*
 * VECTOR ELEMENT SHIFT RIGHT ARITHMETIC (byte elements, VESRAVB):
 * each byte of v2 is shifted right by the corresponding byte of v3.
 */
162 static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
163 {
164 	asm volatile("VESRAVB	%[v1],%[v2],%[v3]"
165 		     :
166 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
167 		     : "memory");
168 }
169 
/*
 * VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE (doubleword, VGFMAG):
 * v1 = carry-less multiply-sum of v2 and v3, accumulated (XORed) with v4.
 */
170 static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)
171 {
172 	asm volatile("VGFMAG	%[v1],%[v2],%[v3],%[v4]"
173 		     :
174 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
175 		     : "memory");
176 }
177 
/*
 * VECTOR GALOIS FIELD MULTIPLY SUM (doubleword, VGFMG):
 * v1 = carry-less multiply-sum of the doubleword elements of v2 and v3.
 */
178 static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
179 {
180 	asm volatile("VGFMG	%[v1],%[v2],%[v3]"
181 		     :
182 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
183 		     : "memory");
184 }
185 
186 #ifdef CONFIG_CC_IS_CLANG
187 
/*
 * VECTOR LOAD (VL), clang variant: load 16 bytes from *@vxr into vector
 * register @v1. The address is formed explicitly in GPR 1 via LA because
 * the %O/%R operand modifiers used by the gcc variant are not usable
 * with clang; GPR 1 is therefore clobbered.
 */
188 static __always_inline void fpu_vl(u8 v1, const void *vxr)
189 {
190 	instrument_read(vxr, sizeof(__vector128));
191 	asm volatile("\n"
192 		"	la	1,%[vxr]\n"
193 		"	VL	%[v1],0,,1\n"
194 		:
195 		: [vxr] "R" (*(__vector128 *)vxr),
196 		  [v1] "I" (v1)
197 		: "memory", "1");
198 }
199 
200 #else /* CONFIG_CC_IS_CLANG */
201 
/*
 * VECTOR LOAD (VL): load 16 bytes from *@vxr into vector register @v1.
 * %O/%R extract the displacement and base register of the "Q" memory
 * operand, so the compiler picks the addressing.
 */
202 static __always_inline void fpu_vl(u8 v1, const void *vxr)
203 {
204 	instrument_read(vxr, sizeof(__vector128));
205 	asm volatile("VL	%[v1],%O[vxr],,%R[vxr]\n"
206 		     :
207 		     : [vxr] "Q" (*(__vector128 *)vxr),
208 		       [v1] "I" (v1)
209 		     : "memory");
210 }
211 
212 #endif /* CONFIG_CC_IS_CLANG */
213 
/*
 * VECTOR LOAD ELEMENT IMMEDIATE (byte, VLEIB): set byte element @index of
 * vector register @v to the sign-extended 16-bit immediate @val.
 */
214 static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
215 {
216 	asm volatile("VLEIB	%[v],%[val],%[index]"
217 		     :
218 		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
219 		     : "memory");
220 }
221 
/*
 * VECTOR LOAD ELEMENT IMMEDIATE (doubleword, VLEIG): set doubleword
 * element @index of @v to the sign-extended 16-bit immediate @val.
 */
222 static __always_inline void fpu_vleig(u8 v, s16 val, u8 index)
223 {
224 	asm volatile("VLEIG	%[v],%[val],%[index]"
225 		     :
226 		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
227 		     : "memory");
228 }
229 
/*
 * VECTOR LOAD GR FROM VR ELEMENT (word, VLGVF): return word element
 * @index of vector register @v in a general register.
 */
230 static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
231 {
232 	u64 val;
233 
234 	asm volatile("VLGVF	%[val],%[v],%[index]"
235 		     : [val] "=d" (val)
236 		     : [v] "I" (v), [index] "L" (index)
237 		     : "memory");
238 	return val;
239 }
240 
241 #ifdef CONFIG_CC_IS_CLANG
242 
/*
 * VECTOR LOAD WITH LENGTH (VLL), clang variant: load @index + 1 bytes
 * (capped at 16) from *@vxr into vector register @v1. Address formed in
 * GPR 1 via LA because clang cannot use the gcc variant's %O/%R operand
 * modifiers; GPR 1 is clobbered. Instrumentation covers only the bytes
 * actually read.
 */
243 static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
244 {
245 	unsigned int size;
246 
247 	size = min(index + 1, sizeof(__vector128));
248 	instrument_read(vxr, size);
249 	asm volatile("\n"
250 		"	la	1,%[vxr]\n"
251 		"	VLL	%[v1],%[index],0,1\n"
252 		:
253 		: [vxr] "R" (*(u8 *)vxr),
254 		  [index] "d" (index),
255 		  [v1] "I" (v1)
256 		: "memory", "1");
257 }
258 
259 #else /* CONFIG_CC_IS_CLANG */
260 
/*
 * VECTOR LOAD WITH LENGTH (VLL): load @index + 1 bytes (capped at 16)
 * from *@vxr into vector register @v1. Instrumentation covers only the
 * bytes actually read.
 */
261 static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
262 {
263 	unsigned int size;
264 
265 	size = min(index + 1, sizeof(__vector128));
266 	instrument_read(vxr, size);
267 	asm volatile("VLL	%[v1],%[index],%O[vxr],%R[vxr]\n"
268 		     :
269 		     : [vxr] "Q" (*(u8 *)vxr),
270 		       [index] "d" (index),
271 		       [v1] "I" (v1)
272 		     : "memory");
273 }
274 
275 #endif /* CONFIG_CC_IS_CLANG */
276 
277 #ifdef CONFIG_CC_IS_CLANG
278 
/*
 * VECTOR LOAD MULTIPLE (VLM), clang variant: load vector registers
 * @_v1 .. @_v3 from the memory at @_vxrs. The anonymous struct sizes the
 * memory operand to the whole register range so the compiler knows the
 * full extent of the access. Address formed in GPR 1 (clobbered) since
 * clang cannot use the gcc variant's %O/%R modifiers. Evaluates to the
 * number of registers loaded.
 */
279 #define fpu_vlm(_v1, _v3, _vxrs)					\
280 ({									\
281 	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
282 	struct {							\
283 		__vector128 _v[(_v3) - (_v1) + 1];			\
284 	} *_v = (void *)(_vxrs);					\
285 									\
286 	instrument_read(_v, size);					\
287 	asm volatile("\n"						\
288 		"	la	1,%[vxrs]\n"				\
289 		"	VLM	%[v1],%[v3],0,1\n"			\
290 		:							\
291 		: [vxrs] "R" (*_v),					\
292 		  [v1] "I" (_v1), [v3] "I" (_v3)			\
293 		: "memory", "1");					\
294 	(_v3) - (_v1) + 1;						\
295 })
296 
297 #else /* CONFIG_CC_IS_CLANG */
298 
/*
 * VECTOR LOAD MULTIPLE (VLM): load vector registers @_v1 .. @_v3 from the
 * memory at @_vxrs. The anonymous struct sizes the memory operand to the
 * whole register range so the compiler knows the full extent of the
 * access. Evaluates to the number of registers loaded.
 */
299 #define fpu_vlm(_v1, _v3, _vxrs)					\
300 ({									\
301 	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
302 	struct {							\
303 		__vector128 _v[(_v3) - (_v1) + 1];			\
304 	} *_v = (void *)(_vxrs);					\
305 									\
306 	instrument_read(_v, size);					\
307 	asm volatile("VLM	%[v1],%[v3],%O[vxrs],%R[vxrs]\n"	\
308 		     :							\
309 		     : [vxrs] "Q" (*_v),				\
310 		       [v1] "I" (_v1), [v3] "I" (_v3)			\
311 		     : "memory");					\
312 	(_v3) - (_v1) + 1;						\
313 })
314 
315 #endif /* CONFIG_CC_IS_CLANG */
316 
/*
 * VECTOR LOAD REGISTER (VLR): copy vector register @v2 to @v1.
 */
317 static __always_inline void fpu_vlr(u8 v1, u8 v2)
318 {
319 	asm volatile("VLR	%[v1],%[v2]"
320 		     :
321 		     : [v1] "I" (v1), [v2] "I" (v2)
322 		     : "memory");
323 }
324 
/*
 * VECTOR LOAD VR ELEMENT FROM GR (word, VLVGF): set word element @index
 * of vector register @v to @val.
 */
325 static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
326 {
327 	asm volatile("VLVGF	%[v],%[val],%[index]"
328 		     :
329 		     : [v] "I" (v), [val] "d" (val), [index] "L" (index)
330 		     : "memory");
331 }
332 
/*
 * VECTOR AND (VN): v1 = v2 & v3.
 */
333 static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
334 {
335 	asm volatile("VN	%[v1],%[v2],%[v3]"
336 		     :
337 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
338 		     : "memory");
339 }
340 
/*
 * VECTOR PERMUTE (VPERM): select bytes from the concatenation of v2 and
 * v3 according to the byte indexes in v4, result in v1.
 */
341 static __always_inline void fpu_vperm(u8 v1, u8 v2, u8 v3, u8 v4)
342 {
343 	asm volatile("VPERM	%[v1],%[v2],%[v3],%[v4]"
344 		     :
345 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
346 		     : "memory");
347 }
348 
/*
 * VECTOR REPLICATE IMMEDIATE (byte, VREPIB): replicate the immediate @i2
 * into every byte element of vector register @v1.
 */
349 static __always_inline void fpu_vrepib(u8 v1, s16 i2)
350 {
351 	asm volatile("VREPIB	%[v1],%[i2]"
352 		     :
353 		     : [v1] "I" (v1), [i2] "K" (i2)
354 		     : "memory");
355 }
356 
/*
 * VECTOR SHIFT RIGHT LOGICAL BY BYTE (VSRLB): shift v2 right with the
 * shift amount taken from v3, result in v1.
 */
357 static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
358 {
359 	asm volatile("VSRLB	%[v1],%[v2],%[v3]"
360 		     :
361 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
362 		     : "memory");
363 }
364 
365 #ifdef CONFIG_CC_IS_CLANG
366 
/*
 * VECTOR STORE (VST), clang variant: store vector register @v1 to the 16
 * bytes at *@vxr. Address formed in GPR 1 (clobbered) since clang cannot
 * use the gcc variant's %O/%R modifiers.
 * NOTE(review): @vxr is declared const but is written through via a
 * cast - callers must never pass genuinely const storage.
 */
367 static __always_inline void fpu_vst(u8 v1, const void *vxr)
368 {
369 	instrument_write(vxr, sizeof(__vector128));
370 	asm volatile("\n"
371 		"	la	1,%[vxr]\n"
372 		"	VST	%[v1],0,,1\n"
373 		: [vxr] "=R" (*(__vector128 *)vxr)
374 		: [v1] "I" (v1)
375 		: "memory", "1");
376 }
377 
378 #else /* CONFIG_CC_IS_CLANG */
379 
/*
 * VECTOR STORE (VST): store vector register @v1 to the 16 bytes at *@vxr.
 * NOTE(review): @vxr is declared const but is written through via a
 * cast - callers must never pass genuinely const storage.
 */
380 static __always_inline void fpu_vst(u8 v1, const void *vxr)
381 {
382 	instrument_write(vxr, sizeof(__vector128));
383 	asm volatile("VST	%[v1],%O[vxr],,%R[vxr]\n"
384 		     : [vxr] "=Q" (*(__vector128 *)vxr)
385 		     : [v1] "I" (v1)
386 		     : "memory");
387 }
388 
389 #endif /* CONFIG_CC_IS_CLANG */
390 
391 #ifdef CONFIG_CC_IS_CLANG
392 
/*
 * VECTOR STORE WITH LENGTH (VSTL), clang variant: store @index + 1 bytes
 * (capped at 16) of vector register @v1 to *@vxr. Address formed in
 * GPR 1 (clobbered) since clang cannot use the gcc variant's %O/%R
 * modifiers. Instrumentation covers only the bytes actually written.
 */
393 static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
394 {
395 	unsigned int size;
396 
397 	size = min(index + 1, sizeof(__vector128));
398 	instrument_write(vxr, size);
399 	asm volatile("\n"
400 		"	la	1,%[vxr]\n"
401 		"	VSTL	%[v1],%[index],0,1\n"
402 		: [vxr] "=R" (*(u8 *)vxr)
403 		: [index] "d" (index), [v1] "I" (v1)
404 		: "memory", "1");
405 }
406 
407 #else /* CONFIG_CC_IS_CLANG */
408 
/*
 * VECTOR STORE WITH LENGTH (VSTL): store @index + 1 bytes (capped at 16)
 * of vector register @v1 to *@vxr. Instrumentation covers only the bytes
 * actually written.
 */
409 static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
410 {
411 	unsigned int size;
412 
413 	size = min(index + 1, sizeof(__vector128));
414 	instrument_write(vxr, size);
415 	asm volatile("VSTL	%[v1],%[index],%O[vxr],%R[vxr]\n"
416 		     : [vxr] "=Q" (*(u8 *)vxr)
417 		     : [index] "d" (index), [v1] "I" (v1)
418 		     : "memory");
419 }
420 
421 #endif /* CONFIG_CC_IS_CLANG */
422 
423 #ifdef CONFIG_CC_IS_CLANG
424 
/*
 * VECTOR STORE MULTIPLE (VSTM), clang variant: store vector registers
 * @_v1 .. @_v3 to the memory at @_vxrs. The anonymous struct sizes the
 * memory operand to the whole register range so the compiler knows the
 * full extent of the access. Address formed in GPR 1 (clobbered) since
 * clang cannot use the gcc variant's %O/%R modifiers. Evaluates to the
 * number of registers stored.
 */
425 #define fpu_vstm(_v1, _v3, _vxrs)					\
426 ({									\
427 	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
428 	struct {							\
429 		__vector128 _v[(_v3) - (_v1) + 1];			\
430 	} *_v = (void *)(_vxrs);					\
431 									\
432 	instrument_write(_v, size);					\
433 	asm volatile("\n"						\
434 		"	la	1,%[vxrs]\n"				\
435 		"	VSTM	%[v1],%[v3],0,1\n"			\
436 		: [vxrs] "=R" (*_v)					\
437 		: [v1] "I" (_v1), [v3] "I" (_v3)			\
438 		: "memory", "1");					\
439 	(_v3) - (_v1) + 1;						\
440 })
441 
442 #else /* CONFIG_CC_IS_CLANG */
443 
/*
 * VECTOR STORE MULTIPLE (VSTM): store vector registers @_v1 .. @_v3 to
 * the memory at @_vxrs. The anonymous struct sizes the memory operand to
 * the whole register range so the compiler knows the full extent of the
 * access. Evaluates to the number of registers stored.
 */
444 #define fpu_vstm(_v1, _v3, _vxrs)					\
445 ({									\
446 	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
447 	struct {							\
448 		__vector128 _v[(_v3) - (_v1) + 1];			\
449 	} *_v = (void *)(_vxrs);					\
450 									\
451 	instrument_write(_v, size);					\
452 	asm volatile("VSTM	%[v1],%[v3],%O[vxrs],%R[vxrs]\n"	\
453 		     : [vxrs] "=Q" (*_v)				\
454 		     : [v1] "I" (_v1), [v3] "I" (_v3)			\
455 		     : "memory");					\
456 	(_v3) - (_v1) + 1;						\
457 })
458 
459 #endif /* CONFIG_CC_IS_CLANG */
460 
/*
 * VECTOR UNPACK LOGICAL LOW (word elements, VUPLLF): zero-extend the
 * low-half word elements of v2 into the doubleword elements of v1.
 */
461 static __always_inline void fpu_vupllf(u8 v1, u8 v2)
462 {
463 	asm volatile("VUPLLF	%[v1],%[v2]"
464 		     :
465 		     : [v1] "I" (v1), [v2] "I" (v2)
466 		     : "memory");
467 }
468 
/*
 * VECTOR EXCLUSIVE OR (VX): v1 = v2 ^ v3.
 */
469 static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
470 {
471 	asm volatile("VX	%[v1],%[v2],%[v3]"
472 		     :
473 		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
474 		     : "memory");
475 }
476 
/*
 * VZERO (assembler macro from fpu-insn-asm.h): clear all bits of vector
 * register @v.
 */
477 static __always_inline void fpu_vzero(u8 v)
478 {
479 	asm volatile("VZERO	%[v]"
480 		     :
481 		     : [v] "I" (v)
482 		     : "memory");
483 }
484 
485 #endif /* __ASSEMBLY__ */
486 #endif	/* __ASM_S390_FPU_INSN_H */
487