xref: /linux/arch/s390/include/asm/fpu-insn.h (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Support for Floating Point and Vector Instructions
4  *
5  */
6 
7 #ifndef __ASM_S390_FPU_INSN_H
8 #define __ASM_S390_FPU_INSN_H
9 
10 #include <asm/fpu-insn-asm.h>
11 
12 #ifndef __ASSEMBLER__
13 
14 #include <linux/instrumented.h>
15 #include <linux/kmsan.h>
16 #include <asm/asm-extable.h>
17 
18 asm(".include \"asm/fpu-insn-asm.h\"\n");
19 
20 /*
21  * Various small helper functions, which can and should be used within
22  * kernel fpu code sections. Each function represents only one floating
23  * point or vector instruction (except for helper functions which require
24  * exception handling).
25  *
26  * This allows using floating point and vector instructions like C
27  * functions, which has the advantage that all supporting code, like
28  * e.g. loops, can be written in easy to read C code.
29  *
30  * Each of the helper functions provides support for code instrumentation,
31  * like e.g. KASAN. Therefore instrumentation is also covered automatically
32  * when using these functions.
33  *
34  * In order to ensure that code generated with the helper functions stays
35  * within kernel fpu sections, which are guarded with kernel_fpu_begin()
36  * and kernel_fpu_end() calls, each function has a mandatory "memory"
37  * barrier.
38  */
39 
/*
 * CEFBR: convert the 32-bit signed integer @val to short BFP format in
 * floating point register @f1. @f1 is an immediate ("I" constraint), so
 * the register number must be a compile-time constant.
 */
static __always_inline void fpu_cefbr(u8 f1, s32 val)
{
	asm volatile("cefbr	%[f1],%[val]"
		     :
		     : [f1] "I" (f1), [val] "d" (val)
		     : "memory");
}
47 
/*
 * CGEBR: convert the short BFP value in floating point register @f2 to a
 * 64-bit signed integer, using rounding mode @mode; returns the converted
 * value. Both @f2 and @mode must be compile-time constants ("I").
 */
static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
{
	unsigned long val;

	asm volatile("cgebr	%[val],%[mode],%[f2]"
		     : [val] "=d" (val)
		     : [f2] "I" (f2), [mode] "I" (mode)
		     : "memory");
	return val;
}
58 
/* DEBR: short BFP divide: FPR @f1 = @f1 / @f2. */
static __always_inline void fpu_debr(u8 f1, u8 f2)
{
	asm volatile("debr	%[f1],%[f2]"
		     :
		     : [f1] "I" (f1), [f2] "I" (f2)
		     : "memory");
}
66 
/*
 * LD: load the 8 bytes at @reg into floating point register @fpr.
 * instrument_read() reports the memory access to KASAN & friends, since
 * the access happens inside the asm and is invisible to instrumentation.
 */
static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
{
	instrument_read(reg, sizeof(*reg));
	asm volatile("ld	 %[fpr],%[reg]"
		     :
		     : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
		     : "memory");
}
75 
/* LDGR: load floating point register @f1 from the general register holding @val. */
static __always_inline void fpu_ldgr(u8 f1, u32 val)
{
	asm volatile("ldgr	%[f1],%[val]"
		     :
		     : [f1] "I" (f1), [val] "d" (val)
		     : "memory");
}
83 
/*
 * LFPC: load the floating point control (FPC) register from memory at @fpc.
 * Use fpu_lfpc_safe() instead if the value may have been corrupted by
 * user space, since an invalid FPC value causes an exception.
 */
static __always_inline void fpu_lfpc(unsigned int *fpc)
{
	instrument_read(fpc, sizeof(*fpc));
	asm volatile("lfpc	%[fpc]"
		     :
		     : [fpc] "Q" (*fpc)
		     : "memory");
}
92 
/**
 * fpu_lfpc_safe - Load floating point control register safely.
 * @fpc: new value for floating point control register
 *
 * Load floating point control register. This may lead to an exception,
 * since a saved value may have been modified by user space (ptrace,
 * signal return, kvm registers) to an invalid value. In such a case
 * set the floating point control register to zero.
 */
static inline void fpu_lfpc_safe(unsigned int *fpc)
{
	instrument_read(fpc, sizeof(*fpc));
	/*
	 * If lfpc raises an exception, the EX_TABLE_FPC fixup (registered
	 * for the nop at label 0) resumes execution there with the FPC set
	 * to zero, as described above.
	 */
	asm_inline volatile(
		"	lfpc	%[fpc]\n"
		"0:	nopr	%%r7\n"
		EX_TABLE_FPC(0b, 0b)
		:
		: [fpc] "Q" (*fpc)
		: "memory");
}
113 
/*
 * STD: store floating point register @fpr to the 8 bytes at @reg.
 * instrument_write() reports the store to KASAN & friends, since the
 * access happens inside the asm and is invisible to instrumentation.
 */
static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
{
	instrument_write(reg, sizeof(*reg));
	asm volatile("std	 %[fpr],%[reg]"
		     : [reg] "=Q" (reg->ui)
		     : [fpr] "I" (fpr)
		     : "memory");
}
122 
/* SFPC: set the floating point control register from the general register holding @fpc. */
static __always_inline void fpu_sfpc(unsigned int fpc)
{
	asm volatile("sfpc	%[fpc]"
		     :
		     : [fpc] "d" (fpc)
		     : "memory");
}
130 
/* STFPC: store the floating point control register to memory at @fpc. */
static __always_inline void fpu_stfpc(unsigned int *fpc)
{
	instrument_write(fpc, sizeof(*fpc));
	asm volatile("stfpc	%[fpc]"
		     : [fpc] "=Q" (*fpc)
		     :
		     : "memory");
}
139 
/*
 * VAB: vector add (byte elements): %v1 = %v2 + %v3.
 * Note: the uppercase vector mnemonics used below are macros provided by
 * asm/fpu-insn-asm.h (pulled in via the asm(".include ...") above), so
 * they also work with assemblers that lack vector instruction support.
 */
static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VAB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
147 
/* VCKSM: vector checksum of %v2, carry-in via %v3, result into %v1. */
static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VCKSM	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
155 
/*
 * VESRAVB: vector element shift right arithmetic (byte elements),
 * per-element shift counts taken from %v3: %v1 = %v2 >> %v3.
 */
static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VESRAVB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
163 
/* VGFMAG: vector Galois field multiply sum and accumulate (doubleword elements). */
static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)
{
	asm volatile("VGFMAG	%[v1],%[v2],%[v3],%[v4]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
		     : "memory");
}
171 
/* VGFMG: vector Galois field multiply sum (doubleword elements). */
static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VGFMG	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
179 
180 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
181 
/*
 * VL: load the 16 bytes at @vxr into vector register @v1.
 * The %O/%R operand modifiers split the "Q" memory operand into its
 * displacement and base register parts for the VL macro operands.
 */
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
	instrument_read(vxr, sizeof(__vector128));
	asm volatile("VL	%[v1],%O[vxr],,%R[vxr]"
		     :
		     : [vxr] "Q" (*(__vector128 *)vxr),
		       [v1] "I" (v1)
		     : "memory");
}
191 
192 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
193 
/*
 * VL: load the 16 bytes at @vxr into vector register @v1.
 * Fallback for compilers without %O/%R format flag support: materialize
 * the operand address in r1 via "la" - hence the extra "1" clobber.
 */
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
	instrument_read(vxr, sizeof(__vector128));
	asm volatile(
		"	la	1,%[vxr]\n"
		"	VL	%[v1],0,,1"
		:
		: [vxr] "R" (*(__vector128 *)vxr),
		  [v1] "I" (v1)
		: "memory", "1");
}
205 
206 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
207 
/* VLEIB: load the immediate @val into byte element @index of vector register @v. */
static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
{
	asm volatile("VLEIB	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
		     : "memory");
}
215 
/*
 * VLEIG: load the sign-extended immediate @val into doubleword element
 * @index of vector register @v.
 */
static __always_inline void fpu_vleig(u8 v, s16 val, u8 index)
{
	asm volatile("VLEIG	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
		     : "memory");
}
223 
/*
 * VLGVF: extract word element @index of vector register @v into a
 * general register and return it.
 */
static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
{
	u64 val;

	asm volatile("VLGVF	%[val],%[v],%[index]"
		     : [val] "=d" (val)
		     : [v] "I" (v), [index] "L" (index)
		     : "memory");
	return val;
}
234 
235 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
236 
/*
 * VLL: load @index + 1 bytes (capped at 16, as reflected by the size
 * passed to instrument_read()) from @vxr into vector register @v1.
 */
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_read(vxr, size);
	asm volatile("VLL	%[v1],%[index],%O[vxr],%R[vxr]"
		     :
		     : [vxr] "Q" (*(u8 *)vxr),
		       [index] "d" (index),
		       [v1] "I" (v1)
		     : "memory");
}
250 
251 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
252 
/*
 * VLL: load @index + 1 bytes (capped at 16) from @vxr into vector
 * register @v1. Fallback for compilers without %O/%R format flag
 * support: address is materialized in r1 via "la", hence the "1" clobber.
 */
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_read(vxr, size);
	asm volatile(
		"	la	1,%[vxr]\n"
		"	VLL	%[v1],%[index],0,1"
		:
		: [vxr] "R" (*(u8 *)vxr),
		  [index] "d" (index),
		  [v1] "I" (v1)
		: "memory", "1");
}
268 
269 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
270 
271 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
272 
/*
 * fpu_vlm - VLM: load vector registers _v1.._v3 from consecutive 16-byte
 * slots at _vxrs. The anonymous struct gives the asm a memory operand of
 * the exact size of the accessed range, so the compiler and the
 * instrumentation see the full read. Evaluates to the number of vector
 * registers loaded.
 */
#define fpu_vlm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_read(_v, size);					\
	asm volatile("VLM	%[v1],%[v3],%O[vxrs],%R[vxrs]"		\
		     :							\
		     : [vxrs] "Q" (*_v),				\
		       [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory");					\
	(_v3) - (_v1) + 1;						\
})
288 
289 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
290 
/*
 * fpu_vlm - VLM: load vector registers _v1.._v3 from consecutive 16-byte
 * slots at _vxrs. Fallback for compilers without %O/%R format flag
 * support: address materialized in r1 via "la", hence the "1" clobber.
 * Evaluates to the number of vector registers loaded.
 */
#define fpu_vlm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_read(_v, size);					\
	asm volatile(							\
		"	la	1,%[vxrs]\n"				\
		"	VLM	%[v1],%[v3],0,1"			\
		:							\
		: [vxrs] "R" (*_v),					\
		  [v1] "I" (_v1), [v3] "I" (_v3)			\
		: "memory", "1");					\
	(_v3) - (_v1) + 1;						\
})
308 
309 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
310 
/* VLR: copy vector register %v2 to %v1. */
static __always_inline void fpu_vlr(u8 v1, u8 v2)
{
	asm volatile("VLR	%[v1],%[v2]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2)
		     : "memory");
}
318 
/* VLVGF: insert @val into word element @index of vector register @v. */
static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
{
	asm volatile("VLVGF	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "d" (val), [index] "L" (index)
		     : "memory");
}
326 
/* VN: bitwise AND: %v1 = %v2 & %v3. */
static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VN	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
334 
/* VPERM: permute the bytes of %v2:%v3 as selected by %v4 into %v1. */
static __always_inline void fpu_vperm(u8 v1, u8 v2, u8 v3, u8 v4)
{
	asm volatile("VPERM	%[v1],%[v2],%[v3],%[v4]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
		     : "memory");
}
342 
/* VREPIB: replicate the immediate @i2 into all byte elements of %v1. */
static __always_inline void fpu_vrepib(u8 v1, s16 i2)
{
	asm volatile("VREPIB	%[v1],%[i2]"
		     :
		     : [v1] "I" (v1), [i2] "K" (i2)
		     : "memory");
}
350 
/* VSRLB: vector shift right logical by byte, shift count taken from %v3: %v1 = %v2 >> %v3. */
static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VSRLB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
358 
359 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
360 
/* VST: store vector register @v1 to the 16 bytes at @vxr. */
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
	instrument_write(vxr, sizeof(__vector128));
	asm volatile("VST	%[v1],%O[vxr],,%R[vxr]"
		     : [vxr] "=Q" (*(__vector128 *)vxr)
		     : [v1] "I" (v1)
		     : "memory");
}
369 
370 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
371 
/*
 * VST: store vector register @v1 to the 16 bytes at @vxr.
 * Fallback for compilers without %O/%R format flag support: address is
 * materialized in r1 via "la", hence the "1" clobber.
 */
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
	instrument_write(vxr, sizeof(__vector128));
	asm volatile(
		"	la	1,%[vxr]\n"
		"	VST	%[v1],0,,1"
		: [vxr] "=R" (*(__vector128 *)vxr)
		: [v1] "I" (v1)
		: "memory", "1");
}
382 
383 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
384 
385 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
386 
/*
 * VSTL: store the leftmost @index + 1 bytes (capped at 16) of vector
 * register @v1 to @vxr. The kmsan_unpoison_memory() call marks the bytes
 * written inside the asm as initialized for KMSAN.
 */
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_write(vxr, size);
	asm volatile("VSTL	%[v1],%[index],%O[vxr],%R[vxr]"
		     : [vxr] "=Q" (*(u8 *)vxr)
		     : [index] "d" (index), [v1] "I" (v1)
		     : "memory");
	kmsan_unpoison_memory(vxr, size);
}
399 
400 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
401 
/*
 * VSTL: store the leftmost @index + 1 bytes (capped at 16) of vector
 * register @v1 to @vxr. Fallback for compilers without %O/%R format flag
 * support: address materialized in r1 via "la", hence the "1" clobber.
 * kmsan_unpoison_memory() marks the asm-written bytes as initialized.
 */
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_write(vxr, size);
	asm volatile(
		"	la	1,%[vxr]\n"
		"	VSTL	%[v1],%[index],0,1"
		: [vxr] "=R" (*(u8 *)vxr)
		: [index] "d" (index), [v1] "I" (v1)
		: "memory", "1");
	kmsan_unpoison_memory(vxr, size);
}
416 
417 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
418 
419 #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
420 
/*
 * fpu_vstm - VSTM: store vector registers _v1.._v3 to consecutive 16-byte
 * slots at _vxrs. The anonymous struct gives the asm an output operand of
 * the exact size of the written range, so the compiler and the
 * instrumentation see the full store. Evaluates to the number of vector
 * registers stored.
 */
#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_write(_v, size);					\
	asm volatile("VSTM	%[v1],%[v3],%O[vxrs],%R[vxrs]"		\
		     : [vxrs] "=Q" (*_v)				\
		     : [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory");					\
	(_v3) - (_v1) + 1;						\
})
435 
436 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
437 
/*
 * fpu_vstm - VSTM: store vector registers _v1.._v3 to consecutive 16-byte
 * slots at _vxrs. Fallback for compilers without %O/%R format flag
 * support: address materialized in r1 via "la", hence the "1" clobber.
 * Evaluates to the number of vector registers stored.
 */
#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_write(_v, size);					\
	asm volatile(							\
		"	la	1,%[vxrs]\n"				\
		"	VSTM	%[v1],%[v3],0,1"			\
		: [vxrs] "=R" (*_v)					\
		: [v1] "I" (_v1), [v3] "I" (_v3)			\
		: "memory", "1");					\
	(_v3) - (_v1) + 1;						\
})
454 
455 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
456 
/*
 * VUPLLF: vector unpack logical low: zero-extend the low word elements
 * of %v2 into the doubleword elements of %v1.
 */
static __always_inline void fpu_vupllf(u8 v1, u8 v2)
{
	asm volatile("VUPLLF	%[v1],%[v2]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2)
		     : "memory");
}
464 
/* VX: bitwise exclusive or: %v1 = %v2 ^ %v3. */
static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VX	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}
472 
/* VZERO: set vector register @v to all zeroes (macro from asm/fpu-insn-asm.h). */
static __always_inline void fpu_vzero(u8 v)
{
	asm volatile("VZERO	%[v]"
		     :
		     : [v] "I" (v)
		     : "memory");
}
480 
481 #endif /* __ASSEMBLER__ */
482 #endif	/* __ASM_S390_FPU_INSN_H */
483