xref: /linux/arch/s390/include/asm/fpu-insn-asm.h (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Support for Vector Instructions
4  *
5  * Assembler macros to generate .byte/.word code for particular
6  * vector instructions that are supported by recent binutils (>= 2.26) only.
7  *
8  * Copyright IBM Corp. 2015
9  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
10  */
11 
12 #ifndef __ASM_S390_FPU_INSN_ASM_H
13 #define __ASM_S390_FPU_INSN_ASM_H
14 
15 #ifndef __ASM_S390_FPU_INSN_H
16 #error only <asm/fpu-insn.h> can be included directly
17 #endif
18 
19 #ifdef __ASSEMBLY__
20 
21 /* Macros to generate vector instruction byte code */
22 
23 /* GR_NUM - Retrieve general-purpose register number
24  *
25  * @opd:	Operand to store register number
 * @gr:		String designating the register in the format "%rN"
27  */
.macro	GR_NUM	opd gr
	/* Sentinel: 255 means no "%rN" string has matched (yet) */
	\opd = 255
	.ifc \gr,%r0
		\opd = 0
	.endif
	.ifc \gr,%r1
		\opd = 1
	.endif
	.ifc \gr,%r2
		\opd = 2
	.endif
	.ifc \gr,%r3
		\opd = 3
	.endif
	.ifc \gr,%r4
		\opd = 4
	.endif
	.ifc \gr,%r5
		\opd = 5
	.endif
	.ifc \gr,%r6
		\opd = 6
	.endif
	.ifc \gr,%r7
		\opd = 7
	.endif
	.ifc \gr,%r8
		\opd = 8
	.endif
	.ifc \gr,%r9
		\opd = 9
	.endif
	.ifc \gr,%r10
		\opd = 10
	.endif
	.ifc \gr,%r11
		\opd = 11
	.endif
	.ifc \gr,%r12
		\opd = 12
	.endif
	.ifc \gr,%r13
		\opd = 13
	.endif
	.ifc \gr,%r14
		\opd = 14
	.endif
	.ifc \gr,%r15
		\opd = 15
	.endif
	/* No "%rN" string matched: assume \gr already is a plain number */
	.if \opd == 255
		\opd = \gr
	.endif
.endm
82 
83 /* VX_NUM - Retrieve vector register number
84  *
85  * @opd:	Operand to store register number
86  * @vxr:	String designation register in the format "%vN"
87  *
 * The vector register number is used as an input number to the
 * instruction, as well as to compute the RXB field of the
 * instruction.
91  */
.macro	VX_NUM	opd vxr
	/* Sentinel: 255 means no "%vN" string has matched (yet) */
	\opd = 255
	.ifc \vxr,%v0
		\opd = 0
	.endif
	.ifc \vxr,%v1
		\opd = 1
	.endif
	.ifc \vxr,%v2
		\opd = 2
	.endif
	.ifc \vxr,%v3
		\opd = 3
	.endif
	.ifc \vxr,%v4
		\opd = 4
	.endif
	.ifc \vxr,%v5
		\opd = 5
	.endif
	.ifc \vxr,%v6
		\opd = 6
	.endif
	.ifc \vxr,%v7
		\opd = 7
	.endif
	.ifc \vxr,%v8
		\opd = 8
	.endif
	.ifc \vxr,%v9
		\opd = 9
	.endif
	.ifc \vxr,%v10
		\opd = 10
	.endif
	.ifc \vxr,%v11
		\opd = 11
	.endif
	.ifc \vxr,%v12
		\opd = 12
	.endif
	.ifc \vxr,%v13
		\opd = 13
	.endif
	.ifc \vxr,%v14
		\opd = 14
	.endif
	.ifc \vxr,%v15
		\opd = 15
	.endif
	.ifc \vxr,%v16
		\opd = 16
	.endif
	.ifc \vxr,%v17
		\opd = 17
	.endif
	.ifc \vxr,%v18
		\opd = 18
	.endif
	.ifc \vxr,%v19
		\opd = 19
	.endif
	.ifc \vxr,%v20
		\opd = 20
	.endif
	.ifc \vxr,%v21
		\opd = 21
	.endif
	.ifc \vxr,%v22
		\opd = 22
	.endif
	.ifc \vxr,%v23
		\opd = 23
	.endif
	.ifc \vxr,%v24
		\opd = 24
	.endif
	.ifc \vxr,%v25
		\opd = 25
	.endif
	.ifc \vxr,%v26
		\opd = 26
	.endif
	.ifc \vxr,%v27
		\opd = 27
	.endif
	.ifc \vxr,%v28
		\opd = 28
	.endif
	.ifc \vxr,%v29
		\opd = 29
	.endif
	.ifc \vxr,%v30
		\opd = 30
	.endif
	.ifc \vxr,%v31
		\opd = 31
	.endif
	/* No "%vN" string matched: assume \vxr already is a plain number */
	.if \opd == 255
		\opd = \vxr
	.endif
.endm
194 
/* RXB - Compute the RXB field from the most significant bits of the used vector registers
196  *
197  * @rxb:	Operand to store computed RXB value
198  * @v1:		Vector register designated operand whose MSB is stored in
199  *		RXB bit 0 (instruction bit 36) and whose remaining bits
200  *		are stored in instruction bits 8-11.
201  * @v2:		Vector register designated operand whose MSB is stored in
202  *		RXB bit 1 (instruction bit 37) and whose remaining bits
203  *		are stored in instruction bits 12-15.
204  * @v3:		Vector register designated operand whose MSB is stored in
205  *		RXB bit 2 (instruction bit 38) and whose remaining bits
206  *		are stored in instruction bits 16-19.
207  * @v4:		Vector register designated operand whose MSB is stored in
208  *		RXB bit 3 (instruction bit 39) and whose remaining bits
209  *		are stored in instruction bits 32-35.
210  *
211  * Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
212  * correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
213  * not limited to the vector instruction formats VRR-g, VRR-h, VRS-a, VRS-d,
214  * and VSI.
215  *
216  * [1] IBM z/Architecture Principles of Operation, chapter "Program
217  * Execution, section "Instructions", subsection "Instruction Formats".
218  */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	/*
	 * Collect bit 4 (mask 0x10, the MSB of a 5-bit vector register
	 * number) of each operand into the 4-bit RXB value:
	 * v1 -> 0x08, v2 -> 0x04, v3 -> 0x02, v4 -> 0x01.
	 */
	\rxb = ((\v1 & 0x10) >> 1) | ((\v2 & 0x10) >> 2) | ((\v3 & 0x10) >> 3) | ((\v4 & 0x10) >> 4)
.endm
234 
235 /* MRXB - Generate Element Size Control and RXB value
236  *
237  * @m:		Element size control
238  * @v1:		First vector register designated operand (for RXB)
239  * @v2:		Second vector register designated operand (for RXB)
240  * @v3:		Third vector register designated operand (for RXB)
241  * @v4:		Fourth vector register designated operand (for RXB)
242  *
243  * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
244  * description for further details.
245  */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	/* RXB initializes rxb itself; no pre-clearing is required */
	RXB	rxb, \v1, \v2, \v3, \v4
	/* Emit element size control in the high nibble, RXB in the low */
	.byte	(\m << 4) | rxb
.endm
251 
252 /* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
253  *
254  * @m:		Element size control
255  * @opc:	Opcode
256  * @v1:		First vector register designated operand (for RXB)
257  * @v2:		Second vector register designated operand (for RXB)
258  * @v3:		Third vector register designated operand (for RXB)
259  * @v4:		Fourth vector register designated operand (for RXB)
260  *
261  * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
262  * description for further details.
263  */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	/* Second-to-last instruction byte: element size control and RXB */
	MRXB	\m, \v1, \v2, \v3, \v4
	/* Last instruction byte: the opcode */
	.byte	\opc
.endm
268 
269 /* Vector support instructions */
270 
271 /* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	/* Opcode prefix 0xE7 plus low four bits of V1 */
	.word	(0xE700 | ((v1&15) << 4))
	/* 16-bit immediate mask: each bit set selects one all-ones byte */
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
/* VZERO - clear all bits of a vector register (VGBM with mask 0) */
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
/* VONE - set all bits of a vector register (VGBM with mask 0xFFFF) */
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
284 
285 /* VECTOR LOAD VR ELEMENT FROM GR */
/*
 * VLVG - load a single element of a vector register from a GR
 *
 * @v:	vector register designation ("%vN" or plain number)
 * @gr:	general register holding the value to insert
 * @disp:	displacement selecting the element index
 * @m:	element size control (0=byte, 1=halfword, 2=word, 3=doubleword)
 */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"		/* base register fixed to %r0 */
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index, base
	/*
	 * Fixed: previously forwarded \base as an excess fifth argument to
	 * the four-parameter VLVG macro, which gas rejects.  \base remains
	 * accepted but unused; VLVG always encodes base register %r0.
	 */
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
306 
307 /* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	/* V1 in bits 8-11, V2 in bits 12-15 */
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	/* Bits 16-31 are unused and emitted as zero */
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm
315 
316 /* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index	/* index register; %r0 means no index */
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	/* Base register in bits 16-19, displacement in bits 20-31 */
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x06, v1
.endm
325 
326 /* VECTOR LOAD ELEMENT */
/* Common worker: emit a VECTOR LOAD ELEMENT with the given opcode */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index	/* index register; %r0 means no index */
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	/* \m3 selects the element number within the vector register */
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00	/* byte element */
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01	/* halfword element */
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03	/* word element */
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02	/* doubleword element */
.endm
347 
348 /* VECTOR LOAD ELEMENT IMMEDIATE */
/* Common worker: emit a VECTOR LOAD ELEMENT IMMEDIATE with given opcode */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	/* 16-bit signed immediate operand */
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
/* Note: the wrappers' "index" argument is the target element number (m3) */
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40	/* byte element */
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41	/* halfword element */
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43	/* word element */
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42	/* doubleword element */
.endm
367 
368 /* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	/* R1 in bits 8-11, V3 in bits 12-15 */
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	/* Only v3 contributes to RXB; the r1 slot passes 0 */
	MRXBOPC	\m, 0x21, 0, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0	/* byte element */
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1	/* halfword element */
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2	/* word element */
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3	/* doubleword element */
.endm
389 
390 /* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	/* First register in bits 8-11, last register in bits 12-15 */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	/* \hint is placed in the m field (presumably an access hint - verify) */
	MRXBOPC	\hint, 0x36, v1, v3
.endm
399 
400 /* VECTOR STORE */
.macro	VST	vr1, disp, index="%r0", base
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index	/* index register; %r0 means no index */
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (x2&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	0, 0x0E, v1
.endm
409 
410 /* VECTOR STORE BYTE REVERSED ELEMENTS */
/*
 * Common worker: store elements of @vr1 with bytes reversed.
 * Fixed: the .macro directive was indented, unlike every other macro
 * definition in this file.
 */
.macro	VSTBR	vr1, disp, index="%r0", base, m
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index	/* index register; %r0 means no index */
	GR_NUM	b2, \base
	/* Note: 0xE6 opcode prefix, unlike the 0xE7 instructions above */
	.word	0xE600 | ((v1&15) << 4) | (x2&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x0E, v1
.endm
.macro	VSTBRH	vr1, disp, index="%r0", base
	VSTBR	\vr1, \disp, \index, \base, 1	/* halfword elements */
.endm
.macro	VSTBRF	vr1, disp, index="%r0", base
	VSTBR	\vr1, \disp, \index, \base, 2	/* word elements */
.endm
.macro	VSTBRG	vr1, disp, index="%r0", base
	VSTBR	\vr1, \disp, \index, \base, 3	/* doubleword elements */
.endm
.macro	VSTBRQ	vr1, disp, index="%r0", base
	VSTBR	\vr1, \disp, \index, \base, 4	/* quadword element */
.endm
431 
432 /* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base
	/* First register in bits 8-11, last register in bits 12-15 */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	/* \hint is placed in the m field (presumably an access hint - verify) */
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
441 
442 /* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	/* V4's low bits travel in the m-field slot (instruction bits 32-35) */
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
452 
453 /* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	/* Bits 16-31 are unused and emitted as zero */
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0	/* byte elements */
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1	/* halfword elements */
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2	/* word elements */
.endm
470 
471 /* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
.macro	VPDI	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	/* \m4 selects which doubleword of each source is taken */
	MRXBOPC	\m4, 0x84, v1, v2, v3
.endm
480 
481 /* VECTOR REPLICATE */
.macro	VREP	vr1, vr3, imm2, m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	/* \imm2 selects the source element index to replicate */
	.word	\imm2
	MRXBOPC	\m4, 0x4D, v1, v3
.endm
.macro	VREPB	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 0	/* byte elements */
.endm
.macro	VREPH	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 1	/* halfword elements */
.endm
.macro	VREPF	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 2	/* word elements */
.endm
.macro	VREPG	vr1, vr3, imm2
	VREP	\vr1, \vr3, \imm2, 3	/* doubleword elements */
.endm
501 
502 /* VECTOR MERGE HIGH */
.macro	VMRH	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x61, v1, v2, v3
.endm
.macro	VMRHB	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 0	/* byte elements */
.endm
.macro	VMRHH	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 1	/* halfword elements */
.endm
.macro	VMRHF	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 2	/* word elements */
.endm
.macro	VMRHG	vr1, vr2, vr3
	VMRH	\vr1, \vr2, \vr3, 3	/* doubleword elements */
.endm
523 
524 /* VECTOR MERGE LOW */
.macro	VMRL	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0x60, v1, v2, v3
.endm
.macro	VMRLB	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 0	/* byte elements */
.endm
.macro	VMRLH	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 1	/* halfword elements */
.endm
.macro	VMRLF	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 2	/* word elements */
.endm
.macro	VMRLG	vr1, vr2, vr3
	VMRL	\vr1, \vr2, \vr3, 3	/* doubleword elements */
.endm
545 
546 /* VECTOR LOAD WITH LENGTH */
.macro VLL	v, gr, disp, base
	VX_NUM	v1, \v
	GR_NUM	b2, \base
	GR_NUM	r3, \gr	/* register holding the load length */
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x37, v1
.endm
555 
556 /* VECTOR STORE WITH LENGTH */
.macro VSTL	v, gr, disp, base
	VX_NUM	v1, \v
	GR_NUM	b2, \base
	GR_NUM	r3, \gr	/* register holding the store length */
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x3f, v1
.endm
565 
566 /* Vector integer instructions */
567 
568 /* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm
577 
578 /* VECTOR CHECKSUM */
.macro VCKSM	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC 0, 0x66, v1, v2, v3
.endm
587 
588 /* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm
597 
598 /* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0	/* byte elements */
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1	/* halfword elements */
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2	/* word elements */
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3	/* doubleword elements */
.endm
619 
620 /* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	/* V3 in bits 16-19, element size control m5 in bits 20-23 */
	.word	((v3&15) << 12) | (\m5 << 8)
	/* V4's low bits travel in the m-field slot (instruction bits 32-35) */
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0	/* byte elements */
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1	/* halfword elements */
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2	/* word elements */
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3	/* doubleword elements */
.endm
642 
643 /* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3	/* shift amount is taken from this register */
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm
652 
653 /* VECTOR REPLICATE IMMEDIATE */
/*
 * VREPI - replicate a signed 16-bit immediate into every element
 *
 * @vr1:	target vector register
 * @imm2:	16-bit signed immediate
 * @m3:	element size control (0=byte, 1=halfword, 2=word, 3=doubleword)
 */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0	/* byte elements */
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1	/* halfword elements */
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2	/* word elements */
.endm
.macro	VREPIG	vr1, imm2
	/*
	 * Fixed: previously invoked VREP, which is a different macro (it
	 * replicates a register element, takes four arguments, and would
	 * have been expanded with an empty m4 operand).
	 */
	VREPI	\vr1, \imm2, 3	/* doubleword elements */
.endm
672 
673 /* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0	/* byte elements */
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1	/* halfword elements */
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2	/* word elements */
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3	/* doubleword elements */
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4	/* quadword element */
.endm
697 
698 /* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3	/* per-element shift amounts */
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC \m4, 0x7A, v1, v2, v3
.endm

.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0	/* byte elements */
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1	/* halfword elements */
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2	/* word elements */
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3	/* doubleword elements */
.endm
720 
721 /* VECTOR ELEMENT ROTATE LEFT LOGICAL */
.macro	VERLL	vr1, vr3, disp, base="%r0", m4
	VX_NUM	v1, \vr1
	VX_NUM	v3, \vr3
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	/* Rotate count is formed from base register plus displacement */
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m4, 0x33, v1, v3
.endm
.macro	VERLLB	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 0	/* byte elements */
.endm
.macro	VERLLH	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 1	/* halfword elements */
.endm
.macro	VERLLF	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 2	/* word elements */
.endm
.macro	VERLLG	vr1, vr3, disp, base="%r0"
	VERLL	\vr1, \vr3, \disp, \base, 3	/* doubleword elements */
.endm
742 
743 /* VECTOR SHIFT LEFT DOUBLE BY BYTE */
.macro	VSLDB	vr1, vr2, vr3, imm4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	/* \imm4 is the byte shift amount, placed next to V3 */
	.word	((v3&15) << 12) | (\imm4)
	MRXBOPC	0, 0x77, v1, v2, v3
.endm
752 
753 #endif	/* __ASSEMBLY__ */
754 #endif	/* __ASM_S390_FPU_INSN_ASM_H */
755