Lines matching "+full:2 +full:d"

64 pmull \rd\().1q, \rn\().1d, \rm\().1d
68 pmull2 \rd\().1q, \rn\().2d, \rm\().2d
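
The two macros at file lines 64 and 68 are thin wrappers around the AArch64 PMULL/PMULL2 instructions, which perform a 64x64-bit carry-less (GF(2) polynomial) multiplication and return a 128-bit result; PMULL2 reads the upper 64-bit lane of each source. A minimal C model of that operation, assuming a GCC/Clang compiler for the unsigned __int128 type (the helper name clmul64 is illustrative, not taken from the source):

#include <stdint.h>

/* Bitwise model of one 64x64 -> 128-bit carry-less multiply, i.e. what a
 * single PMULL (or PMULL2, acting on the upper lanes) computes: partial
 * products are combined with XOR rather than addition. */
static unsigned __int128 clmul64(uint64_t a, uint64_t b)
{
	unsigned __int128 r = 0;

	for (int i = 0; i < 64; i++)
		if (b & (1ULL << i))
			r ^= (unsigned __int128)a << i;
	return r;
}
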
73 ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
96 __pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4
107 pmull\t \rq\().8h, \ad, \bd // D = A*B
113 uzp1 t4.2d, t3.2d, t5.2d
114 uzp2 t3.2d, t3.2d, t5.2d
115 uzp1 t6.2d, t7.2d, t9.2d
116 uzp2 t7.2d, t7.2d, t9.2d
131 zip2 t5.2d, t4.2d, t3.2d
132 zip1 t3.2d, t4.2d, t3.2d
133 zip2 t9.2d, t6.2d, t7.2d
134 zip1 t7.2d, t6.2d, t7.2d
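
File lines 73 through 134 come from the __pmull_p8/__pmull2_p8 fallback and its __pmull_p8_tail helper, which synthesize a 64x64 carry-less multiply from 8x8-bit PMULL operations (the A1/A2/A3 byte rotations, then the uzp/zip realignment) on cores that lack the 64-bit form of PMULL. The trick relies on carry-less multiplication distributing over XOR, so the product can be assembled from byte-level partial products. A brute-force C sketch of that identity, without the vectorized shuffling (helper names are mine, not from the source):

#include <stdint.h>

/* 8x8 -> 16-bit carry-less multiply: the per-lane operation of PMULL on
 * byte elements. */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b & (1u << i))
			r ^= (uint16_t)a << i;
	return r;
}

/* 64x64 -> 128-bit carry-less multiply built only from byte multiplies:
 * since XOR is the field addition, the result is the XOR of all byte-level
 * partial products, each shifted into place.  The assembly reaches the same
 * result with far fewer multiply instructions by rotating whole vectors
 * (A1/A2/A3) and realigning the partial products with uzp/zip/ext. */
static unsigned __int128 clmul64_from_bytes(uint64_t a, uint64_t b)
{
	unsigned __int128 r = 0;

	for (int i = 0; i < 8; i++)
		for (int j = 0; j < 8; j++)
			r ^= (unsigned __int128)clmul8(a >> 8 * i, b >> 8 * j)
			     << 8 * (i + j);
	return r;
}
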
149 ld1 {HH.2d-HH4.2d}, [x8]
151 trn1 SHASH2.2d, SHASH.2d, HH.2d
152 trn2 T1.2d, SHASH.2d, HH.2d
155 trn1 HH34.2d, HH3.2d, HH4.2d
156 trn2 T1.2d, HH3.2d, HH4.2d
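
File lines 149 to 156 load the precomputed key powers and pair them up with trn1/trn2: TRN1 collects the even-numbered (low) 64-bit lanes of its two sources, TRN2 the odd-numbered (high) lanes. The source then XORs each pair into SHASH2 and HH34 (those eor lines operate on .16b and therefore do not appear in this match list), producing the folded (a1 + a0) operands consumed by the Karatsuba multiplications further down. A lane-level model of the two permutes (type and function names are illustrative only):

#include <stdint.h>

struct u64x2 { uint64_t d[2]; };	/* one .2d vector register */

/* TRN1 Vd.2d, Vn.2d, Vm.2d: even-numbered lanes of both sources. */
static struct u64x2 trn1_2d(struct u64x2 n, struct u64x2 m)
{
	return (struct u64x2){ { n.d[0], m.d[0] } };
}

/* TRN2 Vd.2d, Vn.2d, Vm.2d: odd-numbered lanes of both sources. */
static struct u64x2 trn2_2d(struct u64x2 n, struct u64x2 m)
{
	return (struct u64x2){ { n.d[1], m.d[1] } };
}

/* XORing the two results gives { lo^hi of the first power, lo^hi of the
 * second }, i.e. the "(a1 + a0)" halves of two key powers packed into
 * one register. */
static struct u64x2 fold_halves(struct u64x2 h1, struct u64x2 h2)
{
	struct u64x2 lo = trn1_2d(h1, h2), hi = trn2_2d(h1, h2);

	return (struct u64x2){ { lo.d[0] ^ hi.d[0], lo.d[1] ^ hi.d[1] } };
}
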
160 shl MASK.2d, MASK.2d, #57
169 movi k32_48.2d, #0xffffffff
170 mov k32_48.h[2], k32_48.h[0]
171 ushr k00_16.2d, k32_48.2d, #32
176 dup perm1.2d, x5
178 ushr perm2.2d, perm1.2d, #8
179 ushr perm3.2d, perm1.2d, #16
180 ushr T1.2d, perm1.2d, #24
181 sli perm2.2d, perm1.2d, #56
182 sli perm3.2d, perm1.2d, #48
183 sli T1.2d, perm1.2d, #40
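
File lines 169 to 183 are more precomputation for the 8-bit fallback: the k00_16/k32_48 masks and the perm1/perm2/perm3/T1 vectors, which are presumably used later as tbl byte-shuffle indexes (those tbl lines use .16b and are not in this match list). The recurring pattern of ushr #n followed by sli #(64 - n) is simply a rotate right by n bits within each 64-bit lane. A one-line C equivalent of that idiom (valid for 0 < n < 64; the function name is mine):

#include <stdint.h>

/* ushr Vd.2d, Vn.2d, #n  followed by  sli Vd.2d, Vn.2d, #(64 - n)
 * rotates every 64-bit lane of Vn right by n bits. */
static uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}
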
191 ext ss2.8b, SHASH2.8b, SHASH2.8b, #2
201 pmull T2.1q, XL.1d, MASK.1d
204 mov XH.d[0], XM.d[1]
205 mov XM.d[1], XL.d[0]
209 pmull XL.1q, XL.1d, MASK.1d
219 mov XL.d[1], XM.d[0]
220 mov XH.d[0], XM.d[1]
222 shl T1.2d, XL.2d, #57
223 shl T2.2d, XL.2d, #62
225 shl T1.2d, XL.2d, #63
230 mov XL.d[1], T2.d[0]
231 mov XH.d[0], T2.d[1]
233 ushr T2.2d, XL.2d, #1
236 ushr T2.2d, T2.2d, #6
237 ushr XL.2d, XL.2d, #1
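
File lines 201-209 and 219-237 are the two reduction macros: after the multiply, the up-to-256-bit carry-less product has to be brought back to 128 bits modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1. The p64 variant folds twice with PMULL against MASK (the constant shifted into position at file lines 160 and 619); the p8 variant performs the same fold with shifts, and GHASH's bit-reflected data layout is why the counts show up as 64 - n (#57, #62, #63) together with small right shifts. A sketch of the fold in plain, non-reflected bit order, which is not the byte/bit order the assembly operates on (GCC/Clang __int128 assumed):

#include <stdint.h>

/* Reduce a 256-bit carry-less product hi:lo modulo the GHASH field
 * polynomial x^128 + x^7 + x^2 + x + 1, with bit i holding the
 * coefficient of x^i.  The identity x^128 == x^7 + x^2 + x + 1 is
 * applied twice, because the first fold can push a few coefficients
 * back above bit 127. */
static unsigned __int128 gf128_reduce(unsigned __int128 hi,
				      unsigned __int128 lo)
{
	unsigned __int128 t, spill;

	/* hi * (x^7 + x^2 + x + 1), truncated to 128 bits */
	t = hi ^ (hi << 1) ^ (hi << 2) ^ (hi << 7);

	/* the (at most seven) coefficients the shifts pushed past x^127 */
	spill = (hi >> 127) ^ (hi >> 126) ^ (hi >> 121);

	/* fold the spill once more; it is too small to spill again */
	t ^= spill ^ (spill << 1) ^ (spill << 2) ^ (spill << 7);

	return lo ^ t;
}
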
241 ld1 {SHASH.2d}, [x3]
242 ld1 {XL.2d}, [x1]
248 ld1 {T1.2d}, [x4]
253 tbnz w0, #0, 2f // skip until #blocks is a
254 tbnz w0, #1, 2f // round multiple of 4
269 pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
270 pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
271 pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
274 pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
275 pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
276 pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
284 pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
285 pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
286 pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
297 pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
299 pmull XL.1q, HH4.1d, XL.1d // a0 * b0
300 pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
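
File lines 269-300 (and the similar group at 653-682) carry the a1*b1, a0*b0 and (a1 + a0)(b1 + b0) comments: each 128x128-bit carry-less multiply is done Karatsuba-style with three 64-bit PMULLs instead of four, which is what the folded key halves in SHASH2 and HH34 are for. A self-contained C sketch of that decomposition (helper names are mine; GCC/Clang __int128 assumed):

#include <stdint.h>

/* 64x64 -> 128-bit carry-less multiply, as in the earlier sketch. */
static unsigned __int128 clmul64(uint64_t a, uint64_t b)
{
	unsigned __int128 r = 0;

	for (int i = 0; i < 64; i++)
		if (b & (1ULL << i))
			r ^= (unsigned __int128)a << i;
	return r;
}

/* One 128x128 carry-less multiply from three 64-bit multiplies
 * (Karatsuba), mirroring the XH = a1*b1, XL = a0*b0 and
 * XM = (a1 + a0)(b1 + b0) terms named in the comments above. */
static void clmul128(uint64_t a1, uint64_t a0, uint64_t b1, uint64_t b0,
		     unsigned __int128 *hi, unsigned __int128 *lo)
{
	unsigned __int128 h = clmul64(a1, b1);		 /* XH */
	unsigned __int128 l = clmul64(a0, b0);		 /* XL */
	unsigned __int128 m = clmul64(a1 ^ a0, b1 ^ b0); /* XM */

	m ^= h ^ l;		/* recover the cross terms a1*b0 ^ a0*b1 */
	*hi = h ^ (m >> 64);
	*lo = l ^ (m << 64);
}
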
319 2: ld1 {T1.2d}, [x2], #16
322 3: /* multiply XL by SHASH in GF(2^128) */
346 5: st1 {XL.2d}, [x1]
414 tbnz \rounds, #2, .Lnot128_\@
443 ld1 {SHASH.2d}, [x3], #16
444 ld1 {HH.2d-HH4.2d}, [x3]
446 trn1 SHASH2.2d, SHASH.2d, HH.2d
447 trn2 T1.2d, SHASH.2d, HH.2d
450 trn1 HH34.2d, HH3.2d, HH4.2d
451 trn2 T1.2d, HH3.2d, HH4.2d
454 ld1 {XL.2d}, [x4]
514 b 2f
517 2: .if \enc == 0
569 st1 {XL.2d}, [x4]
619 shl MASK.2d, MASK.2d, #57
628 tbz w9, #2, 0f // <4 blocks?
634 tbz w9, #0, 1f // 2 blocks?
635 tbz w9, #1, 2f // 1 block?
645 2: eor TT4.16b, TT4.16b, XL.16b
653 pmull2 XH2.1q, HH4.2d, IN1.2d // a1 * b1
655 pmull XL2.1q, HH4.1d, IN1.1d // a0 * b0
656 pmull2 XM2.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
660 pmull2 XH.1q, HH3.2d, T1.2d // a1 * b1
661 pmull XL.1q, HH3.1d, T1.1d // a0 * b0
662 pmull XM.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
670 pmull2 XH.1q, HH.2d, T2.2d // a1 * b1
671 pmull XL.1q, HH.1d, T2.1d // a0 * b0
672 pmull2 XM.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
680 pmull XL.1q, SHASH.1d, IN1.1d // a0 * b0
681 pmull2 XH.1q, SHASH.2d, IN1.2d // a1 * b1
682 pmull XM.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
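
The group at file lines 653-682, like the one at 269-300, pairs one input block with each of H^4, H^3, H^2 and H and XORs the partial results: this is the aggregated form of the GHASH recurrence, which turns four dependent multiplications into four independent ones. A sketch of that identity, with a deliberately naive bitwise GF(2^128) multiply standing in for the PMULL-plus-reduction sequences shown above (all names are illustrative; GCC/Clang __int128 assumed):

typedef unsigned __int128 gf128_t;	/* one 128-bit GHASH block */

/* Bitwise multiply-and-reduce in GF(2^128) with the GHASH polynomial
 * x^128 + x^7 + x^2 + x + 1, in plain bit order; for illustration only. */
static gf128_t gf128_mul(gf128_t a, gf128_t b)
{
	gf128_t r = 0;

	for (int i = 0; i < 128; i++) {
		gf128_t msb = a >> 127;

		if (b & ((gf128_t)1 << i))
			r ^= a;
		/* a *= x, folding x^128 back in as x^7 + x^2 + x + 1 */
		a = (a << 1) ^ (msb ? (gf128_t)0x87 : 0);
	}
	return r;
}

/* Aggregated 4-block GHASH update:
 *
 *   Y' = (Y ^ C1)*H^4 ^ C2*H^3 ^ C3*H^2 ^ C4*H
 *
 * equals four sequential Y = (Y ^ Ci)*H steps, but the four
 * multiplications no longer depend on each other. */
static gf128_t ghash_update_4x(gf128_t y, const gf128_t c[4],
			       const gf128_t hpow[4])	/* H^4, H^3, H^2, H */
{
	return gf128_mul(y ^ c[0], hpow[0]) ^
	       gf128_mul(c[1], hpow[1]) ^
	       gf128_mul(c[2], hpow[2]) ^
	       gf128_mul(c[3], hpow[3]);
}
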
704 sub w12, w8, #2
724 tbnz x7, #2, .Lnot128