2 # SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
5 # Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
9 # IALU(*)/gcc-4.4 NEON
11 # ARM11xx(ARMv6) 7.78/+100% -
12 # Cortex-A5 6.35/+130% 3.00
13 # Cortex-A8 6.25/+115% 2.36
14 # Cortex-A9 5.10/+95% 2.55
15 # Cortex-A15 3.85/+85% 1.25(**)
18 # (*) this is for -march=armv6, i.e. with a bunch of ldrb instructions loading the data;
19 # (**) these are trade-off results; they can be improved by ~8%, but at
20 # the cost of a 15/12% regression on Cortex-A5/A7; it's even possible
21 # to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
24 if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
25 else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
29 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
30 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
31 die "can't locate arm-xlate.pl";
66 stmdb sp!,{r4-r11}
85 mov r3,#-1
95 and r3,r10,#-4 @ 0x0ffffffc
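The two constants here implement the standard Poly1305 clamping of the key half r: every word keeps only its low 28 bits, and the upper three words also drop their low two bits. A minimal C sketch of the same masking, assuming a little-endian 16-byte key as on ARM (the helper name is invented for illustration):

#include <stdint.h>
#include <string.h>

/* Hypothetical illustration: clamp r as Poly1305 requires.  The low
 * word is masked with 0x0fffffff (built by "mov r10,#0x0fffffff"),
 * the other three with 0x0ffffffc, which the assembly derives as
 * r10 & -4 since -4 is ...fffffffc. */
static void poly1305_clamp(uint32_t r[4], const uint8_t key[16])
{
    memcpy(r, key, 16);        /* little-endian words, as on ARM */
    r[0] &= 0x0fffffff;        /* clear the top 4 bits */
    r[1] &= 0x0ffffffc;        /* clear top 4 and low 2 bits */
    r[2] &= 0x0ffffffc;
    r[3] &= 0x0ffffffc;
}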
131 orr r11,r11,#1 @ thumb-ify addresses
134 add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
136 addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
137 addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
165 ldmia sp!,{r4-r11}
171 bx lr @ interoperable with Thumb ISA:-)
173 .size poly1305_init,.-poly1305_init
184 stmdb sp!,{r3-r11,lr}
186 ands $len,$len,#-16
193 ldmia $ctx,{$h0-$r3} @ load context
199 ldmia $ctx!,{$h0-$h4} @ load hash value
203 adds $r0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32
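At this point the hash switches radix: the five base-2^26 limbs kept by the NEON code are repacked into the four base-2^32 words the scalar code works on. A hedged C equivalent of the adds/lsl chain that starts here (function name invented; the final carry out of w[3] is folded back modulo 2^130-5 by the real code and is not shown):

#include <stdint.h>

/* h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104, repacked into
 * four 32-bit words; mirrors "adds r0,h0,h1,lsl#26" and the lines
 * following it.  Sketch only. */
static void base26_to_base32(uint32_t w[4], const uint32_t h[5])
{
    uint64_t t;
    t = (uint64_t)h[0] + ((uint64_t)h[1] << 26); w[0] = (uint32_t)t;
    t = (t >> 32)      + ((uint64_t)h[2] << 20); w[1] = (uint32_t)t;
    t = (t >> 32)      + ((uint64_t)h[3] << 14); w[2] = (uint32_t)t;
    t = (t >> 32)      + ((uint64_t)h[4] <<  8); w[3] = (uint32_t)t;
}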
220 ldmia $ctx,{$r0-$r3} @ load key
240 ldrb r1,[lr,#-15]
241 ldrb r2,[lr,#-14]
242 ldrb r3,[lr,#-13]
244 ldrb r0,[lr,#-12]
246 ldrb r1,[lr,#-11]
248 ldrb r2,[lr,#-10]
251 ldrb r3,[lr,#-9]
253 ldrb r0,[lr,#-8]
255 ldrb r1,[lr,#-7]
257 ldrb r2,[lr,#-6]
260 ldrb r3,[lr,#-5]
262 ldrb r0,[lr,#-4]
264 ldrb r1,[lr,#-3]
266 ldrb r2,[lr,#-2]
269 ldrb r3,[lr,#-1]
279 ldr r1,[lr,#-12]
280 ldr r2,[lr,#-8]
281 ldr r3,[lr,#-4]
332 adds $h2,lr,r0 @ d2+=d1>>32
335 adds $h3,r2,r1 @ d3+=d2>>32
340 and r1,$h4,#-4
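This is the final reduction modulo p = 2^130 - 5: whatever sits in h4 above its low two bits represents multiples of 2^130, and 2^130 is congruent to 5 mod p, so it folds back into the low words multiplied by 5. With c = h4 >> 2, the masked value r1 = h4 & -4 equals 4*c, and r1 + (r1 >> 2) (presumably an add that follows in the full file, filtered out of this listing) equals 5*c. A small C sketch of the trick, with an invented helper name:

#include <stdint.h>

/* Fold the h4 overflow word back into h[0..3] times 5, keeping only
 * the low two bits of h4.  h4 holds just a few bits at this point. */
static void fold_top(uint32_t h[4], uint32_t *h4)
{
    uint32_t top = *h4 & ~3u;                 /* 4*(h4 >> 2) */
    uint64_t t   = (uint64_t)h[0] + top + (top >> 2); /* + 5*(h4>>2) */
    *h4 &= 3;
    h[0] = (uint32_t)t; t >>= 32;
    t += h[1]; h[1] = (uint32_t)t; t >>= 32;
    t += h[2]; h[2] = (uint32_t)t; t >>= 32;
    t += h[3]; h[3] = (uint32_t)t; t >>= 32;
    *h4 += (uint32_t)t;                       /* propagate carry */
}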
354 stmdb $ctx,{$h0-$h4} @ store the result
358 ldmia sp!,{r3-r11,pc}
360 ldmia sp!,{r3-r11,lr}
363 bx lr @ interoperable with Thumb ISA:-)
365 .size poly1305_blocks,.-poly1305_blocks
378 stmdb sp!,{r4-r11}
380 ldmia $ctx,{$h0-$h4}
385 adds $g0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32
482 ldmia sp!,{r4-r11}
488 bx lr @ interoperable with Thumb ISA:-)
490 .size poly1305_emit,.-poly1305_emit
494 my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
508 cmp r3,#-1 @ is value impossible?
516 and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
548 @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
554 vmull.u32 $D2,$R2,${R0}[1]
560 vmlal.u32 $D2,$R1,${R1}[1]
567 vmlal.u32 $D2,$R0,${R2}[1]
573 vmlal.u32 $D2,$R4,${S3}[1]
579 vmlal.u32 $D2,$R3,${S4}[1]
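The $S3/$S4 operands above are the precomputed 5*r3 and 5*r4: any limb product whose weight reaches 2^130 wraps around multiplied by 5, again because 2^130 is congruent to 5 modulo 2^130 - 5. A hedged scalar rendering of the d2 column from the comment, with invented names:

#include <stdint.h>

/* One column of the base-2^26 schoolbook multiply (h *= r mod p).
 * s[i] = 5*r[i], the $S3/$S4 values above. */
static uint64_t mul_limb2(const uint32_t h[5], const uint32_t r[5],
                          const uint32_t s[5])
{
    return (uint64_t)h[2] * r[0]
         + (uint64_t)h[1] * r[1]
         + (uint64_t)h[0] * r[2]
         + (uint64_t)h[4] * s[3]   /* weight 2^(104+78) = 5 * 2^52 */
         + (uint64_t)h[3] * s[4];  /* weight 2^(78+104) = 5 * 2^52 */
}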
591 @ The result of multiplying an n-bit number by an m-bit number is
592 @ n+m bits wide. However! Even though 2^n is an (n+1)-bit number,
593 @ an m-bit number multiplied by 2^n is still n+m bits wide.
595 @ The sum of two n-bit numbers is n+1 bits wide, the sum of three is n+2,
596 @ and so is the sum of four. The sum of 2^m (n-m)-bit numbers and an n-bit
604 @ of 52-bit numbers as long as the number of addends is not a
616 @ In the key setup procedure the pre-reduced H0 is limited by 5*4+1
617 @ 52-bit addends, and 5*H4 by 5*5 of them, or 57 bits. But when hashing the
620 @ instruction accepts 2x32-bit inputs and writes a 2x64-bit result.
622 @ loop wrap-around. This can be done in the process of reduction
624 @ 128-bit instructions, which benefits low-end processors], but
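A quick worked instance of the bound this comment is building, under the assumption that all hash limbs stay below 2^26 and key limbs below 2^26 after clamping: each product h_i*r_j is then below 2^52, the wrapped terms h_i*(5*r_j) stay below 2^55, and a 64-bit vmlal accumulator can absorb on the order of 2^9 such addends before overflowing. That headroom is what licenses the lazy (deferred) carry handling below.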
635 vadd.i64 $D4,$D4,$T0 @ h3 -> h4
637 vadd.i64 $D1,$D1,$T1 @ h0 -> h1
644 vadd.i64 $D2,$D2,$T1 @ h1 -> h2
650 vshrn.u64 $T1#lo,$D2,#26
651 vmovn.i64 $D2#lo,$D2
652 vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0
653 vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
654 vbic.i32 $D2#lo,#0xfc000000
660 vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
661 vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
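The carry chain above runs in a deliberate order, with the 5-times wrap from h4 into h0 in the middle, so that no limb is consumed before its carry has landed. A C sketch of the same schedule on scalar 64-bit accumulators, with the mask and ordering taken from the code above (helper name invented):

#include <stdint.h>

/* Lazy-reduction carry schedule mirrored from the NEON code above:
 * h3->h4, h0->h1, h1->h2, h4->h0 (times 5, since 2^130 = 5 mod p),
 * h2->h3, then h0->h1 and h3->h4 once more.  Afterwards every limb
 * is essentially back below 2^26. */
static void lazy_reduce(uint64_t d[5]) /* unreduced base-2^26 limbs */
{
    const uint64_t M = 0x3ffffff;
    uint64_t c;
    c = d[3] >> 26; d[3] &= M; d[4] += c;      /* h3 -> h4 */
    c = d[0] >> 26; d[0] &= M; d[1] += c;      /* h0 -> h1 */
    c = d[1] >> 26; d[1] &= M; d[2] += c;      /* h1 -> h2 */
    c = d[4] >> 26; d[4] &= M; d[0] += c * 5;  /* h4 -> h0, *5 wrap */
    c = d[2] >> 26; d[2] &= M; d[3] += c;      /* h2 -> h3 */
    c = d[0] >> 26; d[0] &= M; d[1] += c;      /* h0 -> h1 */
    c = d[3] >> 26; d[3] &= M; d[4] += c;      /* h3 -> h4 */
}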
670 vtrn.32 $R2,$D2#lo
701 vshl.u32 $S2,$D2#lo,#2
702 vmov $R2,$D2#lo
708 vadd.i32 $S2,$S2,$D2#lo
721 .size poly1305_init_neon,.-poly1305_init_neon
733 stmdb sp!,{r4-r7}
734 vstmdb sp!,{d8-d15} @ ABI specification says so
739 stmdb sp!,{r1-r3,lr}
748 and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
756 veor $D2#lo,$D2#lo,$D2#lo
770 vmov.32 $D2#lo[0],r4
775 ldmia sp!,{r1-r3,lr}
785 veor $D2#lo,$D2#lo,$D2#lo
788 vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
810 vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26
827 vadd.i32 $H2#hi,$H2#lo,$D2#lo
859 vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
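Inbound message words go the other way: four 32-bit words become five 26-bit limbs, which is what the shift-and-insert (vsri) sequence here computes two blocks at a time. A scalar C sketch with an invented helper name; the 2^128 pad bit that the real code ORs into the top limb is left out:

#include <stdint.h>

/* Split four 32-bit words (128 bits of input) into five base-2^26
 * limbs, each masked to 26 bits except the top one. */
static void base32_to_base26(uint32_t h[5], const uint32_t w[4])
{
    h[0] =   w[0]                      & 0x3ffffff;
    h[1] = ((w[0] >> 26) | (w[1] <<  6)) & 0x3ffffff;
    h[2] = ((w[1] >> 20) | (w[2] << 12)) & 0x3ffffff;
    h[3] = ((w[2] >> 14) | (w[3] << 18)) & 0x3ffffff;
    h[4] =   w[3] >>  8;               /* plus the 2^128 pad bit */
}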
898 @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
905 vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1]
906 vmull.u32 $D2,$H2#hi,${R0}[1]
911 vmlal.u32 $D2,$H1#hi,${R1}[1]
930 vmlal.u32 $D2,$H0#hi,${R2}[1]
936 vmlal.u32 $D2,$H4#hi,${S3}[1]
942 vmlal.u32 $D2,$H3#hi,${S4}[1]
954 vmlal.u32 $D2,$H2#lo,${R0}[0]
961 vmlal.u32 $D2,$H1#lo,${R1}[0]
967 vmlal.u32 $D2,$H0#lo,${R2}[0]
975 vmlal.u32 $D2,$H4#lo,${S3}[0]
980 vmlal.u32 $D2,$H3#lo,${S4}[0]
992 @ lazy reduction interleaved with base 2^32 -> base 2^26 of
993 @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.
999 vadd.i64 $D4,$D4,$T0 @ h3 -> h4
1001 vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
1002 vadd.i64 $D1,$D1,$T1 @ h0 -> h1
1010 vadd.i64 $D2,$D2,$T1 @ h1 -> h2
1019 vshrn.u64 $T1#lo,$D2,#26
1020 vmovn.i64 $D2#lo,$D2
1021 vaddl.u32 $D0,$D0#lo,$T0#lo @ h4 -> h0 [widen for a sec]
1023 vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
1025 vbic.i32 $D2#lo,#0xfc000000
1028 vshrn.u64 $T0#lo,$D0,#26 @ re-narrow
1035 vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
1036 vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
1052 vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi
1062 vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant
1063 vmull.u32 $D2,$H2#hi,$R0
1079 vmlal.u32 $D2,$H1#hi,$R1
1087 vmlal.u32 $D2,$H0#hi,$R2
1097 vmlal.u32 $D2,$H4#hi,$S3
1100 vorn $MASK,$MASK,$MASK @ all-ones, can be redundant
1105 vmlal.u32 $D2,$H3#hi,$S4
1115 vmlal.u32 $D2,$H2#lo,$R0
1127 vmlal.u32 $D2,$H1#lo,$R1
1135 vmlal.u32 $D2,$H0#lo,$R2
1141 vmlal.u32 $D2,$H4#lo,$S3
1144 vorn $MASK,$MASK,$MASK @ all-ones
1149 vmlal.u32 $D2,$H3#lo,$S4
1159 vadd.i64 $D2#lo,$D2#lo,$D2#hi
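@ (fold the two 64-bit lanes of each q accumulator into a single sum before the final reduction)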
1168 vadd.i64 $D4,$D4,$T0 @ h3 -> h4
1169 vadd.i64 $D1,$D1,$T1 @ h0 -> h1
1175 vadd.i64 $D2,$D2,$T1 @ h1 -> h2
1179 vshr.u64 $T1,$D2,#26
1180 vand.i64 $D2,$D2,$MASK
1181 vadd.i64 $D0,$D0,$T0 @ h4 -> h0
1182 vadd.i64 $D3,$D3,$T1 @ h2 -> h3
1188 vadd.i64 $D1,$D1,$T0 @ h0 -> h1
1189 vadd.i64 $D4,$D4,$T1 @ h3 -> h4
1197 vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
1200 vldmia sp!,{d8-d15} @ epilogue
1201 ldmia sp!,{r4-r7}
1203 .size poly1305_blocks_neon,.-poly1305_blocks_neon
1213 .word OPENSSL_armcap_P-.Lpoly1305_init
1222 .asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by \@dot-asm"
1229 s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
1231 s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
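As a hedged illustration of what these substitutions emit: each 128-bit q register aliases a pair of 64-bit d registers (qN maps to d2N and d2N+1), so with the register assignment above, $D2#lo, i.e. q7#lo, becomes d14 and $D2#hi becomes d15. Likewise every bx lr is replaced with its raw ARM encoding, .word 0xe12fff1e, so that assemblers targeting plain ARMv4, which lacks the bx instruction, still accept the file.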