Lines Matching +full:in4 +full:- +full:in5

2 # SPDX-License-Identifier: GPL-2.0
12 # Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
58 # The module is endian-agnostic in sense that it supports both big-
59 # and little-endian cases. Data alignment in parallelizable modes is
64 # is aligned programmatically, which in turn guarantees exception-
72 # Add XTS subroutine, 9x on little- and 12x improvement on big-endian
76 # Current large-block performance in cycles per byte processed with
77 # 128-bit key (less is better).
79 # CBC en-/decrypt CTR XTS
106 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
107 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
108 die "can't locate ppc-xlate.pl";
140 addi $ptr,$ptr,-0x58
152 li $ptr,-1
154 beq- Lenc_key_abort # if ($inp==0) return -1;
156 beq- Lenc_key_abort # if ($out==0) return -1;
157 li $ptr,-2
159 blt- Lenc_key_abort
161 bgt- Lenc_key_abort
163 bne- Lenc_key_abort
190 vspltisb $outmask,-1
202 vperm $key,$in0,$in0,$mask # rotate-n-splat
222 vperm $key,$in0,$in0,$mask # rotate-n-splat
239 vperm $key,$in0,$in0,$mask # rotate-n-splat
280 vperm $key,$in1,$in1,$mask # rotate-n-splat
300 vperm $key,$in1,$in1,$mask # rotate-n-splat
354 vperm $key,$in1,$in1,$mask # rotate-n-splat
406 .size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
409 $STU $sp,-$FRAME($sp)
416 bne- Ldec_key_abort
439 stw r9, -16($inp)
440 stw r10,-12($inp)
441 stw r11,-8($inp)
442 stw r12,-4($inp)
452 .size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
456 {{{ # Single block en- and decrypt procedures #
509 vspltisb v2,-1
527 .size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
534 {{{ # CBC en- and decrypt procedures #
542 bltlr-
569 vspltisb $outmask,-1
584 subi $len,$len,16 # len-=16
632 subi $len,$len,16 # len-=16
672 addi $out,$out,-1
680 vspltisb $outmask,-1
703 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
705 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
706 # v26-v31 last 6 round keys
707 my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
712 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
737 li r0,-1
738 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
754 subi $rounds,$rounds,3 # -4 in total
769 stvx v24,$x00,$key_ # off-load round[1]
772 stvx v25,$x10,$key_ # off-load round[2]
779 stvx v24,$x00,$key_ # off-load round[3]
782 stvx v25,$x10,$key_ # off-load round[4]
793 lvx v24,$x00,$key_ # pre-load round[1]
795 lvx v25,$x10,$key_ # pre-load round[2]
811 lvx_u $in4,$x40,$inp
814 lvx_u $in5,$x50,$inp
818 le?vperm $in4,$in4,$in4,$inpperm
822 le?vperm $in5,$in5,$in5,$inpperm
825 vxor $out4,$in4,$rndkey0
827 vxor $out5,$in5,$rndkey0
857 subic $len,$len,128 # $len-=128
867 subfe. r0,r0,r0 # borrow?-1:0
889 # loop inX-in7 are loaded
909 lvx v24,$x00,$key_ # re-pre-load round[1]
919 lvx v25,$x10,$key_ # re-pre-load round[2]
932 vxor $in4,$in4,v31
934 vxor $in5,$in5,v31
949 vncipherlast $out5,$out5,$in4
951 lvx_u $in4,$x40,$inp
952 vncipherlast $out6,$out6,$in5
954 lvx_u $in5,$x50,$inp
956 le?vperm $in4,$in4,$in4,$inpperm
959 le?vperm $in5,$in5,$in5,$inpperm
980 vxor $out4,$in4,$rndkey0
983 vxor $out5,$in5,$rndkey0
992 beq Loop_cbc_dec8x # did $len-=128 borrow?
1077 vxor $in4,$in4,v31
1079 vxor $in5,$in5,v31
1101 vncipherlast $out5,$out5,$in4
1102 vncipherlast $out6,$out6,$in5
1128 vncipherlast $out5,$out5,$in4
1129 vncipherlast $out6,$out6,$in5
1152 vncipherlast $out5,$out5,$in4
1153 vncipherlast $out6,$out6,$in5
1173 vncipherlast $out5,$out5,$in4
1174 vncipherlast $out6,$out6,$in5
1192 vncipherlast $out6,$out6,$in5
1284 .size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
1293 # This code is written as 'ctr32', based on a 32-bit counter used
1294 # upstream. The kernel does *not* use a 32-bit counter. The kernel uses
1295 # a 128-bit counter.
1303 # 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
1304 # 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
1317 bltlr-
1352 vspltisb $outmask,-1
1379 vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
1383 subic. $len,$len,1 # blocks--
1409 addi $out,$out,-1
1424 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
1426 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
1427 # v26-v31 last 6 round keys
1428 my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
1434 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
1459 li r0,-1
1460 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
1476 subi $rounds,$rounds,3 # -4 in total
1490 stvx v24,$x00,$key_ # off-load round[1]
1493 stvx v25,$x10,$key_ # off-load round[2]
1500 stvx v24,$x00,$key_ # off-load round[3]
1503 stvx v25,$x10,$key_ # off-load round[4]
1514 lvx v24,$x00,$key_ # pre-load round[1]
1516 lvx v25,$x10,$key_ # pre-load round[2]
1523 vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
1570 subic r11,$len,256 # $len-256, borrow $key_
1580 subfe r0,r0,r0 # borrow?-1:0
1600 lvx v24,$x00,$key_ # re-pre-load round[1]
1602 subic $len,$len,129 # $len-=129
1604 addi $len,$len,1 # $len-=128 really
1612 lvx v25,$x10,$key_ # re-pre-load round[2]
1623 lvx_u $in4,$x40,$inp
1625 lvx_u $in5,$x50,$inp
1641 le?vperm $in4,$in4,$in4,$inpperm
1643 le?vperm $in5,$in5,$in5,$inpperm
1651 # loop inX-in7 are loaded
1653 subfe. r0,r0,r0 # borrow?-1:0
1663 vxor $in4,$in4,v31
1665 vxor $in5,$in5,v31
1671 bne Lctr32_enc8x_break # did $len-129 borrow?
1682 vcipherlast $in4,$out4,$in4
1685 vcipherlast $in5,$out5,$in5
1709 le?vperm $in4,$in4,$in4,$inpperm
1712 le?vperm $in5,$in5,$in5,$inpperm
1714 stvx_u $in4,$x40,$out
1717 stvx_u $in5,$x50,$out
1729 cmpwi $len,-0x60
1733 cmpwi $len,-0x40
1737 cmpwi $len,-0x20
1749 vcipherlast $out4,$out4,$in4
1750 vcipherlast $out5,$out5,$in5
1778 vcipherlast $out3,$out3,$in4
1779 vcipherlast $out4,$out4,$in5
1804 vcipherlast $out2,$out2,$in4
1805 vcipherlast $out3,$out3,$in5
1827 vcipherlast $out1,$out1,$in4
1828 vcipherlast $out2,$out2,$in5
1847 vcipherlast $out0,$out0,$in4
1848 vcipherlast $out1,$out1,$in5
1865 vcipherlast $out0,$out0,$in5
1952 .size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
1978 li r3,-1
1980 bltlr-
2044 li $idx,-16
2147 vspltisb $tmp,-1
2183 .size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
2187 li r3,-1
2189 bltlr-
2359 vxor $inout,$inout,$tweak # :-(
2360 vxor $inout,$inout,$tweak1 # :-)
2397 vspltisb $tmp,-1
2437 .size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
2444 my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
2447 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
2448 # v26-v31 last 6 round keys
2455 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
2482 li r0,-1
2483 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
2510 subi $rounds,$rounds,3 # -4 in total
2524 stvx v24,$x00,$key_ # off-load round[1]
2527 stvx v25,$x10,$key_ # off-load round[2]
2534 stvx v24,$x00,$key_ # off-load round[3]
2537 stvx v25,$x10,$key_ # off-load round[4]
2548 lvx v24,$x00,$key_ # pre-load round[1]
2550 lvx v25,$x10,$key_ # pre-load round[2]
2599 xxlor 32+$in4, 0, 0
2600 vpermxor $tweak, $tweak, $tmp, $in4
2602 lvx_u $in4,$x40,$inp
2607 le?vperm $in4,$in4,$in4,$leperm
2609 vxor $out4,$in4,$twk4
2610 xxlor 32+$in5, 0, 0
2611 vpermxor $tweak, $tweak, $tmp, $in5
2613 lvx_u $in5,$x50,$inp
2618 le?vperm $in5,$in5,$in5,$leperm
2620 vxor $out5,$in5,$twk5
2650 subic $len,$len,96 # $len-=96
2662 subfe. r0,r0,r0 # borrow?-1:0
2690 # loop inX-in5 are loaded
2717 lvx v24,$x00,$key_ # re-pre-load round[1]
2722 xxlor 32+$in4, 0, 0
2723 vpermxor $tweak, $tweak, $tmp, $in4
2726 vxor $in4,$twk4,v31
2731 lvx v25,$x10,$key_ # re-pre-load round[2]
2739 xxlor 32+$in5, 0, 0
2740 vpermxor $tweak, $tweak, $tmp, $in5
2743 vxor $in5,$twk5,v31
2759 vcipherlast $out4,$out4,$in4
2761 lvx_u $in4,$x40,$inp
2766 vcipherlast $tmp,$out5,$in5 # last block might be needed
2769 lvx_u $in5,$x50,$inp
2771 le?vperm $in4,$in4,$in4,$leperm
2772 le?vperm $in5,$in5,$in5,$leperm
2789 vxor $out4,$in4,$twk4
2792 vxor $out5,$in5,$twk5
2796 beq Loop_xts_enc6x # did $len-=96 borrow?
2815 vxor $out3,$in4,$twk3
2816 vxor $out4,$in5,$twk4
2840 vxor $out2,$in4,$twk2
2841 vxor $out3,$in5,$twk3
2863 vxor $out1,$in4,$twk1
2864 vxor $out2,$in5,$twk2
2884 vxor $out0,$in4,$twk0
2885 vxor $out1,$in5,$twk1
2904 vxor $out0,$in5,$twk0
2930 lvx v24,$x00,$key_ # re-pre-load round[1]
2933 lvx v25,$x10,$key_ # re-pre-load round[2]
2958 lvsr $inpperm,0,$taillen # $in5 is no more
2965 vspltisb $out1,-1
3081 lvsr $inpperm,r0,$taillen # $in5 is no more
3102 lvx v24,$x00,$key_ # re-pre-load round[1]
3111 lvx v25,$x10,$key_ # re-pre-load round[2]
3112 vxor $in4,$twk4,v31
3125 vcipherlast $out4,$out4,$in4
3132 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
3159 li r0,-1
3160 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
3187 subi $rounds,$rounds,3 # -4 in total
3201 stvx v24,$x00,$key_ # off-load round[1]
3204 stvx v25,$x10,$key_ # off-load round[2]
3211 stvx v24,$x00,$key_ # off-load round[3]
3214 stvx v25,$x10,$key_ # off-load round[4]
3225 lvx v24,$x00,$key_ # pre-load round[1]
3227 lvx v25,$x10,$key_ # pre-load round[2]
3268 xxlor 32+$in4, 0, 0
3269 vpermxor $tweak, $tweak, $tmp, $in4
3271 lvx_u $in4,$x40,$inp
3276 le?vperm $in4,$in4,$in4,$leperm
3278 vxor $out4,$in4,$twk4
3279 xxlor 32+$in5, 0, 0
3280 vpermxor $tweak, $tweak, $tmp, $in5
3282 lvx_u $in5,$x50,$inp
3287 le?vperm $in5,$in5,$in5,$leperm
3289 vxor $out5,$in5,$twk5
3319 subic $len,$len,96 # $len-=96
3331 subfe. r0,r0,r0 # borrow?-1:0
3359 # loop inX-in5 are loaded
3386 lvx v24,$x00,$key_ # re-pre-load round[1]
3391 xxlor 32+$in4, 0, 0
3392 vpermxor $tweak, $tweak, $tmp, $in4
3395 vxor $in4,$twk4,v31
3400 lvx v25,$x10,$key_ # re-pre-load round[2]
3408 xxlor 32+$in5, 0, 0
3409 vpermxor $tweak, $tweak, $tmp, $in5
3412 vxor $in5,$twk5,v31
3428 vncipherlast $out4,$out4,$in4
3430 lvx_u $in4,$x40,$inp
3435 vncipherlast $out5,$out5,$in5
3437 lvx_u $in5,$x50,$inp
3439 le?vperm $in4,$in4,$in4,$leperm
3440 le?vperm $in5,$in5,$in5,$leperm
3457 vxor $out4,$in4,$twk4
3459 vxor $out5,$in5,$twk5
3463 beq Loop_xts_dec6x # did $len-=96 borrow?
3482 vxor $out3,$in4,$twk3
3483 vxor $out4,$in5,$twk4
3508 vxor $out2,$in4,$twk2
3509 vxor $out3,$in5,$twk3
3532 vxor $out1,$in4,$twk1
3533 vxor $out2,$in5,$twk2
3554 vxor $out0,$in4,$twk0
3555 vxor $out1,$in5,$twk1
3575 vxor $out0,$in5,$twk0
3601 lvx v24,$x00,$key_ # re-pre-load round[1]
3604 lvx v25,$x10,$key_ # re-pre-load round[2]
3648 lvsr $inpperm,0,$taillen # $in5 is no more
3653 lvx v24,$x00,$key_ # re-pre-load round[1]
3656 lvx v25,$x10,$key_ # re-pre-load round[2]
3670 vspltisb $out1,-1
3807 lvx v24,$x00,$key_ # re-pre-load round[1]
3816 lvx v25,$x10,$key_ # re-pre-load round[2]
3817 vxor $in4,$twk4,v31
3829 vncipherlast $out4,$out4,$in4
3841 # constants table endian-specific conversion
3842 if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
3846 # convert to endian-agnostic format
3856 # little-endian conversion
3870 # instructions prefixed with '?' are endian-specific and need
3872 if ($flavour =~ /le$/o) { # little-endian
3877 s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
3878 s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
3879 s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
3880 } else { # big-endian
3883 s/\?([a-z]+)/$1/o;