/freebsd/sys/crypto/openssl/aarch64/
aesv8-armx.S
    21  stp x29,x30,[sp,#-16]!
    40  eor v0.16b,v0.16b,v0.16b
    41  ld1 {v3.16b},[x0],#16
    51  tbl v6.16b,{v3.16b},v2.16b
    52  ext v5.16b,v0.16b,v3.16b,#12
    53  st1 {v3.4s},[x2],#16
    54  aese v6.16b,v0.16b
    57  eor v3.16b,v3.16b,v5.16b
    58  ext v5.16b,v0.16b,v5.16b,#12
    59  eor v3.16b,v3.16b,v5.16b
    [all …]

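A note on what these hits show: this is the AES-128 key-expansion loop. v0 is zeroed (the eor v0,v0,v0 hit), so `aese v6,v0` degenerates to SubBytes+ShiftRows (the SubWord step), the `tbl` performs RotWord, and each `ext ...,#12` against the zero register shifts a 32-bit word of zeros into the key block, so the three `eor`s leave a running XOR of the four key words. A minimal scalar sketch of that ext/eor idiom, assuming the standard FIPS-197 schedule (names are illustrative, not from the .S file):

    #include <stdint.h>

    /* With v0 zeroed, "ext v5,v0,X,#12" models as shifting a zero word
     * into X; three shift-and-XOR steps turn (w0,w1,w2,w3) into the
     * prefix XORs (w0, w0^w1, w0^w1^w2, w0^w1^w2^w3). */
    static void prefix_xor_words(uint32_t w[4])
    {
        uint32_t s[4] = { 0, w[0], w[1], w[2] };    /* ext {0,w},#12 */

        for (int step = 0; step < 3; step++) {
            for (int i = 0; i < 4; i++)
                w[i] ^= s[i];
            /* shift s by one more word: ext {0,s},#12 */
            s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = 0;
        }
    }

XORing SubWord(RotWord(w3)) ^ rcon (the aese/tbl half of the loop) into each prefix word then yields the next four round-key words.
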
bsaes-armv8.S
    47  ldr q8, [x9], #16
    50  movi v9.16b, #0x55
    51  ldr q10, [x11], #16
    52  movi v16.16b, #0x33
    53  movi v17.16b, #0x0f
    55  eor v0.16b, v0.16b, v8.16b
    56  eor v1.16b, v1.16b, v8.16b
    57  eor v2.16b, v2.16b, v8.16b
    58  eor v4.16b, v4.16b, v8.16b
    59  eor v3.16b, v3.16b, v8.16b
    [all …]

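The 0x55/0x33/0x0f constants are the classic bit-matrix-transpose masks: bit-sliced AES first XORs in the round key (the eor run against v8) and then transposes the input so that bit i of every byte lands in slice register i. A scalar sketch of the standard swap-and-merge step behind those masks, as an illustration rather than the file's exact network:

    #include <stdint.h>

    /* Delta swap: exchange the bits of a selected by mask with the
     * bits of b sitting `shift` positions higher. */
    #define SWAPMOVE(a, b, mask, shift)                          \
        do {                                                     \
            uint64_t t_ = ((a) ^ ((b) >> (shift))) & (mask);     \
            (a) ^= t_;                                           \
            (b) ^= t_ << (shift);                                \
        } while (0)

    /* Transpose an 8x8 bit matrix spread across eight 64-bit slices. */
    static void bitslice_transpose(uint64_t x[8])
    {
        const uint64_t m1 = 0x5555555555555555ULL;   /* movi #0x55 */
        const uint64_t m2 = 0x3333333333333333ULL;   /* movi #0x33 */
        const uint64_t m4 = 0x0f0f0f0f0f0f0f0fULL;   /* movi #0x0f */

        for (int i = 0; i < 8; i += 2)
            SWAPMOVE(x[i], x[i + 1], m1, 1);
        for (int i = 0; i < 8; i += 4) {
            SWAPMOVE(x[i],     x[i + 2], m2, 2);
            SWAPMOVE(x[i + 1], x[i + 3], m2, 2);
        }
        for (int i = 0; i < 4; i++)
            SWAPMOVE(x[i], x[i + 4], m4, 4);
    }
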
vpsm4_ex-armv8.S
    55  ldr q27, [x9, #:lo12:.Lsbox_magic+16]
    61  rev32 v5.16b,v5.16b
    69  eor v5.16b,v5.16b,v6.16b
    73  movi v0.16b,#64
    86  tbl v0.16b, {v4.16b}, v26.16b
    87  ushr v2.16b, v0.16b, 4
    88  and v0.16b, v0.16b, v31.16b
    89  tbl v0.16b, {v28.16b}, v0.16b
    90  tbl v2.16b, {v27.16b}, v2.16b
    91  eor v0.16b, v0.16b, v2.16b
    [all …]

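The ushr/and pair feeding two `tbl`s and an `eor` is the vector-permute idiom (vpaes-armv8.S below uses the same trick): a single-register NEON tbl can only index 16 bytes, so each input byte is split into its low and high nibble, each nibble indexes its own 16-entry table, and the two halves are combined. A scalar model with hypothetical table names:

    #include <stdint.h>

    /* and v0,v0,v31 / ushr v2,v0,4 -> nibble split;
     * tbl + tbl + eor -> two 16-entry lookups combined. */
    static void nibble_lookup(uint8_t out[16], const uint8_t in[16],
                              const uint8_t tlo[16], const uint8_t thi[16])
    {
        for (int i = 0; i < 16; i++)
            out[i] = tlo[in[i] & 0x0f] ^ thi[in[i] >> 4];
    }
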
vpsm4-armv8.S
    69   ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x10],#64
    70   ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x10],#64
    71   ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x10],#64
    72   ld1 {v28.16b,v29.16b,v30.16b,v31.16b},[x10]
    74   rev32 v5.16b,v5.16b
    82   eor v5.16b,v5.16b,v6.16b
    86   movi v0.16b,#64
    99   tbl v1.16b,{v16.16b,v17.16b,v18.16b,v19.16b},v4.16b
    100  sub v4.16b,v4.16b,v0.16b
    101  tbx v1.16b,{v20.16b,v21.16b,v22.16b,v23.16b},v4.16b
    [all …]

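Here the whole 256-byte SM4 S-box sits in v16-v31. `tbl` zeroes a lane whose index is out of range while `tbx` leaves it untouched, so subtracting 64 (the movi v0,#64) between steps walks each index through the four 64-byte quarters of the table. A scalar model of that selection:

    #include <stdint.h>

    static uint8_t sbox256_lookup(const uint8_t sbox[256], uint8_t idx)
    {
        uint8_t out = 0;        /* tbl: out-of-range lanes become 0 */
        uint8_t i = idx;

        for (int quarter = 0; quarter < 4; quarter++) {
            if (i < 64)         /* tbx: only the in-range lane is written */
                out = sbox[quarter * 64 + i];
            i -= 64;            /* sub v4.16b,v4.16b,v0.16b */
        }
        return out;
    }
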
aes-gcm-armv8-unroll8_64.S
    17  stp d10, d11, [sp, #16]
    25  movi v31.16b, #0x0
    28  ld1 { v0.16b}, [x16] //CTR block 0
    34  rev32 v30.16b, v0.16b //set up reversed counter
    38  rev32 v1.16b, v30.16b //CTR block 1
    41  rev32 v2.16b, v30.16b //CTR block 2
    44  rev32 v3.16b, v30.16b //CTR block 3
    47  rev32 v4.16b, v30.16b //CTR block 4
    50  rev32 v5.16b, v30.16b //CTR block 5
    54  rev32 v6.16b, v30.16b //CTR block 6
    [all …]

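The counter setup here keeps the CTR block byte-reversed (`rev32 v30`) so the low 32-bit counter can be bumped with ordinary adds, reversing each result back into CTR blocks 1 through 7. The scalar equivalent for one block, assuming a little-endian host and the GCC/Clang `__builtin_bswap32` builtin:

    #include <stdint.h>
    #include <string.h>

    /* Write counter block (iv counter + n) into out[16]; GCM's counter
     * is the big-endian 32-bit word in bytes 12..15 of the block. */
    static void ctr_block(const uint8_t iv[16], uint32_t n, uint8_t out[16])
    {
        uint32_t ctr;

        memcpy(out, iv, 16);
        memcpy(&ctr, iv + 12, 4);
        ctr = __builtin_bswap32(__builtin_bswap32(ctr) + n); /* rev32, add, rev32 */
        memcpy(out + 12, &ctr, 4);
    }
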
ghashv8-armx.S
    13  movi v19.16b,#0xe1
    15  ext v3.16b,v17.16b,v17.16b,#8
    18  ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
    21  and v18.16b,v18.16b,v16.16b
    23  ext v18.16b,v18.16b,v18.16b,#8
    24  and v16.16b,v16.16b,v17.16b
    25  orr v3.16b,v3.16b,v18.16b //H<<<=1
    26  eor v20.16b,v3.16b,v16.16b //twisted H
    27  st1 {v20.2d},[x0],#16 //store Htable[0]
    30  ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
    [all …]

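This is the Htable setup: GHASH multiplies in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1, and 0xe1/0xc2 are that polynomial's reduction constant in GCM's two bit orderings; the ext/and/orr sequence shifts H left by one bit into the "twisted" form the later pmull code wants. For reference, a hedged scalar sketch of the spec-order (NIST SP 800-38D) multiply-by-x that the 0xe1 constant comes from:

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } be128;   /* big-endian halves */

    /* In GCM's bit numbering, multiplying by x is a right shift; if the
     * bit shifted out was set, XOR in R = 0xe1 || 0^120. */
    static be128 gf128_mul_x(be128 v)
    {
        uint64_t carry = v.lo & 1;

        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (carry)
            v.hi ^= 0xe100000000000000ULL;
        return v;
    }
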
aes-gcm-armv8_64.S
    16  stp x21, x22, [sp, #16]
    33  ld1 {v11.16b}, [x3]
    34  ext v11.16b, v11.16b, v11.16b, #8
    35  rev64 v11.16b, v11.16b
    39  ld1 {v18.4s}, [x8], #16 //load rk0
    46  ext v15.16b, v15.16b, v15.16b, #8
    53  ld1 {v19.4s}, [x8], #16 //load rk1
    60  …ld1 { v0.16b}, [x16] //special case vector load initial counter so we …
    73  ld1 {v20.4s}, [x8], #16 //load rk2
    80  ext v14.16b, v14.16b, v14.16b, #8
    [all …]

vpaes-armv8.S
    110  movi v17.16b, #0x0f
    137  adrp x11, .Lk_mc_forward+16
    138  add x11, x11, #:lo12:.Lk_mc_forward+16
    140  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    141  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    142  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    143  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    144  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    145  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    146  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]

chacha-armv8.S
    45   stp x19,x20,[sp,#16]
    54   ldp x26,x27,[x3,#16]
    95   ror w17,w17,#16
    96   ror w19,w19,#16
    97   ror w20,w20,#16
    98   ror w21,w21,#16
    143  ror w21,w21,#16
    144  ror w17,w17,#16
    145  ror w19,w19,#16
    146  ror w20,w20,#16
    [all …]

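The ror #16 runs are the first rotation of ChaCha's quarter round (XOR followed by rotate-left-16 is a ror by 16 of the XORed word). The full quarter round, for reference:

    #include <stdint.h>

    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);   /* the ror #16 hits */
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }
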
keccak1600-armv8.S
    44   stp x28,x30,[sp,#16] // 32 bytes on top are mine
    151  ldr x27,[sp,#16]
    159  str x27,[sp,#16]
    217  stp x19,x20,[sp,#16]
    226  ldp x0,x1,[x0,#16*0]
    227  ldp x2,x3,[x26,#16*1]
    228  ldp x4,x5,[x26,#16*2]
    229  ldp x6,x7,[x26,#16*3]
    230  ldp x8,x9,[x26,#16*4]
    231  ldp x10,x11,[x26,#16*5]
    [all …]

sha512-armv8.S
    82   stp x19,x20,[sp,#16]
    453  str x9,[sp,#16]
    541  ldr x9,[sp,#16]
    573  str x13,[sp,#16]
    665  ldr x13,[sp,#16]
    697  str x1,[sp,#16]
    789  ldr x1,[sp,#16]
    821  str x5,[sp,#16]
    913  ldr x5,[sp,#16]
    945  str x9,[sp,#16]
    [all …]

sm4-armv8.S
    51   rev32 v0.16b,v0.16b
    54   eor v0.16b,v0.16b,v24.16b;
    80   rev32 v7.16b,v7.16b
    83   eor v7.16b, v7.16b,v24.16b;
    89   ext v7.16b,v7.16b,v7.16b,#8
    90   ext v6.16b,v6.16b,v6.16b,#8
    95   ext v5.16b,v5.16b,v5.16b,#8
    96   ext v4.16b,v4.16b,v4.16b,#8
    101  ext v3.16b,v3.16b,v3.16b,#8
    102  ext v2.16b,v2.16b,v2.16b,#8
    [all …]

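SM4 is specified on big-endian 32-bit words, so blocks are byte-reversed per word on load and store (`rev32`), and `ext ...,#8` swaps the two 64-bit halves, reversing the word order of the final state. Scalar equivalent of rev32 on one block:

    #include <stdint.h>

    /* Byte-swap each 32-bit lane of a 16-byte block (rev32 v.16b). */
    static void rev32_block(uint8_t b[16])
    {
        for (int i = 0; i < 16; i += 4) {
            uint8_t t;
            t = b[i];     b[i]     = b[i + 3]; b[i + 3] = t;
            t = b[i + 1]; b[i + 1] = b[i + 2]; b[i + 2] = t;
        }
    }
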
/freebsd/crypto/openssl/crypto/aes/asm/
bsaes-armv8.pl
    80  ldr q8, [x9], #16
    83  movi v9.16b, #0x55
    84  ldr q10, [x11], #16
    85  movi v16.16b, #0x33
    86  movi v17.16b, #0x0f
    88  eor v0.16b, v0.16b, v8.16b
    89  eor v1.16b, v1.16b, v8.16b
    90  eor v2.16b, v2.16b, v8.16b
    91  eor v4.16b, v4.16b, v8.16b
    92  eor v3.16b, v3.16b, v8.16b
    [all …]

vpaes-armv8.pl
    157  my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
    158  my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
    159  my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));
    173  movi v17.16b, #0x0f
    200  adrp x11, .Lk_mc_forward+16
    201  add x11, x11, :lo12:.Lk_mc_forward+16
    203  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    204  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    205  ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    206  tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    [all …]

/freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/blake3/
b3_aarch64_sse41.S
    48   .word 16
    69   .cfi_offset w19, -16
    82   eor v0.16b, v2.16b, v0.16b
    83   eor v1.16b, v3.16b, v1.16b
    94   .section .rodata.cst16,"aM",@progbits,16
    146  ldr q5, [x1, #16]
    150  stp q5, q4, [x0, #16]
    160  eor v1.16b, v16.16b, v1.16b
    162  tbl v1.16b, { v1.16b }, v0.16b
    164  eor v5.16b, v4.16b, v5.16b
    [all …]

b3_aarch64_sse2.S
    48   .word 16
    69   .cfi_offset w19, -16
    82   eor v0.16b, v2.16b, v0.16b
    83   eor v1.16b, v3.16b, v1.16b
    94   .section .rodata.cst16,"aM",@progbits,16
    114  ldr q5, [x1, #16]
    116  stp q5, q4, [x0, #16]
    125  eor v3.16b, v2.16b, v3.16b
    129  eor v5.16b, v4.16b, v5.16b
    132  orr v5.16b, v5.16b, v6.16b
    [all …]

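In both BLAKE3 variants the eor+tbl pair XORs two state vectors and then byte-rotates each 32-bit lane; `tbl` with a fixed permutation is the cheap NEON way to do ror 16 and ror 8. For context, the G mixing function those instructions vectorize rotates by 16, 12, 8, 7 (the same schedule as BLAKE2s):

    #include <stdint.h>

    #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

    /* One BLAKE3 G application on state v with message words mx, my. */
    static void blake3_g(uint32_t v[16], int a, int b, int c, int d,
                         uint32_t mx, uint32_t my)
    {
        v[a] = v[a] + v[b] + mx; v[d] = ROTR32(v[d] ^ v[a], 16); /* eor+tbl */
        v[c] = v[c] + v[d];      v[b] = ROTR32(v[b] ^ v[c], 12);
        v[a] = v[a] + v[b] + my; v[d] = ROTR32(v[d] ^ v[a], 8);  /* eor+tbl */
        v[c] = v[c] + v[d];      v[b] = ROTR32(v[b] ^ v[c], 7);
    }
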
/freebsd/tools/regression/ccd/layout/
ref.256k_128k_384k_128k_16_0
    1   ccd3: 4 components (md90, md91, md92, md93), 1728 blocks interleaved at 16 blocks
    6   0 16 16
    7   16 16 16
    8   32 16 16
    9   48 16 16
    10  64 32 16
    11  80 32 16
    12  96 32 16
    13  112 32 16
    14  128 48 16
    [all …]

ref.256k_128k_384k_128k_16_2
    1   ccd3: 4 components (md90, md91, md92, md93), 960 blocks interleaved at 16 blocks
    6   0 16 257
    9   16 16 16
    10  32 16 16
    11  48 16 16
    12  64 32 16
    13  80 32 16
    14  96 32 16
    15  112 32 16
    16  128 48 16
    [all …]

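These reference files pin down ccd's block placement for a 4-way, 16-block interleave over unequally sized components (256k/128k/384k/128k). As a simplified model of the arithmetic being checked (equal-size round-robin striping only; ccd's real layout also handles the size differences these files exercise):

    #include <stdint.h>

    struct loc { int comp; uint32_t off; };

    /* Map logical block blk to (component, offset) for plain striping
     * with `ileave` blocks per stripe unit across ncomp components. */
    static struct loc ccd_map_simple(uint32_t blk, uint32_t ileave, int ncomp)
    {
        uint32_t unit = blk / ileave;               /* stripe unit index */
        struct loc l;

        l.comp = (int)(unit % (uint32_t)ncomp);     /* round-robin */
        l.off  = (unit / (uint32_t)ncomp) * ileave + blk % ileave;
        return l;
    }
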
/freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/sha2/
sha512-armv8.S
    28   .word 16
    91   stp x19,x20,[sp,#16]
    461  str x9,[sp,#16]
    549  ldr x9,[sp,#16]
    581  str x13,[sp,#16]
    673  ldr x13,[sp,#16]
    705  str x1,[sp,#16]
    797  ldr x1,[sp,#16]
    829  str x5,[sp,#16]
    921  ldr x5,[sp,#16]
    [all …]

/freebsd/crypto/krb5/src/lib/crypto/builtin/aes/
iaesx64.s
    134  movdqu xmm4,[%1+16]
    146  movdqu xmm0,[%1 + 0*16]
    148  movdqu xmm1,[%1 + 1*16]
    150  movdqu xmm2,[%1 + 2*16]
    152  movdqu xmm3,[%1 + 3*16]
    157  movdqu [%1 + 0*16],xmm0
    158  movdqu [%1 + 1*16],xmm1
    159  movdqu [%1 + 2*16],xmm2
    160  movdqu [%1 + 3*16],xmm3
    164  movdqu xmm4,[%2 + ((%3)*16)]
    [all …]

iaesx86.s
    130  movdqu xmm0,[%1 + 0*16]
    132  movdqu xmm1,[%1 + 1*16]
    134  movdqu xmm2,[%1 + 2*16]
    136  movdqu xmm3,[%1 + 3*16]
    144  movdqu xmm4,[%1+16]
    153  movdqu [%1 + 0*16],xmm0
    154  movdqu [%1 + 1*16],xmm1
    155  movdqu [%1 + 2*16],xmm2
    156  movdqu [%1 + 3*16],xmm3
    161  movdqu xmm4,[%2 + ((%3)*16)]
    [all …]

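Both files keep four 16-byte blocks in flight so the AES-NI pipeline stays full: four unaligned loads, one aesenc per block per round, four stores. The same idea with compiler intrinsics (illustrative only; the krb5 sources are hand-written NASM using %1/%2 macro parameters):

    #include <immintrin.h>   /* compile with -maes */

    /* Encrypt 4 blocks in parallel with an expanded AES-128 key
     * schedule ks[0..10]. */
    static void aes128_enc4(__m128i blk[4], const __m128i ks[11])
    {
        for (int i = 0; i < 4; i++)
            blk[i] = _mm_xor_si128(blk[i], ks[0]);       /* whitening */
        for (int r = 1; r < 10; r++)
            for (int i = 0; i < 4; i++)
                blk[i] = _mm_aesenc_si128(blk[i], ks[r]);
        for (int i = 0; i < 4; i++)
            blk[i] = _mm_aesenclast_si128(blk[i], ks[10]);
    }
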
/freebsd/sys/contrib/openzfs/module/zfs/
vdev_raidz_math_aarch64_neon_common.h
    133  extern const uint8_t gf_clmul_mod_lt[4*256][16];
    135  #define ELEM_SIZE 16
    150  "eor " VR0(r) ".16b," VR0(r) ".16b,v21.16b\n" \
    151  "eor " VR1(r) ".16b," VR1(r) ".16b,v20.16b\n" \
    152  "eor " VR2(r) ".16b," VR2(r) ".16b,v19.16b\n" \
    153  "eor " VR3(r) ".16b," VR3(r) ".16b,v18.16b\n" \
    158  "eor " VR4(r) ".16b," VR4(r) ".16b,v21.16b\n" \
    159  "eor " VR5(r) ".16b," VR5(r) ".16b,v20.16b\n" \
    160  "eor " VR6(r) ".16b," VR6(r) ".16b,v19.16b\n" \
    161  "eor " VR7(r) ".16b," VR7(r) ".16b,v18.16b\n" \
    [all …]

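gf_clmul_mod_lt precomputes, per multiplier, carryless products already reduced modulo RAID-Z's GF(2^8) polynomial (the usual Reed-Solomon 0x11d, x^8 + x^4 + x^3 + x^2 + 1), so the hot loop is just tbl lookups plus the eor folds shown above. A plain shift-and-add reference multiply in that field:

    #include <stdint.h>

    static uint8_t gf256_mul(uint8_t a, uint8_t b)
    {
        uint8_t p = 0;

        while (b != 0) {
            if (b & 1)
                p ^= a;
            /* xtime: multiply a by x, reducing by 0x1d on overflow */
            a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0x00));
            b >>= 1;
        }
        return p;
    }
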
/freebsd/contrib/bearssl/src/symcipher/
aes_pwr8_cbcenc.c
    58  addi(%[cc], %[cc], 16)  in cbcenc_128()
    60  addi(%[cc], %[cc], 16)  in cbcenc_128()
    62  addi(%[cc], %[cc], 16)  in cbcenc_128()
    64  addi(%[cc], %[cc], 16)  in cbcenc_128()
    66  addi(%[cc], %[cc], 16)  in cbcenc_128()
    68  addi(%[cc], %[cc], 16)  in cbcenc_128()
    70  addi(%[cc], %[cc], 16)  in cbcenc_128()
    72  addi(%[cc], %[cc], 16)  in cbcenc_128()
    74  addi(%[cc], %[cc], 16)  in cbcenc_128()
    76  addi(%[cc], %[cc], 16)  in cbcenc_128()
    [all …]

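The repeated addi(...,16) steps the data pointer one AES block at a time inside cbcenc_128(): CBC encryption is inherently serial, each plaintext block being XORed with the previous ciphertext before it is encrypted. A generic sketch of that chaining, with a hypothetical block-cipher callback standing in for BearSSL's POWER8 vcipher sequence:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef void (*block_fn)(uint8_t block[16], const void *key);

    /* In-place CBC encrypt; len must be a multiple of 16. */
    static void cbc_encrypt(block_fn enc, const void *key,
                            uint8_t iv[16], uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i += 16) {
            for (int j = 0; j < 16; j++)
                buf[i + j] ^= iv[j];    /* chain previous ciphertext */
            enc(buf + i, key);
            memcpy(iv, buf + i, 16);    /* next block's chaining value */
        }
    }
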
/freebsd/crypto/openssl/crypto/sm4/asm/
vpsm4-armv8.pl
    37   my @sbox=map("v$_",(16..31));
    51   rev32 $dst.16b,$src.16b
    53   mov $dst.16b,$src.16b
    59   rev32 $dst.16b,$dst.16b
    72   rev32 $dst.16b,$src.16b
    74   mov $dst.16b,$src.16b
    80   rev32 $dst.16b,$dst.16b
    94   rbit $dst.16b,$src.16b
    98   mov $dst.16b,$src.16b
    104  rbit $dst.16b,$src.16b
    [all …]

/freebsd/contrib/arm-optimized-routines/string/aarch64/
strrchr.S
    62  movk wtmp2, #0x4010, lsl #16
    63  dup vrepchr.16b, chrin
    75  ld1 {vdata1.16b, vdata2.16b}, [src], #32
    77  cmeq vhas_nul1.16b, vdata1.16b, #0
    78  cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
    79  cmeq vhas_nul2.16b, vdata2.16b, #0
    80  cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
    81  and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
    82  and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
    83  and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
    [all …]

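The cmeq pairs test 32 bytes at a time both for the searched character and for the terminating NUL, because strrchr must remember the last match seen before the end of the string. A minimal scalar reference:

    #include <stddef.h>

    char *my_strrchr(const char *s, int c)
    {
        const char *last = NULL;

        do {
            if (*s == (char)c)
                last = s;       /* keep the most recent match */
        } while (*s++ != '\0');
        return (char *)last;    /* finds the NUL itself if c == 0 */
    }
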