| /freebsd/sys/crypto/openssl/aarch64/ |
| H A D | vpsm4_ex-armv8.S | 73 movi v0.16b,#64 86 tbl v0.16b, {v4.16b}, v26.16b 87 ushr v2.16b, v0.16b, 4 88 and v0.16b, v0.16b, v31.16b 89 tbl v0.16b, {v28.16b}, v0.16b 91 eor v0.16b, v0.16b, v2.16b 93 aese v0.16b,v1.16b 94 ushr v2.16b, v0.16b, 4 95 and v0.16b, v0.16b, v31.16b 96 tbl v0.16b, {v30.16b}, v0.16b [all …]
|
| H A D | ghashv8-armx.S | 31 pmull v0.1q,v20.1d,v20.1d 36 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing 37 eor v18.16b,v0.16b,v2.16b 40 pmull v18.1q,v0.1d,v19.1d //1st phase 43 ins v1.d[1],v0.d[0] 44 eor v0.16b,v1.16b,v18.16b 46 ext v18.16b,v0.16b,v0.16b,#8 //2nd phase 47 pmull v0.1q,v0.1d,v19.1d 49 eor v22.16b,v0.16b,v18.16b 56 pmull v0.1q,v20.1d, v22.1d [all …]
|
| H A D | aesv8-armx.S | 40 eor v0.16b,v0.16b,v0.16b 52 ext v5.16b,v0.16b,v3.16b,#12 54 aese v6.16b,v0.16b 58 ext v5.16b,v0.16b,v5.16b,#12 60 ext v5.16b,v0.16b,v5.16b,#12 70 ext v5.16b,v0.16b,v3.16b,#12 72 aese v6.16b,v0.16b 75 ext v5.16b,v0.16b,v5.16b,#12 77 ext v5.16b,v0.16b,v5.16b,#12 84 ext v5.16b,v0.16b,v3.16b,#12 [all …]
|
| H A D | vpsm4-armv8.S | 86 movi v0.16b,#64 100 sub v4.16b,v4.16b,v0.16b 102 sub v4.16b,v4.16b,v0.16b 104 sub v4.16b,v4.16b,v0.16b 138 movi v0.16b,#64 141 sub v0.16b,v12.16b,v0.16b 145 tbl v0.16b,{v20.16b,v21.16b,v22.16b,v23.16b},v0.16b 148 add v0.2d,v0.2d,v1.2d 150 add v12.2d,v0.2d,v2.2d 152 ushr v0.4s,v12.4s,32-2 [all …]
|
| H A D | vpaes-armv8.S | 142 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 145 tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 146 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 147 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 156 tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t 159 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A 162 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B 164 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D 167 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D 169 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D [all …]
|
| H A D | aes-gcm-armv8_64.S | 60 …ld1 { v0.16b}, [x16] //special case vector load initial counter so we … 93 aese v0.16b, v18.16b 94 aesmc v0.16b, v0.16b //AES block 0 - round 0 105 aese v0.16b, v19.16b 106 aesmc v0.16b, v0.16b //AES block 0 - round 1 117 aese v0.16b, v20.16b 118 aesmc v0.16b, v0.16b //AES block 0 - round 2 135 aese v0.16b, v21.16b 136 aesmc v0.16b, v0.16b //AES block 0 - round 3 159 aese v0.16b, v22.16b [all …]
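Each aese/aesmc pair in the listing above is one AES encryption round: AESE performs AddRoundKey, SubBytes and ShiftRows, and AESMC performs MixColumns. A minimal sketch with the AArch64 crypto intrinsics, assuming a toolchain that provides the AES extension (this only illustrates one middle round, not the interleaved AES-GCM scheduling of the file):

```c
#include <arm_neon.h>

/*
 * One middle AES encryption round, matching the aese/aesmc pairs above:
 * AESE applies AddRoundKey, SubBytes and ShiftRows; AESMC applies
 * MixColumns.  The final round omits the MixColumns step.
 */
static inline uint8x16_t
aes_enc_round(uint8x16_t block, uint8x16_t round_key)
{
	return (vaesmcq_u8(vaeseq_u8(block, round_key)));
}
```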
|
| H A D | bsaes-armv8.S | 39 // v0-v7 input data 43 // v0-v7 output data 55 eor v0.16b, v0.16b, v8.16b 61 tbl v0.16b, {v0.16b}, v10.16b 70 ushr v8.2d, v0.2d, #1 89 eor v0.16b, v0.16b, v8.16b 96 ushr v8.2d, v0.2d, #2 116 eor v0.16b, v0.16b, v8.16b 122 ushr v16.2d, v0.2d, #4 142 eor v0.16b, v0.16b, v16.16b [all …]
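The ushr/and/eor sequences above (shift distances 1, 2 and 4 on the v0-v7 input/output registers) are the bit-slice transpose, built from the classic SWAPMOVE bit-exchange step. A scalar sketch of that step on 64-bit words, for illustration only; the assembly applies the same operation per 64-bit lane across eight vector registers:

```c
#include <stdint.h>

/*
 * SWAPMOVE: exchange the bits of *a selected by (mask << n) with the
 * bits of *b selected by mask.  The ushr/and/eor/shl/eor pattern in the
 * listing above is this operation applied lane-wise.
 */
static inline void
swapmove(uint64_t *a, uint64_t *b, uint64_t mask, unsigned n)
{
	uint64_t t = ((*a >> n) ^ *b) & mask;

	*b ^= t;
	*a ^= t << n;
}
```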
|
| /freebsd/contrib/llvm-project/clang/lib/Headers/ |
| H A D | velintrin_approx.h | 12 static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) { in _vel_approx_vfdivs_vvvl() argument 19 v2 = _vel_vfmuls_vvvl(v0, v3, l); in _vel_approx_vfdivs_vvvl() 20 v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); in _vel_approx_vfdivs_vvvl() 22 v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); in _vel_approx_vfdivs_vvvl() 23 v0 = _vel_vfmads_vvvvl(v2, v3, v0, l); in _vel_approx_vfdivs_vvvl() 24 return v0; in _vel_approx_vfdivs_vvvl() 27 static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) { in _vel_approx_pvfdiv_vvvl() argument 34 v2 = _vel_pvfmul_vvvl(v0, v3, l); in _vel_approx_pvfdiv_vvvl() 35 v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); in _vel_approx_pvfdiv_vvvl() 37 v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); in _vel_approx_pvfdiv_vvvl() [all …]
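The _vel_approx_vfdivs_vvvl helper above refines an approximate division with one Newton-style correction: an initial quotient is formed from a reciprocal estimate, the residual is computed with a negative multiply-subtract (vfnmsbs), and it is folded back in with a fused multiply-add (vfmads). A scalar sketch of that pattern, under the assumption that r is an approximate reciprocal of the divisor obtained elsewhere (the vector intrinsics do this element-wise):

```c
/*
 * Scalar model of the refinement step:
 *   q  = a * r        initial quotient from a reciprocal estimate r
 *   e  = a - q * b    residual (the vfnmsbs step)
 *   q' = q + e * r    corrected quotient (the vfmads step)
 */
static inline float
approx_div(float a, float b, float r /* assumed ~ 1.0f / b */)
{
	float q = a * r;
	float e = a - q * b;

	return (q + e * r);
}
```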
|
| /freebsd/contrib/file/tests/ |
| H A D | Makefile.am | 147 zstd-v0.2-FF.result \ 148 zstd-v0.2-FF.testfile \ 149 zstd-v0.3-FF.result \ 150 zstd-v0.3-FF.testfile \ 151 zstd-v0.4-FF.result \ 152 zstd-v0.4-FF.testfile \ 153 zstd-v0.5-FF.result \ 154 zstd-v0.5-FF.testfile \ 155 zstd-v0.6-FF.result \ 156 zstd-v0.6-FF.testfile \ [all …]
|
| /freebsd/contrib/xz/src/liblzma/check/ |
| H A D | crc_x86_clmul.h | 176 __m128i v0, v1, v2, v3; in crc32_arch_optimized() local 201 v0 = my_set_low64((int64_t)x); in crc32_arch_optimized() 202 v0 = shift_left(v0, 8 - size); in crc32_arch_optimized() 205 v0 = my_set_low64((int64_t)(crc ^ read64le(buf))); in crc32_arch_optimized() 220 v0 = _mm_insert_epi32(v0, (int32_t)high, 2); in crc32_arch_optimized() 221 v0 = _mm_insert_epi32(v0, (int32_t)(high >> 32), 3); in crc32_arch_optimized() 223 v0 = _mm_insert_epi64(v0, (int64_t)high, 1); in crc32_arch_optimized() 226 v0 = shift_left(v0, padding); in crc32_arch_optimized() 228 v1 = _mm_srli_si128(v0, 8); in crc32_arch_optimized() 229 v0 = _mm_clmulepi64_si128(v0, fold128, 0x10); in crc32_arch_optimized() [all …]
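The _mm_clmulepi64_si128 calls above fold the CRC state with carry-less multiplication. As a rough illustration, one generic 128-bit fold step is sketched below; the folding constants are polynomial-specific and the lane selectors used by liblzma (e.g. the 0x10 form above) differ from this simplified shape, so treat the constants and selectors here as placeholders:

```c
#include <immintrin.h>

/*
 * Generic 128-bit CRC fold step (sketch): carry-less multiply the two
 * 64-bit halves of the running value by two folding constants, then XOR
 * in the next 16 bytes of input.  k_lo_hi is a placeholder; the real
 * constants depend on the CRC polynomial and the fold distance.
 */
static inline __m128i
crc_fold_step(__m128i v, __m128i next16, __m128i k_lo_hi)
{
	__m128i lo = _mm_clmulepi64_si128(v, k_lo_hi, 0x00);
	__m128i hi = _mm_clmulepi64_si128(v, k_lo_hi, 0x11);

	return (_mm_xor_si128(_mm_xor_si128(lo, hi), next16));
}
```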
|
| /freebsd/crypto/openssl/crypto/aes/asm/ |
| H A D | vpaes-ppc.pl | 246 vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 247 vperm v0, $iptlo, $iptlo, v0 # vpshufb %xmm1, %xmm2, %xmm1 249 vxor v0, v0, v5 # vpxor %xmm5, %xmm1, %xmm0 250 vxor v0, v0, v1 # vpxor %xmm2, %xmm0, %xmm0 260 vperm v0, $sb1u, v7, v3 # vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t 264 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A 268 vperm v3, v0, v7, v1 # vpshufb %xmm1, %xmm0, %xmm3 # 0 = B 270 vperm v0, v0, v7, v4 # vpshufb %xmm4, %xmm0, %xmm0 # 3 = D 273 vxor v0, v0, v3 # vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D 274 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D [all …]
|
| H A D | vpaes-armv8.pl | 205 ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 208 tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 209 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 210 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 219 tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t 222 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A 225 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B 227 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D 230 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D 232 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D [all …]
|
| H A D | bsaes-armv8.pl | 72 // v0-v7 input data 76 // v0-v7 output data 88 eor v0.16b, v0.16b, v8.16b 94 tbl v0.16b, {v0.16b}, v10.16b 103 ushr v8.2d, v0.2d, #1 122 eor v0.16b, v0.16b, v8.16b 129 ushr v8.2d, v0.2d, #2 149 eor v0.16b, v0.16b, v8.16b 155 ushr v16.2d, v0.2d, #4 175 eor v0.16b, v0.16b, v16.16b [all …]
|
| /freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/blake3/ |
| H A D | b3_aarch64_sse2.S | 82 eor v0.16b, v2.16b, v0.16b 115 and v0.8b, v1.8b, v0.8b 117 mov v3.d[1], v0.d[0] 120 uzp1 v1.4s, v0.4s, v6.4s 121 uzp2 v0.4s, v0.4s, v6.4s 126 add v2.4s, v2.4s, v0.4s 168 uzp2 v17.4s, v17.4s, v0.4s 180 zip1 v18.2d, v16.2d, v0.2d 181 zip2 v0.4s, v0.4s, v16.4s 185 zip1 v16.4s, v0.4s, v6.4s [all …]
|
| /freebsd/lib/libc/aarch64/string/ |
| H A D | strncmp.S | 50 cmeq v5.16b, v0.16b, #0 66 tbl v0.16b, {v0.16b}, v4.16b 87 cmeq v2.16b, v0.16b, #0 // NUL byte present? 88 cmeq v4.16b, v0.16b, v4.16b // which bytes match? 132 cmeq v5.16b, v0.16b, #0 151 tbl v0.16b, {v0.16b}, v4.16b 173 cmeq v2.16b, v0.16b, #0 // NUL byte present? 174 cmeq v4.16b, v0.16b, v4.16b // which bytes match? 201 cmeq v0.16b, v0.16b, v2.16b // Mismatch between chunks? 203 shrn v0.8b, v0.8h, #4 [all …]
|
| H A D | timingsafe_bcmp.S | 76 eor v0.16b, v0.16b, v1.16b 78 orr v0.16b, v0.16b, v2.16b 79 umaxv s0, v0.4s // get a nonzero word if any 80 mov w0, v0.s[0] 86 eor v0.16b, v0.16b, v2.16b 88 orr v4.16b, v0.16b, v1.16b 94 eor v0.16b, v0.16b, v2.16b 96 orr v0.16b, v0.16b, v1.16b 97 orr v4.16b, v4.16b, v0.16b 106 eor v0.16b, v0.16b, v2.16b [all …]
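The eor/orr/umaxv pattern above accumulates the XOR of corresponding chunks with OR and only reduces at the end, so the running time depends on the length, not on where the first difference occurs. A portable C sketch of the same contract (the byte-at-a-time reference behaviour of timingsafe_bcmp(3), not the vectorized routine above):

```c
#include <stddef.h>

/*
 * Constant-time comparison: OR together the XOR of every byte pair and
 * report only whether any bit ended up set.  The NEON version above
 * does the same thing a whole register at a time and reduces with umaxv.
 */
static int
timingsafe_bcmp_ref(const void *b1, const void *b2, size_t n)
{
	const unsigned char *p1 = b1, *p2 = b2;
	unsigned char acc = 0;

	while (n-- > 0)
		acc |= *p1++ ^ *p2++;
	return (acc != 0);
}
```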
|
| H A D | strcmp.S | 44 cmeq v5.16b, v0.16b, #0 60 tbl v0.16b, {v0.16b}, v4.16b 82 cmeq v2.16b, v0.16b, #0 // NUL byte present? 83 cmeq v4.16b, v0.16b, v4.16b // which bytes match? 102 cmeq v0.16b, v0.16b, v2.16b 106 shrn v0.8b, v0.8h, #4 134 cmeq v0.16b, v0.16b, v2.16b // do the chunks match? 138 shrn v0.8b, v0.8h, #4 152 cmeq v0.16b, v0.16b, v2.16b 156 shrn v0.8b, v0.8h, #4 [all …]
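Both strcmp.S and strncmp.S above use the same cmeq/shrn trick: a 16-byte comparison result is narrowed into a 64-bit mask with one nibble per byte, which a single count-trailing-zeros can then scan. A sketch with NEON intrinsics of locating a NUL byte this way; the function and variable names are illustrative, not taken from the libc sources:

```c
#include <arm_neon.h>
#include <stdint.h>

/*
 * Index of the first NUL in a 16-byte chunk, or 16 if none: cmeq sets
 * matching bytes to 0xff, shrn #4 keeps one nibble per byte, and the
 * trailing-zero count divided by 4 is the byte index.
 */
static inline unsigned
first_nul_index(uint8x16_t chunk)
{
	uint8x16_t eq = vceqq_u8(chunk, vdupq_n_u8(0));
	uint8x8_t nib = vshrn_n_u16(vreinterpretq_u16_u8(eq), 4);
	uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(nib), 0);

	if (mask == 0)
		return (16);
	return ((unsigned)(__builtin_ctzll(mask) / 4));
}
```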
|
| /freebsd/sys/sys/ |
| H A D | ktr.h | 143 #define KTR_EVENT1(m, egroup, ident, etype, edat, a0, v0) \ argument 144 CTR3(m, KTR_EFMT(egroup, ident, etype) a0, ident, edat, (v0)) 145 #define KTR_EVENT2(m, egroup, ident, etype, edat, a0, v0, a1, v1) \ argument 147 ident, edat, (v0), (v1)) 148 #define KTR_EVENT3(m, egroup, ident, etype, edat, a0, v0, a1, v1, a2, v2)\ argument 150 ident, edat, (v0), (v1), (v2)) 152 a0, v0, a1, v1, a2, v2, a3, v3) \ argument 154 ident, edat, (v0), (v1), (v2), (v3)) 161 #define KTR_STATE1(m, egroup, ident, state, a0, v0) \ argument 162 KTR_EVENT1(m, egroup, ident, "state:\"%s\"", state, a0, (v0)) [all …]
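KTR_EVENT1 above prepends the event group, identity and type to the format string and forwards to CTR3, so each extra (a0, v0) pair contributes one format fragment and one value to a single trace record. A hypothetical kernel snippet, assuming the usual scheduler tracing setup; the event names and format below are illustrative, not copied from the tree:

```c
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/sched.h>

/*
 * Hypothetical example (not from the tree): emit a KTR "state" event
 * for the given thread with one extra name/value pair.  KTR_STATE1
 * expands through KTR_EVENT1 to a single CTR3 record.
 */
static void
trace_sleep(struct thread *td)
{
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "sleeping",
	    "prio:%d", td->td_priority);
}
```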
|
| /freebsd/contrib/bearssl/src/rsa/ |
| H A D | rsa_i15_privexp.c | 65 uint32_t r, a, b, u0, v0, u1, v1, he, hr; in br_rsa_i15_compute_privexp() local 161 * u0, u1, v0 and v1. Initial values are: in br_rsa_i15_compute_privexp() 162 * a = e u0 = 1 v0 = 0 in br_rsa_i15_compute_privexp() 165 * a = u0*e - v0*r in br_rsa_i15_compute_privexp() 170 * 0 <= v0 <= e in br_rsa_i15_compute_privexp() 175 * adjust u0, u1, v0 and v1 to maintain the invariants: in br_rsa_i15_compute_privexp() 182 * key or public exponent is not valid). The (u0,v0) or (u1,v1) in br_rsa_i15_compute_privexp() 191 * - When a is divided by 2, u0 and v0 must be divided by 2. in br_rsa_i15_compute_privexp() 194 * u0 and v0, respectively. in br_rsa_i15_compute_privexp() 195 * - When a is subtracted from b, u0 and v0 are subtracted from in br_rsa_i15_compute_privexp() [all …]
|
| H A D | rsa_i31_privexp.c | 65 uint32_t r, a, b, u0, v0, u1, v1, he, hr; in br_rsa_i31_compute_privexp() local 161 * u0, u1, v0 and v1. Initial values are: in br_rsa_i31_compute_privexp() 162 * a = e u0 = 1 v0 = 0 in br_rsa_i31_compute_privexp() 165 * a = u0*e - v0*r in br_rsa_i31_compute_privexp() 170 * 0 <= v0 <= e in br_rsa_i31_compute_privexp() 175 * adjust u0, u1, v0 and v1 to maintain the invariants: in br_rsa_i31_compute_privexp() 182 * key or public exponent is not valid). The (u0,v0) or (u1,v1) in br_rsa_i31_compute_privexp() 191 * - When a is divided by 2, u0 and v0 must be divided by 2. in br_rsa_i31_compute_privexp() 194 * u0 and v0, respectively. in br_rsa_i31_compute_privexp() 195 * - When a is subtracted from b, u0 and v0 are subtracted from in br_rsa_i31_compute_privexp() [all …]
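The comment block quoted above (identical in the i15 and i31 variants) describes a binary extended GCD: a and b start as e and r, Bézout-style coefficients such as a = u0*e - v0*r are maintained while a and b are halved and subtracted, and the surviving coefficients yield the private exponent. A simplified sketch of the same idea on machine integers, keeping only one coefficient pair and working modulo an odd m; the real BearSSL code operates on big integers, in constant time, and with the full (u0,v0)/(u1,v1) bookkeeping described above:

```c
#include <stdint.h>

/*
 * Toy binary extended GCD: compute e^-1 mod m for odd m < 2^63 with
 * gcd(e, m) == 1, maintaining  a == u*e (mod m)  and  b == v*e (mod m)
 * while a and b are halved and subtracted, as in the comment above.
 */
static uint64_t
modinv_binary(uint64_t e, uint64_t m)
{
	uint64_t a = e, b = m, u = 1, v = 0;

	while (a != 0) {
		while ((a & 1) == 0) {
			a >>= 1;
			u = (u & 1) ? (u + m) >> 1 : u >> 1;
		}
		while ((b & 1) == 0) {
			b >>= 1;
			v = (v & 1) ? (v + m) >> 1 : v >> 1;
		}
		if (a >= b) {
			a -= b;
			u = (u >= v) ? u - v : u + m - v;
		} else {
			b -= a;
			v = (v >= u) ? v - u : v + m - u;
		}
	}
	return (v);	/* b == gcd(e, m) == 1, so v == e^-1 mod m */
}
```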
|
| /freebsd/crypto/openssl/crypto/sm3/asm/ |
| H A D | sm3-riscv64-zvksh.pl | 67 my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7, 82 @{[vle32_v $V0, $CTX]} 83 @{[vrev8_v $V0, $V0]} 88 @{[vmv_v_v $V2, $V0]} 104 @{[vsm3c_vi $V0, $V6, 0]} 105 @{[vsm3c_vi $V0, $V4, 1]} 111 @{[vsm3c_vi $V0, $V4, 2]} 113 @{[vsm3c_vi $V0, $V4, 3]} 115 @{[vsm3c_vi $V0, $V8, 4]} 117 @{[vsm3c_vi $V0, $V4, 5]} [all …]
|
| /freebsd/crypto/openssl/crypto/des/ |
| H A D | cfb64ede.c | 29 register DES_LONG v0, v1; in DES_ede3_cfb64_encrypt() local 39 c2l(iv, v0); in DES_ede3_cfb64_encrypt() 42 ti[0] = v0; in DES_ede3_cfb64_encrypt() 45 v0 = ti[0]; in DES_ede3_cfb64_encrypt() 49 l2c(v0, iv); in DES_ede3_cfb64_encrypt() 61 c2l(iv, v0); in DES_ede3_cfb64_encrypt() 64 ti[0] = v0; in DES_ede3_cfb64_encrypt() 67 v0 = ti[0]; in DES_ede3_cfb64_encrypt() 71 l2c(v0, iv); in DES_ede3_cfb64_encrypt() 82 v0 = v1 = ti[0] = ti[1] = c = cc = 0; in DES_ede3_cfb64_encrypt() [all …]
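DES_ede3_cfb64_encrypt above implements byte-oriented 64-bit CFB: the IV is run through the three DES operations (the c2l/l2c and ti[]/v0/v1 plumbing in the listing), the resulting keystream is XORed into the data, and the ciphertext is fed back as the next IV. A sketch of that feedback pattern with a generic 64-bit block cipher; encrypt_block is a placeholder callback, not an OpenSSL API:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Byte-wise 64-bit CFB encryption with a generic block cipher.  `num'
 * carries the position inside the current keystream block across calls,
 * like the num argument of DES_ede3_cfb64_encrypt; the keystream is
 * kept in iv[] and overwritten byte by byte with ciphertext.
 */
typedef void (*block_fn)(const uint8_t in[8], uint8_t out[8], const void *key);

static void
cfb64_encrypt(const uint8_t *in, uint8_t *out, size_t len,
    const void *key, uint8_t iv[8], int *num, block_fn encrypt_block)
{
	int n = *num;

	while (len-- > 0) {
		if (n == 0) {
			uint8_t ks[8];

			encrypt_block(iv, ks, key);
			memcpy(iv, ks, 8);	/* iv now holds the keystream */
		}
		*out = *in++ ^ iv[n];
		iv[n] = *out++;			/* ciphertext feeds back */
		n = (n + 1) & 7;
	}
	*num = n;
}
```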
|
| /freebsd/crypto/openssl/crypto/modes/asm/ |
| H A D | ghash-riscv64-zvkb-zvbc.pl | 69 my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6"); 100 @{[vmv_v_i $V0, 2]} # vmv.v.i v0, 2 101 @{[vor_vv_v0t $V1, $V1, $V4]} # vor.vv v1, v1, v4, v0.t 104 @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3 106 @{[vmerge_vim $V3, $V3, 3]} # vmerge.vim v3, v3, 3, v0 107 @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3 109 @{[vxor_vv_v0t $V1, $V1, $V2]} # vxor.vv v1, v1, v2, v0.t 125 my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6"); 177 @{[vmv_v_i $V0, 1]} # vmv.v.i v0, 1 179 @{[vxor_vv_v0t $V2, $V2, $V3]} # vxor.vv v2, v2, v3, v0.t [all …]
|
| /freebsd/lib/libc/quad/ |
| H A D | muldi3.c | 48 * v = 2^n v1 * v0 52 * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0 53 * = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0 56 * and add 2^n u0 v0 to the last term and subtract it from the middle. 60 * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) + 61 * (2^n + 1) (u0 v0) 66 * (2^n) (u1 - u0) (v0 - v1) + [(u1-u0)... = mid] 67 * (2^n + 1) (u0 v0) [u0v0 = low] 69 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done 71 * of (u1 - u0) or (v0 - v1) may be negative.) [all …]
|
| /freebsd/sys/libkern/arm/ |
| H A D | muldi3.c | 50 * v = 2^n v1 * v0 54 * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0 55 * = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0 58 * and add 2^n u0 v0 to the last term and subtract it from the middle. 62 * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) + 63 * (2^n + 1) (u0 v0) 68 * (2^n) (u1 - u0) (v0 - v1) + [(u1-u0)... = mid] 69 * (2^n + 1) (u0 v0) [u0v0 = low] 71 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done 73 * of (u1 - u0) or (v0 - v1) may be negative.) [all …]
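Both copies of muldi3.c above (libc/quad and libkern/arm) document the same three-multiplication split: uv = (2^2n + 2^n)*u1*v1 + 2^n*(u1 - u0)*(v0 - v1) + (2^n + 1)*u0*v0, with care taken because the middle factors may be negative. A self-checking sketch of that identity scaled down to 16-bit halves, so a plain 64-bit multiply can verify the result; the types and helper names are illustrative, not the u_long/quad machinery of the real file:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Demonstrate the muldi3() identity with n = 16:
 *   u*v = (2^2n + 2^n)*u1*v1 + 2^n*(u1 - u0)*(v0 - v1) + (2^n + 1)*u0*v0
 * using only three 16x16->32 multiplies.  The middle term may be
 * negative; the wraparound of unsigned 64-bit addition absorbs it.
 */
static uint64_t
mul32_karatsuba(uint32_t u, uint32_t v)
{
	const unsigned n = 16;
	uint32_t u1 = u >> n, u0 = u & 0xffff;
	uint32_t v1 = v >> n, v0 = v & 0xffff;

	uint64_t high = (uint64_t)u1 * v1;	/* u1*v1 */
	uint64_t low = (uint64_t)u0 * v0;	/* u0*v0 */
	int64_t mid = (int64_t)((int32_t)u1 - (int32_t)u0) *
	    (int64_t)((int32_t)v0 - (int32_t)v1);	/* may be negative */

	return ((high << (2 * n)) + (high << n) +
	    ((uint64_t)mid << n) + (low << n) + low);
}

int
main(void)
{
	uint32_t u = 0xdeadbeef, v = 0x01234567;

	assert(mul32_karatsuba(u, v) == (uint64_t)u * v);
	printf("%llx\n", (unsigned long long)mul32_karatsuba(u, v));
	return (0);
}
```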
|