
Searched +full:1 +full:v0 (Results 1 – 25 of 575) sorted by relevance


/freebsd/sys/crypto/openssl/aarch64/
vpsm4_ex-armv8.S
73 movi v0.16b,#64
74 cbnz w2,1f
76 1:
77 mov w7,v5.s[1]
86 tbl v0.16b, {v4.16b}, v26.16b
87 ushr v2.16b, v0.16b, 4
88 and v0.16b, v0.16b, v31.16b
89 tbl v0.16b, {v28.16b}, v0.16b
91 eor v0.16b, v0.16b, v2.16b
93 aese v0.16b,v1.16b
[all …]
ghashv8-armx.S
17 dup v17.4s,v17.s[1]
22 shl v3.2d,v3.2d,#1
25 orr v3.16b,v3.16b,v18.16b //H<<<=1
31 pmull v0.1q,v20.1d,v20.1d
33 pmull2 v2.1q,v20.2d,v20.2d
34 pmull v1.1q,v16.1d,v16.1d
36 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
37 eor v18.16b,v0.16b,v2.16b
40 pmull v18.1q,v0.1d,v19.1d //1st phase
42 ins v2.d[0],v1.d[1]
[all …]
vpsm4-armv8.S
86 movi v0.16b,#64
87 cbnz w2,1f
89 1:
90 mov w7,v5.s[1]
100 sub v4.16b,v4.16b,v0.16b
102 sub v4.16b,v4.16b,v0.16b
104 sub v4.16b,v4.16b,v0.16b
119 subs x6,x6,#1
120 b.ne 1b
138 movi v0.16b,#64
[all …]
aesv8-armx.S
23 mov x3,#-1
40 eor v0.16b,v0.16b,v0.16b
52 ext v5.16b,v0.16b,v3.16b,#12
54 aese v6.16b,v0.16b
55 subs w1,w1,#1
58 ext v5.16b,v0.16b,v5.16b,#12
60 ext v5.16b,v0.16b,v5.16b,#12
63 shl v1.16b,v1.16b,#1
70 ext v5.16b,v0.16b,v3.16b,#12
72 aese v6.16b,v0.16b
[all …]
aes-gcm-armv8_64.S
41 sub x5, x5, #1 //byte_len - 1
48 fmov d1, x10 //CTR block 1
51 add w12, w12, #1 //increment rev_ctr32
55 rev w9, w12 //CTR block 1
56 add w12, w12, #1 //CTR block 1
59 orr x9, x11, x9, lsl #32 //CTR block 1
60 …ld1 { v0.16b}, [x16] //special case vector load initial counter so we …
62 fmov v1.d[1], x9 //CTR block 1
67 add w12, w12, #1 //CTR block 2
69 fmov v2.d[1], x9 //CTR block 2
[all …]
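The comment trail above shows GCM's counter (CTR) blocks being produced by incrementing a 32-bit counter in w12 and byte-reversing it into each new block. A minimal C sketch of that inc32 step on a 16-byte counter block follows; the function name and layout are illustrative, not taken from the assembly.

#include <stdint.h>

/* Increment the last 32 bits of a 16-byte GCM counter block, big-endian,
 * wrapping modulo 2^32: the scalar equivalent of the add/rev pair the
 * assembly above keeps in w12 and w9. */
static void gcm_inc32(uint8_t block[16])
{
    uint32_t ctr = ((uint32_t)block[12] << 24) | ((uint32_t)block[13] << 16) |
                   ((uint32_t)block[14] << 8)  |  (uint32_t)block[15];
    ctr++;
    block[12] = (uint8_t)(ctr >> 24);
    block[13] = (uint8_t)(ctr >> 16);
    block[14] = (uint8_t)(ctr >> 8);
    block[15] = (uint8_t)ctr;
}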
vpaes-armv8.S
142 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
145 tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
146 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
147 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
156 tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
159 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
162 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
164 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
167 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
168 and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
[all …]
bsaes-armv8.S
39 // v0-v7 input data
43 // v0-v7 output data
54 sub x10, x10, #1
55 eor v0.16b, v0.16b, v8.16b
61 tbl v0.16b, {v0.16b}, v10.16b
70 ushr v8.2d, v0.2d, #1
72 ushr v10.2d, v4.2d, #1
73 ushr v18.2d, v2.2d, #1
75 ushr v19.2d, v6.2d, #1
83 shl v8.2d, v8.2d, #1
[all …]
/freebsd/contrib/xz/src/liblzma/check/
crc_x86_clmul.h
8 /// The CRC32 and CRC64 implementations use 32/64-bit x86 SSSE3, SSE4.1, and
54 __attribute__((__target__("ssse3,sse4.1,pclmul")))
176 __m128i v0, v1, v2, v3; in crc32_arch_optimized() local
198 if (size & 1) in crc32_arch_optimized()
201 v0 = my_set_low64((int64_t)x); in crc32_arch_optimized()
202 v0 = shift_left(v0, 8 - size); in crc32_arch_optimized()
205 v0 = my_set_low64((int64_t)(crc ^ read64le(buf))); in crc32_arch_optimized()
208 // we can read the last 1-7 bytes with read64le(buf + size). in crc32_arch_optimized()
220 v0 = _mm_insert_epi32(v0, (int32_t)high, 2); in crc32_arch_optimized()
221 v0 = _mm_insert_epi32(v0, (int32_t)(high >> 32), 3); in crc32_arch_optimized()
[all …]
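The snippet above is liblzma's SSE4.1/PCLMUL fast path, which assembles short or unaligned leading data with _mm_insert_epi32 before carry-less-multiply folding. For orientation, the checksum it accelerates is plain CRC-32 (reflected polynomial 0xEDB88320); the bitwise loop below is an illustrative reference only, not liblzma code, and ignores the library's internal calling convention.

#include <stddef.h>
#include <stdint.h>

/* Reference one-bit-at-a-time CRC-32: all-ones initial value, reflected
 * polynomial 0xEDB88320, final complement.  The CLMUL routine computes the
 * same checksum, folding many bytes per instruction instead. */
static uint32_t crc32_bitwise(const uint8_t *buf, size_t size, uint32_t crc)
{
    crc = ~crc;
    for (size_t i = 0; i < size; i++) {
        crc ^= buf[i];
        for (int b = 0; b < 8; b++)
            crc = (crc >> 1) ^ (0xEDB88320U & (0U - (crc & 1U)));
    }
    return ~crc;
}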
/freebsd/contrib/file/tests/
Makefile.am
6 android-vdex-1.result \
7 android-vdex-1.testfile \
40 gpkg-1-zst.result \
41 gpkg-1-zst.testfile \
80 keyman-1.result \
81 keyman-1.testfile \
141 zstd-dictionary-1.result \
147 zstd-v0.2-FF.result \
148 zstd-v0.2-FF.testfile \
149 zstd-v0.3-FF.result \
[all …]
/freebsd/crypto/openssl/crypto/aes/asm/
vpaes-ppc.pl
62 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
246 vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0
247 vperm v0, $iptlo, $iptlo, v0 # vpshufb %xmm1, %xmm2, %xmm1
249 vxor v0, v0, v5 # vpxor %xmm5, %xmm1, %xmm0
250 vxor v0, v0, v1 # vpxor %xmm2, %xmm0, %xmm0
260 vperm v0, $sb1u, v7, v3 # vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
264 vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A
268 vperm v3, v0, v7, v1 # vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
270 vperm v0, v0, v7, v4 # vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
273 vxor v0, v0, v3 # vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
[all …]
vpaes-armv8.pl
46 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
205 ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
208 tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
209 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
210 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
219 tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
222 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
225 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
227 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
230 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
[all …]
bsaes-armv8.pl
15 $0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1;
72 // v0-v7 input data
76 // v0-v7 output data
87 sub x10, x10, #1
88 eor v0.16b, v0.16b, v8.16b
94 tbl v0.16b, {v0.16b}, v10.16b
103 ushr v8.2d, v0.2d, #1
105 ushr v10.2d, v4.2d, #1
106 ushr v18.2d, v2.2d, #1
108 ushr v19.2d, v6.2d, #1
[all …]
/freebsd/contrib/bearssl/src/rsa/
H A Drsa_i15_privexp.c33 * We want to invert e modulo phi = (p-1)(q-1). This first in br_rsa_i15_compute_privexp()
40 * values (phi/4, 1 and e) and calling moddiv, that requires in br_rsa_i15_compute_privexp()
52 * - We find small u, v such that u*e - v*r = 1 (using a in br_rsa_i15_compute_privexp()
65 uint32_t r, a, b, u0, v0, u1, v1, he, hr; in br_rsa_i15_compute_privexp() local
71 if (e < 3 || (e & 1) == 0) { in br_rsa_i15_compute_privexp()
85 || (pbuf[plen - 1] & 1) != 1) in br_rsa_i15_compute_privexp()
96 || (qbuf[qlen - 1] & 1) != 1) in br_rsa_i15_compute_privexp()
112 q = p + 1 + plen; in br_rsa_i15_compute_privexp()
117 * Compute phi = (p-1)*(q-1), then move it over p-1 and q-1 (that in br_rsa_i15_compute_privexp()
120 * p-1 and q-1, which is usually exact but may overshoot by one 1 in br_rsa_i15_compute_privexp()
[all …]
rsa_i31_privexp.c
33 * We want to invert e modulo phi = (p-1)(q-1). This first in br_rsa_i31_compute_privexp()
40 * values (phi/4, 1 and e) and calling moddiv, that requires in br_rsa_i31_compute_privexp()
52 * - We find small u, v such that u*e - v*r = 1 (using a in br_rsa_i31_compute_privexp()
65 uint32_t r, a, b, u0, v0, u1, v1, he, hr; in br_rsa_i31_compute_privexp() local
71 if (e < 3 || (e & 1) == 0) { in br_rsa_i31_compute_privexp()
85 || (pbuf[plen - 1] & 1) != 1) in br_rsa_i31_compute_privexp()
96 || (qbuf[qlen - 1] & 1) != 1) in br_rsa_i31_compute_privexp()
112 q = p + 1 + plen; in br_rsa_i31_compute_privexp()
117 * Compute phi = (p-1)*(q-1), then move it over p-1 and q-1 (that in br_rsa_i31_compute_privexp()
120 * p-1 and q-1, which is usually exact but may overshoot by one 1 in br_rsa_i31_compute_privexp()
[all …]
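Both privexp snippets describe the same computation: invert the public exponent e modulo phi = (p-1)(q-1) using a Bezout relation of the form u*e - v*r = 1 found with an extended-Euclid style loop. The toy C version below shows only that arithmetic identity on small numbers; BearSSL performs it on multi-word integers in constant time, so none of this mirrors its actual code.

#include <stdint.h>
#include <stdio.h>

/* Toy modular inverse via extended Euclid: returns d with (d * e) % phi == 1,
 * or -1 when gcd(e, phi) != 1.  Small-number illustration only. */
static int64_t inv_mod(int64_t e, int64_t phi)
{
    int64_t old_r = e, r = phi;
    int64_t old_u = 1, u = 0;
    while (r != 0) {
        int64_t q = old_r / r, t;
        t = old_r - q * r; old_r = r; r = t;
        t = old_u - q * u; old_u = u; u = t;
    }
    return old_r == 1 ? ((old_u % phi) + phi) % phi : -1;
}

int main(void)
{
    /* Textbook example: p = 61, q = 53, phi = 60 * 52 = 3120, e = 17, d = 2753. */
    printf("%lld\n", (long long)inv_mod(17, 3120));
    return 0;
}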
/freebsd/lib/libc/aarch64/string/
H A Dstrncmp.S21 subs x2, x2, #1
24 mov x13, #-1 // save constants for later
50 cmeq v5.16b, v0.16b, #0
66 tbl v0.16b, {v0.16b}, v4.16b
68 b 1f
72 1:
79 b 1f
86 1:
87 cmeq v2.16b, v0.16b, #0 // NUL byte present?
88 cmeq v4.16b, v0.16b, v4.16b // which bytes match?
[all …]
strcmp.S
21 mov x13, #-1
44 cmeq v5.16b, v0.16b, #0
60 tbl v0.16b, {v0.16b}, v4.16b
62 b 1f
66 1:
73 b 1f
80 1:
82 cmeq v2.16b, v0.16b, #0 // NUL byte present?
83 cmeq v4.16b, v0.16b, v4.16b // which bytes match?
102 cmeq v0.16b, v0.16b, v2.16b
[all …]
/freebsd/crypto/openssl/crypto/des/
H A Dcfb64ede.c29 register DES_LONG v0, v1; in DES_ede3_cfb64_encrypt() local
39 c2l(iv, v0); in DES_ede3_cfb64_encrypt()
42 ti[0] = v0; in DES_ede3_cfb64_encrypt()
43 ti[1] = v1; in DES_ede3_cfb64_encrypt()
45 v0 = ti[0]; in DES_ede3_cfb64_encrypt()
46 v1 = ti[1]; in DES_ede3_cfb64_encrypt()
49 l2c(v0, iv); in DES_ede3_cfb64_encrypt()
56 n = (n + 1) & 0x07; in DES_ede3_cfb64_encrypt()
61 c2l(iv, v0); in DES_ede3_cfb64_encrypt()
64 ti[0] = v0; in DES_ede3_cfb64_encrypt()
[all …]
cfb64enc.c
28 register DES_LONG v0, v1; in DES_cfb64_encrypt() local
38 c2l(iv, v0); in DES_cfb64_encrypt()
39 ti[0] = v0; in DES_cfb64_encrypt()
41 ti[1] = v1; in DES_cfb64_encrypt()
44 v0 = ti[0]; in DES_cfb64_encrypt()
45 l2c(v0, iv); in DES_cfb64_encrypt()
46 v0 = ti[1]; in DES_cfb64_encrypt()
47 l2c(v0, iv); in DES_cfb64_encrypt()
53 n = (n + 1) & 0x07; in DES_cfb64_encrypt()
58 c2l(iv, v0); in DES_cfb64_encrypt()
[all …]
cfb_enc.c
27 * Until Aug 1 2003 this function did not correctly implement CFB-r, so it
34 register DES_LONG d0, d1, v0, v1; in DES_cfb_encrypt() local
55 c2l(iv, v0); in DES_cfb_encrypt()
60 ti[0] = v0; in DES_cfb_encrypt()
61 ti[1] = v1; in DES_cfb_encrypt()
66 d1 ^= ti[1]; in DES_cfb_encrypt()
74 v0 = v1; in DES_cfb_encrypt()
77 v0 = d0; in DES_cfb_encrypt()
82 l2c(v0, iv); in DES_cfb_encrypt()
87 sh[0] = v0, sh[1] = v1, sh[2] = d0, sh[3] = d1; in DES_cfb_encrypt()
[all …]
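The three DES CFB snippets share one pattern: encrypt the current IV to get a keystream block, XOR it with the data, then feed the ciphertext back in as the next IV (the cfb64 variants additionally keep a byte counter n so callers need not pass multiples of 8 bytes). A compact sketch of whole-block CFB-64 encryption in C follows; block_encrypt_fn is a placeholder for the DES or 3DES primitive, not the OpenSSL API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* CFB mode with 64-bit feedback over an 8-byte block cipher, the structure of
 * DES_cfb64_encrypt()/DES_ede3_cfb64_encrypt() above, restricted to whole
 * blocks for clarity. */
typedef void (*block_encrypt_fn)(const uint8_t in[8], uint8_t out[8], const void *key);

static void cfb64_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t nblocks,
                                 uint8_t iv[8], const void *key, block_encrypt_fn enc)
{
    uint8_t ks[8];
    for (size_t b = 0; b < nblocks; b++) {
        enc(iv, ks, key);                 /* keystream = E_k(iv) */
        for (int i = 0; i < 8; i++)
            out[i] = in[i] ^ ks[i];       /* ciphertext = plaintext ^ keystream */
        memcpy(iv, out, 8);               /* ciphertext block becomes the next IV */
        in += 8;
        out += 8;
    }
}

Decryption uses the same keystream computation (the block cipher is always run in the encrypt direction, which is why the snippets above only ever call the underlying DES encryption) and feeds the received ciphertext back as the next IV.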
/freebsd/crypto/openssl/crypto/modes/asm/
H A Dghash-riscv64-zvkb-zvbc.pl19 # 1. Redistributions of source code must retain the above copyright
69 my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
90 @{[vsll_vi $V1, $V1, 1]} # vsll.vi v1, v1, 1
97 @{[vslideup_vi $V4, $V3, 1]} # vslideup.vi v4, v3, 1
98 @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1
100 @{[vmv_v_i $V0, 2]} # vmv.v.i v0, 2
101 @{[vor_vv_v0t $V1, $V1, $V4]} # vor.vv v1, v1, v4, v0.t
104 @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3
106 @{[vmerge_vim $V3, $V3, 3]} # vmerge.vim v3, v3, 3, v0
107 @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3
[all …]
/freebsd/lib/libc/quad/
H A Dmuldi3.c14 * 1. Redistributions of source code must retain the above copyright
48 * v = 2^n v1 * v0
52 * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0
53 * = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0
56 * and add 2^n u0 v0 to the last term and subtract it from the middle.
60 * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
61 * (2^n + 1) (u0 v0)
66 * (2^n) (u1 - u0) (v0 - v1) + [(u1-u0)... = mid]
67 * (2^n + 1) (u0 v0) [u0v0 = low]
69 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
[all …]
/freebsd/sys/libkern/arm/
H A Dmuldi3.c16 * 1. Redistributions of source code must retain the above copyright
50 * v = 2^n v1 * v0
54 * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0
55 * = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0
58 * and add 2^n u0 v0 to the last term and subtract it from the middle.
62 * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
63 * (2^n + 1) (u0 v0)
68 * (2^n) (u1 - u0) (v0 - v1) + [(u1-u0)... = mid]
69 * (2^n + 1) (u0 v0) [u0v0 = low]
71 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
[all …]
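Both muldi3.c copies carry the same derivation: split each 64-bit operand into halves and rewrite the product so that only three half-size multiplies are needed (u1*v1, u0*v0, and (u1-u0)*(v0-v1)). The sketch below checks that identity at 32x32 -> 64 so it fits in standard C types; it illustrates the algebra only and is not the FreeBSD __muldi3 implementation.

#include <stdint.h>
#include <stdio.h>

/* Karatsuba-style product: with u = 2^16*u1 + u0 and v = 2^16*v1 + v0,
 *   u*v = (2^32 + 2^16)*(u1*v1) + 2^16*(u1 - u0)*(v0 - v1) + (2^16 + 1)*(u0*v0),
 * i.e. three half-width multiplies instead of four. */
static uint64_t mul32_karatsuba(uint32_t u, uint32_t v)
{
    uint32_t u1 = u >> 16, u0 = u & 0xFFFF;
    uint32_t v1 = v >> 16, v0 = v & 0xFFFF;

    uint64_t high = (uint64_t)u1 * v1;
    uint64_t low  = (uint64_t)u0 * v0;
    int64_t  mid  = ((int64_t)u1 - u0) * ((int64_t)v0 - v1);   /* may be negative */

    return (high << 32) + ((high + (uint64_t)mid + low) << 16) + low;
}

int main(void)
{
    uint32_t u = 0x12345678, v = 0x9ABCDEF0;
    printf("%d\n", mul32_karatsuba(u, v) == (uint64_t)u * v);  /* prints 1 */
    return 0;
}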
/freebsd/contrib/libucl/klib/
H A Dkvec.h39 return 1;
55 #define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, +…
77 size_t _ts = ((v).m > 1 ? (v).m * kv_grow_factor : 2); \
87 #define kv_copy_safe(type, v1, v0, el) do { \ argument
88 if ((v1).m < (v0).n) kv_resize_safe(type, v1, (v0).n, el); \
89 (v1).n = (v0).n; \
90 memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \
104 memmove((v).a + 1, (v).a, sizeof(type) * (v).n); \
109 #define kv_concat_safe(type, v1, v0, el) do { \ argument
110 if ((v1).m < (v0).n + (v1).n) \
[all …]
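The truncated kv_roundup32() macro above rounds its argument up to the next power of two by smearing the highest set bit into every lower position. Written out as a plain function (illustrative only, not part of klib) the idiom looks like this:

#include <stdint.h>

/* Round x up to the next power of two for x >= 1; a value that is already a
 * power of two is returned unchanged.  Same decrement / OR-smear / increment
 * trick as kv_roundup32(). */
static uint32_t roundup_pow2_32(uint32_t x)
{
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    return x + 1;
}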
/freebsd/contrib/llvm-project/clang/lib/Headers/
H A Dvelintrin_approx.h12 static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) { in _vel_approx_vfdivs_vvvl() argument
19 v2 = _vel_vfmuls_vvvl(v0, v3, l); in _vel_approx_vfdivs_vvvl()
20 v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); in _vel_approx_vfdivs_vvvl()
22 v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); in _vel_approx_vfdivs_vvvl()
23 v0 = _vel_vfmads_vvvvl(v2, v3, v0, l); in _vel_approx_vfdivs_vvvl()
24 return v0; in _vel_approx_vfdivs_vvvl()
27 static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) { in _vel_approx_pvfdiv_vvvl() argument
34 v2 = _vel_pvfmul_vvvl(v0, v3, l); in _vel_approx_pvfdiv_vvvl()
35 v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); in _vel_approx_pvfdiv_vvvl()
37 v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); in _vel_approx_pvfdiv_vvvl()
[all …]
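The approximate-divide helpers above refine a hardware reciprocal estimate with fused multiply-subtract/multiply-add steps, i.e. Newton-Raphson iteration for 1/b followed by a quotient correction. A scalar C sketch of that scheme follows; the crude fixed starting estimate stands in for the VE reciprocal-approximation instruction and only converges when it lies in (0, 2/b), so treat it as illustration rather than a usable divider.

#include <stdio.h>

/* Newton-Raphson refinement of a reciprocal estimate r of b:
 *   r = r * (2 - b*r)   roughly doubles the number of correct bits per step.
 * The quotient a*r is then fixed up once with the fnmsb/fmad pattern seen in
 * the intrinsics above: q = q + r*(a - b*q). */
static float approx_div(float a, float b)
{
    float r = 0.1f;                 /* stand-in for a hardware reciprocal estimate */
    for (int i = 0; i < 6; i++)
        r = r * (2.0f - b * r);     /* converges only if 0 < r < 2/b */
    float q = a * r;
    return q + r * (a - b * q);     /* one correction step */
}

int main(void)
{
    printf("%f\n", approx_div(10.0f, 4.0f));   /* ~2.500000 */
    return 0;
}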
/freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/blake3/
H A Db3_aarch64_sse2.S82 eor v0.16b, v2.16b, v0.16b
110 mov v1.s[1], w5
115 and v0.8b, v1.8b, v0.8b
117 mov v3.d[1], v0.d[0]
120 uzp1 v1.4s, v0.4s, v6.4s
121 uzp2 v0.4s, v0.4s, v6.4s
126 add v2.4s, v2.4s, v0.4s
154 mov v7.s[1], v6.s[2]
168 uzp2 v17.4s, v17.4s, v0.4s
180 zip1 v18.2d, v16.2d, v0.2d
[all …]
