| /freebsd/contrib/googletest/googletest/include/gtest/ |
| H A D | gtest_pred_impl.h |
      48   // ASSERT_PRED_FORMAT1(pred_format, v1)
      49   // ASSERT_PRED_FORMAT2(pred_format, v1, v2)
      60   // ASSERT_PRED1(pred, v1)
      61   // ASSERT_PRED2(pred, v1, v2)
      65   // and the values v1, v2, ..., must support the << operator for
      88   Pred pred, const T1& v1) {   in AssertPred1Helper() argument
      89   if (pred(v1)) return AssertionSuccess();   in AssertPred1Helper()
      94   << e1 << " evaluates to " << ::testing::PrintToString(v1);   in AssertPred1Helper()
      99   #define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) \   argument
     100   GTEST_ASSERT_(pred_format(#v1, v1), on_failure)
     [all …]
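The comments above describe GoogleTest's predicate assertions: (ASSERT|EXPECT)_PRED<n> checks a plain boolean predicate and prints each argument's value on failure (hence the requirement that values support <<), while (ASSERT|EXPECT)_PRED_FORMAT<n> takes a predicate-formatter that also receives the argument expressions as text. A minimal usage sketch, with hypothetical IsEven/BothEven helpers and assuming the test is linked against gtest_main:

    #include <gtest/gtest.h>

    static bool IsEven(int n) { return n % 2 == 0; }

    // A predicate-formatter receives the source text of each argument (e1, e2)
    // as well as the evaluated values, so failures can quote the expressions.
    static testing::AssertionResult BothEven(const char *e1, const char *e2,
                                             int a, int b) {
        if (a % 2 == 0 && b % 2 == 0)
            return testing::AssertionSuccess();
        return testing::AssertionFailure()
               << e1 << " = " << a << " and " << e2 << " = " << b
               << " are not both even";
    }

    TEST(PredMacros, Demo) {
        EXPECT_PRED1(IsEven, 4);              // on failure, reports the argument's printed value
        EXPECT_PRED_FORMAT2(BothEven, 2, 6);  // on failure, uses BothEven's custom message
    }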
|
| /freebsd/contrib/llvm-project/clang/lib/Headers/ |
| H A D | velintrin_approx.h |
      12   static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {   in _vel_approx_vfdivs_vvvl() argument
      15   v5 = _vel_vrcps_vvl(v1, l);   in _vel_approx_vfdivs_vvvl()
      17   v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);   in _vel_approx_vfdivs_vvvl()
      20   v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);   in _vel_approx_vfdivs_vvvl()
      22   v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);   in _vel_approx_vfdivs_vvvl()
      27   static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {   in _vel_approx_pvfdiv_vvvl() argument
      30   v5 = _vel_pvrcp_vvl(v1, l);   in _vel_approx_pvfdiv_vvvl()
      32   v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);   in _vel_approx_pvfdiv_vvvl()
      35   v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);   in _vel_approx_pvfdiv_vvvl()
      37   v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);   in _vel_approx_pvfdiv_vvvl()
      [all …]
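These VE intrinsics implement an approximate float divide by refining a hardware reciprocal estimate (_vel_vrcps_vvl / _vel_pvrcp_vvl) with fused negative multiply-subtract steps. Below is a scalar sketch of the same Newton-Raphson refinement, with a plain 1/d standing in for the hardware estimate; the helper name and step count are illustrative, not the exact intrinsic sequence:

    #include <cmath>
    #include <cstdio>

    // Refine a reciprocal estimate r of d with r' = r + r*(1 - d*r), then
    // correct the quotient once -- the "vfnmsb" (negative multiply-subtract)
    // pattern used by the vector code above.
    static float approx_divs(float n, float d) {
        float r = 1.0f / d;                  // stand-in for the reciprocal estimate
        float e = std::fma(-d, r, 1.0f);     // e = 1 - d*r
        r = std::fma(r, e, r);               // refined reciprocal
        float q = n * r;                     // first quotient estimate
        float t = std::fma(-d, q, n);        // residual n - d*q
        return std::fma(t, r, q);            // corrected quotient
    }

    int main() {
        std::printf("%.7f\n", approx_divs(1.0f, 3.0f));   // ~0.3333333
    }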
|
| /freebsd/crypto/openssl/crypto/modes/asm/ |
| H A D | ghash-riscv64-zvkb-zvbc.pl |
      69   my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
      85   @{[vlse64_v $V1, $H, $TMP0]}   # vlse64.v v1, (a1), t0
      89   @{[vsrl_vx $V3, $V1, $TMP1]}   # vsrl.vx v3, v1, t1
      90   @{[vsll_vi $V1, $V1, 1]}   # vsll.vi v1, v1, 1
     101   @{[vor_vv_v0t $V1, $V1, $V4]}   # vor.vv v1, v1, v4, v0.t
     109   @{[vxor_vv_v0t $V1, $V1, $V2]}   # vxor.vv v1, v1, v2, v0.t
     111   @{[vse64_v $V1, $Htable]}   # vse64.v v1, (a0)
     125   my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");
     160   # v1 = (a0b1)l,(a0b0)l
     161   @{[vclmul_vx $V1, $V5, $TMP0]}   # vclmul.vx v1, v5, t0
     [all …]
|
| /freebsd/lib/libpmc/pmu-events/arch/arm64/ |
| H A D | mapfile.csv |
      15   0x00000000410fd020,v1,arm/cortex-a34,core
      16   0x00000000410fd030,v1,arm/cortex-a53,core
      17   0x00000000420f1000,v1,arm/cortex-a53,core
      18   0x00000000410fd040,v1,arm/cortex-a35,core
      19   0x00000000410fd050,v1,arm/cortex-a55,core
      20   0x00000000410fd060,v1,arm/cortex-a65,core
      21   0x00000000410fd070,v1,arm/cortex-a57-a72,core
      22   0x00000000410fd080,v1,arm/cortex-a57-a72,core
      23   0x00000000410fd090,v1,arm/cortex-a73,core
      24   0x00000000410fd0a0,v1,arm/cortex-a75,core
      [all …]
|
| /freebsd/usr.sbin/sa/tests/ |
| H A D | legacy_test.sh |
      27   install -c -m 644 $LCDIR/v1-$ARCH-acct.in v1-$ARCH-acct.in
      33   sa -u v1-$ARCH-acct.in | check 1 $DIR/v1-$ARCH-u.out
      37   sa -i v1-$ARCH-acct.in | check 3 $DIR/v1-$ARCH-sav.out
      38   sa -im v1-$ARCH-acct.in | check 4 $DIR/v1-$ARCH-usr.out
      40   # Backward compatibility of v1 summary files
      41   sa -P $DIR/v1-$ARCH-sav.in -U $DIR/v1-$ARCH-usr.in /dev/null |
      42   check 5 $DIR/v1-$ARCH-sav.out
      43   sa -m -P $DIR/v1-$ARCH-sav.in -U $DIR/v1-$ARCH-usr.in /dev/null |
      44   check 6 $DIR/v1-$ARCH-usr.out
      47   install -c -m 644 $DIR/v1-$ARCH-sav.in v2c-$ARCH-sav.in
      [all …]
|
| /freebsd/sys/crypto/openssl/aarch64/ |
| H A D | vpsm4-armv8.S |
      99   tbl v1.16b,{v16.16b,v17.16b,v18.16b,v19.16b},v4.16b
     101   tbx v1.16b,{v20.16b,v21.16b,v22.16b,v23.16b},v4.16b
     103   tbx v1.16b,{v24.16b,v25.16b,v26.16b,v27.16b},v4.16b
     105   tbx v1.16b,{v28.16b,v29.16b,v30.16b,v31.16b},v4.16b
     106   mov w7,v1.s[0]
     139   movi v1.16b,#128
     142   sub v1.16b,v12.16b,v1.16b
     146   tbl v1.16b,{v24.16b,v25.16b,v26.16b,v27.16b},v1.16b
     148   add v0.2d,v0.2d,v1.2d
     155   eor v1.16b,v0.16b,v12.16b
     [all …]
|
| H A D | aesv8-armx.S |
      43   ld1 {v1.4s,v2.4s},[x3],#32
      61   eor v6.16b,v6.16b,v1.16b
      63   shl v1.16b,v1.16b,#1
      67   ld1 {v1.4s},[x3]
      78   eor v6.16b,v6.16b,v1.16b
      80   shl v1.16b,v1.16b,#1
      92   eor v6.16b,v6.16b,v1.16b
     128   eor v6.16b,v6.16b,v1.16b
     130   shl v1.16b,v1.16b,#1
     159   eor v6.16b,v6.16b,v1.16b
     [all …]
|
| H A D | vpsm4_ex-armv8.S |
      92   eor v1.16b, v1.16b, v1.16b
      93   aese v0.16b,v1.16b
     138   eor v1.16b, v1.16b, v1.16b
     139   aese v0.16b,v1.16b
     149   ushr v1.4s,v12.4s,32-10
     153   sli v1.4s,v12.4s,10
     157   eor v24.16b,v24.16b,v1.16b
     172   eor v1.16b, v1.16b, v1.16b
     173   aese v0.16b,v1.16b
     183   ushr v1.4s,v13.4s,32-10
     [all …]
|
| H A D | ghashv8-armx.S |
      34   pmull v1.1q,v16.1d,v16.1d
      38   eor v1.16b,v1.16b,v17.16b
      39   eor v1.16b,v1.16b,v18.16b
      42   ins v2.d[0],v1.d[1]
      43   ins v1.d[1],v0.d[0]
      44   eor v0.16b,v1.16b,v18.16b
      60   pmull v1.1q,v16.1d,v17.1d
      66   eor v1.16b,v1.16b,v16.16b
      69   eor v1.16b,v1.16b,v18.16b
      74   ins v2.d[0],v1.d[1]
      [all …]
|
| H A D | aes-gcm-armv8_64.S |
      62   fmov v1.d[1], x9   //CTR block 1
      82   aese v1.16b, v18.16b
      83   aesmc v1.16b, v1.16b   //AES block 1 - round 0
     109   aese v1.16b, v19.16b
     110   aesmc v1.16b, v1.16b   //AES block 1 - round 1
     121   aese v1.16b, v20.16b
     122   aesmc v1.16b, v1.16b   //AES block 1 - round 2
     138   aese v1.16b, v21.16b
     139   aesmc v1.16b, v1.16b   //AES block 1 - round 3
     174   aese v1.16b, v22.16b
     [all …]
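This file interleaves AES-CTR rounds with GHASH for AES-GCM; applications do not call it directly, it is normally reached through OpenSSL's EVP interface when the CPU advertises the AES/PMULL extensions. A minimal sketch of driving AES-128-GCM through the EVP API (error handling collapsed into a single flag; key, iv, aad and pt buffers are assumed to be provided by the caller):

    #include <openssl/evp.h>

    // Encrypt pt[pt_len] with AES-128-GCM, authenticating aad[aad_len];
    // writes ciphertext to ct and a 16-byte tag to tag.  Returns nonzero on success.
    static int gcm_encrypt(const unsigned char key[16], const unsigned char iv[12],
                           const unsigned char *aad, int aad_len,
                           const unsigned char *pt, int pt_len,
                           unsigned char *ct, unsigned char tag[16]) {
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
        int len, ok = ctx != NULL;
        ok = ok && EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv);
        ok = ok && EVP_EncryptUpdate(ctx, NULL, &len, aad, aad_len);   // AAD only
        ok = ok && EVP_EncryptUpdate(ctx, ct, &len, pt, pt_len);       // ciphertext
        ok = ok && EVP_EncryptFinal_ex(ctx, ct + len, &len);
        ok = ok && EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
        EVP_CIPHER_CTX_free(ctx);
        return ok;
    }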
|
| H A D | bsaes-armv8.S |
      56   eor v1.16b, v1.16b, v8.16b
      62   tbl v1.16b, {v1.16b}, v10.16b
      74   eor v8.16b, v8.16b, v1.16b
      82   eor v1.16b, v1.16b, v8.16b
      94   ushr v9.2d, v1.2d, #2
     115   eor v1.16b, v1.16b, v9.16b
     121   ushr v10.2d, v1.2d, #4
     141   eor v1.16b, v1.16b, v10.16b
     150   eor v1.16b, v17.16b, v1.16b
     159   tbl v1.16b, {v1.16b}, v28.16b
     [all …]
|
| /freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/blake3/ |
| H A D | b3_aarch64_sse2.S |
      24   * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
      83   eor v1.16b, v3.16b, v1.16b
     110   mov v1.s[1], w5
     115   and v0.8b, v1.8b, v0.8b
     120   uzp1 v1.4s, v0.4s, v6.4s
     122   add v2.4s, v2.4s, v1.4s
     123   uzp1 v18.4s, v1.4s, v1.4s
     166   ext v17.16b, v18.16b, v1.16b, #8
     177   ext v18.16b, v1.16b, v1.16b, #12
     179   ext v1.16b, v1.16b, v18.16b, #12
     [all …]
|
| /freebsd/lib/libpmc/pmu-events/arch/x86/ |
| H A D | mapfile.csv |
      13   GenuineIntel-6-7A,v1,goldmontplus,core
      36   GenuineIntel-6-55-[01234],v1,skylakex,core
      37   GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
      38   GenuineIntel-6-7D,v1,icelake,core
      39   GenuineIntel-6-7E,v1,icelake,core
      40   GenuineIntel-6-8[CD],v1,tigerlake,core
      41   GenuineIntel-6-A7,v1,icelake,core
      42   GenuineIntel-6-6A,v1,icelakex,core
      43   GenuineIntel-6-6C,v1,icelakex,core
      44   GenuineIntel-6-86,v1,tremontx,core
      [all …]
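Each mapfile.csv row maps a vendor/family/model string to a PMU event table and its version (the v1 column matched here); bracketed sets such as [01234] cover several stepping digits with one row. The sketch below only illustrates how such a bracket pattern can be matched against a concrete CPUID string, using fnmatch() as a stand-in; the actual lookup code in libpmc may use a different mechanism:

    #include <fnmatch.h>
    #include <cstdio>

    int main() {
        // Row 36 above: steppings 0-4 of family 6, model 0x55 map to skylakex.
        const char *pattern = "GenuineIntel-6-55-[01234]";
        const char *cpuid   = "GenuineIntel-6-55-4";
        if (fnmatch(pattern, cpuid, 0) == 0)
            std::printf("%s -> skylakex (v1)\n", cpuid);
        else
            std::printf("%s not matched\n", cpuid);
    }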
|
| /freebsd/sys/contrib/device-tree/src/powerpc/fsl/ |
| H A D | interlaken-lac-portals.dtsi |
      39   compatible = "fsl,interlaken-lac-portal-v1.0";
      44   compatible = "fsl,interlaken-lac-portal-v1.0";
      49   compatible = "fsl,interlaken-lac-portal-v1.0";
      54   compatible = "fsl,interlaken-lac-portal-v1.0";
      59   compatible = "fsl,interlaken-lac-portal-v1.0";
      64   compatible = "fsl,interlaken-lac-portal-v1.0";
      69   compatible = "fsl,interlaken-lac-portal-v1.0";
      74   compatible = "fsl,interlaken-lac-portal-v1.0";
      79   compatible = "fsl,interlaken-lac-portal-v1.0";
      84   compatible = "fsl,interlaken-lac-portal-v1.0";
      [all …]
|
| /freebsd/crypto/openssl/crypto/des/ |
| H A D | cfb64ede.c |
      29   register DES_LONG v0, v1;   in DES_ede3_cfb64_encrypt() local
      40   c2l(iv, v1);   in DES_ede3_cfb64_encrypt()
      43   ti[1] = v1;   in DES_ede3_cfb64_encrypt()
      46   v1 = ti[1];   in DES_ede3_cfb64_encrypt()
      50   l2c(v1, iv);   in DES_ede3_cfb64_encrypt()
      62   c2l(iv, v1);   in DES_ede3_cfb64_encrypt()
      65   ti[1] = v1;   in DES_ede3_cfb64_encrypt()
      68   v1 = ti[1];   in DES_ede3_cfb64_encrypt()
      72   l2c(v1, iv);   in DES_ede3_cfb64_encrypt()
      82   v0 = v1 = ti[0] = ti[1] = c = cc = 0;   in DES_ede3_cfb64_encrypt()
      [all …]
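DES_ede3_cfb64_encrypt() runs three-key triple DES in 64-bit cipher-feedback mode; v0/v1 hold the two 32-bit halves of the feedback block that the c2l/l2c macros marshal to and from the IV. A minimal sketch of calling it through the public libcrypto API (keys and data are placeholders; the same call with DES_DECRYPT reverses it):

    #include <openssl/des.h>

    int main() {
        DES_cblock k1 = {1, 2, 3, 4, 5, 6, 7, 8}, k2 = {8, 7, 6, 5, 4, 3, 2, 1},
                   k3 = {1, 1, 2, 2, 3, 3, 4, 4}, iv = {0};
        DES_key_schedule ks1, ks2, ks3;
        DES_set_key_unchecked(&k1, &ks1);
        DES_set_key_unchecked(&k2, &ks2);
        DES_set_key_unchecked(&k3, &ks3);

        unsigned char msg[] = "cfb64 mode has no block-alignment requirement";
        unsigned char out[sizeof(msg)];
        int num = 0;   // running offset into the current keystream block
        DES_ede3_cfb64_encrypt(msg, out, sizeof(msg), &ks1, &ks2, &ks3,
                               &iv, &num, DES_ENCRYPT);
        return 0;
    }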
|
| H A D | cfb_enc.c |
      34   register DES_LONG d0, d1, v0, v1;   in DES_cfb_encrypt() local
      56   c2l(iv, v1);   in DES_cfb_encrypt()
      61   ti[1] = v1;   in DES_cfb_encrypt()
      74   v0 = v1;   in DES_cfb_encrypt()
      75   v1 = d0;   in DES_cfb_encrypt()
      78   v1 = d1;   in DES_cfb_encrypt()
      83   l2c(v1, iv);   in DES_cfb_encrypt()
      87   sh[0] = v0, sh[1] = v1, sh[2] = d0, sh[3] = d1;   in DES_cfb_encrypt()
      96   v0 = sh[0], v1 = sh[1];   in DES_cfb_encrypt()
     100   c2l(iv, v1);   in DES_cfb_encrypt()
     [all …]
|
| /freebsd/lib/libc/aarch64/string/ |
| H A D | strncmp.S |
      51   cmeq v6.16b, v1.16b, #0
      77   tbl v4.16b, {v1.16b}, v4.16b
     133   cmeq v6.16b, v1.16b, #0
     162   tbl v4.16b, {v1.16b}, v4.16b
     200   cmeq v1.16b, v3.16b, #0   // NUL present?
     202   shrn v1.8b, v1.8h, #4
     240   cmeq v1.16b, v1.16b, #0   // end of string?
     243   shrn v1.8b, v1.8h, #4
     259   cmeq v1.16b, v1.16b, #0
     262   shrn v1.8b, v1.8h, #4
     [all …]
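In this and the following two string routines, the cmeq/shrn pairs build a per-byte NUL mask and compress it to a nibble-per-byte bitmask that fits in a general-purpose register. A portable C analogue of the same idea is the classic SWAR zero-byte test over 8 bytes at a time, sketched below; it is the scalar counterpart of the trick, not what the assembly actually executes:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // True if any of the 8 bytes in x is zero: subtracting 1 from each byte
    // borrows only where the byte was 0x00, and the final mask keeps just
    // the high bits set by such a borrow.
    static bool has_zero_byte(uint64_t x) {
        return ((x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL) != 0;
    }

    int main() {
        const char buf[8] = {'a', 'b', 'c', 0, 'd', 'e', 'f', 'g'};
        uint64_t word;
        std::memcpy(&word, buf, sizeof(word));
        std::printf("%d\n", has_zero_byte(word));   // 1: a NUL terminator is present
    }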
|
| H A D | strlcpy.S |
      22   cmeq v1.16b, v1.16b, #0   // NUL found in head?
      28   shrn v1.8b, v1.8h, #4
      39   cmeq v1.16b, v3.16b, #0   // NUL found in second chunk?
      45   shrn v1.8b, v1.8h, #4
      62   cmeq v2.16b, v1.16b, #0   // NUL found in second chunk?
      77   cmeq v2.16b, v1.16b, #0   // NUL found in second chunk?
      95   cmeq v2.16b, v1.16b, #0   // NUL found in second chunk?
     127   cmeq v1.16b, v1.16b, #0   // bytewise compare against NUL
     128   shrn v1.8b, v1.8h, #4
     133   cmeq v1.16b, v1.16b, #0   // bytewise compare against NUL
     [all …]
|
| H A D | strcmp.S |
     101   cmeq v1.16b, v3.16b, #0
     104   shrn v1.8b, v1.8h, #4
     133   cmeq v1.16b, v1.16b, #0   // end of string?
     136   shrn v1.8b, v1.8h, #4
     151   cmeq v1.16b, v1.16b, #0
     154   shrn v1.8b, v1.8h, #4
     195   cmeq v1.16b, v0.16b, v1.16b
     196   shrn v1.8b, v1.8h, #4
     236   cmeq v1.16b, v2.16b, #0
     239   shrn v1.8b, v1.8h, #4
     [all …]
|
| /freebsd/contrib/libucl/klib/ |
| H A D | kvec.h |
      87   #define kv_copy_safe(type, v1, v0, el) do { \   argument
      88   if ((v1).m < (v0).n) kv_resize_safe(type, v1, (v0).n, el); \
      89   (v1).n = (v0).n; \
      90   memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \
     109   #define kv_concat_safe(type, v1, v0, el) do { \   argument
     110   if ((v1).m < (v0).n + (v1).n) \
     111   kv_resize_safe(type, v1, (v0).n + (v1).n, el); \
     112   memcpy((v1).a + (v1).n, (v0).a, sizeof(type) * (v0).n); \
     113   (v1).n = (v0).n + (v1).n; \
     133   #define kv_copy(type, v1, v0) do { \   argument
     [all …]
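kv_copy_safe()/kv_concat_safe() are the error-checking variants in libucl's bundled kvec.h: they grow the destination via kv_resize_safe() (presumably bailing out through the caller-supplied el argument on allocation failure) before memcpy'ing the source elements into place. A short usage sketch of the plain kvec macros, assuming klib's kvec.h is on the include path:

    #include "kvec.h"
    #include <cstdio>
    #include <cstring>

    int main() {
        kvec_t(int) src, dst;       // growable vectors of int
        kv_init(src);
        kv_init(dst);
        for (int i = 0; i < 5; i++)
            kv_push(int, src, i * i);     // grows src.a as needed

        kv_copy(int, dst, src);           // dst becomes an element-wise copy of src
        for (size_t i = 0; i < kv_size(dst); i++)
            std::printf("%d ", kv_A(dst, i));
        std::printf("\n");                // 0 1 4 9 16

        kv_destroy(src);
        kv_destroy(dst);
    }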
|
| /freebsd/lib/libc/quad/ |
| H A D | muldi3.c |
      48   * v = 2^n v1 * v0
      52   * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0
      53   *    = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0
      55   * Now add 2^n u1 v1 to the first term and subtract it from the middle,
      59   * uv = (2^2n + 2^n) (u1 v1) +
      60   *      (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
      65   * uv = (2^2n + 2^n) (u1 v1) +          [u1v1 = high]
      66   *      (2^n) (u1 - u0) (v0 - v1) +     [(u1-u0)... = mid]
      69   * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
      71   * of (u1 - u0) or (v0 - v1) may be negative.)
      [all …]
|
| /freebsd/sys/libkern/arm/ |
| H A D | muldi3.c |
      50   * v = 2^n v1 * v0
      54   * uv = 2^2n u1 v1 + 2^n u1 v0 + 2^n v1 u0 + u0 v0
      55   *    = 2^2n u1 v1 + 2^n (u1 v0 + v1 u0) + u0 v0
      57   * Now add 2^n u1 v1 to the first term and subtract it from the middle,
      61   * uv = (2^2n + 2^n) (u1 v1) +
      62   *      (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
      67   * uv = (2^2n + 2^n) (u1 v1) +          [u1v1 = high]
      68   *      (2^n) (u1 - u0) (v0 - v1) +     [(u1-u0)... = mid]
      71   * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
      73   * of (u1 - u0) or (v0 - v1) may be negative.)
      [all …]
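Both copies of muldi3.c above document the same three-multiply split of a 64-bit product into 32-bit halves. The identity is easy to check directly; the sketch below evaluates it modulo 2^64 (which is all muldi3 returns), letting unsigned wraparound absorb the possibly negative (u1 - u0)(v0 - v1) term:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t u = 0x123456789abcdef0ULL, v = 0x0fedcba987654321ULL;
        const uint64_t u1 = u >> 32, u0 = (uint32_t)u;   // u = 2^32*u1 + u0
        const uint64_t v1 = v >> 32, v0 = (uint32_t)v;   // v = 2^32*v1 + v0

        const uint64_t high = u1 * v1;                   // "high" term
        const uint64_t mid  = (u1 - u0) * (v0 - v1);     // "mid"; wraparound handles the sign
        const uint64_t low  = u0 * v0;                   // "low" term

        // uv = (2^64 + 2^32)*high + 2^32*mid + (2^32 + 1)*low
        //    = ((high + mid + low) << 32) + low          (mod 2^64)
        const uint64_t uv = ((high + mid + low) << 32) + low;
        std::printf("%s\n", uv == u * v ? "identity holds" : "mismatch");
    }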
|
| /freebsd/contrib/less/ |
| H A D | lesskey.h |
      69   #define SNPRINTF1(str, size, fmt, v1) snprintf((str), (size), (fmt), (v1))   argument
      70   #define SNPRINTF2(str, size, fmt, v1, v2) snprintf((str), (size), (fmt), (v1), (v2))   argument
      71   #define SNPRINTF3(str, size, fmt, v1, v2, v3) snprintf((str), (size), (fmt), (v1), (v2), (v3))   argument
      72   #define SNPRINTF4(str, size, fmt, v1, v2, v3, v4) snprintf((str), (size), (fmt), (v1), (v2), (v3), …   argument
      75   #define SNPRINTF1(str, size, fmt, v1) sprintf((str), (fmt), (v1))   argument
      76   #define SNPRINTF2(str, size, fmt, v1, v2) sprintf((str), (fmt), (v1), (v2))   argument
      77   #define SNPRINTF3(str, size, fmt, v1, v2, v3) sprintf((str), (fmt), (v1), (v2), (v3))   argument
      78   #define SNPRINTF4(str, size, fmt, v1, v2, v3, v4) sprintf((str), (fmt), (v1), (v2), (v3), (v4))   argument
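The two groups of definitions are the branches of a preprocessor conditional: where snprintf() exists, SNPRINTFn bounds the output by size; otherwise it falls back to sprintf() and the size argument is ignored. A minimal sketch of the pattern and its use (the HAVE_SNPRINTF guard name here is an assumption for illustration, not necessarily the one lesskey.h tests):

    #include <cstdio>

    #define HAVE_SNPRINTF 1   /* assumed for this sketch */
    #if HAVE_SNPRINTF
    #define SNPRINTF2(str, size, fmt, v1, v2) snprintf((str), (size), (fmt), (v1), (v2))
    #else
    #define SNPRINTF2(str, size, fmt, v1, v2) sprintf((str), (fmt), (v1), (v2))
    #endif

    int main() {
        char buf[32];
        SNPRINTF2(buf, sizeof(buf), "%s=%d", "maxlines", 24);   // bounded on the snprintf branch
        std::puts(buf);                                         // maxlines=24
    }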
|
| /freebsd/crypto/openssl/crypto/aes/asm/ |
| H A D | aes-riscv64-zvkned.pl |
      61   my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7,
      67   # Load all 11 round keys to v1-v11 registers.
      73   @{[vle32_v $V1, $KEYP]}
      99   # Load all 13 round keys to v1-v13 registers.
     105   @{[vle32_v $V1, $KEYP]}
     135   # Load all 15 round keys to v1-v15 registers.
     141   @{[vle32_v $V1, $KEYP]}
     175   # aes-128 encryption with round keys v1-v11
     178   @{[vaesz_vs $V24, $V1]}   # with round key w[ 0, 3]
     194   # aes-128 decryption with round keys v1-v11
     [all …]
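The comments above load 11, 13, or 15 round keys because AES-128/192/256 perform 10/12/14 rounds and the key schedule also holds the initial whitening key (applied here by the vaesz.vs step with w[0,3]). A small reference table in code form:

    #include <cstdio>

    // Nr + 1 round keys: the initial AddRoundKey plus one key per round.
    static int aes_round_keys(int key_bits) {
        switch (key_bits) {
        case 128: return 10 + 1;
        case 192: return 12 + 1;
        case 256: return 14 + 1;
        default:  return -1;
        }
    }

    int main() {
        const int sizes[] = {128, 192, 256};
        for (int bits : sizes)
            std::printf("AES-%d: %d round keys\n", bits, aes_round_keys(bits));
    }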
|
| H A D | bsaes-armv8.pl |
      89   eor v1.16b, v1.16b, v8.16b
      95   tbl v1.16b, {v1.16b}, v10.16b
     107   eor v8.16b, v8.16b, v1.16b
     115   eor v1.16b, v1.16b, v8.16b
     127   ushr v9.2d, v1.2d, #2
     148   eor v1.16b, v1.16b, v9.16b
     154   ushr v10.2d, v1.2d, #4
     174   eor v1.16b, v1.16b, v10.16b
     183   eor v1.16b, v17.16b, v1.16b
     192   tbl v1.16b, {v1.16b}, v28.16b
     [all …]
|