/freebsd/sys/crypto/openssl/aarch64/
ghashv8-armx.S:
      1  /* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
      5  .arch armv8-a+crypto
     17  dup v17.4s,v17.s[1]
     22  shl v3.2d,v3.2d,#1
     25  orr v3.16b,v3.16b,v18.16b //H<<<=1
     30  ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
     31  pmull v0.1q,v20.1d,v20.1d
     33  pmull2 v2.1q,v20.2d,v20.2d
     34  pmull v1.1q,v16.1d,v16.1d
     36  ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
         […]
aesv8-armx.S:
      1  /* Do not modify. This file is auto-generated from aesv8-armx.pl. */
      5  .arch armv8-a+crypto
     10  .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
     19  // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
     20  stp x29,x30,[sp,#-16]!
     22  mov x3,#-1
     27  mov x3,#-2
     38  eor v0.16b,v0.16b,v0.16b
     50  ext v5.16b,v0.16b,v3.16b,#12
     52  aese v6.16b,v0.16b
         […]
aes-gcm-armv8_64.S:
      1  /* Do not modify. This file is auto-generated from aes-gcm-armv8_64.pl. */
      5  .arch armv8-a+crypto
     13  stp x19, x20, [sp, #-112]!
     41  sub x5, x5, #1 //byte_len - 1
     48  fmov d1, x10 //CTR block 1
     51  add w12, w12, #1 //increment rev_ctr32
     55  rev w9, w12 //CTR block 1
     56  add w12, w12, #1 //CTR block 1
     59  orr x9, x11, x9, lsl #32 //CTR block 1
     60  …ld1 { v0.16b}, [x16] //special case vector load initial counter so we …
         […]
vpaes-armv8.S:
      1  /* Do not modify. This file is auto-generated from vpaes-armv8.pl. */
     94  .size _vpaes_consts,.-_vpaes_consts
     99  // Fills register %r10 -> .aes_consts (so you can -fPIC)
    100  // and %xmm9-%xmm15 as specified below.
    111  .size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
    116  // AES-encrypt %xmm0.
    120  // %xmm9-%xmm15 as in _vpaes_preheat
    124  // Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
    125  // Preserves %xmm6 - %xmm8 so you get some local vectors
    137  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
         […]
arm64cpuid.S:
      1  /* Do not modify. This file is auto-generated from arm64cpuid.pl. */
      5  .arch armv8-a+crypto
     14  .size _armv7_neon_probe,.-_armv7_neon_probe
     26  .size _armv7_tick,.-_armv7_tick
     32  aese v0.16b, v0.16b
     34  .size _armv8_aes_probe,.-_armv8_aes_probe
     42  .size _armv8_sha1_probe,.-_armv8_sha1_probe
     48  sha256su0 v0.4s, v0.4s
     50  .size _armv8_sha256_probe,.-_armv8_sha256_probe
     56  pmull v0.1q, v0.1d, v0.1d
         […]
/freebsd/contrib/file/tests/
Makefile.am:
      3  test_CPPFLAGS = -I$(top_builddir)/src
      6  android-vdex-1.result \
      7  android-vdex-1.testfile \
      8  android-vdex-2.result \
      9  android-vdex-2.testfile \
     26  CVE-2014-1943.result \
     27  CVE-2014-1943.testfile \
     28  dsd64-dff.result \
     29  dsd64-dff.testfile \
     30  dsd64-dsf.result \
         […]
/freebsd/crypto/openssl/crypto/aes/asm/
vpaes-ppc.pl:
      2  # Copyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.
     11  ## Constant-time SSSE3 AES core implementation.
     21  # 128-bit key.
     23  # aes-ppc.pl this
     32  # it in-line. Secondly it, being transliterated from
     33  # vpaes-x86_64.pl, relies on "nested inversion" better suited
     60  $FRAME=6*$SIZE_T+13*16; # 13*16 is for v20-v31 offload
     62  $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
     63  ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
     64  ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
         […]
vpaes-armv8.pl:
      2  # Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
     11  ## Constant-time SSSE3 AES core implementation.
     24  # SoC based on Cortex-A53 that doesn't have crypto extensions.
     26  #             CBC enc      ECB enc/dec(*)   [bit-sliced enc/dec]
     27  # Cortex-A53  21.5         18.1/20.6        [17.5/19.8        ]
     28  # Cortex-A57  36.0(**)     20.4/24.9(**)    [14.4/16.6        ]
     29  # X-Gene      45.9(**)     45.8/57.7(**)    [33.1/37.6(**)    ]
     37  # (**) these results are worse than scalar compiler-generated
     38  # code, but it's constant-time and therefore preferred;
     46  $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
         […]
/freebsd/contrib/bearssl/src/rsa/
rsa_i15_privexp.c (hits in br_rsa_i15_compute_privexp):
     33   * We want to invert e modulo phi = (p-1)(q-1). This first
     39   * modulo phi, but this would involve assembling three modulus-wide
     40   * values (phi/4, 1 and e) and calling moddiv, that requires
     42   * slightly more than 3 kB of stack space for RSA-4096. This
     47   * - We compute phi = k*e + r (Euclidean division of phi by e).
     50   * enforce non-ridiculously-small factors.
     52   * - We find small u, v such that u*e - v*r = 1 (using a
     56   * - Solution is: d = u + v*k
     58   * the above implies d < r + e*((phi-r)/e) = phi
     65  uint32_t r, a, b, u0, v0, u1, v1, he, hr;
         […]
rsa_i31_privexp.c (hits in br_rsa_i31_compute_privexp):
     33   * We want to invert e modulo phi = (p-1)(q-1). This first
     39   * modulo phi, but this would involve assembling three modulus-wide
     40   * values (phi/4, 1 and e) and calling moddiv, that requires
     42   * slightly more than 3 kB of stack space for RSA-4096. This
     47   * - We compute phi = k*e + r (Euclidean division of phi by e).
     50   * enforce non-ridiculously-small factors.
     52   * - We find small u, v such that u*e - v*r = 1 (using a
     56   * - Solution is: d = u + v*k
     58   * the above implies d < r + e*((phi-r)/e) = phi
     65  uint32_t r, a, b, u0, v0, u1, v1, he, hr;
         […]
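The trick described in both rsa_i15_privexp.c and rsa_i31_privexp.c above can be checked with ordinary integers. The following is a minimal sketch of the identity only, with hypothetical toy parameters and a brute-force search standing in for BearSSL's constant-time binary-GCD search; it is not the library code.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical toy RSA parameters (far too small for real use). */
        uint64_t p = 61, q = 53, e = 17;
        uint64_t phi = (p - 1) * (q - 1);      /* phi = 3120 */
        uint64_t k = phi / e, r = phi % e;     /* phi = k*e + r */

        /* Find small u, v with u*e - v*r = 1; brute force replaces the
         * constant-time search done by br_rsa_i15/i31_compute_privexp(). */
        uint64_t u, v = 0;
        for (u = 1; u <= r; u++) {
            if ((u * e - 1) % r == 0) {
                v = (u * e - 1) / r;
                break;
            }
        }

        /* d = u + v*k, and d*e = u*e + v*(phi - r) = 1 + v*phi, so d*e == 1 (mod phi). */
        uint64_t d = u + v * k;
        printf("d = %ju, (d * e) %% phi = %ju\n",
            (uintmax_t)d, (uintmax_t)((d * e) % phi));   /* prints d = 2753, ... = 1 */
        return 0;
    }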
/freebsd/crypto/openssl/crypto/des/
cfb64ede.c (hits in DES_ede3_cfb64_encrypt):
      2   * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
     29  register DES_LONG v0, v1;
     37  while (l--) {
     39  c2l(iv, v0);
     42  ti[0] = v0;
     43  ti[1] = v1;
     45  v0 = ti[0];
     46  v1 = ti[1];
     49  l2c(v0, iv);
     56  n = (n + 1) & 0x07;
         […]
cfb_enc.c (hits in DES_cfb_encrypt):
      2   * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
     27   * Until Aug 1 2003 this function did not correctly implement CFB-r, so it
     34  register DES_LONG d0, d1, v0, v1;
     48  /* but 16-bit platforms... */
     55  c2l(iv, v0);
     59  l -= n;
     60  ti[0] = v0;
     61  ti[1] = v1;
     66  d1 ^= ti[1];
     70   * 30-08-94 - eay - changed because l>>32 and l<<32 are bad under
         […]
cfb64enc.c (hits in DES_cfb64_encrypt):
      2   * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
     28  register DES_LONG v0, v1;
     36  while (l--) {
     38  c2l(iv, v0);
     39  ti[0] = v0;
     41  ti[1] = v1;
     44  v0 = ti[0];
     45  l2c(v0, iv);
     46  v0 = ti[1];
     47  l2c(v0, iv);
         […]
ofb_enc.c (hits in DES_ofb_encrypt):
      2   * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
     28  register DES_LONG d0, d1, vv0, vv1, v0, v1, n = (numbits + 7) / 8;
     42  mask1 = (1L << (num - 32)) - 1;
     47  mask0 = (1L << num) - 1;
     52  c2l(iv, v0);
     54  ti[0] = v0;
     55  ti[1] = v1;
     56  while (l-- > 0) {
     57  ti[0] = v0;
     58  ti[1] = v1;
         […]
ofb64ede.c (hits in DES_ede3_ofb64_encrypt):
      2   * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
     28  register DES_LONG v0, v1;
     38  c2l(iv, v0);
     40  ti[0] = v0;
     41  ti[1] = v1;
     43  l2c(v0, dp);
     45  while (l--) {
     47  /* ti[0]=v0; */
     48  /* ti[1]=v1; */
     50  v0 = ti[0];
         […]
/freebsd/lib/libc/quad/
muldi3.c:
      1  /*-
      2   * SPDX-License-Identifier: BSD-3-Clause
      8   * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     14   * 1. Redistributions of source code must retain the above copyright
     48   * v = 2^n v1 * v0
     52   * uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
     53   *    = 2^2n u1 v1  +  2^n (u1 v0 + v1 u0)      +  u0 v0
     56   * and add 2^n u0 v0 to the last term and subtract it from the middle.
     60   * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
     61   * (2^n + 1) (u0 v0)
         […]
/freebsd/sys/libkern/arm/
muldi3.c:
      3  /*-
      4   * SPDX-License-Identifier: BSD-3-Clause
     10   * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     16   * 1. Redistributions of source code must retain the above copyright
     50   * v = 2^n v1 * v0
     54   * uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
     55   *    = 2^2n u1 v1  +  2^n (u1 v0 + v1 u0)      +  u0 v0
     58   * and add 2^n u0 v0 to the last term and subtract it from the middle.
     62   * (2^n) (u1 v0 - u1 v1 + u0 v1 - u0 v0) +
     63   * (2^n + 1) (u0 v0)
         […]
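Both copies of muldi3.c above carry the same derivation: split each quad operand into 2^n-weighted halves and build the product from partial products. A small sketch of that starting identity for the usual n = 32 case; the names muldi3_sketch and mul32x32 are illustrative, not the libc/libkern routine, and only the low 64 bits of the product are kept.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mul32x32(uint32_t a, uint32_t b)
    {
        return (uint64_t)a * b;            /* exact 32x32 -> 64 product */
    }

    /* Low 64 bits of u*v, assembled from halves as in the comment:
     * u = 2^32*u1 + u0, v = 2^32*v1 + v0,
     * u*v = 2^64*u1*v1 + 2^32*(u1*v0 + u0*v1) + u0*v0. */
    static uint64_t muldi3_sketch(uint64_t u, uint64_t v)
    {
        uint32_t u1 = (uint32_t)(u >> 32), u0 = (uint32_t)u;
        uint32_t v1 = (uint32_t)(v >> 32), v0 = (uint32_t)v;
        uint64_t low = mul32x32(u0, v0);
        uint64_t mid = mul32x32(u1, v0) + mul32x32(u0, v1);  /* may wrap, harmless mod 2^64 */

        /* The 2^64*u1*v1 term falls entirely outside the low 64 bits. */
        return low + (mid << 32);
    }

    int main(void)
    {
        uint64_t u = 0x0123456789abcdefULL, v = 0xfedcba9876543210ULL;
        printf("%d\n", muldi3_sketch(u, v) == u * v);   /* prints 1 */
        return 0;
    }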
/freebsd/contrib/libucl/klib/
kvec.h:
     39  return 1;
     44  2008-09-22 (0.1.0):
     55  #define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, +…
     61  #define kv_pop(v) ((v).a[--(v).n])
     77  size_t _ts = ((v).m > 1 ? (v).m * kv_grow_factor : 2); \
     87  #define kv_copy_safe(type, v1, v0, el) do { \
     88  if ((v1).m < (v0).n) kv_resize_safe(type, v1, (v0).n, el); \
     89  (v1).n = (v0).n; \
     90  memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \
    104  memmove((v).a + 1, (v).a, sizeof(type) * (v).n); \
         […]
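The kv_roundup32 macro shown (truncated) above is the usual bit trick for rounding a 32-bit value up to the next power of two, e.g. when growing an array capacity geometrically. A standalone function version, purely for illustration; roundup32 is a hypothetical name, not part of kvec.h.

    #include <stdint.h>

    /* Round x up to the next power of two: smear the top set bit rightward,
     * then add one.  roundup32(5) == 8, roundup32(16) == 16, roundup32(17) == 32. */
    static uint32_t roundup32(uint32_t x)
    {
        x--;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return x + 1;
    }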
/freebsd/contrib/llvm-project/clang/lib/Headers/
velintrin_approx.h:
      1  /*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
      5   * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      7   *===-----------------------------------------------------------------------===
     12  static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
     19  v2 = _vel_vfmuls_vvvl(v0, v3, l);
     20  v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
     22  v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
     23  v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
     24  return v0;
     27  static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
         […]
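_vel_approx_vfdivs_vvvl appears to build single-precision division out of a reciprocal estimate (v3) refined by fused multiply and negative-multiply-subtract steps. A scalar sketch of that general shape, assuming some low-precision reciprocal estimate is available; approx_recip below is a placeholder, not a VEL intrinsic.

    #include <stdio.h>

    /* Placeholder for a hardware low-precision reciprocal-estimate instruction. */
    static float approx_recip(float y)
    {
        return 1.0f / y;
    }

    static float approx_div(float x, float y)
    {
        float r = approx_recip(y);      /* r ~= 1/y */
        r = r * (2.0f - y * r);         /* one Newton-Raphson step: error is squared */
        float q = x * r;                /* first quotient estimate */
        q = q + r * (x - y * q);        /* fold the remaining residual back in */
        return q;
    }

    int main(void)
    {
        printf("%f\n", approx_div(355.0f, 113.0f));   /* ~= 3.141593 */
        return 0;
    }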
/freebsd/sys/contrib/openzfs/module/icp/asm-aarch64/blake3/
b3_aarch64_sse2.S:
      9   * or https://opensource.org/licenses/CDDL-1.0.
     23   * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
     24   * Copyright (c) 2019-2022 Samuel Neves and Matthew Krupcale
     25   * Copyright (c) 2022-2023 Tino Reichardt <milky-zfs@mcmilk.de>
     27   * This is converted assembly: SSE2 -> ARMv8-A
     28   * Used tools: SIMDe https://github.com/simd-everywhere/simde
     31   * see: https://github.com/mcmilk/BLAKE3-tests/blob/master/contrib/simde.sh
     68  .cfi_offset w19, -1
         […]
b3_aarch64_sse41.S:
      9   * or https://opensource.org/licenses/CDDL-1.0.
     23   * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
     24   * Copyright (c) 2019-2022 Samuel Neves
     25   * Copyright (c) 2022-2023 Tino Reichardt <milky-zfs@mcmilk.de>
     27   * This is converted assembly: SSE4.1 -> ARMv8-A
     28   * Used tools: SIMDe https://github.com/simd-everywhere/simde
     31   * see: https://github.com/mcmilk/BLAKE3-test
         […]
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/
RISCVInstrInfoVVLPatterns.td:
      1  //===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
      5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      7  //===----------------------------------------------------------------------===//
     18  //===----------------------------------------------------------------------===//
     20  //===----------------------------------------------------------------------===//
     22  //===----------------------------------------------------------------------===//
     24  def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
     31  def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
     40  def SDT_RISCVVNBinOp_RM_VL : SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisInt<0>,
     42  SDTCisSameNumEltsAs<0, 1>,
         […]
/freebsd/contrib/bearssl/src/symcipher/
poly1305_ctmulq.c:
     52   * The "accumulator" word is nominally a 130-bit value. We split it into
     53   * words of 44 bits, each held in a 64-bit variable.
     64   * We want to reduce that value modulo p = 2^130-5, so W^3 = 20 mod p,
     80   * bits of u1. Note that since r is clamped down to a 124-bit value, the
     95   * need to compute the additions (for the bx values) over 128-bit
     96   * quantities; we can stick to 64-bit computations.
     99   * Since the 128-bit result of a 64x64 multiplication is actually
    100   * represented over two 64-bit registers, it is cheaper to arrange for
    102   * that 64-bit boundary. This is done by left shifting the rx, ux and tx
    129  r1 = r[1];   (in poly1305_inner_big)
         […]
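Where the constant 20 in that comment comes from, as a short worked step: with 44-bit limbs the weight one position above the top limb is W^3 = 2^132, and reducing it modulo p = 2^130 - 5 gives

    W^3 = 2^132 = 4 * 2^130 = 4 * (2^130 - 5) + 20,  so  W^3 mod p = 20,

which is why contributions at weight W^3 and above can be folded back into the low limbs after multiplying by 20.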
/freebsd/crypto/openssl/crypto/siphash/
siphash.c:
      2   * Copyright 2017-2022 The OpenSSL Project Authors. All Rights Reserved.
     14  Copyright (c) 2012-2016 Jean-Philippe Aumasson
     15  Copyright (c) 2012-2014 Daniel J. Bernstein
     32  #define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
     36  (p)[1] = (uint8_t)((v) >> 8); \
     45  (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \
     52  v0 += v1; \
     54  v1 ^= v0; \
     55  v0 = ROTL(v0, 32); \
     59  v0 += v3; \
         […]
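The excerpt shows the ROTL macro and fragments of the SIPROUND macro. For reference, one full SipHash round over the four 64-bit state words looks like the following; this is a sketch written from the published SipHash specification, not copied from this file.

    #include <stdint.h>
    #include <stdio.h>

    #define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))

    /* One SipHash round (SIPROUND) on state words v0..v3, per the SipHash paper. */
    static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2, uint64_t *v3)
    {
        *v0 += *v1; *v1 = ROTL(*v1, 13); *v1 ^= *v0; *v0 = ROTL(*v0, 32);
        *v2 += *v3; *v3 = ROTL(*v3, 16); *v3 ^= *v2;
        *v0 += *v3; *v3 = ROTL(*v3, 21); *v3 ^= *v0;
        *v2 += *v1; *v1 = ROTL(*v1, 17); *v1 ^= *v2; *v2 = ROTL(*v2, 32);
    }

    int main(void)
    {
        uint64_t v0 = 1, v1 = 2, v2 = 3, v3 = 4;   /* arbitrary demo state */
        sipround(&v0, &v1, &v2, &v3);
        printf("%016llx\n", (unsigned long long)v0);
        return 0;
    }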
/freebsd/crypto/openssl/crypto/bn/asm/
mips.pl:
      2  # Copyright 2010-2021 The OpenSSL Project Authors. All Rights Reserved.
     21  # This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
     26  # because 5.x kernels put R4x00 CPU into 32-bit mode and all those
     27  # 64-bit instructions (daddu, dmultu, etc.) found below gonna only
     28  # cause illegal instruction exception:-(
     33  # I mean as long as -mmips-as is specified or is the default option,
     48  # Adapt the module even for 32-bit ABIs and other OSes. The former was
     49  # achieved by mechanical replacement of 64-bit arithmetic instructions
     50  # such as dmultu, daddu, etc. with their 32-bit counterparts and
     52  # >3x performance improvement naturally does not apply to 32-bit code
         […]