/linux/arch/arm64/crypto/
sm4-ce-asm.h
    11  #define SM4_CRYPT_BLK_BE(b0)            \       argument
    12          sm4e    b0.4s, v24.4s;          \
    13          sm4e    b0.4s, v25.4s;          \
    14          sm4e    b0.4s, v26.4s;          \
    15          sm4e    b0.4s, v27.4s;          \
    16          sm4e    b0.4s, v28.4s;          \
    17          sm4e    b0.4s, v29.4s;          \
    18          sm4e    b0.4s, v30.4s;          \
    19          sm4e    b0.4s, v31.4s;          \
    20          rev64   b0.4s, b0.4s;           \
    [all …]
sm4-ce-gcm-core.S
   109  #define SM4_CRYPT_PMUL_128x128_BLK(b0, r0, r1, m0, m1, T0, T1)  \      argument
   110          rev32   b0.16b, b0.16b;         \
   112          sm4e    b0.4s, v24.4s;          \
   114          sm4e    b0.4s, v25.4s;          \
   116          sm4e    b0.4s, v26.4s;          \
   118          sm4e    b0.4s, v27.4s;          \
   120          sm4e    b0.4s, v28.4s;          \
   122          sm4e    b0.4s, v29.4s;          \
   124          sm4e    b0.4s, v30.4s;          \
   126          sm4e    b0.4s, v31.4s;          \
   [all …]
sm4-neon-core.S
   131  #define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3)       \       argument
   137          ROUND4(0, b0, b1, b2, b3);      \
   138          ROUND4(1, b1, b2, b3, b0);      \
   139          ROUND4(2, b2, b3, b0, b1);      \
   140          ROUND4(3, b3, b0, b1, b2);      \
   144          rev32   b0.16b, b0.16b;         \
   149          rotate_clockwise_4x4(b0, b1, b2, b3);   \
   154  #define SM4_CRYPT_BLK4(b0, b1, b2, b3)  \       argument
   155          rev32   b0.16b, b0.16b;         \
   159          SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
aes-neonbs-core.S
    26  .macro  in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    29          eor     \b3, \b3, \b0
    31          eor     \b5, \b5, \b0
    42  .macro  out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    43          eor     \b0, \b0, \b6
    46          eor     \b2, \b2, \b0
    56  .macro  inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
    64          eor     \b2, \b2, \b0
    67          eor     \b0, \b0, \b6
    71  .macro  inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
    [all …]
ghash-ce-core.S
   270          pmull   XL2.1q, SHASH.1d, IN1.1d        // a0 * b0
   271          pmull   XM2.1q, SHASH2.1d, TT4.1d       // (a1 + a0)(b1 + b0)
   275          pmull   XL3.1q, HH.1d, XL3.1d           // a0 * b0
   276          pmull2  XM3.1q, SHASH2.2d, TT3.2d       // (a1 + a0)(b1 + b0)
   285          pmull   XL3.1q, HH3.1d, IN1.1d          // a0 * b0
   286          pmull   XM3.1q, HH34.1d, T2.1d          // (a1 + a0)(b1 + b0)
   299          pmull   XL.1q, HH4.1d, XL.1d            // a0 * b0
   300          pmull2  XM.1q, HH34.2d, T1.2d           // (a1 + a0)(b1 + b0)
   332          __pmull_\pn     XL, XL, SHASH           // a0 * b0
   333          __pmull_\pn     XM, T1, SHASH2          // (a1 + a0)(b1 + b0)
   [all …]
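The "a0 * b0" and "(a1 + a0)(b1 + b0)" comments mark the Karatsuba split used for each 128x128-bit carryless multiply, so only three 64x64 pmulls are needed instead of four. As a reminder of the identity (my own note, not text from the file), over GF(2)[x] with a = a1*x^64 + a0 and b = b1*x^64 + b0:

    a*b = a1*b1*x^128 + [ (a1 + a0)(b1 + b0) + a1*b1 + a0*b0 ]*x^64 + a0*b0

Additions here are XORs, so the middle coefficient falls out of the single extra product (a1 + a0)(b1 + b0) once a1*b1 and a0*b0 are known.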
/linux/arch/s390/crypto/
chacha-s390.S
   441  #define B0      %v1     macro
   495          VLR     B0,K1
   526          VAF     A0,A0,B0
   551          VX      B0,B0,C0
   557          VERLLF  B0,B0,12
   564          VAF     A0,A0,B0
   589          VX      B0,B0,C0
   595          VERLLF  B0,B0,7
   608          VSLDB   B0,B0,B0,4
   621          VAF     A0,A0,B0
   [all …]
/linux/arch/arm/nwfpe/
softfloat-macros
   339  value formed by concatenating `b0' and `b1'.  Addition is modulo 2^128, so
   346      bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
   352      *z0Ptr = a0 + b0 + ( z1 < a1 );
   359  192-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
   370      bits64 b0,
   385      z0 = a0 + b0;
   397  Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
   406      bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
   410      *z0Ptr = a0 - b0 - ( a1 < b1 );
   416  Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
   [all …]
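The add128/sub128 hits above show the carry and borrow tricks: `z1 < a1` detects a carry out of the low words, `a1 < b1` a borrow. A minimal self-contained C sketch of the same idea, using uint64_t in place of the file's bits64 typedef and keeping softfloat's convention that index 0 is the high half:

    #include <stdint.h>

    /* 128-bit add: a carry out of the low words shows up as unsigned wraparound. */
    static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
            uint64_t z1lo = a1 + b1;        /* low 64 bits             */
            *z1 = z1lo;
            *z0 = a0 + b0 + (z1lo < a1);    /* high 64 bits plus carry */
    }

    /* 128-bit subtract: borrow from the high words when a1 < b1. */
    static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
            *z1 = a1 - b1;
            *z0 = a0 - b0 - (a1 < b1);
    }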
/linux/lib/crypto/
curve25519-hacl64.c
    44          u64 b0 = b[0];                          in modulo_carry_top() local
    46          u64 b0_ = b0 + 19 * (b4 >> 51);         in modulo_carry_top()
   129          u64 b0;                                 in fmul_shift_reduce() local
   151          b0 = output[0];                         in fmul_shift_reduce()
   152          output[0] = 19 * b0;                    in fmul_shift_reduce()
   190          u128 b0;                                in fmul_fmul() local
   201          b0 = t[0];                              in fmul_fmul()
   203          b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); in fmul_fmul()
   248          u128 b0;                                in fsquare_fsquare_() local
   258          b0 = tmp[0];                            in fsquare_fsquare_()
   [all …]
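The `b0 + 19 * (b4 >> 51)` pattern comes from the radix-2^51 representation of field elements: with five 51-bit limbs, 2^255 ≡ 19 (mod 2^255 − 19), so anything that overflows the top limb folds back into limb 0 multiplied by 19. A minimal sketch of that carry step under this assumption (the helper name and the explicit masking are mine, not the file's):

    #include <stdint.h>

    /* x = sum(b[i] * 2^(51*i)); fold the top limb's overflow back into limb 0,
     * using 2^255 == 19 (mod 2^255 - 19). */
    static void carry_top(uint64_t b[5])
    {
            uint64_t c = b[4] >> 51;          /* bits at 2^255 and above */
            b[4] &= (1ULL << 51) - 1;         /* keep the low 51 bits    */
            b[0] += 19 * c;
    }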
/linux/include/media/
v4l2-h264.h
    26   *      ordered P/B0/B1 lists
    29   * This object stores the context of the P/B0/B1 reference list builder.
    55   * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
    58   * @b0_reflist: 32 sized array used to store the B0 reference list. Each entry
    63   * This functions builds the B0/B1 reference lists. This procedure is described
    66   * need to pass B0/B1 reference lists to the hardware.
/linux/drivers/gpu/drm/i915/
intel_step.c
    36          [1] = { COMMON_STEP(B0) },
    53          [3] = { COMMON_STEP(B0) },
    62          [1] = { COMMON_STEP(B0) },
    67          [1] = { COMMON_STEP(B0) },
    75          [1] = { COMMON_STEP(B0) },
    80          [1] = { COMMON_STEP(B0) },
    86          [1] = { COMMON_STEP(B0) },
    92          [0x4] = { COMMON_STEP(B0) },
    99          [0x4] = { COMMON_STEP(B0) },
   107          [0x4] = { COMMON_STEP(B0) },
   [all …]
/linux/drivers/crypto/nx/
nx-aes-ccm.c
   134                          unsigned int cryptlen, u8 *b0)          in generate_b0() argument
   138          memcpy(b0, iv, 16);                                     in generate_b0()
   140          lp = b0[0];                                             in generate_b0()
   144          *b0 |= (8 * ((m - 2) / 2));                             in generate_b0()
   148          *b0 |= 64;                                              in generate_b0()
   150          return set_msg_len(b0 + 16 - l, cryptlen, l);           in generate_b0()
   164          u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;     in generate_pat() local
   186          b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;          in generate_pat()
   189           * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,     in generate_pat()
   191          b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;          in generate_pat()
   [all …]
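The generate_b0() hits are encoding the CCM B0 flags byte: `*b0 |= 8 * ((m - 2) / 2)` packs the tag length and `*b0 |= 64` sets the Adata bit. For reference, a standalone sketch of that flags-byte encoding as RFC 3610 defines it (an illustration, not the driver's helper):

    #include <stdint.h>

    /* CCM B0 flags byte (RFC 3610, section 2.2):
     *   bit 6     : Adata, set when associated data is present
     *   bits 5..3 : (M - 2) / 2, where M is the tag length in bytes
     *   bits 2..0 : L - 1, where L is the size of the length field in bytes
     */
    static uint8_t ccm_b0_flags(int have_aad, unsigned int taglen_m, unsigned int l)
    {
            return (have_aad ? 0x40 : 0x00) |
                   (((taglen_m - 2) / 2) << 3) |
                   (l - 1);
    }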
/linux/drivers/net/wireless/intel/iwlwifi/cfg/
ax210.c
    29  #define IWL_SO_A_JF_B_FW_PRE    "iwlwifi-so-a0-jf-b0"
    30  #define IWL_SO_A_HR_B_FW_PRE    "iwlwifi-so-a0-hr-b0"
    35  #define IWL_MA_A_HR_B_FW_PRE    "iwlwifi-ma-a0-hr-b0"
    39  #define IWL_MA_B_HR_B_FW_PRE    "iwlwifi-ma-b0-hr-b0"
    40  #define IWL_MA_B_GF_A_FW_PRE    "iwlwifi-ma-b0-gf-a0"
    41  #define IWL_MA_B_GF4_A_FW_PRE   "iwlwifi-ma-b0-gf4-a0"
    42  #define IWL_MA_B_MR_A_FW_PRE    "iwlwifi-ma-b0-mr-a0"
   306  MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
   307  MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
bz.c
    29  #define IWL_BZ_A_HR_B_FW_PRE    "iwlwifi-bz-a0-hr-b0"
    32  #define IWL_BZ_A_FM_B_FW_PRE    "iwlwifi-bz-a0-fm-b0"
    34  #define IWL_BZ_A_FM4_B_FW_PRE   "iwlwifi-bz-a0-fm4-b0"
    35  #define IWL_GL_B_FM_B_FW_PRE    "iwlwifi-gl-b0-fm-b0"
/linux/drivers/gpu/drm/xe/
xe_wa_oob.rules
     2  14014475959     GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
    16  16020292621     GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
    17  14018913170     GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
    23  14019882105     GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
xe_step.c
    41          [1] = { COMMON_STEP(B0) },
    46          [1] = { COMMON_STEP(B0) },
    52          [0x4] = { COMMON_STEP(B0) },
    64          [0x4] = { COMMON_STEP(B0) },
    80          [0x4] = { COMMON_STEP(B0) },
    86          [0x4] = { COMMON_STEP(B0) },
   218   * all platforms: major steppings (A0, B0, etc.) are 4 apart, with minor
/linux/arch/riscv/crypto/
chacha-riscv64-zvkb.S
    76  .macro  chacha_round    a0, b0, c0, d0, a1, b1, c1, d1, \
    79          vadd.vv         \a0, \a0, \b0
    97          vxor.vv         \b0, \b0, \c0
   101          vror.vi         \b0, \b0, 32 - 12
   107          vadd.vv         \a0, \a0, \b0
   125          vxor.vv         \b0, \b0, \c0
   129          vror.vi         \b0, \b0, 32 - 7
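These vector adds, xors and rotates are the standard ChaCha quarter-round; the `32 - 12` and `32 - 7` right-rotates are just left-rotates by 12 and 7 expressed with vror. For reference, a scalar C sketch of one quarter-round (my own illustration, not code from this file):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha quarter-round over four state words. */
    static void chacha_quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b;  *d ^= *a;  *d = rol32(*d, 16);
            *c += *d;  *b ^= *c;  *b = rol32(*b, 12);
            *a += *b;  *d ^= *a;  *d = rol32(*d, 8);
            *c += *d;  *b ^= *c;  *b = rol32(*b, 7);
    }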
/linux/drivers/media/dvb-frontends/
lgdt3306a.c
  1868          0x0001, /* 1'b1 1'b1 1'b0 1'b0 AUTORPTRS */
  1869          0x0002, /* NI2CRPTEN 1'b0 1'b0 1'b0 SPECINVAUT */
  1876          0x0009, /* 1'b0 1'b0 1'b0 STDOPDETTMODE[2:0] STDOPDETCMODE[1:0] 00011110 */
  1885          0x0013, /* AGCRFFIXB AGCIFFIXB AGCLOCKDETRNGSEL[1:0] 1'b1 1'b0 1'b0 1'b0 11101000 */
  1893          0x001d, /* 1'b0 1'b1 1'b0 1'b1 AICCVSYNC */
  1894          0x001e, /* AICCALPHA[3:0] 1'b1 1'b0 1'b1 1'b0 01111010 */
  1913          0x0031, /* 1'b0 1'b1 1'b0 1'b0 x DAGC1STER */
  1931          0x0050, /* 1'b0 1'b1 1'b1 1'b0 MSECALCDA */
  1984          0x1000, /* 1'b0 WODAGCOU */
  1988          0x101a, /* x 1'b1 1'b0 1'b0 x QMDQAMMODE[2:0] x100x010 */
  [all …]
/linux/drivers/crypto/ccree/
cc_aead.h
    32  /* CCM B0 and CTR_COUNT constants. */
    33  #define CCM_BLOCK_NONCE_OFFSET  1  /* Nonce offset inside B0 and CTR_COUNT */
    34  #define CCM_BLOCK_NONCE_SIZE    3  /* Nonce size inside B0 and CTR_COUNT */
    35  #define CCM_BLOCK_IV_OFFSET     4  /* IV offset inside B0 and CTR_COUNT */
    36  #define CCM_BLOCK_IV_SIZE       8  /* IV size inside B0 and CTR_COUNT */
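These offsets describe a 16-byte B0/CTR_COUNT block laid out as a flags byte, a 3-byte nonce, and an 8-byte IV, leaving 4 bytes for the length field or block counter (the RFC 4309 style of CCM nonce). A hedged sketch of assembling such a block from the constants above (the helper and its flags argument are mine, not ccree code):

    #include <stdint.h>
    #include <string.h>

    #define CCM_BLOCK_NONCE_OFFSET  1   /* Nonce offset inside B0 and CTR_COUNT */
    #define CCM_BLOCK_NONCE_SIZE    3   /* Nonce size inside B0 and CTR_COUNT   */
    #define CCM_BLOCK_IV_OFFSET     4   /* IV offset inside B0 and CTR_COUNT    */
    #define CCM_BLOCK_IV_SIZE       8   /* IV size inside B0 and CTR_COUNT      */

    /* Illustrative layout only: [flags][nonce:3][iv:8][length or counter:4]. */
    static void ccm_block_layout(uint8_t block[16], uint8_t flags,
                                 const uint8_t *nonce, const uint8_t *iv)
    {
            memset(block, 0, 16);
            block[0] = flags;
            memcpy(block + CCM_BLOCK_NONCE_OFFSET, nonce, CCM_BLOCK_NONCE_SIZE);
            memcpy(block + CCM_BLOCK_IV_OFFSET, iv, CCM_BLOCK_IV_SIZE);
    }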
/linux/fs/f2fs/
hash.c
    28          __u32 b0 = buf[0], b1 = buf[1];                         in TEA_transform() local
    34                  b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);  in TEA_transform()
    35                  b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);  in TEA_transform()
    38          buf[0] += b0;                                           in TEA_transform()
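The hits skip the surrounding round loop. For context, a self-contained sketch of this TEA-style mixing step as it appears in the ext4/f2fs name-hash lineage, assuming the customary 16 rounds and 0x9E3779B9 delta (check the full file before relying on the details):

    #include <stdint.h>

    #define TEA_DELTA 0x9E3779B9u

    /* Mix two words of buf under a four-word key for 16 rounds. */
    static void tea_transform(uint32_t buf[2], const uint32_t in[4])
    {
            uint32_t sum = 0;
            uint32_t b0 = buf[0], b1 = buf[1];
            uint32_t a = in[0], b = in[1], c = in[2], d = in[3];
            int n = 16;

            do {
                    sum += TEA_DELTA;
                    b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
                    b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
            } while (--n);

            buf[0] += b0;
            buf[1] += b1;
    }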
/linux/arch/arm/crypto/
aes-neonbs-core.S
    80  .macro  in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    83          veor    \b3, \b3, \b0
    85          veor    \b5, \b5, \b0
    96  .macro  out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    97          veor    \b0, \b0, \b6
   100          veor    \b2, \b2, \b0
   110  .macro  inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
   118          veor    \b2, \b2, \b0
   121          veor    \b0, \b0, \b6
   125  .macro  inv_out_bs_ch, b6, b5, b0, b3, b7, b1, b4, b2
   [all …]
/linux/fs/reiserfs/
hashes.c
    28          u32 b0, b1;                                             \
    30          b0 = h0;                                                \
    36                  b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \
    37                  b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \
    40          h0 += b0;                                               \
/linux/arch/powerpc/platforms/powernv/
opal-lpc.c
   229           * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that   in lpc_debug_read()
   234           *       32-bit: B0 B1 B2 B3     B0B1B2B3     B3B2B1B0               in lpc_debug_read()
   235           *       16-bit: B0 B1           0000B0B1     B1B00000               in lpc_debug_read()
   236           *        8-bit: B0              000000B0     B0000000               in lpc_debug_read()
   314           *       32-bit: B0 B1 B2 B3     B3B2B1B0     B0B1B2B3               in lpc_debug_write()
   315           *       16-bit: B0 B1           0000B1B0     0000B0B1               in lpc_debug_write()
   316           *        8-bit: B0              000000B0     000000B0               in lpc_debug_write()
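The comment tables show the same LPC bytes landing in different lanes of the 32-bit register depending on access size and which host convention applies; for an 8-bit read, for instance, B0 comes back either as the low byte (000000B0) or as the high byte (B0000000). A small illustrative sketch of pulling the byte back out in both cases (not the driver's actual code):

    #include <stdint.h>

    /* Extract B0 from a 32-bit register for an 8-bit LPC read, depending on
     * whether the byte ended up in the top lane (B0000000) or the bottom
     * lane (000000B0). */
    static uint8_t lpc_extract_b0(uint32_t raw, int byte_in_top_lane)
    {
            return byte_in_top_lane ? (uint8_t)(raw >> 24) : (uint8_t)(raw & 0xff);
    }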
/linux/include/linux/soc/pxa/
cpu.h
    18   * PXA210        B0      0x69052922      0x2926C013
    25   * PXA250        B0      0x69052902      0x29264013
    33   * PXA26x        B0      0x69052D05      0x59264013
    37   * PXA27x        B0      0x69054112      0x29265013
    50   * PXA930        B0      0x69056835      0x5E643013
    55   * PXA935        B0      0x56056936      0x6E653013
/linux/crypto/
aes_generic.c
  1179          u32 b0[4], b1[4];                                       in crypto_aes_encrypt() local
  1183          b0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);       in crypto_aes_encrypt()
  1184          b0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);   in crypto_aes_encrypt()
  1185          b0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);   in crypto_aes_encrypt()
  1186          b0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);  in crypto_aes_encrypt()
  1189          f_nround(b1, b0, kp);                                   in crypto_aes_encrypt()
  1190          f_nround(b0, b1, kp);                                   in crypto_aes_encrypt()
  1194          f_nround(b1, b0, kp);                                   in crypto_aes_encrypt()
  1195          f_nround(b0, b1, kp);                                   in crypto_aes_encrypt()
  1198          f_nround(b1, b0, kp);                                   in crypto_aes_encrypt()
  [all …]
/linux/drivers/gpu/drm/xe/tests/
xe_wa_test.c
    57          PLATFORM_CASE(TIGERLAKE, B0),
    59          PLATFORM_CASE(DG1, B0),
    61          PLATFORM_CASE(ALDERLAKE_S, B0),
    65          PLATFORM_CASE(ALDERLAKE_P, B0),
    76          GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),