| /linux/arch/arm64/crypto/ |
| sm4-ce-asm.h |

    28:  #define SM4_CRYPT_BLK2_BE(b0, b1) \
    30:      sm4e b1.4s, v24.4s; \
    32:      sm4e b1.4s, v25.4s; \
    34:      sm4e b1.4s, v26.4s; \
    36:      sm4e b1.4s, v27.4s; \
    38:      sm4e b1.4s, v28.4s; \
    40:      sm4e b1.4s, v29.4s; \
    42:      sm4e b1.4s, v30.4s; \
    44:      sm4e b1.4s, v31.4s; \
    46:      rev64 b1.4s, b1.4s; \
    [all …]
|
| sm4-neon-core.S |

    131: #define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \
    137:     ROUND4(0, b0, b1, b2, b3); \
    138:     ROUND4(1, b1, b2, b3, b0); \
    139:     ROUND4(2, b2, b3, b0, b1); \
    140:     ROUND4(3, b3, b0, b1, b2); \
    145:     rev32 b1.16b, b1.16b; \
    149:     rotate_clockwise_4x4(b0, b1, b2, b3); \
    154: #define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
    156:     rev32 b1.16b, b1.16b; \
    159:     SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
    [all …]
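
The ROUND4 lines above show SM4's round scheduling: each round replaces one of
four state words using the other three, with the roles rotating every round. A
minimal single-block C sketch of that schedule, where sm4_t() is a hypothetical
placeholder for SM4's real S-box-plus-linear-layer transform (the NEON code
additionally processes four blocks in parallel):

    #include <stdint.h>

    /* hypothetical stand-in for SM4's T transform (S-box + rotations) */
    static uint32_t sm4_t(uint32_t x) { return x; }

    static void sm4_rounds_sketch(uint32_t b[4], const uint32_t rk[32])
    {
        /* X[i+4] = X[i] ^ T(X[i+1] ^ X[i+2] ^ X[i+3] ^ rk[i]);
         * the final output word reversal is omitted */
        for (int i = 0; i < 32; i += 4) {
            b[0] ^= sm4_t(b[1] ^ b[2] ^ b[3] ^ rk[i]);
            b[1] ^= sm4_t(b[2] ^ b[3] ^ b[0] ^ rk[i + 1]);
            b[2] ^= sm4_t(b[3] ^ b[0] ^ b[1] ^ rk[i + 2]);
            b[3] ^= sm4_t(b[0] ^ b[1] ^ b[2] ^ rk[i + 3]);
        }
    }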
|
| aes-neonbs-core.S |

    26:  .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    27:      eor \b2, \b2, \b1
    38:      eor \b3, \b3, \b1
    39:      eor \b1, \b1, \b5
    42:  .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    44:      eor \b1, \b1, \b4
    47:      eor \b6, \b6, \b1
    48:      eor \b1, \b1, \b5
    56:  .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
    57:      eor \b1, \b1, \b7
    [all …]
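
in_bs_ch/out_bs_ch are fixed XOR networks that convert between the normal AES
byte ordering and the basis expected by the bitsliced S-box circuit. The
bitsliced layout itself gathers bit i of every byte into register i, i.e. an
8x8 bit-matrix transpose; a naive C sketch of that layout (the kernel builds
it with NEON permutes, not a loop):

    #include <stdint.h>

    /* out[i] collects bit i of each input byte: an 8x8 bit transpose */
    static void bitslice8(const uint8_t in[8], uint8_t out[8])
    {
        for (int i = 0; i < 8; i++) {
            uint8_t t = 0;
            for (int j = 0; j < 8; j++)
                t |= ((in[j] >> i) & 1) << j;
            out[i] = t;
        }
    }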
|
| sm4-ce-gcm-core.S |

    133: #define SM4_CRYPT_PMUL_128x128_BLK3(b0, b1, b2, \
    138:     rev32 b1.16b, b1.16b; \
    144:     sm4e b1.4s, v24.4s; \
    150:     sm4e b1.4s, v25.4s; \
    156:     sm4e b1.4s, v26.4s; \
    162:     sm4e b1.4s, v27.4s; \
    168:     sm4e b1.4s, v28.4s; \
    174:     sm4e b1.4s, v29.4s; \
    180:     sm4e b1.4s, v30.4s; \
    186:     sm4e b1.4s, v31.4s; \
    [all …]
|
| /linux/crypto/ |
| xor.c |

    83:  do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
    101:     tmpl->do_2(BENCH_SIZE, b1, b2);
    118:     void *b1, *b2;
    130:     b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
    131:     if (!b1) {
    135:     b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
    142: #define xor_speed(templ) do_xor_speed((templ), b1, b2)
    157:     free_pages((unsigned long)b1, 2);
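
calibrate_xor_blocks() times every candidate XOR implementation over the same
pair of buffers and keeps the fastest. A rough user-space analogue of the
measurement loop (BENCH_SIZE, the candidate function and the timing source are
simplified assumptions, not the kernel's code):

    #include <stddef.h>
    #include <time.h>

    #define BENCH_SIZE 4096 /* assumed benchmark block size */

    static void xor_2(size_t bytes, unsigned long *p1, const unsigned long *p2)
    {
        for (size_t i = 0; i < bytes / sizeof(*p1); i++)
            p1[i] ^= p2[i];
    }

    /* time reps iterations and return approximate bytes per second */
    static double xor_speed(unsigned long *b1, unsigned long *b2, int reps)
    {
        struct timespec t0, t1;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (int i = 0; i < reps; i++)
            xor_2(BENCH_SIZE, b1, b2);
        clock_gettime(CLOCK_MONOTONIC, &t1);

        double secs = (t1.tv_sec - t0.tv_sec) +
                      (t1.tv_nsec - t0.tv_nsec) / 1e9;
        return (double)BENCH_SIZE * reps / secs;
    }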
|
| /linux/Documentation/arch/arm64/ |
| elf_hwcaps.rst |

    174: Functionality implied by ID_AA64PFR1_EL1.GCS == 0b1, as
    213: Functionality implied by ID_AA64SMFR0_EL1.SBitPerm == 0b1.
    216: Functionality implied by ID_AA64SMFR0_EL1.AES == 0b1.
    219: Functionality implied by ID_AA64SMFR0_EL1.SFEXPA == 0b1.
    222: Functionality implied by ID_AA64SMFR0_EL1.STMOP == 0b1.
    225: Functionality implied by ID_AA64SMFR0_EL1.SMOP4 == 0b1.
    316: Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
    322: Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.
    325: Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.
    328: Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.
    [all …]
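
User space tests these hwcap bits via the auxiliary vector before using a
feature. A minimal example for the long-standing arm64 AES bit (bit 3 of
AT_HWCAP); the ID_AA64SMFR0_EL1-derived caps listed above are exposed through
later hwcap words (typically AT_HWCAP2), with constants from <asm/hwcap.h>:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* HWCAP_AES is bit 3 of AT_HWCAP on arm64 */
        printf("AES instructions: %s\n",
               (hwcap & (1UL << 3)) ? "yes" : "no");
        return 0;
    }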
|
| /linux/drivers/atm/ |
| fore200e.h |

    71: #define BITFIELD2(b1, b2)                 b1; b2;
    72: #define BITFIELD3(b1, b2, b3)             b1; b2; b3;
    73: #define BITFIELD4(b1, b2, b3, b4)         b1; b2; b3; b4;
    74: #define BITFIELD5(b1, b2, b3, b4, b5)     b1; b2; b3; b4; b5;
    75: #define BITFIELD6(b1, b2, b3, b4, b5, b6) b1; b2; b3; b4; b5; b6;
    77: #define BITFIELD2(b1, b2)                 b2; b1;
    78: #define BITFIELD3(b1, b2, b3)             b3; b2; b1;
    79: #define BITFIELD4(b1, b2, b3, b4)         b4; b3; b2; b1;
    80: #define BITFIELD5(b1, b2, b3, b4, b5)     b5; b4; b3; b2; b1;
    81: #define BITFIELD6(b1, b2, b3, b4, b5, b6) b6; b5; b4; b3; b2; b1;
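
The two definition sets exist because, on the compilers the kernel supports,
bitfield layout within a storage unit follows the target's byte order, so a
hardware-defined word must declare its fields in opposite order on big- and
little-endian builds. A self-contained usage sketch (the field names are
illustrative, not from the driver):

    #include <stdint.h>

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    #define BITFIELD2(b1, b2) b2; b1;
    #else
    #define BITFIELD2(b1, b2) b1; b2;
    #endif

    /* same in-memory layout on either endianness */
    struct hw_word {
        BITFIELD2(uint32_t length : 16,
                  uint32_t flags  : 16)
    };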
|
| /linux/drivers/isdn/mISDN/ |
| dsp_biquad.h |

    19:      int32_t b1;
    27:      int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2)
    32:      bq->b1 = b1;
    45:      y = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2;
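
biquad2() is a two-pole, two-zero (biquad) IIR filter section in fixed point:
z0 accumulates the feedback (a) taps and y adds the feedforward (b) taps, as
on line 45 above. A floating-point sketch of the same recurrence (the kernel's
gain scaling and fixed-point shifts are omitted):

    /* direct-form II biquad section */
    struct biquad {
        double a1, a2;  /* feedback coefficients */
        double b1, b2;  /* feedforward coefficients */
        double z1, z2;  /* delay line */
    };

    static double biquad_step(struct biquad *bq, double x)
    {
        double z0 = x + bq->z1 * bq->a1 + bq->z2 * bq->a2;
        double y  = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2;

        bq->z2 = bq->z1;
        bq->z1 = z0;
        return y;
    }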
|
| /linux/fs/f2fs/ |
| hash.c |

    28:      __u32 b0 = buf[0], b1 = buf[1];
    34:          b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
    35:          b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
    39:      buf[1] += b1;
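
TEA_transform() folds a filename block into the hash state with the classic
TEA mixing rounds (this lineage goes back to ext3's htree hash). A
self-contained sketch of the rounds shown above, assuming the usual
DELTA = 0x9e3779b9 and 16 iterations:

    #include <stdint.h>

    #define TEA_DELTA 0x9e3779b9u

    static void tea_transform(uint32_t buf[4], const uint32_t in[8])
    {
        uint32_t sum = 0;
        uint32_t b0 = buf[0], b1 = buf[1];
        uint32_t a = in[0], b = in[1], c = in[2], d = in[3];

        for (int n = 0; n < 16; n++) {
            sum += TEA_DELTA;
            b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
            b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
        }
        buf[0] += b0;
        buf[1] += b1;
    }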
|
| /linux/arch/arm/include/asm/ |
| xor.h |

    26:      : "=r" (src), "=r" (b1), "=r" (b2) \
    28:      __XOR(a1, b1); __XOR(a2, b2);
    32:      : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
    34:      __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
    55:      register unsigned int b1 __asm__("r8");
    77:      register unsigned int b1 __asm__("r8");
    99:      register unsigned int b1 __asm__("ip");
    121:     register unsigned int b1 __asm__("ip");
|
| /linux/lib/crypto/ |
| polyval.c |

    69:      u64 b1 = b & 0x2222222222222222;
    75:          (a2 * (u128)b2) ^ (a3 * (u128)b1);
    76:      u128 c1 = (a0 * (u128)b1) ^ (a1 * (u128)b0) ^
    78:      u128 c2 = (a0 * (u128)b2) ^ (a1 * (u128)b1) ^
    81:          (a2 * (u128)b1) ^ (a3 * (u128)b0);
    119:     u32 b1 = b & 0x22222222;
    124:         (a2 * (u64)b2) ^ (a3 * (u64)b1);
    125:     u64 c1 = (a0 * (u64)b1) ^ (a1 * (u64)b0) ^
    127:     u64 c2 = (a0 * (u64)b2) ^ (a1 * (u64)b1) ^
    130:         (a2 * (u64)b1) ^ (a3 * (u64)b0);
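
clmul64()/clmul32() compute a carry-less (GF(2)[x]) product out of ordinary
integer multiplies: each operand is split into four interleaved bit groups
(the 0x2222... mask above selects one of them) so that partial products never
carry into one another, keeping the routine constant-time without hardware
CLMUL. A bit-at-a-time reference for what is being computed, not how the
kernel computes it:

    #include <stdint.h>

    /* XOR a shifted copy of a for every set bit of b: a carry-less multiply.
     * The data-dependent branch makes this reference non-constant-time,
     * which is exactly what the masked-multiply version above avoids. */
    static uint64_t clmul32_ref(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;

        for (int i = 0; i < 32; i++)
            if ((b >> i) & 1)
                r ^= (uint64_t)a << i;
        return r;
    }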
|
| /linux/arch/arm/nwfpe/ |
| softfloat-macros |

    339: value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
    346:     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    350:     z1 = a1 + b1;
    359: 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
    371:     bits64 b1,
    383:     z1 = a1 + b1;
    397: Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
    406:     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
    409:     *z1Ptr = a1 - b1;
    410:     *z0Ptr = a0 - b0 - ( a1 < b1 );
    [all …]
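
add128()/sub128() propagate carries manually: the low words combine first, and
the carry-out (z1 < a1 after adding, a1 < b1 before subtracting) feeds the
high words. The same idiom in portable C, with a0/b0 the high halves as in the
macros above:

    #include <stdint.h>

    static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        uint64_t lo = a1 + b1;      /* may wrap modulo 2^64 */

        *z1 = lo;
        *z0 = a0 + b0 + (lo < a1);  /* carry out of the low word */
    }

    static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                       uint64_t *z0, uint64_t *z1)
    {
        *z1 = a1 - b1;
        *z0 = a0 - b0 - (a1 < b1);  /* borrow from the low word */
    }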
|
| /linux/tools/mm/ |
| slabinfo.c |

    809:     char b1[20], b2[20], b3[20], b4[20];
    985:     store_size(b1, total_size); store_size(b2, total_waste);
    987:     printf("Memory used: %15s # Loss : %15s MRatio:%6s%%\n", b1, b2, b3);
    989:     store_size(b1, total_objects); store_size(b2, total_partobj);
    991:     printf("# Objects : %15s # PartObj: %15s ORatio:%6s%%\n", b1, b2, b3);
    999:     store_size(b1, avg_objects); store_size(b2, min_objects);
    1002:        b1, b2, b3, b4);
    1004:    store_size(b1, avg_slabs); store_size(b2, min_slabs);
    1007:        b1, b2, b3, b4);
    1009:    store_size(b1, avg_partial); store_size(b2, min_partial);
    [all …]
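
store_size() evidently renders a count into a short string so the report
columns line up. A plausible sketch of such a helper (the tool's exact
rounding and suffix rules may differ):

    #include <stdio.h>

    /* format n into buf with a K/M/G suffix and one decimal digit */
    static void store_size(char buf[20], unsigned long long n)
    {
        if (n >= 1ULL << 30)
            snprintf(buf, 20, "%llu.%lluG",
                     n >> 30, ((n >> 20) & 1023) * 10 / 1024);
        else if (n >= 1ULL << 20)
            snprintf(buf, 20, "%llu.%lluM",
                     n >> 20, ((n >> 10) & 1023) * 10 / 1024);
        else if (n >= 1ULL << 10)
            snprintf(buf, 20, "%llu.%lluK",
                     n >> 10, (n & 1023) * 10 / 1024);
        else
            snprintf(buf, 20, "%llu", n);
    }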
|
| /linux/drivers/gpu/drm/xe/tests/ |
| xe_guc_buf_kunit.c |

    158:     struct xe_guc_buf b1, b2;
    164:     b1 = xe_guc_buf_reserve(cache, dwords);
    167:     p1 = xe_guc_buf_cpu_ptr(b1);
    170:     a1 = xe_guc_buf_gpu_addr(b1);
    185:     xe_guc_buf_release(b1);
    192:     struct xe_guc_buf b1, b2;
    196:     b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
    197:     KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
    198:     KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
    199:     KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
    [all …]
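
test_overlap() asserts that two buffers reserved from the same cache never
share CPU or GPU address ranges. The underlying half-open interval test is the
standard one:

    #include <stdbool.h>
    #include <stdint.h>

    /* [s1, s1+len1) and [s2, s2+len2) overlap iff each starts before the
     * other ends */
    static bool ranges_overlap(uint64_t s1, uint64_t len1,
                               uint64_t s2, uint64_t len2)
    {
        return s1 < s2 + len2 && s2 < s1 + len1;
    }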
|
| /linux/lib/crypto/riscv/ |
| chacha-riscv64-zvkb.S |

    77:  .macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
    81:      vadd.vv \a1, \a1, \b1
    99:      vxor.vv \b1, \b1, \c1
    103:     vror.vi \b1, \b1, 32 - 12
    109:     vadd.vv \a1, \a1, \b1
    127:     vxor.vv \b1, \b1, \c1
    131:     vror.vi \b1, \b1, 32 - 7
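
The vector code runs several ChaCha quarter-rounds side by side; a right
rotate by 32 - n equals a left rotate by n, hence the vror.vi by 32 - 12 and
32 - 7. One scalar quarter-round for reference:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* one ChaCha quarter-round over four state words */
    static void quarterround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }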
|
| /linux/arch/powerpc/kernel/vdso/ |
| vgetrandom-chacha.S |

    52:  .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4
    53:      add \a1, \a1, \b1
    69:      xor \b1, \b1, \c1
    73:      rotlwi \b1, \b1, 12
    77:      add \a1, \a1, \b1
    93:      xor \b1, \b1, \c1
    97:      rotlwi \b1, \b1, 7
    103: #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \
    104:     quarterround4 state##a1 state##b1 state##c1 state##d1 \
|
| /linux/rust/quote/ |
| lib.rs |

    700:     ($call:ident! $extra:tt ($($b1:tt)*) ($($curr:tt)*)) => {
    702:         $crate::pounded_var_with_context!{$call! $extra $b1 $curr}
    710:     ($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => {
    714:     ($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => {
    718:     ($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => {
    726:     ($call:ident! $extra:tt $b1:tt $curr:tt) => {};
    857:         ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
    862:         $crate::quote_token_with_context!{$tokens $b3 $b2 $b1 $curr $a1 $a2 $a3}
    872:         ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
    877:         $crate::quote_token_with_context_spanned!{$tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3}
    [all …]
|
| /linux/arch/arm/crypto/ |
| aes-neonbs-core.S |

    80:  .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    81:      veor \b2, \b2, \b1
    92:      veor \b3, \b3, \b1
    93:      veor \b1, \b1, \b5
    96:  .macro out_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
    98:      veor \b1, \b1, \b4
    101:     veor \b6, \b6, \b1
    102:     veor \b1, \b1, \b5
    110: .macro inv_in_bs_ch, b6, b1, b2, b4, b7, b0, b3, b5
    111:     veor \b1, \b1, \b7
    [all …]
|
| /linux/arch/xtensa/platforms/iss/include/platform/ |
| simcall-iss.h |

    61:      register int b1 asm("a3") = b;
    66:          : "+r"(a1), "+r"(b1)
    69:      errno = b1;
|
| simcall-gdbio.h |

    22:      register int b1 asm("a6") = b;
    28:          : "r"(b1), "r"(d1)
|
| /linux/scripts/ |
| parse-maintainers.pl |

    79:      my $b1 = uc(substr($b, 0, 1));
    82:      my $b_index = index($preferred_order, $b1);
    87:      if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
    88:          ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
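
The script sorts each MAINTAINERS entry's lines by where their leading letter
falls in a preferred order, with F: and X: file patterns compared as plain
strings among themselves. The same comparator idea in C (the order string here
is illustrative, not copied from the script):

    #include <string.h>

    static const char preferred_order[] = "MRLSWQBCPTFXNK"; /* assumed */

    /* earlier position in preferred_order sorts first; unknown letters last */
    static int cmp_type(char a, char b)
    {
        const char *pa = strchr(preferred_order, a);
        const char *pb = strchr(preferred_order, b);
        int ia = pa ? (int)(pa - preferred_order) : (int)sizeof(preferred_order);
        int ib = pb ? (int)(pb - preferred_order) : (int)sizeof(preferred_order);

        return ia - ib;
    }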
|
| /linux/arch/x86/crypto/ |
| cast6-avx-x86_64-asm_64.S |

    129: #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
    130:     F_head(b1, RX, RGI1, RGI2, op0); \
    133:     F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    139: #define F1_2(a1, b1, a2, b2) \
    140:     F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    141: #define F2_2(a1, b1, a2, b2) \
    142:     F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    143: #define F3_2(a1, b1, a2, b2) \
    144:     F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
| /linux/drivers/mtd/nand/ |
| ecc-sw-hamming.c |

    378:     unsigned char b0, b1, b2, bit_addr;
    388:         b1 = read_ecc[1] ^ calc_ecc[1];
    391:         b1 = read_ecc[0] ^ calc_ecc[0];
    401:     if ((b0 | b1 | b2) == 0)
    405:         (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
    426:         byte_addr = (addressbits[b1] << 4) + addressbits[b0];
    429:             (addressbits[b1] << 4) + addressbits[b0];
    437:     if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
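
The corrector XORs the stored and recomputed ECC bytes into a syndrome.
All-zero means no error; if every syndrome byte carries exactly one set bit
per bit pair (the 0x55 pattern test), a single data bit flipped and its
address can be extracted; a lone set syndrome bit means the flip hit the ECC
bytes themselves. A sketch of that classification (address extraction via the
kernel's lookup tables, and its byte-order handling, omitted):

    #include <stdint.h>

    enum ecc_result { ECC_OK, ECC_SINGLE_BIT, ECC_IN_ECC, ECC_UNCORRECTABLE };

    static int popcount8(uint8_t x)
    {
        int n = 0;

        for (; x; x >>= 1)
            n += x & 1;
        return n;
    }

    static enum ecc_result classify(const uint8_t read_ecc[3],
                                    const uint8_t calc_ecc[3])
    {
        uint8_t b0 = read_ecc[0] ^ calc_ecc[0];
        uint8_t b1 = read_ecc[1] ^ calc_ecc[1];
        uint8_t b2 = read_ecc[2] ^ calc_ecc[2];

        if ((b0 | b1 | b2) == 0)
            return ECC_OK;

        if (((b0 ^ (b0 >> 1)) & 0x55) == 0x55 &&
            ((b1 ^ (b1 >> 1)) & 0x55) == 0x55 &&
            ((b2 ^ (b2 >> 1)) & 0x55) == 0x55)
            return ECC_SINGLE_BIT;  /* one flipped data bit, locatable */

        if (popcount8(b0) + popcount8(b1) + popcount8(b2) == 1)
            return ECC_IN_ECC;      /* the ECC itself took the hit */

        return ECC_UNCORRECTABLE;
    }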
|
| /linux/lib/crypto/arm/ |
| blake2s-core.S |

    71:  .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3
    78:      add \a1, \a1, \b1, ror #brot
    92:      eor \b1, \c1, \b1, ror #brot
    99:      add \a1, \a1, \b1, ror #12
    113:     eor \b1, \c1, \b1, ror#12
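
The macro runs two BLAKE2s G functions in parallel and defers rotations by
folding them into the second operand of the next instruction (the running
ror #brot), so state words live "pre-rotated" between steps. The underlying G
mixing step, written plainly:

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* BLAKE2s G: mix two message words into four state words */
    static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
                          uint32_t m0, uint32_t m1)
    {
        *a += *b + m0; *d = ror32(*d ^ *a, 16);
        *c += *d;      *b = ror32(*b ^ *c, 12);
        *a += *b + m1; *d = ror32(*d ^ *a, 8);
        *c += *d;      *b = ror32(*b ^ *c, 7);
    }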
|
| /linux/Documentation/translations/zh_CN/arch/arm64/ |
| booting.txt |

    193:     ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
    194:     ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
    196:     ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
    197:     ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
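
These lines state the GICv3 entry requirements: firmware must leave
system-register access to the CPU interface enabled at each implemented
exception level. A hedged sketch of how EL3 firmware might satisfy the EL3
requirement (the named sysreg form needs assembler GIC support; older
toolchains spell the register S3_6_C12_C12_5):

    /* set ICC_SRE_EL3.SRE (bit 0) and ICC_SRE_EL3.Enable (bit 3) */
    static inline void gicv3_enable_sre_el3(void)
    {
        unsigned long v;

        __asm__ volatile("mrs %0, ICC_SRE_EL3" : "=r"(v));
        v |= (1UL << 0) | (1UL << 3);
        __asm__ volatile("msr ICC_SRE_EL3, %0" : : "r"(v));
        __asm__ volatile("isb");
    }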
|