| /linux/lib/crypto/x86/ |
| chacha-ssse3-x86_64.S |
     43  paddd %xmm1,%xmm0
     44  pxor %xmm0,%xmm3
     56  paddd %xmm1,%xmm0
     57  pxor %xmm0,%xmm3
     76  paddd %xmm1,%xmm0
     77  pxor %xmm0,%xmm3
     89  paddd %xmm1,%xmm0
     90  pxor %xmm0,%xmm3
    123  movdqu 0x00(%rdi),%xmm0
    127  movdqa %xmm0,%xmm8
    [all …]
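The paddd/pxor pairs above are the add-xor halves of the ChaCha quarter-round, applied to four state words per instruction. A minimal sketch of the same step with SSE2 intrinsics (helper names are illustrative, not from the kernel source):

    #include <emmintrin.h>

    /* Rotate each 32-bit lane left by n bits (SSE2 has no vector rotate). */
    static __m128i rotl32(__m128i v, int n)
    {
        return _mm_or_si128(_mm_slli_epi32(v, n), _mm_srli_epi32(v, 32 - n));
    }

    /* a += b; d ^= a; d <<<= rot  --  one add-xor-rotate step of the
     * quarter-round, four state words at a time (the paddd + pxor above). */
    static void qr_step(__m128i *a, __m128i b, __m128i *d, int rot)
    {
        *a = _mm_add_epi32(*a, b);  /* paddd %xmm1,%xmm0 */
        *d = _mm_xor_si128(*d, *a); /* pxor  %xmm0,%xmm3 */
        *d = rotl32(*d, rot);       /* pshufb/shift pair in the real code */
    }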
|
| blake2s-core.S |
     69  movdqu (CTX),%xmm0 // Load h[0..3]
     81  movdqa %xmm0,%xmm10 // Save h[0..3] and let v[0..3] = h[0..3]
    102  paddd %xmm4,%xmm0
    103  paddd %xmm1,%xmm0
    104  pxor %xmm0,%xmm3
    123  paddd %xmm5,%xmm0
    124  paddd %xmm1,%xmm0
    125  pxor %xmm0,%xmm3
    133  pshufd $0x93,%xmm0,%xmm0
    147  paddd %xmm6,%xmm0
    [all …]
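Lines 102-125 are the first half of BLAKE2s's G function vectorized across a whole row, and the pshufd $0x93 at line 133 rotates a row's lanes to move between the column and diagonal rounds. A sketch under those assumptions, with illustrative helper names:

    #include <emmintrin.h>

    /* Rotate each 32-bit lane right by n bits (SSE2 has no vector rotate). */
    static __m128i ror32(__m128i v, int n)
    {
        return _mm_or_si128(_mm_srli_epi32(v, n), _mm_slli_epi32(v, 32 - n));
    }

    /* First half-step of G across a row, matching the paddd/paddd/pxor
     * sequence above: a += m; a += b; d = ror32(d ^ a, 16). */
    static void g_half(__m128i *a, __m128i b, __m128i *d, __m128i m)
    {
        *a = _mm_add_epi32(*a, m);             /* paddd %xmm4,%xmm0 */
        *a = _mm_add_epi32(*a, b);             /* paddd %xmm1,%xmm0 */
        *d = ror32(_mm_xor_si128(*d, *a), 16); /* pxor + rotate */
    }

    /* pshufd $0x93: rotate lanes between column and diagonal rounds. */
    static __m128i rotate_lanes(__m128i row)
    {
        return _mm_shuffle_epi32(row, 0x93);
    }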
|
| polyval-pclmul-avx.S |
     88  movups (16*\i)(MSG), %xmm0
     90  pxor SUM, %xmm0
     92  vpclmulqdq $0x01, (16*\i)(KEY_POWERS), %xmm0, %xmm2
     93  vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1
     94  vpclmulqdq $0x10, (16*\i)(KEY_POWERS), %xmm0, %xmm3
     95  vpclmulqdq $0x11, (16*\i)(KEY_POWERS), %xmm0, %xmm4
    108  vpclmulqdq $0x01, %xmm0, %xmm1, MI
    109  vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2
    110  vpclmulqdq $0x00, %xmm0, %xmm1, LO
    111  vpclmulqdq $0x11, %xmm0, %xmm1, HI
    [all …]
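The four vpclmulqdq immediates per block (0x00, 0x01, 0x10, 0x11) select which 64-bit halves get multiplied, together forming one schoolbook 128x128-bit carryless multiply. A sketch of that decomposition with PCLMULQDQ intrinsics (function name illustrative):

    #include <wmmintrin.h> /* PCLMULQDQ */
    #include <emmintrin.h>

    /* Schoolbook 128x128 -> 256-bit carryless multiply, mirroring the four
     * immediates above: 0x00 = lo*lo, 0x11 = hi*hi, and the two cross
     * products 0x01/0x10 that form the middle term. */
    static void clmul_128(__m128i a, __m128i b, __m128i *lo, __m128i *hi)
    {
        __m128i lo_  = _mm_clmulepi64_si128(a, b, 0x00);
        __m128i mid1 = _mm_clmulepi64_si128(a, b, 0x01);
        __m128i mid2 = _mm_clmulepi64_si128(a, b, 0x10);
        __m128i hi_  = _mm_clmulepi64_si128(a, b, 0x11);
        __m128i mid  = _mm_xor_si128(mid1, mid2);
        *lo = _mm_xor_si128(lo_, _mm_slli_si128(mid, 8)); /* low 128 bits */
        *hi = _mm_xor_si128(hi_, _mm_srli_si128(mid, 8)); /* high 128 bits */
    }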
|
| sha512-avx-asm.S |
    176  vpsrlq $61, %xmm4, %xmm0      # XMM0 = W[t-2]>>61
    184  vpxor %xmm1, %xmm0, %xmm0     # XMM0 = W[t-2]>>61 ^ W[t-2]>>19
    220  vpxor %xmm4, %xmm0, %xmm0     # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^
    223  vpxor %xmm2, %xmm0, %xmm0     # XMM0 = s1(W[t-2])
    227  vpaddq W_t(idx), %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16]
    239  vpaddq %xmm6, %xmm0, %xmm0    # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15])
    241  vpaddq %xmm1, %xmm0, %xmm0    # XMM0 = W[t] = s1(W[t-2]) + W[t-7] +
    248  vmovdqa %xmm0, W_t(idx)       # Store W[t]
    249  vpaddq K_t(idx), %xmm0, %xmm0 # Compute W[t]+K[t]
    250  vmovdqa %xmm0, WK_2(idx)      # Store W[t]+K[t] for next rounds
    [all …]
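The vpsrlq/vpxor/vpaddq chain evaluates SHA-512's small sigma functions and the message-schedule recurrence spelled out in the comments. The same computation in scalar C, per FIPS 180-4:

    #include <stdint.h>

    static uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* SHA-512 small sigma functions; the shift/xor chain above computes
     * s1 on two message words at a time. */
    static uint64_t s0(uint64_t x) { return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7); }
    static uint64_t s1(uint64_t x) { return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6); }

    /* Message schedule recurrence matching the vpaddq comments above. */
    static uint64_t next_w(const uint64_t W[], int t)
    {
        return s1(W[t - 2]) + W[t - 7] + s0(W[t - 15]) + W[t - 16];
    }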
|
| sha512-ssse3-asm.S |
    174  movdqa %xmm2, %xmm0    # XMM0 = W[t-2]
    185  psrlq $61-19, %xmm0    # XMM0 = W[t-2] >> 42
    191  pxor %xmm2, %xmm0      # XMM0 = (W[t-2] >> 42) ^ W[t-2]
    197  psrlq $(19-6), %xmm0   # XMM0 = ((W[t-2]>>42)^W[t-2])>>13
    203  pxor %xmm2, %xmm0      # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2]
    209  psrlq $6, %xmm0        # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6
    240  pxor %xmm1, %xmm0      # XMM0 = s1(W[t-2])
    248  paddq %xmm3, %xmm0     # XMM0 = s1(W[t-2]) + s0(W[t-15])
    251  paddq W_t(idx), %xmm0  # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16]
    253  paddq %xmm1, %xmm0     # XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
    [all …]
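The nested psrlq/pxor sequence relies on the identity ((((x>>42)^x)>>13)^x)>>6 == (x>>61)^(x>>19)^(x>>6), producing the three right-shift components of s1 with only three shifts. A small self-checking C program for the identity (function names illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Nested form used by the SSSE3 code above: shifts are shared, so
     * three psrlq cover shift amounts 61, 19, and 6 at once. */
    static uint64_t s1_shifts_nested(uint64_t x)
    {
        return ((((x >> 42) ^ x) >> 13) ^ x) >> 6;
    }

    static uint64_t s1_shifts_direct(uint64_t x)
    {
        return (x >> 61) ^ (x >> 19) ^ (x >> 6);
    }

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefULL;
        assert(s1_shifts_nested(x) == s1_shifts_direct(x));
        return 0;
    }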
|
| sha1-ni-asm.S | 62 #define ABCD %xmm0
|
| sha256-ni-asm.S |
     64  #define MSG %xmm0 /* sha256rnds2 implicit operand */
    202  #define MSG %xmm0 // sha256rnds2 implicit operand
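The alias exists because sha256rnds2 has an implicit third operand: the round's message-plus-constant words must sit in %xmm0. The compiler intrinsic makes the operand look explicit, but it is still pinned to xmm0; a minimal sketch (requires the SHA extensions, e.g. -msha):

    #include <immintrin.h>

    /* Two SHA-256 rounds: state halves are explicit operands, while the
     * wk argument is materialized in %xmm0 by the compiler, matching the
     * MSG alias above. */
    static __m128i two_rounds(__m128i state0, __m128i state1, __m128i wk)
    {
        return _mm_sha256rnds2_epu32(state0, state1, wk);
    }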
|
| chacha-avx512vl-x86_64.S |
    119  vextracti128 $1,%ymm7,%xmm0
    143  vmovdqa %xmm0,%xmm7
    331  vextracti128 $1,%ymm10,%xmm0
    355  vmovdqa %xmm0,%xmm10
|
| chacha-avx2-x86_64.S |
    146  vextracti128 $1,%ymm7,%xmm0
    170  vmovdqa %xmm0,%xmm7
    397  vextracti128 $1,%ymm10,%xmm0
    421  vmovdqa %xmm0,%xmm10
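Both the AVX-512VL and AVX2 ChaCha entries above use vextracti128 $1 to peel the upper 128-bit lane of a ymm register into %xmm0 when emitting partial blocks. The equivalent AVX2 intrinsic, for reference (helper name illustrative):

    #include <immintrin.h>

    /* vextracti128 $1, %ymmN, %xmm0: pull the upper 128-bit lane out of
     * a 256-bit register, one 16-byte block's worth of state. */
    static __m128i high_lane(__m256i v)
    {
        return _mm256_extracti128_si256(v, 1);
    }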
|
| poly1305-x86_64-cryptogams.pl |
    3877  movdqu ($inp,$otp),%xmm0
    3878  pxor ($otp),%xmm0
    3879  movdqu %xmm0,($out,$otp)
    3880  movdqa %xmm0,($otp)
    3924  movdqu ($inp,$otp),%xmm0
    3926  pxor %xmm0,%xmm1
    3928  movdqa %xmm0,($otp)
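This movdqu/pxor/movdqu pattern XORs the driver's buffered pad with the message 16 bytes at a time. A sketch of the same pattern using unaligned accesses throughout (the Perl-generated code mixes aligned and unaligned forms; names illustrative):

    #include <emmintrin.h>
    #include <stddef.h>

    /* XOR a message with a one-time pad, one 16-byte block per iteration. */
    static void xor_pad(unsigned char *out, const unsigned char *in,
                        const unsigned char *pad, size_t nblocks)
    {
        for (size_t i = 0; i < nblocks; i++) {
            __m128i m = _mm_loadu_si128((const __m128i *)(in + 16 * i));
            __m128i p = _mm_loadu_si128((const __m128i *)(pad + 16 * i));
            _mm_storeu_si128((__m128i *)(out + 16 * i), _mm_xor_si128(m, p));
        }
    }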
|
| /linux/lib/crc/x86/ |
| crc-pclmul-template.S |
    286  _cond_vex movd, CRC, %xmm0
    288  _cond_vex movq, CRC, %xmm0
    291  _cond_vex pslldq, $(128-\n)/8, %xmm0, %xmm0
    310  _prepare_v0 16, %xmm0, %xmm1, BSWAP_MASK_XMM
    391  _fold_vec_final 16, %xmm0, %xmm1, CONSTS_XMM, BSWAP_MASK_XMM, %xmm4, %xmm5
    419  _cond_vex pshufb, %xmm3, %xmm0, %xmm1
    427  %xmm0, %xmm0, unaligned_mem_tmp=%xmm4
    433  movdqa %xmm0, %xmm4
    434  movdqa %xmm3, %xmm0
    436  movdqa %xmm4, %xmm0
    [all …]
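The template folds the CRC state with carryless multiplies: each fold multiplies the accumulator's two 64-bit halves by precomputed constants (powers of x modulo the CRC polynomial) and XORs in the next message block. A sketch of one such fold step, with an assumed constant layout (low-half constant in the low 64 bits, high-half constant in the high 64 bits):

    #include <wmmintrin.h>
    #include <emmintrin.h>

    /* One 128-bit folding step: fold acc forward over the next block.
     * consts holds the two precomputed reduction constants. */
    static __m128i fold_step(__m128i acc, __m128i consts, __m128i data)
    {
        __m128i lo = _mm_clmulepi64_si128(acc, consts, 0x00); /* lo half */
        __m128i hi = _mm_clmulepi64_si128(acc, consts, 0x11); /* hi half */
        return _mm_xor_si128(_mm_xor_si128(lo, hi), data);
    }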
|
| /linux/arch/x86/crypto/ |
| aes-gcm-vaes-avx512.S |
    305  vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block
    308  vaesenc (KEY), %xmm0, %xmm0
    312  vaesenclast (RNDKEYLAST_PTR), %xmm0, %xmm0
    315  vpshufb .Lbswap_mask(%rip), %xmm0, H_CUR_XMM
    318  vpxor %xmm0, %xmm0, %xmm0
    320  vmovdqu %xmm0, 64+2*16(POWERS_PTR)
    337  vpshufd $0xd3, H_CUR_XMM, %xmm0
    338  vpsrad $31, %xmm0, %xmm0
    341  vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM
    353  _ghash_square H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1
    [all …]
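Lines 337-341 are a branchless conditional reduction while preparing the GHASH key: vpsrad $31 spreads a carry bit into a full-width lane mask, and vpternlogd $0x78 computes h ^ (mask & poly) in a single instruction (imm8 0x78 encodes a ^ (b & c)). A sketch of just that mask-and-conditional-XOR portion (the accompanying shift of H happens elsewhere in the file); requires AVX-512VL:

    #include <immintrin.h>

    /* Mirror of the vpshufd $0xd3 / vpsrad $31 / vpternlogd $0x78 triple:
     * derive the carry mask, then conditionally XOR the polynomial. */
    static __m128i cond_xor_poly(__m128i h, __m128i poly)
    {
        __m128i mask = _mm_srai_epi32(_mm_shuffle_epi32(h, 0xd3), 31);
        return _mm_ternarylogic_epi32(h, mask, poly, 0x78); /* h^(mask&poly) */
    }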
|
| aes-gcm-aesni-x86_64.S |
    524  movdqa H_POW1, %xmm0
    525  pshufd $0xd3, %xmm0, %xmm0
    526  psrad $31, %xmm0
    528  pand .Lgfpoly_and_internal_carrybit(%rip), %xmm0
    529  pxor %xmm0, H_POW1
    536  pshufd $0x4e, H_POW1, %xmm0
    538  pxor %xmm0, H_POW1_X64
    542  pxor H_POW1, %xmm0
    543  movq %xmm0, OFFSETOF_H_POWERS_XORED+7*8(KEY)
    550  _ghash_mul H_POW1, H_POW1_X64, H_CUR, GFPOLY, %xmm0, %xmm1
    [all …]
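The pshufd $0x4e at line 536 swaps a register's 64-bit halves; XORing the swapped copy into the original yields h_lo ^ h_hi in both halves, a value the driver precomputes so that later Karatsuba multiplies need three pclmulqdq instructions instead of four. A sketch of that precomputation (function name illustrative):

    #include <emmintrin.h>

    /* pshufd $0x4e + pxor: precompute the XOR of the two 64-bit halves
     * for Karatsuba-style carryless multiplication. */
    static __m128i karatsuba_precompute(__m128i h)
    {
        __m128i swapped = _mm_shuffle_epi32(h, 0x4e); /* swap 64-bit halves */
        return _mm_xor_si128(h, swapped);             /* h_lo^h_hi in both */
    }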
|
| aes-xts-avx-x86_64.S |
    311  _next_tweak TWEAK0, %xmm0, TWEAK1
    312  _next_tweak TWEAK1, %xmm0, TWEAK2
    313  _next_tweak TWEAK2, %xmm0, TWEAK3
    319  _next_tweak TWEAK0_XMM, %xmm0, %xmm1
    758  vmovdqu (SRC), %xmm0
    759  _aes_crypt \enc, _XMM, TWEAK0_XMM, %xmm0, tmp=%xmm1
    760  vmovdqu %xmm0, (DST)
    761  _next_tweak TWEAK0_XMM, %xmm0, TWEAK0_XMM
    780  vmovdqu (DST), %xmm0
    785  _next_tweak TWEAK0_XMM, %xmm0, TWEAK1_XMM
    [all …]
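_next_tweak advances the XTS tweak by one block, i.e. multiplies it by x in GF(2^128) with the polynomial x^128 + x^7 + x^2 + x + 1: shift left one bit and, if a bit carried out, XOR 0x87 into the low byte. A scalar sketch of what the SIMD macro computes (assumes a little-endian host; function name illustrative):

    #include <stdint.h>
    #include <string.h>

    /* tweak *= x in GF(2^128), XTS convention (little-endian 128-bit). */
    static void xts_next_tweak(uint8_t t[16])
    {
        uint64_t lo, hi;
        memcpy(&lo, t, 8);
        memcpy(&hi, t + 8, 8);
        uint64_t carry = hi >> 63;          /* bit shifted out of the top */
        hi = (hi << 1) | (lo >> 63);
        lo = (lo << 1) ^ (carry * 0x87);    /* conditional reduction */
        memcpy(t, &lo, 8);
        memcpy(t + 8, &hi, 8);
    }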
|
| aria-aesni-avx-asm_64.S |
    889  inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    893  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    895  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    899  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    901  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    905  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    907  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    911  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    913  aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
    917  %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
    [all …]
|
| aes-gcm-vaes-avx2.S |
     228  .set TMP0_XMM, %xmm0
     507  .set TMP0_XMM, %xmm0
     710  .set BSWAP_MASK_XMM, %xmm0
    1058  vmovq TOTAL_DATALEN, %xmm0
    1059  vpinsrq $1, TOTAL_AADLEN, %xmm0, %xmm0
    1060  vpsllq $3, %xmm0, %xmm0 // Bytes to bits
    1061  vpxor (GHASH_ACC_PTR), %xmm0, GHASH_ACC
    1076  vpshufb BSWAP_MASK, LE_CTR, %xmm0
    1077  vpxor (KEY), %xmm0, %xmm0
    1085  vaesenc -13*16(%rax), %xmm0, %xmm0
    [all …]
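Lines 1058-1061 build GCM's final lengths block: pack the ciphertext and AAD byte counts into one vector, convert both lanes to bit counts with a single shift by 3, and XOR the result into the GHASH accumulator. A sketch with intrinsics (function name illustrative):

    #include <immintrin.h>
    #include <stdint.h>

    /* vmovq + vpinsrq + vpsllq + vpxor: fold the lengths block into the
     * GHASH accumulator, with both byte counts converted to bits at once. */
    static __m128i ghash_lengths_block(__m128i acc, uint64_t datalen,
                                       uint64_t aadlen)
    {
        __m128i len = _mm_set_epi64x((long long)aadlen, (long long)datalen);
        len = _mm_slli_epi64(len, 3); /* bytes -> bits, both lanes */
        return _mm_xor_si128(acc, len);
    }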
|
| sm3-avx-asm_64.S |
    117  #define W0 %xmm0
    507  vmovdqa %xmm0, IW_W1_ADDR(0, 0);
    508  vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
    509  vmovdqa %xmm0, IW_W1_ADDR(4, 0);
    510  vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
    511  vmovdqa %xmm0, IW_W1_ADDR(8, 0);
    512  vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);
|
| nh-sse2-x86_64.S | 13 #define PASS0_SUMS %xmm0
|
| sm4-aesni-avx2-asm_64.S |
     45  #define RX0x %xmm0
     49  #define RNOTx %xmm0
|
| sm4-aesni-avx-asm_64.S |
     22  #define RX0 %xmm0
     41  #define RNOT %xmm0
|
| cast6-avx-x86_64-asm_64.S | 37 #define RA1 %xmm0
|
| twofish-avx-x86_64-asm_64.S | 37 #define RA1 %xmm0
|
| aes-ctr-avx-x86_64.S | 303 .set AESDATA0_XMM, %xmm0
|
| /linux/Documentation/trace/ |
| tracepoint-analysis.rst |
    321  12.40 :  34eee:  66 0f 7f 80 40 ff ff ff  movdqa %xmm0,-0xc0(%eax)
    323  12.40 :  34ef6:  66 0f 7f 80 50 ff ff ff  movdqa %xmm0,-0xb0(%eax)
    325  12.39 :  34efe:  66 0f 7f 80 60 ff ff ff  movdqa %xmm0,-0xa0(%eax)
    327  12.67 :  34f06:  66 0f 7f 80 70 ff ff ff  movdqa %xmm0,-0x90(%eax)
    329  12.58 :  34f0e:  66 0f 7f 40 80           movdqa %xmm0,-0x80(%eax)
    330  12.31 :  34f13:  66 0f 7f 40 90           movdqa %xmm0,-0x70(%eax)
    331  12.40 :  34f18:  66 0f 7f 40 a0           movdqa %xmm0,-0x60(%eax)
    332  12.31 :  34f1d:  66 0f 7f 40 b0           movdqa %xmm0,-0x50(%eax)
|
| /linux/arch/x86/entry/vdso/ |
| vgetrandom-chacha.S | 32 .set temp, %xmm0
|