/linux/arch/x86/crypto/

aria-gfni-avx512-asm_64.S
     53  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
     73  transpose_4x4(a0, a1, a2, a3, d2, d3); \
     74  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     80  transpose_4x4(c0, c1, c2, c3, a0, a1); \
     81  transpose_4x4(d0, d1, d2, d3, a0, a1); \
    105  transpose_4x4(a0, b0, c0, d0, d2, d3); \
    106  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    112  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    113  transpose_4x4(a3, b3, c3, d3, b0, b1); \
    125  transpose_4x4(a0, a1, a2, a3, d2, d3); \
    [all …]

sm4-aesni-avx-asm_64.S
     46  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
    186  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    229  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    273  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    274  transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
    345  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    346  transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);

sm4-aesni-avx2-asm_64.S
     62  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
    179  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    180  transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
    257  transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
    258  transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);

aria-aesni-avx-asm_64.S
     53  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
     73  transpose_4x4(a0, a1, a2, a3, d2, d3); \
     74  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     80  transpose_4x4(c0, c1, c2, c3, a0, a1); \
     81  transpose_4x4(d0, d1, d2, d3, a0, a1); \
    105  transpose_4x4(a0, b0, c0, d0, d2, d3); \
    106  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    112  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    113  transpose_4x4(a3, b3, c3, d3, b0, b1); \
    125  transpose_4x4(a0, a1, a2, a3, d2, d3); \
    [all …]

aria-aesni-avx2-asm_64.S
     69  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
     89  transpose_4x4(a0, a1, a2, a3, d2, d3); \
     90  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     96  transpose_4x4(c0, c1, c2, c3, a0, a1); \
     97  transpose_4x4(d0, d1, d2, d3, a0, a1); \
    121  transpose_4x4(a0, b0, c0, d0, d2, d3); \
    122  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    128  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    129  transpose_4x4(a3, b3, c3, d3, b0, b1); \
    141  transpose_4x4(a0, a1, a2, a3, d2, d3); \
    [all …]

camellia-aesni-avx-asm_64.S
    419  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
    436  transpose_4x4(a0, a1, a2, a3, d2, d3); \
    437  transpose_4x4(b0, b1, b2, b3, d2, d3); \
    443  transpose_4x4(c0, c1, c2, c3, a0, a1); \
    444  transpose_4x4(d0, d1, d2, d3, a0, a1); \
    468  transpose_4x4(a0, b0, c0, d0, d2, d3); \
    469  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    475  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    476  transpose_4x4(a3, b3, c3, d3, b0, b1); \

camellia-aesni-avx2-asm_64.S
    451  #define transpose_4x4(x0, x1, x2, x3, t1, t2) \   (macro definition)
    468  transpose_4x4(a0, a1, a2, a3, d2, d3); \
    469  transpose_4x4(b0, b1, b2, b3, d2, d3); \
    475  transpose_4x4(c0, c1, c2, c3, a0, a1); \
    476  transpose_4x4(d0, d1, d2, d3, a0, a1); \
    500  transpose_4x4(a0, b0, c0, d0, d2, d3); \
    501  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    507  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    508  transpose_4x4(a3, b3, c3, d3, b0, b1); \

cast6-avx-x86_64-asm_64.S
    190  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    207  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    210  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \

twofish-avx-x86_64-asm_64.S
    204  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    221  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    224  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \

serpent-sse2-i586-asm_32.S
    453  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    474  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    477  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
    485  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \

serpent-sse2-x86_64-asm_64.S
    575  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    596  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    599  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
    607  transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \

serpent-avx-x86_64-asm_64.S
    536  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    548  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    551  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

serpent-avx2-asm_64.S
    536  #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \   (macro definition)
    548  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
    551  transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

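Every x86 definition listed above performs the same operation: it transposes a 4x4 block of 32-bit words held across four SIMD registers, with the extra t1/t2 (or t0, t1, t2) arguments naming scratch registers for the underlying SIMD sequence, which must keep intermediate results live while the source registers are still needed. As a point of reference only, a scalar C model of that data movement might look like the sketch below; the vec4 type and the function name are hypothetical and are not taken from the kernel sources.

    #include <stdint.h>

    /* Scalar stand-in for one SIMD register: four 32-bit lanes. */
    typedef struct { uint32_t lane[4]; } vec4;

    /*
     * Illustrative model only.  After the call, x0 holds lane 0 of each
     * original register, x1 holds lane 1, and so on: the 4x4 matrix of
     * 32-bit words is transposed.  The scratch arguments of the assembler
     * macros are unnecessary here, because a scalar model can simply use
     * a local temporary array.
     */
    static void transpose_4x4_model(vec4 *x0, vec4 *x1, vec4 *x2, vec4 *x3)
    {
            vec4 *row[4] = { x0, x1, x2, x3 };
            vec4 out[4];
            int i, j;

            for (i = 0; i < 4; i++)
                    for (j = 0; j < 4; j++)
                            out[i].lane[j] = row[j]->lane[i];

            for (i = 0; i < 4; i++)
                    *row[i] = out[i];
    }

The repeated call pattern visible in the aria and camellia entries (first within each group of four registers a0..a3, b0..b3, c0..c3, d0..d3, then across the groups) composes such 4x4 steps into a larger reordering; the model above covers only a single step.
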
/linux/arch/arm64/crypto/

sm4-neon-core.S
     40  #define transpose_4x4(s0, s1, s2, s3) \   (macro definition)
    308  transpose_4x4(v0, v1, v2, v3)
    386  transpose_4x4(v4, v5, v6, v7)
    414  transpose_4x4(v4, v5, v6, v7)
    513  transpose_4x4(v0, v1, v2, v3)
    542  transpose_4x4(v0, v1, v2, v3)

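The arm64 definition above differs in its signature: transpose_4x4(s0, s1, s2, s3) takes only the four data registers and no scratch arguments. For the same end result in scalar terms, a transpose can also be done in place by swapping each element above the diagonal with its mirrored counterpart; the sketch below is purely illustrative (hypothetical helper name, not the NEON instruction sequence used in sm4-neon-core.S).

    #include <stdint.h>

    /*
     * Illustrative in-place 4x4 transpose of 32-bit words: element (i, j)
     * is exchanged with element (j, i) for every pair above the diagonal.
     */
    static void transpose_4x4_inplace(uint32_t m[4][4])
    {
            int i, j;

            for (i = 0; i < 4; i++) {
                    for (j = i + 1; j < 4; j++) {
                            uint32_t tmp = m[i][j];

                            m[i][j] = m[j][i];
                            m[j][i] = tmp;
                    }
            }
    }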