| /linux/arch/x86/crypto/ |
| aria-aesni-avx-asm_64.S |
    172  x4, x5, x6, x7, \  [argument]
    183  vmovdqu (7 * 16)(rio), x7; \
    195  x4, x5, x6, x7, \  [argument]
    200  x4, x5, x6, x7, \
    212  vmovdqu x7, 7 * 16(mem_ab); \
    223  x4, x5, x6, x7, \  [argument]
    234  vmovdqu x7, 7 * 16(mem); \
    245  x4, x5, x6, x7, \  [argument]
    254  vmovdqu x7, ((idx + 7) * 16)(mem_tmp);
    257  x4, x5, x6, x7, \  [argument]
    [all …]
|
| aria-aesni-avx2-asm_64.S |
    188  x4, x5, x6, x7, \  [argument]
    199  vmovdqu (7 * 32)(rio), x7; \
    211  x4, x5, x6, x7, \  [argument]
    216  x4, x5, x6, x7, \
    228  vmovdqu x7, 7 * 32(mem_ab); \
    239  x4, x5, x6, x7, \  [argument]
    250  vmovdqu x7, 7 * 32(mem); \
    261  x4, x5, x6, x7, \  [argument]
    270  vmovdqu x7, ((idx + 7) * 32)(mem_tmp);
    273  x4, x5, x6, x7, \  [argument]
    [all …]
|
| glue_helper-asm-avx.S |
      8  #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  [argument]
     16  vmovdqu (7*16)(src), x7;
     18  #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  [argument]
     26  vmovdqu x7, (7*16)(dst);
     28  #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \  [argument]
     35  vpxor (6*16)(src), x7, x7; \
     36  store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
|
| glue_helper-asm-avx2.S |
      8  #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \  [argument]
     16  vmovdqu (7*32)(src), x7;
     18  #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \  [argument]
     26  vmovdqu x7, (7*32)(dst);
     28  #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \  [argument]
     38  vpxor (6*32+16)(src), x7, x7; \
     39  store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
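The load_8way/store_8way and load_16way/store_16way macros above move eight (AVX) or sixteen (AVX2) consecutive cipher blocks between memory and xmm/ymm registers, and the store_cbc variants XOR the preceding ciphertext block from the source buffer into each result before storing, which is how CBC decryption is usually parallelised. A minimal C sketch of that store-side pattern follows; decrypt_8_blocks() is a hypothetical stand-in for an 8-way block decryption, not a kernel function.

/*
 * Illustrative sketch only, not the kernel macro: parallel CBC decryption
 * of 8 blocks, mirroring what store_cbc_8way expresses with vpxor/vmovdqu.
 */
#include <stddef.h>
#include <stdint.h>

#define BLOCK 16

void decrypt_8_blocks(uint8_t out[8][BLOCK], const uint8_t in[8][BLOCK]);  /* assumed helper */

static void cbc_decrypt_8way(uint8_t *dst, const uint8_t *src, const uint8_t *iv)
{
	uint8_t tmp[8][BLOCK];

	decrypt_8_blocks(tmp, (const uint8_t (*)[BLOCK])src);

	for (size_t i = 0; i < 8; i++) {
		/* previous ciphertext block: the IV for block 0, src block i-1 after that */
		const uint8_t *prev = (i == 0) ? iv : src + (i - 1) * BLOCK;

		for (size_t j = 0; j < BLOCK; j++)
			dst[i * BLOCK + j] = tmp[i][j] ^ prev[j];
	}
}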
|
| aria-gfni-avx512-asm_64.S |
    172  x4, x5, x6, x7, \  [argument]
    183  vmovdqu64 (7 * 64)(rio), x7; \
    195  x4, x5, x6, x7, \  [argument]
    200  x4, x5, x6, x7, \
    212  vmovdqu64 x7, 7 * 64(mem_ab); \
    223  x4, x5, x6, x7, \  [argument]
    234  vmovdqu64 x7, 7 * 64(mem); \
    245  x4, x5, x6, x7, \  [argument]
    254  vmovdqu64 x7, ((idx + 7) * 64)(mem_tmp);
    257  x4, x5, x6, x7, \  [argument]
    [all …]
|
| /linux/drivers/gpu/drm/vmwgfx/ |
| vmwgfx_msg_arm64.h |
     57  register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |  [in vmware_hypercall1()]
     65  : "r" (x1), "r" (x2), "r" (x3), "r" (x7)  [in vmware_hypercall1()]
     82  register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |  [in vmware_hypercall5()]
     90  : "r" (x1), "r" (x3), "r" (x4), "r" (x5), "r" (x7)  [in vmware_hypercall5()]
    108  register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |  [in vmware_hypercall6()]
    116  : "r" (x1), "r" (x7)  [in vmware_hypercall6()]
    138  register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |  [in vmware_hypercall7()]
    146  : "r" (x4), "r" (x5), "r" (x7)  [in vmware_hypercall7()]
    168  register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |  [in vmware_hypercall_hb()]
    177  "r" (x6), "r" (x7)  [in vmware_hypercall_hb()]
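The vmwgfx_msg_arm64.h hits show GCC explicit register variables pinning each hypercall parameter to a fixed AArch64 register, with x7 carrying the X86_IO_MAGIC transport word, before a single inline-asm statement issues the trap. A minimal sketch of that register-binding pattern; the trap instruction, magic value and argument layout here are placeholders, not the vmwgfx ABI.

#include <stdint.h>

#define EXAMPLE_MAGIC 0x12345678ULL                /* placeholder, not X86_IO_MAGIC */

static inline uint64_t example_hypercall1(uint64_t cmd, uint64_t arg)
{
	/* explicit register variables: the compiler must keep these values
	 * in exactly these registers across the asm statement */
	register uint64_t x0 asm("x0") = cmd;
	register uint64_t x1 asm("x1") = arg;
	register uint64_t x7 asm("x7") = EXAMPLE_MAGIC << 32;  /* transport/magic word */

	asm volatile("hvc #0"                          /* illustrative trap */
		     : "+r" (x0)                       /* result comes back in x0 */
		     : "r" (x1), "r" (x7)
		     : "memory");
	return x0;
}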
|
| /linux/arch/arm64/crypto/ |
| sm4-ce-ccm-core.S |
     33  mov vctr.d[0], x7; \
     36  adc x7, x7, xzr;
    119  ldp x7, x8, [x3]
    120  rev x7, x7
    211  rev x7, x7
    213  stp x7, x8, [x3]
    231  ldp x7, x8, [x3]
    232  rev x7, x7
    323  rev x7, x7
    325  stp x7, x8, [x3]
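The ldp/rev pairs and the adc into x7 in sm4-ce-ccm-core.S are the usual handling of a 128-bit big-endian counter block kept in two general-purpose registers: load both halves, byte-swap to host order, increment as one 128-bit value with carry, and swap back when storing. A hedged C equivalent; which word holds the low half is an assumption made only for illustration.

#include <stdint.h>

/*
 * Sketch of a 128-bit big-endian counter increment, as suggested by the
 * ldp/rev/adc sequences above.  __builtin_bswap64 stands in for rev.
 */
static void ctr128_be_inc(uint64_t ctr[2])      /* ctr[] holds big-endian halves */
{
	uint64_t hi = __builtin_bswap64(ctr[0]);
	uint64_t lo = __builtin_bswap64(ctr[1]);   /* assumed low half */

	if (++lo == 0)          /* carry into the high half, like "adc x7, x7, xzr" */
		hi++;

	ctr[0] = __builtin_bswap64(hi);
	ctr[1] = __builtin_bswap64(lo);
}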
|
| aes-neonbs-core.S |
    114  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    127  eor \t1, \x5, \x7
    128  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    135  eor \x7, \x7, \t1
    138  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    141  eor \t0, \x5, \x7
    143  eor \s1, \x7, \x6
    165  and \s0, \x7, \x3
    186  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    228  .macro add_round_key, x0, x1, x2, x3, x4, x5, x6, x7  [argument]
    [all …]
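mul_gf16_2 and inv_gf256 belong to the bit-sliced AES implementation: they compute the S-box's GF(2^8) inversion as a tower of GF(2^4)/GF(2^2) operations, with x0..x7 each holding one bit position of many AES states at once. As a scalar reference point only (not the bit-sliced tower the macros use), the same inverse can be written per byte by exponentiation, since a^254 = a^-1 in GF(2^8):

#include <stdint.h>

/* GF(2^8) multiplication with the AES reduction polynomial 0x11b */
static uint8_t gf256_mul(uint8_t a, uint8_t b)
{
	uint8_t r = 0;

	while (b) {
		if (b & 1)
			r ^= a;
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
		b >>= 1;
	}
	return r;
}

/* a^254 by square-and-multiply; maps 0 to 0 as AES requires */
static uint8_t gf256_inv(uint8_t a)
{
	uint8_t r = 1;

	for (int i = 7; i >= 0; i--) {
		r = gf256_mul(r, r);
		if ((254 >> i) & 1)
			r = gf256_mul(r, a);
	}
	return r;
}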
|
| aes-modes.S |
    131  enc_prepare w8, x6, x7
    132  encrypt_block v4, w8, x6, x7, w9
    175  enc_prepare w8, x6, x7
    176  encrypt_block cbciv, w8, x6, x7, w9
    379  sub x7, CTR, #MAX_STRIDE - 2
    384  eor x7, x7, IV_PART
    389  mov v1.d[0], x7
    436  2: rev x7, IV_PART
    437  ins vctr.d[1], x7
    438  sub x7, IV_PART, #MAX_STRIDE - 1
    [all …]
|
| sm4-ce-core.S |
    333  add x7, x6, #32
    335  sub x7, x7, x5
    337  ld1 {v4.16b}, [x7]
    377  add x7, x6, #32
    379  sub x7, x7, x5
    381  ld1 {v4.16b}, [x7]
    416  ldp x7, x8, [x3]
    417  rev x7, x7
    426  mov vctr.d[0], x7; \
    429  adc x7, x7, xzr;
    [all …]
|
| /linux/lib/crc/arm64/ |
| crc32-core.S |
     61  and x7, x2, #0x1f
     63  cbz x7, 32f // multiple of 32 bytes
     65  and x8, x7, #0xf
     68  add x1, x1, x7
     72  tst x7, #8
     76  tst x7, #4
     81  tst x7, #2
     86  tst x7, #1
     89  tst x7, #16
    162  add x7, in, x3, lsl #4 // x7 := in + 16 * x3
    [all …]
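The `and x7, x2, #0x1f` followed by the chain of `tst x7, #…` checks is the familiar tail-handling trick: after the 32-byte main loop, each bit of the residual length selects at most one fixed-size load. A rough C rendering of that idea (the asm's actual ordering and 16-byte handling differ); the crc32_u8/u16/u32/u64 helpers are hypothetical stand-ins for the hardware CRC32 instructions.

#include <stddef.h>
#include <stdint.h>

/* hypothetical per-width helpers standing in for the CRC32 instructions */
uint32_t crc32_u8(uint32_t crc, const uint8_t *p);
uint32_t crc32_u16(uint32_t crc, const uint8_t *p);
uint32_t crc32_u32(uint32_t crc, const uint8_t *p);
uint32_t crc32_u64(uint32_t crc, const uint8_t *p);

static uint32_t crc32_tail(uint32_t crc, const uint8_t *p, size_t len)
{
	size_t tail = len & 0x1f;       /* bytes left over after 32-byte blocks */

	if (tail & 16) { crc = crc32_u64(crc, p); crc = crc32_u64(crc, p + 8); p += 16; }
	if (tail & 8)  { crc = crc32_u64(crc, p); p += 8; }
	if (tail & 4)  { crc = crc32_u32(crc, p); p += 4; }
	if (tail & 2)  { crc = crc32_u16(crc, p); p += 2; }
	if (tail & 1)  { crc = crc32_u8(crc, p); }
	return crc;
}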
|
| /linux/arch/arm/crypto/ |
| aes-neonbs-core.S |
    168  .macro mul_gf16_2, x0, x1, x2, x3, x4, x5, x6, x7, \
    181  veor \t1, \x5, \x7
    182  mul_gf4_n_gf4 \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
    189  veor \x7, \x7, \t1
    192  .macro inv_gf256, x0, x1, x2, x3, x4, x5, x6, x7, \
    195  veor \t0, \x5, \x7
    197  veor \s1, \x7, \x6
    219  vand \s0, \x7, \x3
    240  mul_gf16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \
    260  .macro shift_rows, x0, x1, x2, x3, x4, x5, x6, x7, \
    [all …]
|
| /linux/lib/crypto/ |
| curve25519-fiat32.c |
    236  { const u32 x7 = in1[1];  [in fe_add_impl(), local]
    249  out[1] = (x7 + x25);  [in fe_add_impl()]
    279  { const u32 x7 = in1[1];  [in fe_sub_impl(), local]
    292  out[1] = ((0x3fffffe + x7) - x25);  [in fe_sub_impl()]
    322  { const u32 x7 = in1[1];  [in fe_mul_impl(), local]
    335  { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));  [in fe_mul_impl()]
    336  { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));  [in fe_mul_impl()]
    337  { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));  [in fe_mul_impl()]
    338  { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13))…  [in fe_mul_impl()]
    339  { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u6…  [in fe_mul_impl()]
    [all …]
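fe_add_impl, fe_sub_impl and fe_mul_impl use the fiat-crypto unsaturated-limb representation: additions are plain limbwise sums, subtraction first adds a bias representing a multiple of the prime (the 0x3fffffe-style constants) so no limb underflows, and multiplication widens every u32×u32 product into a u64 accumulator. A two-limb toy fragment showing those three moves; it is illustrative only and not a usable field-element type.

#include <stdint.h>

static void limb_add(uint32_t out[2], const uint32_t a[2], const uint32_t b[2])
{
	/* carries are left implicit ("unsaturated" limbs), as in out[1] = x7 + x25 */
	out[0] = a[0] + b[0];
	out[1] = a[1] + b[1];
}

static void limb_sub_biased(uint32_t out[2], const uint32_t a[2], const uint32_t b[2],
			    const uint32_t bias[2])
{
	/* add a per-limb bias first so the subtraction cannot go negative */
	out[0] = (bias[0] + a[0]) - b[0];
	out[1] = (bias[1] + a[1]) - b[1];
}

static uint64_t mul_column(const uint32_t a[2], const uint32_t b[2])
{
	/* one schoolbook column, widened to 64 bits before accumulation */
	return (uint64_t)a[0] * b[1] + (uint64_t)a[1] * b[0];
}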
|
| /linux/arch/arm64/lib/ |
| mte.S |
    140  multitag_transfer_size x7, x5
    146  add x0, x0, x7
    164  multitag_transfer_size x7, x5
    169  add x0, x0, x7
|
| kasan_sw_tags.S |
     45  stp x6, x7, [sp, #8 * 6]
     60  ldp x6, x7, [sp, #8 * 6]
|
| /linux/arch/arm64/kernel/ |
| sleep.S |
     79  mrs x7, mpidr_el1
     88  compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
    127  compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
    131  ldr x0, [x0, x7, lsl #3]
|
| smccc-call.S |
     58  ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
     74  stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
|
| /linux/arch/riscv/boot/dts/tenstorrent/ |
| blackhole.dtsi |
     89  interrupts-extended = <&cpu0_intc 0x3>, <&cpu0_intc 0x7>,
     90  <&cpu1_intc 0x3>, <&cpu1_intc 0x7>,
     91  <&cpu2_intc 0x3>, <&cpu2_intc 0x7>,
     92  <&cpu3_intc 0x3>, <&cpu3_intc 0x7>;
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| host.S |
     29  stp x6, x7, [x0, #CPU_XREG_OFFSET(6)]
     81  ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
    140  mrs x7, hpfar_el2
    273  ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
    285  stp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
|
| /linux/Documentation/ABI/testing/ |
| sysfs-bus-event_source-devices-hv_24x7 |
     30  Provides access to the binary "24x7 catalog" provided by the
     34  https://raw.githubusercontent.com/jmesmon/catalog-24x7/master/hv-24x7-catalog.h
     47  Exposes the "version" field of the 24x7 catalog. This is also
     76  HCALLs to retrieve hv-24x7 pmu event counter data.
|
| /linux/arch/powerpc/boot/dts/fsl/ |
| mpc8568mds.dts |
     89  reg = <0x7>;
    131  0x4 0x7 0x1 0x0 0x2 0x0 /* TxD3 */
    159  0x5 0x7 0x1 0x0 0x2 0x0 /* TxD3 */
    230  reg = <0x7>;
    255  interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
    260  0x9000 0x0 0x0 0x3 &mpic 0x7 0x1 0 0
    265  0x9800 0x0 0x0 0x2 &mpic 0x7 0x1 0 0
|
| mpc8641si-post.dtsi |
    109  interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
    136  interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
    141  0x0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
|
| /linux/arch/arm64/boot/dts/qcom/ |
| pm8450.dtsi |
     37  reg = <0x7 SPMI_USID>;
     44  interrupts = <0x7 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
|
| pm8550b.dtsi |
     37  reg = <0x7 SPMI_USID>;
     44  interrupts = <0x7 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
|
| /linux/lib/crypto/x86/ |
| chacha-ssse3-x86_64.S |
    302  # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
    330  # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
    356  # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
    384  # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
    404  # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
    431  # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
    458  # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
    485  # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
    530  # x7[0-3] += s1[3]
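The interleaved comments above are the standard ChaCha quarter-round carried out across four independent words per SSE register; in the first column group x3 plays the "a" role, x7 "b", x11 "c" and x15 "d". A scalar C reference of one quarter-round for comparison:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* one ChaCha quarter-round, matching the add/xor/rotate pattern in the comments */
static void chacha_quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d = rotl32(*d ^ *a, 16);
	*c += *d; *b = rotl32(*b ^ *c, 12);
	*a += *b; *d = rotl32(*d ^ *a, 8);
	*c += *d; *b = rotl32(*b ^ *c, 7);
}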
|