/linux/lib/raid6/

recov_avx512.c (all matches in raid6_2data_recov_avx512()):
     64:  asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
     77:      : "m" (q[0]), "m" (q[64]), "m" (p[0]),
     78:        "m" (p[64]), "m" (dq[0]), "m" (dq[64]),
     79:        "m" (dp[0]), "m" (dp[64]));
     83:   * 9 = dq[64] ^ q[64]
     85:   * 8 = dp[64] ^ p[64]
     91:      : "m" (qmul[0]), "m" (qmul[16]));
    110:   * 15 = qx[64]
    128:      : "m" (pbmul[0]), "m" (pbmul[16]));
    132:   * 13 = pbmul[px[64]]
    [additional matches not shown]

avx512.c:
  in raid6_avx5121_gen_syndrome():
     59:      : "m" (raid6_avx512_constants.x1d[0]));
     61:  for (d = 0; d < bytes; d += 64) {
     68:      : "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
     80:      : "m" (dptr[z][d]));
     94:      : "m" (p[d]), "m" (q[d]));
  in raid6_avx5121_xor_syndrome():
    115:      : : "m" (raid6_avx512_constants.x1d[0]));
    117:  for (d = 0 ; d < bytes ; d += 64) {
    122:      : "m" (dptr[z0][d]), "m" (p[d]));
    135:      : "m" (dptr[z][d]));
    153:      : "m" (q[d]), "m" (p[d]));
    [additional matches not shown]

recov_loongarch_simd.c (all matches in raid6_2data_recov_lsx()):
     69:  asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
     70:  asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
     71:  asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
     72:  asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
     76:  asm volatile("vld $vr4, %0" : : "m" (q[0]));
     77:  asm volatile("vld $vr5, %0" : : "m" (q[16]));
     78:  asm volatile("vld $vr6, %0" : : "m" (q[32]));
     79:  asm volatile("vld $vr7, %0" : : "m" (q[48]));
     81:  asm volatile("vld $vr8, %0" : : "m" (dq[0]));
     82:  asm volatile("vld $vr9, %0" : : "m" (dq[16]));
    [additional matches not shown]

recov_avx2.c (all matches in raid6_2data_recov_avx2()):
     53:  asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
     57:  asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
     58:  asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
     59:  asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
     60:  asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
     61:  asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
     62:  asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
     63:  asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
     64:  asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));
     73:  asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
    [additional matches not shown]

avx2.c:
  in raid6_avx21_gen_syndrome():
     46:  asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
     50:  asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
     51:  asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
     52:  asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
     54:  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
     56:  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
     63:  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
     72:  asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
     74:  asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
  in raid6_avx21_xor_syndrome():
     95:  asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
    [additional matches not shown]

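All of the raid6/ hits above come from the SIMD P/Q syndrome and two-data-disk recovery kernels; the variants differ only in instruction set (AVX-512, AVX2, LoongArch LSX). The x1d constant loaded at the top of the AVX loops is the reduction mask for multiplying by the generator {02} in GF(2^8) with polynomial 0x11d. As a reference for what those per-64-byte vector loops compute, here is a minimal scalar sketch; it is illustrative only (the function name, the disk-pointer layout and the gf2_mul2() helper are assumptions of this example), not the kernel's implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Multiply a GF(2^8) element by {02}, reducing with polynomial 0x11d. */
static uint8_t gf2_mul2(uint8_t x)
{
	return (uint8_t)((x << 1) ^ ((x & 0x80) ? 0x1d : 0));
}

/*
 * Scalar P/Q syndrome generation: P is the plain XOR of all data disks,
 * Q is the GF(2^8) sum of g^z * d_z evaluated by Horner's rule.  The SIMD
 * loops above do exactly this, 32 or 64 bytes at a time.
 * Assumed layout: dptr[0..disks-3] are data, dptr[disks-2] = P, dptr[disks-1] = Q.
 */
static void raid6_gen_syndrome_ref(int disks, size_t bytes, uint8_t **dptr)
{
	uint8_t *p = dptr[disks - 2];
	uint8_t *q = dptr[disks - 1];
	int z0 = disks - 3;		/* highest data disk */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d];
		uint8_t wq = wp;

		for (int z = z0 - 1; z >= 0; z--) {
			wp ^= dptr[z][d];
			wq = gf2_mul2(wq) ^ dptr[z][d];
		}
		p[d] = wp;
		q[d] = wq;
	}
}

int main(void)
{
	uint8_t d0[4] = {1, 2, 3, 4}, d1[4] = {5, 6, 7, 8}, d2[4] = {9, 10, 11, 12};
	uint8_t p[4], q[4];
	uint8_t *ptrs[5] = {d0, d1, d2, p, q};

	raid6_gen_syndrome_ref(5, 4, ptrs);
	printf("P[0]=%02x Q[0]=%02x\n", p[0], q[0]);	/* P[0]=0d Q[0]=2f */
	return 0;
}

The AVX paths vectorize gf2_mul2() with a signed compare against zero to build the high-bit mask, an add to shift left, and an AND with the broadcast 0x1d constant, then stream P and Q out with non-temporal vmovntdq stores (lines 72 and 74 of avx2.c above).
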
/linux/arch/m68k/fpsp040/

stwotox.S:
     15:  |	64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
     29:  |	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore
     31:  |	N = 64(M + M') + j, j = 0,1,2,...,63.
     34:  |	2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
     42:  |	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set
     44:  |	N = 64(M + M') + j, j = 0,1,2,...,63.
     48:  |	where L1, L2 are the leading and trailing parts of log_10(2)/64
     50:  |	10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
     54:  |	1. Fetch 2**(j/64) from table as Fact1 and Fact2.
     57:  |	Fact1 := 2**(M) * Fact1
    [additional matches not shown]

setox.S:
     20:  |	The returned result is within 0.85 ulps in 64 significant bit, i.e.
     79:  |	Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ).
     81:  |	2.2 N := round-to-nearest-integer( X * 64/log2 ).
     82:  |	2.3 Calculate J = N mod 64; so J = 0,1,2,..., or 63.
     83:  |	2.4 Calculate M = (N - J)/64; so N = 64M + J.
     84:  |	2.5 Calculate the address of the stored value of 2^(J/64).
     85:  |	2.6 Create the value Scale = 2^M.
     93:  |	constant := single-precision( 64/log 2 ).
     99:  |	Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).
    103:  |	Step 3. Calculate X - N*log2/64.
    [additional matches not shown]

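Both FPSP routines use the same table-driven argument reduction: the input is scaled by 64/log2 (or by 64*log2(10)), rounded to an integer N, split into a power of two M and a table index J for 2**(J/64), and the small remainder r is handled by a short polynomial. A rough C sketch of that reduction for exp(x), assuming a runtime-built 2^(j/64) table and a low-degree polynomial (the FPSP code uses stored split constants and different coefficients; the function name is made up):

#include <math.h>
#include <stdio.h>

#define LOG2 0.69314718055994530942

/*
 * Sketch of the setox.S reduction: N = round(x * 64/log2), J = N mod 64,
 * M = (N - J)/64, r = x - N*log2/64, then exp(x) = 2^M * 2^(J/64) * e^r.
 */
static double exp_by_table(double x)
{
	static double tbl[64];
	static int init;
	int j, m, n;
	double r, p;

	if (!init) {		/* 2^(j/64) table; the FPSP stores each entry split in two parts */
		for (j = 0; j < 64; j++)
			tbl[j] = exp2(j / 64.0);
		init = 1;
	}

	n = (int)lround(x * (64.0 / LOG2));
	j = ((n % 64) + 64) % 64;	/* J = N mod 64, kept in 0..63 */
	m = (n - j) / 64;		/* M = (N - J)/64 */
	r = x - n * (LOG2 / 64.0);

	/* e^r by a short polynomial; |r| <= log2/128, so a few terms suffice here */
	p = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0 + r * (1.0 / 24.0))));

	return ldexp(tbl[j] * p, m);	/* scale by 2^M */
}

int main(void)
{
	printf("%.12g vs %.12g\n", exp_by_table(1.0), exp(1.0));
	return 0;
}
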
/linux/arch/riscv/boot/dts/sophgo/

sg2042-cpus.dtsi:
    261:  riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
    265:  i-cache-block-size = <64>;
    268:  d-cache-block-size = <64>;
    286:  riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
    290:  i-cache-block-size = <64>;
    293:  d-cache-block-size = <64>;
    311:  riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
    315:  i-cache-block-size = <64>;
    318:  d-cache-block-size = <64>;
    336:  riscv,isa-extensions = "i", "m", "a", "f", "d", "c",
    [additional matches not shown]

/linux/Documentation/scsi/

aic7xxx.rst:
     35:  aic7891    20    PCI/64      40MHz   16Bit   16   3 4 5 6 7 8
     36:  aic7892    20    PCI/64-66   80MHz   16Bit   16   3 4 5 6 7 8
     40:  aic7897    20    PCI/64      40MHz   16Bit   16   2 3 4 5 6 7 8
     41:  aic7899    20    PCI/64-66   80MHz   16Bit   16   2 3 4 5 6 7 8
     49:  4. 64 Byte SCB Support - Allows disconnected, untagged request table
     63:  AHA-274X[A]     aic7770   EISA     SE-50M       SE-HD50F
     65:                                     SE-50M
     66:  AHA-274X[A]T    aic7770   EISA     2 X SE-50M   SE-HD50F
     67:  AHA-2842        aic7770   VL       SE-50M       SE-HD50F
     68:  AHA-2940AU      aic7860   PCI/32   SE-50M       SE-HD50F
    [additional matches not shown]

/linux/include/asm-generic/

div64.h:
     29:  #if BITS_PER_LONG == 64
     82:  /* compute m = ((p << 64) + b - 1) / b */ \
     89:  /* test our ___m with res = m * x / (p << 64) */ \
    109:   * additional bit to represent m which would overflow \
    110:   * a 64-bit variable. \
    112:   * Instead we do m = p / b and n / b = (n * m + m) / p. \
    115:  /* Compute m = (p << 64) / b */ \
    120:   * Reduce m / p, and try to clear bit 31 of m when \
    152:   * Select the best way to do (m_bias + m * n) / (1 << 64). \
    164:   * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
    [additional matches not shown]

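The comments above describe replacing a division by a constant with a multiplication by a precomputed reciprocal and a shift. A minimal sketch of the same idea for 32-bit dividends (Granlund/Montgomery style); the names recip32_init()/recip32_div() and the use of the GCC/Clang __uint128_t extension are assumptions of this example, not the kernel's do_div() machinery:

#include <stdint.h>
#include <stdio.h>

struct recip32 {
	uint64_t m;	/* precomputed reciprocal */
	unsigned s;	/* post-shift */
};

/* Precompute m = floor(2^(32+l)/d) + 1 with l = ceil(log2 d). */
static struct recip32 recip32_init(uint32_t d)
{
	unsigned l = 0;

	while ((1ULL << l) < d)
		l++;
	return (struct recip32){
		.m = (uint64_t)((((__uint128_t)1 << (32 + l)) / d) + 1),
		.s = 32 + l,
	};
}

/* n / d becomes one 64x64->128 multiply and a shift. */
static uint32_t recip32_div(uint32_t n, struct recip32 r)
{
	return (uint32_t)(((__uint128_t)r.m * n) >> r.s);
}

int main(void)
{
	struct recip32 r = recip32_init(1000);

	for (uint32_t n = 0; n < 5000000; n += 12345)
		if (recip32_div(n, r) != n / 1000)
			printf("mismatch at %u\n", n);
	printf("%u\n", recip32_div(123456789u, r));	/* prints 123456 */
	return 0;
}

The kernel version additionally handles 64-bit dividends entirely in 64-bit registers and decides at compile time whether a bias term is needed, which is what the m_bias and __arch_xprod_64() lines above refer to.
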
/linux/lib/

siphash.c:
     15:  #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
  in __siphash_aligned():
     50:  u64 m;                                          (local)
     53:  m = le64_to_cpup(data);
     54:  v3 ^= m;
     57:  v0 ^= m;
     59:  #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
  in __siphash_unaligned():
     83:  u64 m;                                          (local)
     86:  m = get_unaligned_le64(data);
     87:  v3 ^= m;
     90:  v0 ^= m;
    [additional matches not shown]

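The matched lines are SipHash's per-word absorption step: each 64-bit little-endian word m is XORed into v3, the state is mixed, then m is XORed into v0. For reference, a from-scratch sketch of plain SipHash-2-4 built around that same pattern; this is not the kernel's siphash.c (no word-at-a-time tail handling, no kernel types), and the helper names are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROTL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))

#define SIPROUND do {							\
	v0 += v1; v1 = ROTL64(v1, 13); v1 ^= v0; v0 = ROTL64(v0, 32);	\
	v2 += v3; v3 = ROTL64(v3, 16); v3 ^= v2;			\
	v0 += v3; v3 = ROTL64(v3, 21); v3 ^= v0;			\
	v2 += v1; v1 = ROTL64(v1, 17); v1 ^= v2; v2 = ROTL64(v2, 32);	\
} while (0)

static uint64_t le64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v |= (uint64_t)p[i] << (8 * i);
	return v;
}

/* Textbook SipHash-2-4: two rounds per message word, four to finalize. */
static uint64_t siphash24(const void *data, size_t len, uint64_t k0, uint64_t k1)
{
	const uint8_t *in = data;
	size_t tail = len & 7;
	const uint8_t *end = in + (len - tail);
	uint64_t v0 = 0x736f6d6570736575ULL ^ k0;
	uint64_t v1 = 0x646f72616e646f6dULL ^ k1;
	uint64_t v2 = 0x6c7967656e657261ULL ^ k0;
	uint64_t v3 = 0x7465646279746573ULL ^ k1;
	uint64_t m, b = (uint64_t)len << 56;

	for (; in != end; in += 8) {
		m = le64(in);		/* same absorption pattern as the hits above */
		v3 ^= m;
		SIPROUND;
		SIPROUND;
		v0 ^= m;
	}
	for (size_t i = 0; i < tail; i++)	/* fold the leftover bytes into b */
		b |= (uint64_t)in[i] << (8 * i);
	v3 ^= b;
	SIPROUND;
	SIPROUND;
	v0 ^= b;
	v2 ^= 0xff;
	SIPROUND;
	SIPROUND;
	SIPROUND;
	SIPROUND;
	return v0 ^ v1 ^ v2 ^ v3;
}

int main(void)
{
	const char msg[] = "hello, world";

	printf("%016llx\n", (unsigned long long)siphash24(msg, strlen(msg), 1, 2));
	return 0;
}
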
/linux/lib/math/

int_sqrt.c:
  in int_sqrt():
     22:  unsigned long b, m, y = 0;                      (local)
     27:  m = 1UL << (__fls(x) & ~1UL);
     28:  while (m != 0) {
     29:          b = y + m;
     34:          y += m;
     36:          m >>= 2;
     43:  #if BITS_PER_LONG < 64
     45:   * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
     47:   * @x: 64bit integer of which to calculate the sqrt
  in int_sqrt64():
     51:  u64 b, m, y = 0;                                (local)
    [additional matches not shown]

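The int_sqrt() fragments above are the classic digit-by-digit (base-4) integer square root: m walks down two bits at a time while y accumulates the root. A self-contained version of the same loop, with the GCC/Clang __builtin_clzll() builtin standing in for the kernel's __fls():

#include <stdint.h>
#include <stdio.h>

/*
 * Digit-by-digit binary square root, the same shape as the int_sqrt()
 * fragments above: m is the current power of four, y the root so far.
 */
static uint64_t isqrt64(uint64_t x)
{
	uint64_t b, m, y = 0;

	if (x <= 1)
		return x;

	/* highest power of four not exceeding x */
	m = 1ULL << ((63 - __builtin_clzll(x)) & ~1u);

	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)isqrt64(0),
	       (unsigned long long)isqrt64(1000000),
	       (unsigned long long)isqrt64(~0ULL));	/* 0 1000 4294967295 */
	return 0;
}
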
/linux/arch/mips/loongson2ef/common/

pci.c (all matches in setup_pcimap()):
     37:   * CPU address space [256M,448M] is window for accessing pci space
     38:   * we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M]
     41:   * [<2G]         [384M,448M] [320M,384M] [0M,64M]
     49:   * PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M]
     51:  LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */
     52:  /* size: 256M, burst transmission, pre-fetch enable, 64bit */

/linux/arch/x86/boot/

cpucheck.c:
     11:   * present. This code should be compilable as 16-, 32- or 64-bit
     63:  cpu_vendor[2] == A32('c', 'A', 'M', 'D');       (in is_amd())
     77:  cpu_vendor[2] == A32('M', 'x', '8', '6');       (in is_transmeta())
    107:   * level. x86-64 is considered level 64 for this purpose.
    125:  cpu.level = 64;                                 (in check_cpu())
    134:  struct msr m;                                   (in check_cpu(), local)
    136:  boot_rdmsr(MSR_K7_HWCR, &m);                    (in check_cpu())
    137:  m.l &= ~(1 << 15);                              (in check_cpu())
    138:  boot_wrmsr(MSR_K7_HWCR, &m);                    (in check_cpu())
    148:  struct msr m;                                   (in check_cpu(), local)
    [additional matches not shown]

/linux/arch/parisc/kernel/

pacache.S:
     86:  pitlbe,m  %arg1(%sr1, %r28)   /* Last pitlbe and addr adjust */
    100:  pitlbe,m  %arg1(%sr1, %r28)   /* pitlbe for one loop */
    130:  pdtlbe,m  %arg1(%sr1, %r28)   /* Last pdtlbe and addr adjust */
    144:  pdtlbe,m  %arg1(%sr1, %r28)   /* pdtlbe for one loop */
    207:  fice,m    %arg1(%sr1, %arg0)  /* Last fice and addr adjust */
    216:  fice,m    %arg1(%sr1, %arg0)
    217:  fice,m    %arg1(%sr1, %arg0)
    218:  fice,m    %arg1(%sr1, %arg0)
    219:  fice,m    %arg1(%sr1, %arg0)
    220:  fice,m    %arg1(%sr1, %arg0)
    [additional matches not shown]

/linux/drivers/mfd/

mcp-sa11x0.c:
     35:  #define MCCR0(m)  ((m)->base0 + 0x00)           (argument)
     36:  #define MCDR0(m)  ((m)->base0 + 0x08)           (argument)
     37:  #define MCDR1(m)  ((m)->base0 + 0x0c)           (argument)
     38:  #define MCDR2(m)  ((m)->base0 + 0x10)           (argument)
     39:  #define MCSR(m)   ((m)->base0 + 0x18)           (argument)
     40:  #define MCCR1(m)  ((m)->base1 + 0x00)           (argument)
  in mcp_sa11x0_set_telecom_divisor():
     47:  struct mcp_sa11x0 *m = priv(mcp);               (local)
     51:  m->mccr0 &= ~0x00007f00;
     52:  m->mccr0 |= divisor << 8;
     53:  writel_relaxed(m->mccr0, MCCR0(m));
    [additional matches not shown]

/linux/arch/mips/math-emu/

ieee754sp.h:
     36:  /* 64 bit right shift with rounding */
     38:  (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
     44:  #define XSPSRS1(m) \                            (argument)
     45:  ((m >> 1) | (m & 1))
     54:  #define SPDNORMx(m,e) \                         (argument)
     55:  while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
     60:  static inline union ieee754sp buildsp(int s, int bx, unsigned int m)   (in buildsp(), argument)
     67:  assert(((m) >> SP_FBITS) == 0);                 (in buildsp())
     71:  r.mant = m;                                     (in buildsp())

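The macro at line 38 is a "sticky" right shift: every bit shifted out is ORed into bit 0 so that later rounding can still tell the discarded part was non-zero. A standalone sketch of the same idea (the function name is made up; an explicit rs == 0 case is added here so the helper is total):

#include <stdint.h>
#include <stdio.h>

/*
 * Right-shift v by rs bits, but OR the discarded bits into bit 0
 * (the "sticky" bit), as the macro above does.
 */
static uint64_t sticky_shift_right(uint64_t v, unsigned int rs)
{
	if (rs == 0)
		return v;
	if (rs >= 64)
		return v != 0;	/* everything shifted out: only stickiness remains */
	return (v >> rs) | ((v << (64 - rs)) != 0);
}

int main(void)
{
	/* 0x120 >> 4 discards 0x0; 0x121 >> 4 discards 0x1, which sets the sticky bit */
	printf("%llx %llx\n",
	       (unsigned long long)sticky_shift_right(0x120, 4),
	       (unsigned long long)sticky_shift_right(0x121, 4));	/* 12 13 */
	return 0;
}
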
/linux/drivers/gpu/drm/msm/disp/dpu1/

msm_media_info.h (rows of the plane-layout ASCII diagrams in the format comments):
    184:   * M M M M M M M M M M M M . .      ^           ^
    185:   * M M M M M M M M M M M M . .      |           |
    186:   * M M M M M M M M M M M M . .      Height      |
    187:   * M M M M M M M M M M M M . .      |           Meta_Y_Scanlines
    188:   * M M M M M M M M M M M M . .      |           |
    189:   * M M M M M M M M M M M M . .      |           |
    190:   * M M M M M M M M M M M M . .      |           |
    191:   * M M M M M M M M M M M M . .      V           |
    211:   * M M M M M M M M M M M M . .      ^
    212:   * M M M M M M M M M M M M . .      |
    [additional matches not shown]

/linux/Documentation/arch/powerpc/

kaslr-booke32.rst:
     24:  We will use the first 512M of the low memory to randomize the kernel
     25:  image. The memory will be split in 64M zones. We will use the lower 8
     26:  bit of the entropy to decide the index of the 64M zone. Then we chose a
     27:  16K aligned offset inside the 64M zone to put the kernel in::
     31:      |--> 64M <--|

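The quoted paragraph boils down to simple arithmetic: the low entropy bits pick one of the 512M/64M = 8 zones, and further bits pick a 16K-aligned slot inside that zone. A hedged sketch of that arithmetic (the helper name and the exact bit usage are assumptions of this example, not the kernel's KASLR code):

#include <stdint.h>
#include <stdio.h>

#define SZ_16K   (16 * 1024UL)
#define SZ_64M   (64 * 1024 * 1024UL)
#define SZ_512M  (512 * 1024 * 1024UL)

/*
 * Turn a random seed into a kernel load offset: low 8 bits select one of
 * the 512M/64M = 8 zones, further bits select a 16K-aligned slot inside it.
 */
static unsigned long kaslr_offset(uint64_t seed)
{
	unsigned long zones = SZ_512M / SZ_64M;		/* 8 */
	unsigned long slots = SZ_64M / SZ_16K;		/* 4096 slots per zone */
	unsigned long zone = (unsigned long)(seed & 0xff) % zones;
	unsigned long slot = (unsigned long)(seed >> 8) % slots;

	return zone * SZ_64M + slot * SZ_16K;
}

int main(void)
{
	printf("offset = 0x%lx\n", kaslr_offset(0x123456789abcdefULL));
	return 0;
}
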
/linux/arch/x86/include/asm/

syscall_wrapper.h:
     17:   * __x64_sys_*()          - 64-bit native syscall
     20:   * __x64_compat_sys_*()   - 64-bit X32 compat syscall
     23:   *          64-bit: RDI, RSI, RDX, R10, R8, R9
     48:   * cltq  <-- extend return value to 64-bit
     55:  /* Mapping of registers to parameters for syscalls on x86-64 and x32 */
     63:  #define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \            (argument)
     64:          SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
     65:  #define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \                (argument)
     66:          SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
     67:  #define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \                    (argument)
    [additional matches not shown]

/linux/arch/mips/include/asm/

cmpxchg.h:
     34:  #define __xchg_asm(ld, st, m, val) \                            (argument)
     36:          __typeof(*(m)) __ret; \
     52:          : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
     53:          : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
     59:          __ret = *m; \
     60:          *m = val; \
    112:  #define __cmpxchg_asm(ld, st, m, old, new) \                    (argument)
    114:          __typeof(*(m)) __ret; \
    132:          : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
    133:          : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
    [additional matches not shown]

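__xchg_asm() and __cmpxchg_asm() wrap MIPS ll/sc retry loops, with a plain load/store fallback for CPUs without ll/sc (the lines 59-60 hits). The same semantics can be expressed portably with the GCC/Clang __atomic builtins; a minimal user-space sketch, not the kernel macros:

#include <stdio.h>

/* Portable equivalents of xchg()/cmpxchg() using GCC/Clang __atomic builtins. */
static int my_xchg(int *m, int val)
{
	return __atomic_exchange_n(m, val, __ATOMIC_SEQ_CST);
}

static int my_cmpxchg(int *m, int old, int new_val)
{
	int expected = old;

	/* On failure, expected holds the value found; return it, like cmpxchg(). */
	__atomic_compare_exchange_n(m, &expected, new_val, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}

int main(void)
{
	int v = 1;

	printf("%d\n", my_xchg(&v, 2));		/* 1, v is now 2 */
	printf("%d\n", my_cmpxchg(&v, 2, 3));	/* 2, v is now 3 */
	printf("%d\n", my_cmpxchg(&v, 2, 4));	/* 3, v stays 3 */
	printf("%d\n", v);			/* 3 */
	return 0;
}
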
/linux/drivers/video/fbdev/core/

modedb.c (rows of the built-in modedb[] video mode table):
     55:  { NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
     63:  { NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0,
     79:  { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
     92:  { NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0,
     96:  { NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0,
    108:  { NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0,
    150:  { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
    171:  { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0,
    179:  { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
    184:  { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
    [additional matches not shown]

/linux/arch/powerpc/include/asm/

page.h:
     19:   * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
     20:   * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
     21:   * page size. When using 64K pages however, whether we are really supporting
     22:   * 64K pages in HW or not is irrelevant to those definitions.
     36:  #define HPAGE_SHIFT     22      /* 4M pages */
    136:   *      virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
    137:   *                      MODULO(_stext.run,256M)
    140:   *      ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
    158:   * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
    159:   * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
    [additional matches not shown]

/linux/Documentation/devicetree/bindings/memory-controllers/fsl/

fsl,imx-weim.yaml:
     69:      05       128M    0M      0M      0M
     70:      033      64M     64M     0M      0M
     71:      0113     64M     32M     32M     0M
     72:      01111    32M     32M     32M     32M

/linux/drivers/media/v4l2-core/

v4l2-compat-ioctl32.c:
      3:   * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
     13:   * These routines maintain argument size conversion between 32bit and 64bit
     33:   * came from the userspace call and a 64-bits struct, also allocated as
    274:          } m;                                    (member)
    303:          } m;                                    (member)
    327:          } m;                                    (member)
  in get_v4l2_plane32():
    339:  typeof(p64->m) m = {};                          (local)
    347:          m.mem_offset = plane32.m.mem_offset;
    350:          m.userptr = (unsigned long)compat_ptr(plane32.m.userptr);
    353:          m.fd = plane32.m.fd;
    [additional matches not shown]