| /linux/drivers/staging/rtl8723bs/hal/ |
| HalHWImg8723B_RF.c |
  361  "FCC", "20M", "CCK", "1T", "01", "32",
  362  "ETSI", "20M", "CCK", "1T", "01", "32",
  363  "MKK", "20M", "CCK", "1T", "01", "32",
  364  "FCC", "20M", "CCK", "1T", "02", "32",
  365  "ETSI", "20M", "CCK", "1T", "02", "32",
  366  "MKK", "20M", "CCK", "1T", "02", "32",
  367  "FCC", "20M", "CCK", "1T", "03", "32",
  368  "ETSI", "20M", "CCK", "1T", "03", "32",
  369  "MKK", "20M", "CCK", "1T", "03", "32",
  370  "FCC", "20M", "CCK", "1T", "04", "32",
  [all …]
|
| /linux/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/ |
| table.c |
  2898  "FCC", "2.4G", "20M", "CCK", "1T", "01", "36",
  2899  "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
  2900  "MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
  2901  "FCC", "2.4G", "20M", "CCK", "1T", "02", "36",
  2902  "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
  2903  "MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
  2904  "FCC", "2.4G", "20M", "CCK", "1T", "03", "36",
  2905  "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
  2906  "MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
  2907  "FCC", "2.4G", "20M", "CCK", "1T", "04", "36",
  [all …]
|
| /linux/lib/raid6/ |
| recov_avx2.c |
  53  asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));      in raid6_2data_recov_avx2()
  57  asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));          in raid6_2data_recov_avx2()
  58  asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));         in raid6_2data_recov_avx2()
  59  asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));          in raid6_2data_recov_avx2()
  60  asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));         in raid6_2data_recov_avx2()
  61  asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));   in raid6_2data_recov_avx2()
  62  asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));  in raid6_2data_recov_avx2()
  63  asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));   in raid6_2data_recov_avx2()
  64  asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));  in raid6_2data_recov_avx2()
  68  * 9 = dq[32] ^ q[32]                                        in raid6_2data_recov_avx2()
  [all …]
|
| avx2.c |
  21  } raid6_avx2_constants __aligned(32) = {
  46  asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));  in raid6_avx21_gen_syndrome()
  49  for (d = 0; d < bytes; d += 32) {                                         in raid6_avx21_gen_syndrome()
  50  asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));                     in raid6_avx21_gen_syndrome()
  51  asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d])); /* P[0] */       in raid6_avx21_gen_syndrome()
  52  asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));                   in raid6_avx21_gen_syndrome()
  54  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));                in raid6_avx21_gen_syndrome()
  56  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));                      in raid6_avx21_gen_syndrome()
  63  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));                   in raid6_avx21_gen_syndrome()
  72  asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));                         in raid6_avx21_gen_syndrome()
  [all …]
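For orientation, the AVX2 routine above vectorises RAID-6 P/Q syndrome generation: P is the plain XOR of every data block, while Q accumulates the blocks under GF(2^8) multiplication by the generator {02}, which is what the x1d constant implements (reduction by the 0x11d polynomial). A minimal scalar sketch of the same math; gf_mul2() and gen_syndrome_scalar() are illustrative names, not the kernel's SIMD code, and "disks" here counts only the data blocks:

#include <stddef.h>
#include <stdint.h>

/* Multiply a GF(2^8) element by the generator {02}, polynomial 0x11d. */
static uint8_t gf_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/*
 * Scalar P/Q syndrome over 'disks' data blocks of 'bytes' each, using
 * Horner's scheme: Q = (((D[n-1]*2) ^ D[n-2])*2 ^ ...) ^ D[0].
 */
static void gen_syndrome_scalar(int disks, size_t bytes,
				uint8_t **dptr, uint8_t *p, uint8_t *q)
{
	int z0 = disks - 1;	/* highest data disk */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d];
		uint8_t wq = wp;

		for (int z = z0 - 1; z >= 0; z--) {
			wp ^= dptr[z][d];		/* P: plain XOR */
			wq = gf_mul2(wq) ^ dptr[z][d];	/* Q: GF(2^8) Horner step */
		}
		p[d] = wp;
		q[d] = wq;
	}
}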
|
| recov_loongarch_simd.c |
  69  asm volatile("vld $vr20, %0" : : "m" (qmul[0]));    in raid6_2data_recov_lsx()
  70  asm volatile("vld $vr21, %0" : : "m" (qmul[16]));   in raid6_2data_recov_lsx()
  71  asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));   in raid6_2data_recov_lsx()
  72  asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));  in raid6_2data_recov_lsx()
  76  asm volatile("vld $vr4, %0" : : "m" (q[0]));        in raid6_2data_recov_lsx()
  77  asm volatile("vld $vr5, %0" : : "m" (q[16]));       in raid6_2data_recov_lsx()
  78  asm volatile("vld $vr6, %0" : : "m" (q[32]));       in raid6_2data_recov_lsx()
  79  asm volatile("vld $vr7, %0" : : "m" (q[48]));       in raid6_2data_recov_lsx()
  81  asm volatile("vld $vr8, %0" : : "m" (dq[0]));       in raid6_2data_recov_lsx()
  82  asm volatile("vld $vr9, %0" : : "m" (dq[16]));      in raid6_2data_recov_lsx()
  [all …]
|
| sse2.c |
  48   asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));  in raid6_sse21_gen_syndrome()
  52   asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));                   in raid6_sse21_gen_syndrome()
  53   asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */      in raid6_sse21_gen_syndrome()
  54   asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));                 in raid6_sse21_gen_syndrome()
  56   asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));               in raid6_sse21_gen_syndrome()
  58   asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));                    in raid6_sse21_gen_syndrome()
  66   asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));                  in raid6_sse21_gen_syndrome()
  76   asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));                        in raid6_sse21_gen_syndrome()
  78   asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));                        in raid6_sse21_gen_syndrome()
  100  asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));  in raid6_sse21_xor_syndrome()
  [all …]
|
| recov_ssse3.c |
  54  asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));      in raid6_2data_recov_ssse3()
  57  asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));     in raid6_2data_recov_ssse3()
  58  asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));   in raid6_2data_recov_ssse3()
  59  asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));  in raid6_2data_recov_ssse3()
  67  asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));        in raid6_2data_recov_ssse3()
  68  asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));       in raid6_2data_recov_ssse3()
  69  asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));        in raid6_2data_recov_ssse3()
  70  asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));       in raid6_2data_recov_ssse3()
  71  asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));         in raid6_2data_recov_ssse3()
  72  asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));        in raid6_2data_recov_ssse3()
  [all …]
|
| /linux/Documentation/scsi/ |
| aic7xxx.rst |
  27  aic7850    10   PCI/32   10MHz    8Bit    3
  28  aic7855    10   PCI/32   10MHz    8Bit    3
  29  aic7856    10   PCI/32   10MHz    8Bit    3
  30  aic7859    10   PCI/32   20MHz    8Bit    3
  31  aic7860    10   PCI/32   20MHz    8Bit    3
  32  aic7870    10   PCI/32   10MHz   16Bit   16
  33  aic7880    10   PCI/32   20MHz   16Bit   16
  34  aic7890    20   PCI/32   40MHz   16Bit   16   3 4 5 6 7 8
  37  aic7895    15   PCI/32   20MHz   16Bit   16   2 3 4 5
  38  aic7895C   15   PCI/32   20MHz   16Bit   16   2 3 4 5 8
  [all …]
|
| /linux/lib/ |
| siphash.c |
  50   u64 m;                                          in __siphash_aligned() local
  53   m = le64_to_cpup(data);                         in __siphash_aligned()
  54   v3 ^= m;                                        in __siphash_aligned()
  57   v0 ^= m;                                        in __siphash_aligned()
  67   case 5: b |= ((u64)end[4]) << 32; fallthrough;  in __siphash_aligned()
  83   u64 m;                                          in __siphash_unaligned() local
  86   m = get_unaligned_le64(data);                   in __siphash_unaligned()
  87   v3 ^= m;                                        in __siphash_unaligned()
  90   v0 ^= m;                                        in __siphash_unaligned()
  100  case 5: b |= ((u64)end[4]) << 32; fallthrough;  in __siphash_unaligned()
  [all …]
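The matched lines show SipHash's message-absorption pattern: each 64-bit little-endian word m is XORed into v3, the state is mixed (twice per word in SipHash-2-4), and m is then XORed into v0. A standalone sketch of that core permutation and absorption loop; sipround() and siphash_absorb() are illustrative names (the rotation constants are the published SipHash ones, but this is not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

#define ROTL64(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))

/* One SipRound over the four 64-bit state words. */
static void sipround(uint64_t v[4])
{
	v[0] += v[1]; v[1] = ROTL64(v[1], 13); v[1] ^= v[0]; v[0] = ROTL64(v[0], 32);
	v[2] += v[3]; v[3] = ROTL64(v[3], 16); v[3] ^= v[2];
	v[0] += v[3]; v[3] = ROTL64(v[3], 21); v[3] ^= v[0];
	v[2] += v[1]; v[1] = ROTL64(v[1], 17); v[1] ^= v[2]; v[2] = ROTL64(v[2], 32);
}

/* Absorb full 64-bit words, SipHash-2-4 style: two rounds per word. */
static void siphash_absorb(uint64_t v[4], const uint64_t *words, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t m = words[i];	/* caller supplies little-endian words */

		v[3] ^= m;
		sipround(v);
		sipround(v);
		v[0] ^= m;
	}
}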
|
| /linux/include/linux/ |
| fsl-diu-fb.h |
  35  #define MFB_SET_CHROMA_KEY  _IOW('M', 1, struct mfb_chroma_key)
  36  #define MFB_SET_BRIGHTNESS  _IOW('M', 3, __u8)
  37  #define MFB_SET_ALPHA       _IOW('M', 0, __u8)
  38  #define MFB_GET_ALPHA       _IOR('M', 0, __u8)
  39  #define MFB_SET_AOID        _IOW('M', 4, struct aoi_display_offset)
  40  #define MFB_GET_AOID        _IOR('M', 4, struct aoi_display_offset)
  41  #define MFB_SET_PIXFMT      _IOW('M', 8, __u32)
  42  #define MFB_GET_PIXFMT      _IOR('M', 8, __u32)
  50  #define MFB_SET_GAMMA       _IOW('M', 1, __u8)
  51  #define MFB_GET_GAMMA       _IOR('M', 1, __u8)
  [all …]
|
| reciprocal_div.h |
  24  u32 m;                                 member
  35  u32 t = (u32)(((u64)a * R.m) >> 32);   in reciprocal_divide()
  40  u32 m;                                 member
  47  * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
  75  * rvalue = reciprocal_value_adv(d, 32)
  78  * // floor(log2(d & (2^32 -d)))
  80  * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
  90  * t = (n * rvalue.m) >> 32;
  98  * result = ((u64)result * rvalue.m) >> 32;
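These lines are the core of the kernel's reciprocal-division trick: dividing by a runtime-constant u32 d becomes a 32x32->64 multiply by a precomputed magic value m plus two cheap shifts. A self-contained userspace sketch of the basic (non-_adv) variant, mirroring what reciprocal_value()/reciprocal_divide() compute; the struct and helper names here are illustrative, not the kernel API, and the brute-force check in main() is only a demonstration:

#include <assert.h>
#include <stdint.h>

struct recip {
	uint32_t m;
	uint8_t sh1, sh2;
};

/* Precompute the magic multiplier for dividing by d (requires d > 1). */
static struct recip recip_value(uint32_t d)
{
	struct recip R;
	int l = 32 - __builtin_clz(d - 1);	/* ceil(log2(d)) */
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l > 1 ? 1 : l;	/* min(l, 1) */
	R.sh2 = l > 1 ? l - 1 : 0;	/* max(l - 1, 0) */
	return R;
}

/* a / d without a divide instruction: multiply-high, then two shifts. */
static uint32_t recip_divide(uint32_t a, struct recip R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	for (uint32_t d = 2; d < 1000; d++) {
		struct recip R = recip_value(d);

		for (uint32_t a = 0; a < 100000; a += 7)
			assert(recip_divide(a, R) == a / d);
	}
	return 0;
}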
|
| /linux/arch/x86/lib/ |
| csum-partial_64.c |
  16  return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);                                in csum_finalize_sum()
  19  static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])    in update_csum_40b()
  28  :"m" (m[0]), "m" (m[1]), "m" (m[2]),                                                        in update_csum_40b()
  29  "m" (m[3]), "m" (m[4]));                                                                    in update_csum_40b()
  35  * Returns a 32bit checksum.
  75  if (len & 32) {                                                                             in csum_partial()
  82  : [src] "r"(buff), "m"(*(const char(*)[32])buff));                                          in csum_partial()
  83  buff += 32;                                                                                 in csum_partial()
  90  : [src] "r"(buff), "m"(*(const char(*)[16])buff));                                          in csum_partial()
  97  : [src] "r"(buff), "m"(*(const char(*)[8])buff));                                           in csum_partial()
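csum_finalize_sum() folds the 64-bit checksum accumulator down to 32 bits: adding the value to itself rotated by 32 sums the low and high halves with the end-around carry landing in the upper word, which the final >> 32 extracts. A portable sketch of that fold (csum_fold64() is an illustrative name, not the kernel function):

#include <stdint.h>

static inline uint64_t ror64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}

/*
 * Fold a 64-bit one's-complement accumulator into 32 bits:
 * temp64 + ror64(temp64, 32) adds the two 32-bit halves, with the
 * carry between them folded back in, into the top 32 bits.
 */
static uint32_t csum_fold64(uint64_t temp64)
{
	return (uint32_t)((temp64 + ror64(temp64, 32)) >> 32);
}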
|
| /linux/Documentation/devicetree/bindings/memory-controllers/fsl/ |
| fsl,imx-weim.yaml |
  69  05      128M   0M    0M    0M
  70  033     64M    64M   0M    0M
  71  0113    64M    32M   32M   0M
  72  01111   32M    32M   32M   32M
|
| /linux/lib/math/ |
| reciprocal_div.c |
  20  u64 m;                                                                    in reciprocal_value() local
  24  m = ((1ULL << 32) * ((1ULL << l) - d));                                   in reciprocal_value()
  25  do_div(m, d);                                                             in reciprocal_value()
  26  ++m;                                                                      in reciprocal_value()
  27  R.m = (u32)m;                                                             in reciprocal_value()
  43  /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to   in reciprocal_value_adv()
  47  WARN(l == 32,                                                             in reciprocal_value_adv()
  48  "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",              in reciprocal_value_adv()
  51  mlow = 1ULL << (32 + l);                                                  in reciprocal_value_adv()
  53  mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));                   in reciprocal_value_adv()
  [all …]
|
| /linux/arch/arm/mach-imx/ |
| mx3x.h |
  15  * FC000000 43F00000 1M    AIPS 1
  16  * FC100000 50000000 1M    SPBA
  17  * FC200000 53F00000 1M    AIPS 2
  18  * FC500000 60000000 128M  ROMPATCH
  19  * FC400000 68000000 128M  AVIC
  20  *          70000000 256M  IPU (MAX M2)
  21  *          80000000 256M  CSD0 SDRAM/DDR
  22  *          90000000 256M  CSD1 SDRAM/DDR
  23  *          A0000000 128M  CS0 Flash
  24  *          A8000000 128M  CS1 Flash
  [all …]
|
| /linux/arch/mips/include/asm/ |
| cmpxchg.h |
  34   #define __xchg_asm(ld, st, m, val)                     argument
  36   __typeof(*(m)) __ret; \
  52   : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
  53   : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
  59   __ret = *m; \
  60   *m = val; \
  112  #define __cmpxchg_asm(ld, st, m, old, new)             argument
  114  __typeof(*(m)) __ret; \
  132  : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
  133  : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
  [all …]
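These macros wrap MIPS LL/SC sequences (with the plain load/store fallback visible at lines 59-60 for CPUs without LL/SC) behind xchg()/cmpxchg() semantics: atomically swap in a new value and always return the old one. For reference only, a userspace sketch of the same semantics using C11 atomics rather than the kernel's inline asm; the function names are illustrative:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically store val and return the previous value (xchg semantics). */
static uint32_t xchg_u32(_Atomic uint32_t *p, uint32_t val)
{
	return atomic_exchange(p, val);
}

/*
 * Atomically set *p to new only if it currently equals old; always
 * return the value *p held before the operation (cmpxchg semantics).
 */
static uint32_t cmpxchg_u32(_Atomic uint32_t *p, uint32_t old, uint32_t new)
{
	atomic_compare_exchange_strong(p, &old, new);
	return old;	/* updated to the observed value on failure */
}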
|
| /linux/drivers/gpu/drm/msm/disp/dpu1/ |
| msm_media_info.h |
  48   * Y_Scanlines: Height aligned to 32
  86   * Y_Scanlines: Height aligned to 32
  144  * Y_Scanlines: Height aligned to 32
  184  * M M M M M M M M M M M M . .   ^           ^
  185  * M M M M M M M M M M M M . .   |           |
  186  * M M M M M M M M M M M M . . Height        |
  187  * M M M M M M M M M M M M . .   |    Meta_Y_Scanlines
  188  * M M M M M M M M M M M M . .   |           |
  189  * M M M M M M M M M M M M . .   |           |
  190  * M M M M M M M M M M M M . .   |           |
  [all …]
|
| /linux/arch/arm/vfp/ |
| vfp.h |
  12   if (shift < 32)                                                      in vfp_shiftright32jamming()
  13   val = val >> shift | ((val << (32 - shift)) != 0);                   in vfp_shiftright32jamming()
  70   static inline void mul64to128(u64 *resh, u64 *resl, u64 n, u64 m)    in mul64to128() argument
  76   ml = m;                                                              in mul64to128()
  79   nh = n >> 32;                                                        in mul64to128()
  82   mh = m >> 32;                                                        in mul64to128()
  87   rh += ((u64)(rma < rmb) << 32) + (rma >> 32);                        in mul64to128()
  89   rma <<= 32;                                                          in mul64to128()
  103  static inline u64 vfp_hi64multiply64(u64 n, u64 m)                    in vfp_hi64multiply64() argument
  106  mul64to128(&rh, &rl, n, m);                                          in vfp_hi64multiply64()
  [all …]
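mul64to128() builds a 64x64->128-bit product from four 32x32->64 partial products, propagating the carries by hand; vfp_hi64multiply64() then keeps only the high half. A standalone sketch of the same decomposition, cross-checked against the compiler's __int128 (the main() check and the test values are illustrative, not part of the kernel code):

#include <assert.h>
#include <stdint.h>

/* 64x64 -> 128 multiply assembled from four 32x32 partial products. */
static void mul64to128(uint64_t *resh, uint64_t *resl, uint64_t n, uint64_t m)
{
	uint32_t nl = (uint32_t)n, nh = (uint32_t)(n >> 32);
	uint32_t ml = (uint32_t)m, mh = (uint32_t)(m >> 32);
	uint64_t rl = (uint64_t)nl * ml;
	uint64_t rma = (uint64_t)nh * ml;	/* the two middle products */
	uint64_t rmb = (uint64_t)nl * mh;
	uint64_t rh = (uint64_t)nh * mh;

	rma += rmb;					/* may carry out... */
	rh += ((uint64_t)(rma < rmb) << 32) + (rma >> 32);	/* ...fold it in */
	rma <<= 32;
	rl += rma;
	rh += (rl < rma);				/* carry from the low half */

	*resh = rh;
	*resl = rl;
}

int main(void)
{
	uint64_t h, l, n = 0xfedcba9876543210ULL, m = 0x0123456789abcdefULL;
	unsigned __int128 ref = (unsigned __int128)n * m;

	mul64to128(&h, &l, n, m);
	assert(h == (uint64_t)(ref >> 64) && l == (uint64_t)ref);
	return 0;
}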
|
| /linux/lib/crypto/arm/ |
| sha1-ce-core.S |
  40  sha1h.32 dg1b\ev, dg0
  42  sha1\op\().32 dg0, dg1a\ev, ta\ev
  44  sha1\op\().32 dg0, \dg1, ta\ev
  49  sha1su0.32 q\s0, q\s1, q\s2
  51  sha1su1.32 q\s0, q\s3
  68  vld1.32 {k0-k1}, [ip, :128]!
  69  vld1.32 {k2-k3}, [ip, :128]
  72  vld1.32 {dga}, [r0]
  76  0: vld1.32 {q8-q9}, [r1]!
  77  vld1.32 {q10-q11}, [r1]!
  [all …]
|
| /linux/arch/parisc/kernel/ |
| pacache.S |
  86   pitlbe,m %arg1(%sr1, %r28)   /* Last pitlbe and addr adjust */
  100  pitlbe,m %arg1(%sr1, %r28)   /* pitlbe for one loop */
  130  pdtlbe,m %arg1(%sr1, %r28)   /* Last pdtlbe and addr adjust */
  144  pdtlbe,m %arg1(%sr1, %r28)   /* pdtlbe for one loop */
  207  fice,m %arg1(%sr1, %arg0)    /* Last fice and addr adjust */
  216  fice,m %arg1(%sr1, %arg0)
  217  fice,m %arg1(%sr1, %arg0)
  218  fice,m %arg1(%sr1, %arg0)
  219  fice,m %arg1(%sr1, %arg0)
  220  fice,m %arg1(%sr1, %arg0)
  [all …]
|
| /linux/arch/x86/include/asm/ |
| xor_avx.h |
  18  BLOCK(32 * i, 0) \
  19  BLOCK(32 * (i + 1), 1) \
  20  BLOCK(32 * (i + 2), 2) \
  21  BLOCK(32 * (i + 3), 3)
  40  asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \  in xor_avx_2()
  42  "m" (p0[i / sizeof(*p0)])); \                                            in xor_avx_2()
  44  "=m" (p0[i / sizeof(*p0)])); \                                           in xor_avx_2()
  68  asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \  in xor_avx_3()
  70  "m" (p1[i / sizeof(*p1)])); \                                            in xor_avx_3()
  72  "m" (p0[i / sizeof(*p0)])); \                                            in xor_avx_3()
  [all …]
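xor_avx_2()/xor_avx_3() XOR one or two source buffers into a destination buffer 32 bytes (one YMM register) at a time; the BLOCK macro unrolls four such registers per iteration. Functionally it reduces to the scalar loop below, sketched for reference (the AVX variant exists purely for throughput; xor_blocks_2() is an illustrative name):

#include <stddef.h>

/* XOR 'bytes' bytes of src into dst: the scalar equivalent of xor_avx_2(). */
static void xor_blocks_2(size_t bytes, unsigned long *dst, const unsigned long *src)
{
	for (size_t i = 0; i < bytes / sizeof(*dst); i++)
		dst[i] ^= src[i];
}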
|
| /linux/drivers/bus/ |
| imx-weim.c |
  97   05,    /* CS0(128M) CS1(0M)  CS2(0M)  CS3(0M)  */   in imx_weim_gpr_setup()
  98   033,   /* CS0(64M)  CS1(64M) CS2(0M)  CS3(0M)  */   in imx_weim_gpr_setup()
  99   0113,  /* CS0(64M)  CS1(32M) CS2(32M) CS3(0M)  */   in imx_weim_gpr_setup()
  100  01111, /* CS0(32M)  CS1(32M) CS2(32M) CS3(32M) */   in imx_weim_gpr_setup()
  117  cs = range.bus_addr >> 32;                           in imx_weim_gpr_setup()
|
| /linux/drivers/clk/sunxi/ |
| clk-sunxi.c |
  27  * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
  29  * rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
  37  /* Normalize value to a 6M multiple */                              in sun4i_get_pll1_factors()
  41  /* m is always zero for pll1 */                                     in sun4i_get_pll1_factors()
  42  req->m = 0;                                                         in sun4i_get_pll1_factors()
  55  /* p will be 2 for divs between 10 - 20 and odd divs under 32 */    in sun4i_get_pll1_factors()
  56  else if (div < 20 || (div < 32 && (div & 1)))                       in sun4i_get_pll1_factors()
  59  /* p will be 1 for even divs under 32, divs under 40 and odd pairs  in sun4i_get_pll1_factors()
  75  * sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
  77  * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
  [all …]
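The two driver comments give the PLL1 rate formulas directly. A tiny helper makes the sun4i factor-to-rate mapping concrete; it is illustrative only (sun4i_pll1_rate() is not a kernel function), using the formula quoted in the comment at line 29:

#include <stdint.h>

/*
 * sun4i PLL1: rate = (parent_rate * n * (k + 1) >> p) / (m + 1).
 * Per the comment above, the sun4i factor code always picks m == 0.
 */
static uint64_t sun4i_pll1_rate(uint64_t parent_rate, uint8_t n, uint8_t k,
				uint8_t m, uint8_t p)
{
	return ((parent_rate * n * (k + 1)) >> p) / (m + 1);
}

/* Example: a 24 MHz oscillator with n = 32, k = 0, m = 0, p = 0 gives 768 MHz. */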
|
| /linux/include/uapi/mtd/ |
| mtd-abi.h |
  72   * @len: length of data buffer (only lower 32 bits are used)
  73   * @ooblen: length of OOB buffer (only lower 32 bits are used)
  117  * @len: length of data buffer (only lower 32 bits are used)
  118  * @ooblen: length of OOB buffer (only lower 32 bits are used)
  201  * #define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
  206  #define MEMGETINFO   _IOR('M', 1, struct mtd_info_user)
  208  #define MEMERASE     _IOW('M', 2, struct erase_info_user)
  210  #define MEMWRITEOOB  _IOWR('M', 3, struct mtd_oob_buf)
  212  #define MEMREADOOB   _IOWR('M', 4, struct mtd_oob_buf)
  214  #define MEMLOCK      _IOW('M', 5, struct erase_info_user)
  [all …]
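These are the classic MTD character-device ioctls ('M' magic). As a usage illustration only (the device path is assumed and error handling is trimmed), userspace typically queries the device geometry with MEMGETINFO before erasing a block with MEMERASE:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>	/* struct mtd_info_user, struct erase_info_user */

int main(void)
{
	struct mtd_info_user info;
	struct erase_info_user erase;
	int fd = open("/dev/mtd0", O_RDWR);	/* device path assumed */

	if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
		return 1;

	printf("size=%u erasesize=%u writesize=%u\n",
	       info.size, info.erasesize, info.writesize);

	/* Erase the first erase block. */
	erase.start = 0;
	erase.length = info.erasesize;
	if (ioctl(fd, MEMERASE, &erase) < 0)
		return 1;

	close(fd);
	return 0;
}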
|
| /linux/fs/erofs/ |
| zmap.c |
  23  static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,   in z_erofs_load_full_lcluster() argument
  26  struct inode *const inode = m->inode;                                  in z_erofs_load_full_lcluster()
  34  di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, m->in_mbox);   in z_erofs_load_full_lcluster()
  37  m->lcn = lcn;                                                          in z_erofs_load_full_lcluster()
  38  m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);          in z_erofs_load_full_lcluster()
  41  m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;                      in z_erofs_load_full_lcluster()
  42  if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {                        in z_erofs_load_full_lcluster()
  43  m->clusterofs = 1 << vi->z_lclusterbits;                               in z_erofs_load_full_lcluster()
  44  m->delta[0] = le16_to_cpu(di->di_u.delta[0]);                          in z_erofs_load_full_lcluster()
  45  if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {                             in z_erofs_load_full_lcluster()
  [all …]
|