Home
last modified time | relevance | path

Searched refs:__force (Results 1 – 25 of 783) sorted by relevance

12345678910>>...32

/linux/include/scsi/
H A Dscsi_devinfo.h9 #define BLIST_NOLUN ((__force blist_flags_t)(1ULL << 0))
12 #define BLIST_FORCELUN ((__force blist_flags_t)(1ULL << 1))
14 #define BLIST_BORKEN ((__force blist_flags_t)(1ULL << 2))
16 #define BLIST_KEY ((__force blist_flags_t)(1ULL << 3))
18 #define BLIST_SINGLELUN ((__force blist_flags_t)(1ULL << 4))
20 #define BLIST_NOTQ ((__force blist_flags_t)(1ULL << 5))
22 #define BLIST_SPARSELUN ((__force blist_flags_t)(1ULL << 6))
24 #define BLIST_MAX5LUN ((__force blist_flags_t)(1ULL << 7))
26 #define BLIST_ISROM ((__force blist_flags_t)(1ULL << 8))
28 #define BLIST_LARGELUN ((__force blist_flags_t)(1ULL << 9))
[all …]
/linux/include/uapi/linux/byteorder/
H A Dlittle_endian.h16 #define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
17 #define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
18 #define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
19 #define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
20 #define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
21 #define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
22 #define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
23 #define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
24 #define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
25 #define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
[all …]
H A Dbig_endian.h16 #define __constant_htonl(x) ((__force __be32)(__u32)(x))
17 #define __constant_ntohl(x) ((__force __u32)(__be32)(x))
18 #define __constant_htons(x) ((__force __be16)(__u16)(x))
19 #define __constant_ntohs(x) ((__force __u16)(__be16)(x))
20 #define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
21 #define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
22 #define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
23 #define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
24 #define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
25 #define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
[all …]
/linux/tools/include/linux/
H A Dgfp_types.h108 #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
109 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
110 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
111 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
145 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
146 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
147 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
148 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
149 #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
150 #define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)
[all …]
/linux/include/linux/
H A Dgfp_types.h108 #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
109 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
110 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
111 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
145 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
146 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
147 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
148 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
149 #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
150 #define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)
[all …]
H A Dvirtio_byteorder.h19 return le16_to_cpu((__force __le16)val); in __virtio16_to_cpu()
21 return be16_to_cpu((__force __be16)val); in __virtio16_to_cpu()
27 return (__force __virtio16)cpu_to_le16(val); in __cpu_to_virtio16()
29 return (__force __virtio16)cpu_to_be16(val); in __cpu_to_virtio16()
35 return le32_to_cpu((__force __le32)val); in __virtio32_to_cpu()
37 return be32_to_cpu((__force __be32)val); in __virtio32_to_cpu()
43 return (__force __virtio32)cpu_to_le32(val); in __cpu_to_virtio32()
45 return (__force __virtio32)cpu_to_be32(val); in __cpu_to_virtio32()
51 return le64_to_cpu((__force __le64)val); in __virtio64_to_cpu()
53 return be64_to_cpu((__force __be64)val); in __virtio64_to_cpu()
[all …]
/linux/tools/include/uapi/sound/
H A Dasound.h
/linux/include/uapi/sound/
H A Dasound.h173 #define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
174 #define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap …
175 #define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
176 #define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
177 #define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
181 #define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
182 #define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
183 #define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
184 #define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
185 #define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
[all …]
/linux/tools/perf/trace/beauty/include/uapi/sound/
H A Dasound.h173 #define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
174 #define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap …
175 #define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
176 #define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
177 #define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
181 #define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
182 #define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
183 #define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
184 #define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
185 #define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
[all …]
/linux/include/uapi/linux/
H A Deventpoll.h31 #define EPOLLIN (__force __poll_t)0x00000001
32 #define EPOLLPRI (__force __poll_t)0x00000002
33 #define EPOLLOUT (__force __poll_t)0x00000004
34 #define EPOLLERR (__force __poll_t)0x00000008
35 #define EPOLLHUP (__force __poll_t)0x00000010
36 #define EPOLLNVAL (__force __poll_t)0x00000020
37 #define EPOLLRDNORM (__force __poll_t)0x00000040
38 #define EPOLLRDBAND (__force __poll_t)0x00000080
39 #define EPOLLWRNORM (__force __poll_t)0x00000100
40 #define EPOLLWRBAND (__force __poll_t)0x00000200
[all …]
/linux/tools/testing/selftests/bpf/progs/
H A Dtest_tcp_custom_syncookie.h8 #define __force macro
56 unsigned long long s = (__force u32)sum; in csum_tcpudp_nofold()
58 s += (__force u32)saddr; in csum_tcpudp_nofold()
59 s += (__force u32)daddr; in csum_tcpudp_nofold()
65 return (__force __wsum)from64to32(s); in csum_tcpudp_nofold()
71 u32 sum = (__force u32)csum; in csum_fold()
75 return (__force __sum16)~sum; in csum_fold()
92 __u32 sum = (__force u32)csum; in csum_ipv6_magic()
94 sum += (__force u32)saddr->in6_u.u6_addr32[0]; in csum_ipv6_magic()
95 carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]); in csum_ipv6_magic()
[all …]
/linux/arch/s390/include/asm/
H A Ddma-types.h40 return (__force dma32_t)__pa32(ptr); in virt_to_dma32()
45 return __va((__force unsigned long)addr); in dma32_to_virt()
50 return (__force dma32_t)addr; in u32_to_dma32()
55 return (__force u32)addr; in dma32_to_u32()
60 return (__force dma32_t)((__force u32)a + b); in dma32_add()
65 return (__force dma32_t)((__force u32)a & b); in dma32_and()
75 return (__force dma64_t)__pa(ptr); in virt_to_dma64()
80 return __va((__force unsigned long)addr); in dma64_to_virt()
85 return (__force dma64_t)addr; in u64_to_dma64()
90 return (__force u64)addr; in dma64_to_u64()
[all …]
H A Dchecksum.h45 u32 csum = (__force u32) sum; in csum_fold()
49 return (__force __sum16) ~csum; in csum_fold()
69 return csum_fold((__force __wsum)(csum >> 32)); in ip_fast_csum()
79 __u64 csum = (__force __u64)sum; in csum_tcpudp_nofold()
81 csum += (__force __u32)saddr; in csum_tcpudp_nofold()
82 csum += (__force __u32)daddr; in csum_tcpudp_nofold()
86 return (__force __wsum)(csum >> 32); in csum_tcpudp_nofold()
112 __u64 sum = (__force __u64)csum; in csum_ipv6_magic()
114 sum += (__force __u32)saddr->s6_addr32[0]; in csum_ipv6_magic()
115 sum += (__force __u32)saddr->s6_addr32[1]; in csum_ipv6_magic()
[all …]
/linux/net/ipv6/
H A Dip6_checksum.c16 __u32 sum = (__force u32)csum; in csum_ipv6_magic()
18 sum += (__force u32)saddr->s6_addr32[0]; in csum_ipv6_magic()
19 carry = (sum < (__force u32)saddr->s6_addr32[0]); in csum_ipv6_magic()
22 sum += (__force u32)saddr->s6_addr32[1]; in csum_ipv6_magic()
23 carry = (sum < (__force u32)saddr->s6_addr32[1]); in csum_ipv6_magic()
26 sum += (__force u32)saddr->s6_addr32[2]; in csum_ipv6_magic()
27 carry = (sum < (__force u32)saddr->s6_addr32[2]); in csum_ipv6_magic()
30 sum += (__force u32)saddr->s6_addr32[3]; in csum_ipv6_magic()
31 carry = (sum < (__force u32)saddr->s6_addr32[3]); in csum_ipv6_magic()
34 sum += (__force u32)daddr->s6_addr32[0]; in csum_ipv6_magic()
[all …]
/linux/arch/powerpc/include/asm/
H A Dchecksum.h41 u32 tmp = (__force u32)sum; in csum_fold()
49 return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16); in csum_fold()
61 u64 s = (__force u32)sum; in csum_tcpudp_nofold()
63 s += (__force u32)saddr; in csum_tcpudp_nofold()
64 s += (__force u32)daddr; in csum_tcpudp_nofold()
70 return (__force __wsum) from64to32(s); in csum_tcpudp_nofold()
98 u64 res = (__force u64)csum; in csum_add()
100 res += (__force u64)addend; in csum_add()
101 return (__force __wsum)((u32)res + (res >> 32)); in csum_add()
119 return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3); in csum_shift()
[all …]
/linux/include/net/
H A Dchecksum.h61 u32 res = (__force u32)csum; in csum_add()
62 res += (__force u32)addend; in csum_add()
63 return (__force __wsum)(res + (res < (__force u32)addend)); in csum_add()
74 u16 res = (__force u16)csum; in csum16_add()
76 res += (__force u16)addend; in csum16_add()
77 return (__force __sum16)(res + (res < (__force u16)addend)); in csum16_add()
90 return (__force __wsum)ror32((__force u32)sum, 8); in csum_shift()
109 return (__force __wsum)n; in csum_unfold()
112 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
121 __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); in csum_replace4()
[all …]
/linux/fs/befs/
H A Dendian.h19 return le64_to_cpu((__force __le64)n); in fs64_to_cpu()
21 return be64_to_cpu((__force __be64)n); in fs64_to_cpu()
28 return (__force fs64)cpu_to_le64(n); in cpu_to_fs64()
30 return (__force fs64)cpu_to_be64(n); in cpu_to_fs64()
37 return le32_to_cpu((__force __le32)n); in fs32_to_cpu()
39 return be32_to_cpu((__force __be32)n); in fs32_to_cpu()
46 return (__force fs32)cpu_to_le32(n); in cpu_to_fs32()
48 return (__force fs32)cpu_to_be32(n); in cpu_to_fs32()
55 return le16_to_cpu((__force __le16)n); in fs16_to_cpu()
57 return be16_to_cpu((__force __be16)n); in fs16_to_cpu()
[all …]
/linux/include/linux/rpmsg/
H A Dbyteorder.h22 return le16_to_cpu((__force __le16)val); in __rpmsg16_to_cpu()
24 return be16_to_cpu((__force __be16)val); in __rpmsg16_to_cpu()
30 return (__force __rpmsg16)cpu_to_le16(val); in __cpu_to_rpmsg16()
32 return (__force __rpmsg16)cpu_to_be16(val); in __cpu_to_rpmsg16()
38 return le32_to_cpu((__force __le32)val); in __rpmsg32_to_cpu()
40 return be32_to_cpu((__force __be32)val); in __rpmsg32_to_cpu()
46 return (__force __rpmsg32)cpu_to_le32(val); in __cpu_to_rpmsg32()
48 return (__force __rpmsg32)cpu_to_be32(val); in __cpu_to_rpmsg32()
54 return le64_to_cpu((__force __le64)val); in __rpmsg64_to_cpu()
56 return be64_to_cpu((__force __be64)val); in __rpmsg64_to_cpu()
[all …]
/linux/include/net/netfilter/
H A Dnf_queue.h55 if ((__force u32)iph->saddr < (__force u32)iph->daddr) in hash_v4()
56 return jhash_3words((__force u32)iph->saddr, in hash_v4()
57 (__force u32)iph->daddr, iph->protocol, initval); in hash_v4()
59 return jhash_3words((__force u32)iph->daddr, in hash_v6()
60 (__force u32)iph->saddr, iph->protocol, initval); in hash_v6()
67 if ((__force u32)ip6h->saddr.s6_addr32[3] < in hash_v6()
68 (__force u32)ip6h->daddr.s6_addr32[3]) { in hash_v6()
69 a = (__force u32) ip6h->saddr.s6_addr32[3]; in hash_v6()
70 b = (__force u32) ip6h->daddr.s6_addr32[3]; in hash_v6()
[all …]
/linux/arch/alpha/include/asm/
H A Dio_trivial.h12 return __kernel_ldbu(*(const volatile u8 __force *)a); in IO_CONCAT()
18 return __kernel_ldwu(*(const volatile u16 __force *)a); in IO_CONCAT()
24 __kernel_stb(b, *(volatile u8 __force *)a); in IO_CONCAT()
30 __kernel_stw(b, *(volatile u16 __force *)a); in IO_CONCAT()
38 return *(const volatile u32 __force *)a; in IO_CONCAT()
44 *(volatile u32 __force *)a = b; in IO_CONCAT()
50 return *(const volatile u64 __force *)a; in IO_CONCAT()
56 *(volatile u64 __force *)a = b; in IO_CONCAT()
64 return __kernel_ldbu(*(const volatile u8 __force *)a); in IO_CONCAT()
70 return __kernel_ldwu(*(const volatile u16 __force *)a); in IO_CONCAT()
[all …]
/linux/fs/ufs/
H A Dswab.h30 return le64_to_cpu((__force __le64)n); in fs64_to_cpu()
32 return be64_to_cpu((__force __be64)n); in fs64_to_cpu()
39 return (__force __fs64)cpu_to_le64(n); in cpu_to_fs64()
41 return (__force __fs64)cpu_to_be64(n); in cpu_to_fs64()
48 return le32_to_cpu((__force __le32)n); in fs32_to_cpu()
50 return be32_to_cpu((__force __be32)n); in fs32_to_cpu()
57 return (__force __fs32)cpu_to_le32(n); in cpu_to_fs32()
59 return (__force __fs32)cpu_to_be32(n); in cpu_to_fs32()
84 return le16_to_cpu((__force __le16)n); in fs16_to_cpu()
86 return be16_to_cpu((__force __be16)n); in fs16_to_cpu()
[all …]
/linux/include/asm-generic/
H A Duaccess.h23 *(u8 *)to = *((u8 __force *)from); in __get_user_fn()
26 *(u16 *)to = get_unaligned((u16 __force *)from); in __get_user_fn()
29 *(u32 *)to = get_unaligned((u32 __force *)from); in __get_user_fn()
32 *(u64 *)to = get_unaligned((u64 __force *)from); in __get_user_fn()
49 *(u8 __force *)to = *(u8 *)from; in __put_user_fn()
52 put_unaligned(*(u16 *)from, (u16 __force *)to); in __put_user_fn()
55 put_unaligned(*(u32 *)from, (u32 __force *)to); in __put_user_fn()
58 put_unaligned(*(u64 *)from, (u64 __force *)to); in __put_user_fn()
84 memcpy(to, (const void __force *)from, n); in raw_copy_from_user()
91 memcpy((void __force *)to, from, n); in raw_copy_to_user()
[all …]
/linux/fs/qnx6/
H A Dqnx6.h81 return le64_to_cpu((__force __le64)n); in fs64_to_cpu()
83 return be64_to_cpu((__force __be64)n); in fs64_to_cpu()
89 return (__force __fs64)cpu_to_le64(n); in cpu_to_fs64()
91 return (__force __fs64)cpu_to_be64(n); in cpu_to_fs64()
97 return le32_to_cpu((__force __le32)n); in fs32_to_cpu()
99 return be32_to_cpu((__force __be32)n); in fs32_to_cpu()
105 return (__force __fs32)cpu_to_le32(n); in cpu_to_fs32()
107 return (__force __fs32)cpu_to_be32(n); in cpu_to_fs32()
113 return le16_to_cpu((__force __le16)n); in fs16_to_cpu()
115 return be16_to_cpu((__force __be16)n); in fs16_to_cpu()
[all …]
/linux/arch/m68k/include/asm/
H A Draw_io.h20 ({ u8 __v = (*(__force const volatile u8 *) (unsigned long)(addr)); __v; })
22 ({ u16 __v = (*(__force const volatile u16 *) (unsigned long)(addr)); __v; })
24 ({ u32 __v = (*(__force const volatile u32 *) (unsigned long)(addr)); __v; })
26 ({ u16 __v = le16_to_cpu(*(__force const volatile __le16 *) (unsigned long)(addr)); __v; })
28 ({ u32 __v = le32_to_cpu(*(__force const volatile __le32 *) (unsigned long)(addr)); __v; })
30 #define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
31 #define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
32 #define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
33 #define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(…
34 #define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(…
[all …]
/linux/arch/riscv/lib/
H A Dcsum.c25 unsigned long sum = (__force unsigned long)csum; in csum_ipv6_magic()
27 sum += (__force unsigned long)saddr->s6_addr32[0]; in csum_ipv6_magic()
28 sum += (__force unsigned long)saddr->s6_addr32[1]; in csum_ipv6_magic()
29 sum += (__force unsigned long)saddr->s6_addr32[2]; in csum_ipv6_magic()
30 sum += (__force unsigned long)saddr->s6_addr32[3]; in csum_ipv6_magic()
32 sum += (__force unsigned long)daddr->s6_addr32[0]; in csum_ipv6_magic()
33 sum += (__force unsigned long)daddr->s6_addr32[1]; in csum_ipv6_magic()
34 sum += (__force unsigned long)daddr->s6_addr32[2]; in csum_ipv6_magic()
35 sum += (__force unsigned long)daddr->s6_addr32[3]; in csum_ipv6_magic()
37 ulen = (__force unsigned int)htonl((unsigned int)len); in csum_ipv6_magic()
[all …]

12345678910>>...32