xref: /linux/tools/lib/bpf/bpf_endian.h (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__

/*
 * Isolate byte #n and put it into byte #m, for __u##b type.
 * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
 * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
 * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
 * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
 * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
 */
#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))
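/*
 * For instance, the move described above is (worked expansion for
 * illustration only):
 *
 *   ___bpf_mvb(x, 64, 6, 1)
 *     == ((__u64)(x) << (64 - (6 + 1) * 8) >> (64 - 8) << (1 * 8))
 *     == ((__u64)(x) << 8 >> 56 << 8)
 *
 * i.e. byte #6 is shifted up to the top (step 2), down to byte #0
 * (step 3) and finally into place at byte #1 (step 4).
 */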

#define ___bpf_swab16(x) ((__u16)(			\
			  ___bpf_mvb(x, 16, 0, 1) |	\
			  ___bpf_mvb(x, 16, 1, 0)))

#define ___bpf_swab32(x) ((__u32)(			\
			  ___bpf_mvb(x, 32, 0, 3) |	\
			  ___bpf_mvb(x, 32, 1, 2) |	\
			  ___bpf_mvb(x, 32, 2, 1) |	\
			  ___bpf_mvb(x, 32, 3, 0)))

#define ___bpf_swab64(x) ((__u64)(			\
			  ___bpf_mvb(x, 64, 0, 7) |	\
			  ___bpf_mvb(x, 64, 1, 6) |	\
			  ___bpf_mvb(x, 64, 2, 5) |	\
			  ___bpf_mvb(x, 64, 3, 4) |	\
			  ___bpf_mvb(x, 64, 4, 3) |	\
			  ___bpf_mvb(x, 64, 5, 2) |	\
			  ___bpf_mvb(x, 64, 6, 1) |	\
			  ___bpf_mvb(x, 64, 7, 0)))
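/*
 * Illustrative sanity-check values (the outer casts above truncate
 * any bits that integer promotion keeps above the target width):
 *
 *   ___bpf_swab16(0x1234)                == 0x3412
 *   ___bpf_swab32(0x12345678)            == 0x78563412
 *   ___bpf_swab64(0x0123456789abcdefULL) == 0xefcdab8967452301ULL
 */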

/* LLVM's BPF target selects the endianness of the CPU
 * it compiles on, or the one the user specifies (bpfel/bpfeb).
 * The __BYTE_ORDER__ used here is defined by the compiler; we
 * cannot rely on __BYTE_ORDER from libc headers, since it does
 * not reflect the actually requested byte order.
 *
 * Note that LLVM's BPF target has different __builtin_bswapX()
 * semantics: it maps to BPF_ALU | BPF_END | BPF_TO_BE in both
 * the bpfel and bpfeb case, which means that below we effectively
 * map it to cpu_to_be16() and friends. We could use it
 * unconditionally in the BPF case, but better not to rely on it,
 * so that this header can be used from both the application and
 * the BPF program side, which use different targets.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x)			__builtin_bswap16(x)
# define __bpf_htons(x)			__builtin_bswap16(x)
# define __bpf_constant_ntohs(x)	___bpf_swab16(x)
# define __bpf_constant_htons(x)	___bpf_swab16(x)
# define __bpf_ntohl(x)			__builtin_bswap32(x)
# define __bpf_htonl(x)			__builtin_bswap32(x)
# define __bpf_constant_ntohl(x)	___bpf_swab32(x)
# define __bpf_constant_htonl(x)	___bpf_swab32(x)
# define __bpf_be64_to_cpu(x)		__builtin_bswap64(x)
# define __bpf_cpu_to_be64(x)		__builtin_bswap64(x)
# define __bpf_constant_be64_to_cpu(x)	___bpf_swab64(x)
# define __bpf_constant_cpu_to_be64(x)	___bpf_swab64(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x)			(x)
# define __bpf_htons(x)			(x)
# define __bpf_constant_ntohs(x)	(x)
# define __bpf_constant_htons(x)	(x)
# define __bpf_ntohl(x)			(x)
# define __bpf_htonl(x)			(x)
# define __bpf_constant_ntohl(x)	(x)
# define __bpf_constant_htonl(x)	(x)
# define __bpf_be64_to_cpu(x)		(x)
# define __bpf_cpu_to_be64(x)		(x)
# define __bpf_constant_be64_to_cpu(x)  (x)
# define __bpf_constant_cpu_to_be64(x)  (x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
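/*
 * Sketch of how the byte order above gets selected in practice,
 * assuming clang is used to build the BPF object (prog.c is a
 * placeholder source file; per the note above, -target bpf follows
 * the build host):
 *
 *   clang -O2 -target bpfel -c prog.c   ->  __ORDER_LITTLE_ENDIAN__
 *   clang -O2 -target bpfeb -c prog.c   ->  __ORDER_BIG_ENDIAN__
 *   clang -O2 -target bpf   -c prog.c   ->  byte order of the host
 */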

#define bpf_htons(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_ntohl(x) : __bpf_ntohl(x))
#define bpf_cpu_to_be64(x)			\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
#define bpf_be64_to_cpu(x)			\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))

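/*
 * Minimal usage sketch, not part of this header: from BPF program
 * code, network-byte-order fields are compared against host-order
 * constants through the wrappers above. On a little-endian target,
 * a compile-time constant such as ETH_P_IP makes
 * __builtin_constant_p() true, so the swap is folded at compile
 * time via ___bpf_swab16(); a runtime value falls back to
 * __builtin_bswap16(). struct ethhdr and ETH_P_IP come from the
 * UAPI header <linux/if_ether.h>.
 *
 *   #include <linux/if_ether.h>
 *   #include <bpf/bpf_endian.h>
 *
 *   static int is_ipv4(const struct ethhdr *eth)
 *   {
 *           return eth->h_proto == bpf_htons(ETH_P_IP);
 *   }
 */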
#endif /* __BPF_ENDIAN__ */