// SPDX-License-Identifier: GPL-2.0
#ifndef _XDP_SAMPLE_BPF_H
#define _XDP_SAMPLE_BPF_H

#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

#include "net_shared.h"
#include "xdp_sample_shared.h"

/* errno values used by the samples, mirrored from the kernel's error codes */
#define EINVAL 22
#define ENETDOWN 100
#define EMSGSIZE 90
#define EOPNOTSUPP 95
#define ENOSPC 28

typedef struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(map_flags, BPF_F_MMAPABLE);
	__type(key, unsigned int);
	__type(value, struct datarec);
} array_map;

extern array_map rx_cnt;
extern const volatile int nr_cpus;

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
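
/*
 * Sketch of how the externs above are expected to be satisfied (names in
 * the user space snippet are illustrative): the map instance is defined in
 * a .bpf.c file with SEC(".maps"), and nr_cpus lands in .rodata so the
 * loader can fill it in before load. Since the typedef omits max_entries,
 * the loader is also expected to size the array, e.g. one struct datarec
 * slot per possible CPU.
 *
 *	// BPF side (.bpf.c)
 *	array_map rx_cnt SEC(".maps");
 *	const volatile int nr_cpus = 0;
 *
 *	// user space side, with a libbpf skeleton, before bpf_object load
 *	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
 *	bpf_map__set_max_entries(skel->maps.rx_cnt, skel->rodata->nr_cpus);
 */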

/* Swap the source and destination MAC addresses of the Ethernet header in place */
static __always_inline void swap_src_dst_mac(void *data)
{
	unsigned short *p = data;
	unsigned short dst[3];

	dst[0] = p[0];
	dst[1] = p[1];
	dst[2] = p[2];
	p[0] = p[3];
	p[1] = p[4];
	p[2] = p[5];
	p[3] = dst[0];
	p[4] = dst[1];
	p[5] = dst[2];
}
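
/*
 * Usage sketch (xdp_swap_macs is a made-up program name): before calling
 * swap_src_dst_mac(), the program must prove to the verifier that a full
 * Ethernet header lies within [data, data_end).
 *
 *	SEC("xdp")
 *	int xdp_swap_macs(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *
 *		swap_src_dst_mac(data);
 *		return XDP_TX;
 *	}
 */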

/*
 * Note: including linux/compiler.h or linux/kernel.h for the macros below
 * conflicts with the vmlinux.h include in BPF files, so we define them here.
 *
 * The following functions are taken from kernel sources and
 * break aliasing rules in their original form.
 *
 * While the kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3, which makes the build fail
 * under gcc 4.4.
 *
 * The extra __may_alias__ types allow aliasing in this case.
 */
typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		asm volatile ("" : : : "memory");
		__builtin_memcpy((void *)res, (const void *)p, size);
		asm volatile ("" : : : "memory");
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		asm volatile ("" : : : "memory");
		__builtin_memcpy((void *)p, (const void *)res, size);
		asm volatile ("" : : : "memory");
	}
}

#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

/* Add a value using relaxed read and relaxed write. Less expensive than
 * fetch_add when there is no write concurrency.
 */
#define NO_TEAR_ADD(x, val) WRITE_ONCE((x), READ_ONCE(x) + (val))
#define NO_TEAR_INC(x) NO_TEAR_ADD((x), 1)
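
/*
 * Usage sketch: with each CPU updating only its own struct datarec slot
 * (the "no write concurrency" case above), counters can be bumped with the
 * NO_TEAR helpers instead of an atomic fetch_add. The field names
 * (processed, dropped) are assumed from struct datarec in
 * xdp_sample_shared.h, and dropped_pkts stands for a local count kept by
 * the calling program.
 *
 *	u32 key = bpf_get_smp_processor_id();
 *	struct datarec *rec = bpf_map_lookup_elem(&rx_cnt, &key);
 *
 *	if (rec) {
 *		NO_TEAR_INC(rec->processed);
 *		NO_TEAR_ADD(rec->dropped, dropped_pkts);
 *	}
 */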

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#endif