1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
4 #include "test_xdp_context_test_run.skel.h"
5 #include "test_xdp_meta.skel.h"
6
/* Interface / netns names shared by the veth and tuntap test variants. */
#define RX_NAME "veth0"
#define TX_NAME "veth1"
#define TX_NETNS "xdp_context_tx"
#define RX_NETNS "xdp_context_rx"
#define TAP_NAME "tap0"
#define TAP_NETNS "xdp_context_tuntap"

/* Recognizable payload the XDP program copies into the test_result map;
 * assert_test_result() compares against it byte-for-byte.
 */
#define TEST_PAYLOAD_LEN 32
static const __u8 test_payload[TEST_PAYLOAD_LEN] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
	0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
};
21
/* Run the program with a deliberately malformed xdp_md context and verify
 * the kernel rejects it: bpf_prog_test_run_opts() must fail with EINVAL.
 * opts is passed by value on purpose so the caller's template is untouched.
 */
void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts,
			    __u32 data_meta, __u32 data, __u32 data_end,
			    __u32 ingress_ifindex, __u32 rx_queue_index,
			    __u32 egress_ifindex)
{
	struct xdp_md bad_md = {
		.data_meta = data_meta,
		.data = data,
		.data_end = data_end,
		.ingress_ifindex = ingress_ifindex,
		.rx_queue_index = rx_queue_index,
		.egress_ifindex = egress_ifindex,
	};
	int ret;

	opts.ctx_in = &bad_md;
	opts.ctx_size_in = sizeof(bad_md);
	ret = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_EQ(errno, EINVAL, "errno-EINVAL");
	ASSERT_ERR(ret, "bpf_prog_test_run");
}
43
test_xdp_context_test_run(void)44 void test_xdp_context_test_run(void)
45 {
46 struct test_xdp_context_test_run *skel = NULL;
47 char data[sizeof(pkt_v4) + sizeof(__u32)];
48 char bad_ctx[sizeof(struct xdp_md) + 1];
49 struct xdp_md ctx_in, ctx_out;
50 DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
51 .data_in = &data,
52 .data_size_in = sizeof(data),
53 .ctx_out = &ctx_out,
54 .ctx_size_out = sizeof(ctx_out),
55 .repeat = 1,
56 );
57 int err, prog_fd;
58
59 skel = test_xdp_context_test_run__open_and_load();
60 if (!ASSERT_OK_PTR(skel, "skel"))
61 return;
62 prog_fd = bpf_program__fd(skel->progs.xdp_context);
63
64 /* Data past the end of the kernel's struct xdp_md must be 0 */
65 bad_ctx[sizeof(bad_ctx) - 1] = 1;
66 opts.ctx_in = bad_ctx;
67 opts.ctx_size_in = sizeof(bad_ctx);
68 err = bpf_prog_test_run_opts(prog_fd, &opts);
69 ASSERT_EQ(errno, E2BIG, "extradata-errno");
70 ASSERT_ERR(err, "bpf_prog_test_run(extradata)");
71
72 *(__u32 *)data = XDP_PASS;
73 *(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4;
74 opts.ctx_in = &ctx_in;
75 opts.ctx_size_in = sizeof(ctx_in);
76 memset(&ctx_in, 0, sizeof(ctx_in));
77 ctx_in.data_meta = 0;
78 ctx_in.data = sizeof(__u32);
79 ctx_in.data_end = ctx_in.data + sizeof(pkt_v4);
80 err = bpf_prog_test_run_opts(prog_fd, &opts);
81 ASSERT_OK(err, "bpf_prog_test_run(valid)");
82 ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval");
83 ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize");
84 ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize");
85 ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta");
86 ASSERT_EQ(ctx_out.data, 0, "valid-data");
87 ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend");
88
89 /* Meta data's size must be a multiple of 4 */
90 test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0);
91
92 /* data_meta must reference the start of data */
93 test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
94 0, 0, 0);
95
96 /* Meta data must be 255 bytes or smaller */
97 test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);
98
99 /* Total size of data must match data_end - data_meta */
100 test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
101 sizeof(data) - 1, 0, 0, 0);
102 test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
103 sizeof(data) + 1, 0, 0, 0);
104
105 /* RX queue cannot be specified without specifying an ingress */
106 test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
107 0, 1, 0);
108
109 /* Interface 1 is always the loopback interface which always has only
110 * one RX queue (index 0). This makes index 1 an invalid rx queue index
111 * for interface 1.
112 */
113 test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
114 1, 1, 0);
115
116 /* The egress cannot be specified */
117 test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
118 0, 0, 1);
119
120 test_xdp_context_test_run__destroy(skel);
121 }
122
send_test_packet(int ifindex)123 static int send_test_packet(int ifindex)
124 {
125 int n, sock = -1;
126 __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
127
128 /* The ethernet header is not relevant for this test and doesn't need to
129 * be meaningful.
130 */
131 struct ethhdr eth = { 0 };
132
133 memcpy(packet, ð, sizeof(eth));
134 memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
135
136 sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
137 if (!ASSERT_GE(sock, 0, "socket"))
138 goto err;
139
140 struct sockaddr_ll saddr = {
141 .sll_family = PF_PACKET,
142 .sll_ifindex = ifindex,
143 .sll_halen = ETH_ALEN
144 };
145 n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr,
146 sizeof(saddr));
147 if (!ASSERT_EQ(n, sizeof(packet), "sendto"))
148 goto err;
149
150 close(sock);
151 return 0;
152
153 err:
154 if (sock >= 0)
155 close(sock);
156 return -1;
157 }
158
assert_test_result(struct test_xdp_meta * skel)159 static void assert_test_result(struct test_xdp_meta *skel)
160 {
161 int err;
162 __u32 map_key = 0;
163 __u8 map_value[TEST_PAYLOAD_LEN];
164
165 err = bpf_map__lookup_elem(skel->maps.test_result, &map_key,
166 sizeof(map_key), &map_value,
167 TEST_PAYLOAD_LEN, BPF_ANY);
168 if (!ASSERT_OK(err, "lookup test_result"))
169 return;
170
171 ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN,
172 "test_result map contains test payload");
173 }
174
test_xdp_context_veth(void)175 void test_xdp_context_veth(void)
176 {
177 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
178 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
179 struct netns_obj *rx_ns = NULL, *tx_ns = NULL;
180 struct bpf_program *tc_prog, *xdp_prog;
181 struct test_xdp_meta *skel = NULL;
182 struct nstoken *nstoken = NULL;
183 int rx_ifindex, tx_ifindex;
184 int ret;
185
186 tx_ns = netns_new(TX_NETNS, false);
187 if (!ASSERT_OK_PTR(tx_ns, "create tx_ns"))
188 return;
189
190 rx_ns = netns_new(RX_NETNS, false);
191 if (!ASSERT_OK_PTR(rx_ns, "create rx_ns"))
192 goto close;
193
194 SYS(close, "ip link add " RX_NAME " netns " RX_NETNS
195 " type veth peer name " TX_NAME " netns " TX_NETNS);
196
197 nstoken = open_netns(RX_NETNS);
198 if (!ASSERT_OK_PTR(nstoken, "setns rx_ns"))
199 goto close;
200
201 SYS(close, "ip link set dev " RX_NAME " up");
202
203 skel = test_xdp_meta__open_and_load();
204 if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
205 goto close;
206
207 rx_ifindex = if_nametoindex(RX_NAME);
208 if (!ASSERT_GE(rx_ifindex, 0, "if_nametoindex rx"))
209 goto close;
210
211 tc_hook.ifindex = rx_ifindex;
212 ret = bpf_tc_hook_create(&tc_hook);
213 if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
214 goto close;
215
216 tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls");
217 if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog"))
218 goto close;
219
220 tc_opts.prog_fd = bpf_program__fd(tc_prog);
221 ret = bpf_tc_attach(&tc_hook, &tc_opts);
222 if (!ASSERT_OK(ret, "bpf_tc_attach"))
223 goto close;
224
225 xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp");
226 if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog"))
227 goto close;
228
229 ret = bpf_xdp_attach(rx_ifindex,
230 bpf_program__fd(xdp_prog),
231 0, NULL);
232 if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
233 goto close;
234
235 close_netns(nstoken);
236
237 nstoken = open_netns(TX_NETNS);
238 if (!ASSERT_OK_PTR(nstoken, "setns tx_ns"))
239 goto close;
240
241 SYS(close, "ip link set dev " TX_NAME " up");
242
243 tx_ifindex = if_nametoindex(TX_NAME);
244 if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
245 goto close;
246
247 ret = send_test_packet(tx_ifindex);
248 if (!ASSERT_OK(ret, "send_test_packet"))
249 goto close;
250
251 assert_test_result(skel);
252
253 close:
254 close_netns(nstoken);
255 test_xdp_meta__destroy(skel);
256 netns_free(rx_ns);
257 netns_free(tx_ns);
258 }
259
test_xdp_context_tuntap(void)260 void test_xdp_context_tuntap(void)
261 {
262 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
263 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
264 struct netns_obj *ns = NULL;
265 struct test_xdp_meta *skel = NULL;
266 __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
267 int tap_fd = -1;
268 int tap_ifindex;
269 int ret;
270
271 ns = netns_new(TAP_NETNS, true);
272 if (!ASSERT_OK_PTR(ns, "create and open ns"))
273 return;
274
275 tap_fd = open_tuntap(TAP_NAME, true);
276 if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
277 goto close;
278
279 SYS(close, "ip link set dev " TAP_NAME " up");
280
281 skel = test_xdp_meta__open_and_load();
282 if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
283 goto close;
284
285 tap_ifindex = if_nametoindex(TAP_NAME);
286 if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
287 goto close;
288
289 tc_hook.ifindex = tap_ifindex;
290 ret = bpf_tc_hook_create(&tc_hook);
291 if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
292 goto close;
293
294 tc_opts.prog_fd = bpf_program__fd(skel->progs.ing_cls);
295 ret = bpf_tc_attach(&tc_hook, &tc_opts);
296 if (!ASSERT_OK(ret, "bpf_tc_attach"))
297 goto close;
298
299 ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(skel->progs.ing_xdp),
300 0, NULL);
301 if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
302 goto close;
303
304 /* The ethernet header is not relevant for this test and doesn't need to
305 * be meaningful.
306 */
307 struct ethhdr eth = { 0 };
308
309 memcpy(packet, ð, sizeof(eth));
310 memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
311
312 ret = write(tap_fd, packet, sizeof(packet));
313 if (!ASSERT_EQ(ret, sizeof(packet), "write packet"))
314 goto close;
315
316 assert_test_result(skel);
317
318 close:
319 if (tap_fd >= 0)
320 close(tap_fd);
321 test_xdp_meta__destroy(skel);
322 netns_free(ns);
323 }
324