// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <net/if.h>
#include "test_xdp.skel.h"
#include "test_xdp_bpf2bpf.skel.h"

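/* Metadata prepended to the packet data that the tracing program forwards
 * to user space via bpf_xdp_output(); must stay in sync with the layout
 * used on the BPF side in progs/test_xdp_bpf2bpf.c.
 */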
struct meta {
	int ifindex;
	int pkt_len;
};

struct test_ctx_s {
	bool passed;
	int pkt_size;
};

struct test_ctx_s test_ctx;

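/* perf buffer callback: verify a single record emitted by the tracing
 * program. Each record is expected to look like
 *
 *   struct meta | original pkt_v4 bytes | 0x00 0x01 0x02 ... filler
 *
 * where the filler is only present for packets larger than pkt_v4.
 */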
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	struct meta *meta = (struct meta *)data;
	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
	unsigned char *raw_pkt = data + sizeof(*meta);
	struct test_ctx_s *tst_ctx = ctx;

	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
		  "check_packet_content");

	if (meta->pkt_len > sizeof(pkt_v4)) {
		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
				  "check_packet_content");
	}

	tst_ctx->passed = true;
}

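/* I/O buffer size; must cover the largest pkt_sizes entry below (8200) */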
#define BUF_SZ	9000

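/* Run the _xdp_tx_iptunnel program once via BPF_PROG_TEST_RUN with a
 * pkt_size-byte input, then poll the perf buffer and check that the
 * fentry/fexit tracing programs saw the run and reported the expected
 * ifindex and return code.
 */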
static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
				     struct test_xdp_bpf2bpf *ftrace_skel,
				     int pkt_size)
{
	__u8 *buf, *buf_in;
	int err;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
		return;

	buf_in = malloc(BUF_SZ);
	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
		return;

	buf = malloc(BUF_SZ);
	if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
		free(buf_in);
		return;
	}

	test_ctx.passed = false;
	test_ctx.pkt_size = pkt_size;

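	/* Build the input: the canned IPv4 test packet followed by an
	 * incrementing 0, 1, 2, ... byte pattern that on_sample() checks.
	 */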
	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
	if (pkt_size > sizeof(pkt_v4)) {
		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
			buf_in[i + sizeof(pkt_v4)] = i;
	}

	/* Run test program */
	topts.data_in = buf_in;
	topts.data_size_in = pkt_size;
	topts.data_out = buf;
	topts.data_size_out = BUF_SZ;

	err = bpf_prog_test_run_opts(pkt_fd, &topts);

	ASSERT_OK(err, "ipv4");
	ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval");
	ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size");

	/* Make sure bpf_xdp_output() was triggered and it sent the expected
	 * data to the perf ring buffer.
	 */
	err = perf_buffer__poll(pb, 100);

	ASSERT_GE(err, 0, "perf_buffer__poll");
	ASSERT_TRUE(test_ctx.passed, "test passed");
	/* Verify test results */
	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
		  "fentry result");
	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");

	free(buf);
	free(buf_in);
}

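/* Attach fentry/fexit tracing programs to the _xdp_tx_iptunnel XDP program
 * and exercise it with packet sizes from sizeof(pkt_v4) up to 8200 bytes,
 * verifying each run through the perf buffer.
 */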
void test_xdp_bpf2bpf(void)
{
	int err, pkt_fd, map_fd;
	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
	struct iptnl_info value4 = {.family = AF_INET6};
	struct test_xdp *pkt_skel = NULL;
	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct bpf_program *prog;
	struct perf_buffer *pb = NULL;

	/* Load XDP program to introspect */
	pkt_skel = test_xdp__open_and_load();
	if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))
		return;

	pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);

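	/* Seed the vip2tnl map consumed by _xdp_tx_iptunnel; the canned test
	 * packet is still expected to come back unchanged with XDP_PASS.
	 */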
	map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl);
	bpf_map_update_elem(map_fd, &key4, &value4, 0);

	/* Load trace program */
	ftrace_skel = test_xdp_bpf2bpf__open();
	if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))
		goto out;

	/* Demonstrate the bpf_program__set_attach_target() API rather than
	 * opening the object with opts.attach_prog_fd set.
	 */
	prog = ftrace_skel->progs.trace_on_entry;
	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");

	prog = ftrace_skel->progs.trace_on_exit;
	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
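
	/* The open-time alternative mentioned above looked roughly like this
	 * (a sketch only; current libbpf prefers the setters used above):
	 *
	 *   LIBBPF_OPTS(bpf_object_open_opts, opts, .attach_prog_fd = pkt_fd);
	 *   ftrace_skel = test_xdp_bpf2bpf__open_opts(&opts);
	 */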

	err = test_xdp_bpf2bpf__load(ftrace_skel);
	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))
		goto out;

	err = test_xdp_bpf2bpf__attach(ftrace_skel);
	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))
		goto out;

	/* Set up the perf buffer (8 pages per CPU) that on_sample() consumes */
	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
			      on_sample, NULL, &test_ctx, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buffer__new"))
		goto out;

	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
					 pkt_sizes[i]);
out:
	perf_buffer__free(pb);
	test_xdp__destroy(pkt_skel);
	test_xdp_bpf2bpf__destroy(ftrace_skel);
}