1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3
4 #include <test_progs.h>
5 #include <network_helpers.h>
6
7 #include "rbtree.skel.h"
8 #include "rbtree_fail.skel.h"
9 #include "rbtree_btf_fail__wrong_node_type.skel.h"
10 #include "rbtree_btf_fail__add_wrong_type.skel.h"
11
test_rbtree_add_nodes(void)12 static void test_rbtree_add_nodes(void)
13 {
14 LIBBPF_OPTS(bpf_test_run_opts, opts,
15 .data_in = &pkt_v4,
16 .data_size_in = sizeof(pkt_v4),
17 .repeat = 1,
18 );
19 struct rbtree *skel;
20 int ret;
21
22 skel = rbtree__open_and_load();
23 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
24 return;
25
26 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
27 ASSERT_OK(ret, "rbtree_add_nodes run");
28 ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
29 ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");
30
31 rbtree__destroy(skel);
32 }
33
test_rbtree_add_nodes_nested(void)34 static void test_rbtree_add_nodes_nested(void)
35 {
36 LIBBPF_OPTS(bpf_test_run_opts, opts,
37 .data_in = &pkt_v4,
38 .data_size_in = sizeof(pkt_v4),
39 .repeat = 1,
40 );
41 struct rbtree *skel;
42 int ret;
43
44 skel = rbtree__open_and_load();
45 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
46 return;
47
48 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
49 ASSERT_OK(ret, "rbtree_add_nodes_nested run");
50 ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
51 ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");
52
53 rbtree__destroy(skel);
54 }
55
test_rbtree_add_and_remove(void)56 static void test_rbtree_add_and_remove(void)
57 {
58 LIBBPF_OPTS(bpf_test_run_opts, opts,
59 .data_in = &pkt_v4,
60 .data_size_in = sizeof(pkt_v4),
61 .repeat = 1,
62 );
63 struct rbtree *skel;
64 int ret;
65
66 skel = rbtree__open_and_load();
67 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
68 return;
69
70 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
71 ASSERT_OK(ret, "rbtree_add_and_remove");
72 ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
73 ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");
74
75 rbtree__destroy(skel);
76 }
77
test_rbtree_add_and_remove_array(void)78 static void test_rbtree_add_and_remove_array(void)
79 {
80 LIBBPF_OPTS(bpf_test_run_opts, opts,
81 .data_in = &pkt_v4,
82 .data_size_in = sizeof(pkt_v4),
83 .repeat = 1,
84 );
85 struct rbtree *skel;
86 int ret;
87
88 skel = rbtree__open_and_load();
89 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
90 return;
91
92 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
93 ASSERT_OK(ret, "rbtree_add_and_remove_array");
94 ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");
95
96 rbtree__destroy(skel);
97 }
98
test_rbtree_first_and_remove(void)99 static void test_rbtree_first_and_remove(void)
100 {
101 LIBBPF_OPTS(bpf_test_run_opts, opts,
102 .data_in = &pkt_v4,
103 .data_size_in = sizeof(pkt_v4),
104 .repeat = 1,
105 );
106 struct rbtree *skel;
107 int ret;
108
109 skel = rbtree__open_and_load();
110 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
111 return;
112
113 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
114 ASSERT_OK(ret, "rbtree_first_and_remove");
115 ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
116 ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
117 ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
118 ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");
119
120 rbtree__destroy(skel);
121 }
122
test_rbtree_api_release_aliasing(void)123 static void test_rbtree_api_release_aliasing(void)
124 {
125 LIBBPF_OPTS(bpf_test_run_opts, opts,
126 .data_in = &pkt_v4,
127 .data_size_in = sizeof(pkt_v4),
128 .repeat = 1,
129 );
130 struct rbtree *skel;
131 int ret;
132
133 skel = rbtree__open_and_load();
134 if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
135 return;
136
137 ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
138 ASSERT_OK(ret, "rbtree_api_release_aliasing");
139 ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
140 ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
141 ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");
142
143 rbtree__destroy(skel);
144 }
145
test_rbtree_success(void)146 void test_rbtree_success(void)
147 {
148 if (test__start_subtest("rbtree_add_nodes"))
149 test_rbtree_add_nodes();
150 if (test__start_subtest("rbtree_add_nodes_nested"))
151 test_rbtree_add_nodes_nested();
152 if (test__start_subtest("rbtree_add_and_remove"))
153 test_rbtree_add_and_remove();
154 if (test__start_subtest("rbtree_add_and_remove_array"))
155 test_rbtree_add_and_remove_array();
156 if (test__start_subtest("rbtree_first_and_remove"))
157 test_rbtree_first_and_remove();
158 if (test__start_subtest("rbtree_api_release_aliasing"))
159 test_rbtree_api_release_aliasing();
160 }
161
/* Generate a test function asserting that a deliberately malformed rbtree
 * skeleton fails to load: __open_and_load() must return an error pointer.
 * If load unexpectedly succeeds, destroy the skeleton so it isn't leaked.
 */
#define BTF_FAIL_TEST(suffix)									\
void test_rbtree_btf_fail__##suffix(void)							\
{												\
	struct rbtree_btf_fail__##suffix *skel;							\
												\
	skel = rbtree_btf_fail__##suffix##__open_and_load();					\
	if (!ASSERT_ERR_PTR(skel,								\
			    "rbtree_btf_fail__" #suffix "__open_and_load unexpected success"))	\
		rbtree_btf_fail__##suffix##__destroy(skel);					\
}

/* Register and run one BTF-failure subtest generated by BTF_FAIL_TEST(). */
#define RUN_BTF_FAIL_TEST(suffix)					\
	if (test__start_subtest("rbtree_btf_fail__" #suffix))		\
		test_rbtree_btf_fail__##suffix();

/* Instantiate the failure-case test functions for each bad skeleton. */
BTF_FAIL_TEST(wrong_node_type);
BTF_FAIL_TEST(add_wrong_type);
179
/* Top-level entry: run each BTF-failure subtest; each one expects the
 * corresponding malformed skeleton to be rejected at load time.
 */
void test_rbtree_btf_fail(void)
{
	RUN_BTF_FAIL_TEST(wrong_node_type);
	RUN_BTF_FAIL_TEST(add_wrong_type);
}
185
/* Top-level entry for the negative verifier tests in rbtree_fail.c.
 * RUN_TESTS comes from the test_progs framework; presumably it loads each
 * program in the skeleton and checks its annotated expected outcome --
 * confirm against test_progs.h.
 */
void test_rbtree_fail(void)
{
	RUN_TESTS(rbtree_fail);
}
190