// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"
#include "for_each_multi_maps.skel.h"
#include "for_each_hash_modify.skel.h"

static unsigned int duration;

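/*
 * Walk a hash map and a per-CPU map with bpf_for_each_map_elem() and
 * verify the counters the BPF program reports back through .bss.
 */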
static void test_hash_map(void)
{
	int i, err, max_entries;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

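	/* Fill the hash map: key i maps to value i + 1. */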
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

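	/* Give key 1 of the per-CPU map a distinct value (i + 1) on each CPU. */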
	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

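	/* Key 1 should have been deleted by the iteration callback, so the
	 * lookup is expected to fail.
	 */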
	key = 1;
	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

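/*
 * Same exercise over an array map and a per-CPU map. The callback is
 * expected to stop before the last element, so its value is excluded
 * from expected_total below.
 */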
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* skip the last iteration for expected total */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

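/*
 * Writing to the key pointer inside the bpf_for_each_map_elem() callback
 * is not allowed, so loading this program is expected to fail.
 */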
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		for_each_map_elem_write_key__destroy(skel);
}

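/*
 * One program iterates either an array map or a hash map depending on
 * the use_array flag; check that each run sums the right map.
 */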
static void test_multi_maps(void)
{
	struct for_each_multi_maps *skel;
	__u64 val, array_total, hash_total;
	__u32 key, max_entries;
	int i, err;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_multi_maps__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_multi_maps__open_and_load"))
		return;

	array_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		array_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "array_map_update"))
			goto out;
	}

	hash_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i + 100;
		val = i + 1;
		hash_total += val;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "hash_map_update"))
			goto out;
	}

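	/* First run: iterate the array map and expect its element sum. */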
	skel->bss->data_output = 0;
	skel->bss->use_array = 1;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, array_total, "array output");

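	/* Second run: same program, but iterate the hash map instead. */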
	skel->bss->data_output = 0;
	skel->bss->use_array = 0;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, hash_total, "hash output");

out:
	for_each_multi_maps__destroy(skel);
}

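/*
 * The BPF program is expected to modify the hash map while iterating it;
 * the test only checks for a clean run (no error, zero retval).
 */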
static void test_hash_modify(void)
{
	struct for_each_hash_modify *skel;
	int max_entries, i, err;
	__u64 key, val;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1
	);

	skel = for_each_hash_modify__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_modify__open_and_load"))
		return;

	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");

out:
	for_each_hash_modify__destroy(skel);
}

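/* Entry point: run each scenario as its own subtest. */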
void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
	if (test__start_subtest("multi_maps"))
		test_multi_maps();
	if (test__start_subtest("hash_modify"))
		test_hash_modify();
}