1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Isovalent */
3 #include <uapi/linux/if_link.h>
4 #include <net/if.h>
5 #include <test_progs.h>
6
7 #define loopback 1
8 #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
9
10 #include "test_tc_link.skel.h"
11 #include "tc_helpers.h"
12
/* Basic tcx attach/detach smoke test: attach tc1 on loopback ingress and
 * tc2 on egress via bpf_prog_attach_opts(), verify the per-direction mprog
 * bookkeeping (program count, chain revision, queried prog IDs), confirm
 * both programs actually fire on ping traffic, then detach and verify the
 * chains are empty again.
 */
void test_ns_tc_opts_basic(void)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	__u32 prog_ids[2];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	/* Nothing must be attached in either direction yet. */
	assert_mprog_count(BPF_TCX_INGRESS, 0);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

	ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");

	err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	/* Ingress attach must not leak into the egress chain. */
	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_in;

	/* An empty chain starts at revision 1; the attach bumped it to 2. */
	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 2, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* Only the ingress program may have triggered so far. */
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");

	err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_in;

	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_eg;

	/* Egress chain has its own independent revision counter. */
	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 2, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* Ping over loopback traverses both directions. */
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");

cleanup_eg:
	err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd);
	ASSERT_OK(err, "prog_detach_eg");

	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

cleanup_in:
	err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd);
	ASSERT_OK(err, "prog_detach_in");

	assert_mprog_count(BPF_TCX_INGRESS, 0);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

cleanup:
	test_tc_link__destroy(skel);
}
110
/* Exercise BPF_F_BEFORE insertion on the given tcx attach point: append
 * tc1 and tc2 in default order, then insert tc3 before tc2 (addressed by
 * relative_fd) and tc4 before tc1 (addressed by relative_id). The final
 * chain order must be [tc4, tc1, tc3, tc2], verified both by query and by
 * observing all four programs fire on live ping traffic.
 */
static void test_tc_opts_before_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	/* Default attach appends, so FIFO order [tc1, tc2] is expected. */
	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	/* Insert tc3 in front of tc2, addressing the anchor by fd. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	/* Insert tc4 in front of tc1, this time addressing the anchor by ID. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* All four programs are in the chain and must all run. */
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
256
test_ns_tc_opts_before(void)257 void test_ns_tc_opts_before(void)
258 {
259 test_tc_opts_before_target(BPF_TCX_INGRESS);
260 test_tc_opts_before_target(BPF_TCX_EGRESS);
261 }
262
/* Exercise BPF_F_AFTER insertion on the given tcx attach point: append
 * tc1 and tc2, then insert tc3 after tc1 (by relative_fd) and tc4 after
 * tc2 (by relative_id), giving chain order [tc1, tc3, tc2, tc4]. Unlike
 * the BEFORE test, teardown also re-queries the chain after each detach
 * to check the surviving order and that every mutation bumps revision.
 */
static void test_tc_opts_after_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	/* Default attach appends: [tc1, tc2], two attaches -> revision 3. */
	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	/* Insert tc3 right after tc1, addressing the anchor by fd. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	/* Insert tc4 after the tail (tc2), addressing the anchor by ID. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

	/* Detach also bumps the revision; remaining order must be intact. */
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 6, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 7, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 8, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
447
test_ns_tc_opts_after(void)448 void test_ns_tc_opts_after(void)
449 {
450 test_tc_opts_after_target(BPF_TCX_INGRESS);
451 test_tc_opts_after_target(BPF_TCX_EGRESS);
452 }
453
/* Verify expected_revision handling for attach and detach: operations with
 * a stale revision must fail with -ESTALE and leave the chain untouched,
 * while operations carrying the current revision succeed. Each successful
 * mutation bumps the chain revision by one.
 */
static void test_tc_opts_revision_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	__u32 prog_ids[3];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	assert_mprog_count(target, 0);

	/* Empty chain is at revision 1; attach with the matching revision. */
	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	/* Revision is now 2, so expecting 1 again must fail with -ESTALE. */
	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, -ESTALE, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");

	/* Detach with a stale revision must also be rejected. */
	LIBBPF_OPTS_RESET(optd,
		.expected_revision = 2,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_EQ(err, -ESTALE, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	LIBBPF_OPTS_RESET(optd,
		.expected_revision = 3,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	/* No expected_revision: unconditional detach. */
	LIBBPF_OPTS_RESET(optd);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
556
test_ns_tc_opts_revision(void)557 void test_ns_tc_opts_revision(void)
558 {
559 test_tc_opts_revision_target(BPF_TCX_INGRESS);
560 test_tc_opts_revision_target(BPF_TCX_EGRESS);
561 }
562
/* Verify that tcx programs coexist with the classic tc (clsact) datapath.
 * When @chain_tc_old is true, tc3 is attached through the legacy
 * bpf_tc_attach() hook; tcx programs tc1/tc2 must still run, and execution
 * must chain into the old-style program afterwards (seen_tc3 mirrors
 * @chain_tc_old throughout). Cleanup tears down legacy and tcx state in
 * reverse order of creation.
 */
static void test_tc_chain_classic(int target, bool chain_tc_old)
{
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	bool hook_created = false, tc_attached = false;
	__u32 fd1, fd2, fd3, id1, id2, id3;
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	if (chain_tc_old) {
		/* Map the tcx attach point onto the legacy clsact direction. */
		tc_hook.attach_point = target == BPF_TCX_INGRESS ?
				       BPF_TC_INGRESS : BPF_TC_EGRESS;
		err = bpf_tc_hook_create(&tc_hook);
		if (err == 0)
			hook_created = true;
		/* A pre-existing clsact qdisc is fine; only track what we made. */
		err = err == -EEXIST ? 0 : err;
		if (!ASSERT_OK(err, "bpf_tc_hook_create"))
			goto cleanup;

		tc_opts.prog_fd = fd3;
		err = bpf_tc_attach(&tc_hook, &tc_opts);
		if (!ASSERT_OK(err, "bpf_tc_attach"))
			goto cleanup;
		tc_attached = true;
	}

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_detach;

	assert_mprog_count(target, 2);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* tcx programs run; the legacy one only if it was chained in. */
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	if (!ASSERT_OK(err, "prog_detach"))
		goto cleanup_detach;

	assert_mprog_count(target, 1);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* Detaching tc2 must not affect tc1 or the legacy chaining. */
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

cleanup_detach:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	if (!ASSERT_OK(err, "prog_detach"))
		goto cleanup;

	assert_mprog_count(target, 0);
cleanup:
	if (tc_attached) {
		/* bpf_tc_detach() requires these fields to be zeroed. */
		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
		err = bpf_tc_detach(&tc_hook, &tc_opts);
		ASSERT_OK(err, "bpf_tc_detach");
	}
	if (hook_created) {
		/* Destroy the clsact qdisc for both directions at once. */
		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
		bpf_tc_hook_destroy(&tc_hook);
	}
	test_tc_link__destroy(skel);
	assert_mprog_count(target, 0);
}
657
test_ns_tc_opts_chain_classic(void)658 void test_ns_tc_opts_chain_classic(void)
659 {
660 test_tc_chain_classic(BPF_TCX_INGRESS, false);
661 test_tc_chain_classic(BPF_TCX_EGRESS, false);
662 test_tc_chain_classic(BPF_TCX_INGRESS, true);
663 test_tc_chain_classic(BPF_TCX_EGRESS, true);
664 }
665
/* Exercise BPF_F_REPLACE semantics: in-place replacement keeps the slot
 * position and bumps the revision; REPLACE may be combined with BEFORE when
 * the replacement lands in the stated position, while contradictory
 * combinations (e.g. REPLACE|AFTER pointing at the wrong slot) fail with
 * -ERANGE, and replacing a program with itself fails with -EEXIST.
 * detach_fd tracks which of fd2/fd3 currently occupies the replaced slot
 * so cleanup always detaches the right program.
 */
static void test_tc_opts_replace_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, id1, id2, id3, detach_fd;
	__u32 prog_ids[4], prog_flags[4];
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	/* Insert tc2 in front of tc1: chain becomes [tc2, tc1]. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	detach_fd = fd2;

	assert_mprog_count(target, 2);

	optq.prog_attach_flags = prog_flags;
	optq.prog_ids = prog_ids;

	memset(prog_flags, 0, sizeof(prog_flags));
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	/* Plain opts-attached programs report no per-program attach flags. */
	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

	/* Replace tc2 with tc3 in place: chain becomes [tc3, tc1]. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd2,
		.expected_revision = 3,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	detach_fd = fd3;

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");

	/* REPLACE combined with BEFORE is valid when the position matches:
	 * tc2 replaces tc3 which already sits right before tc1.
	 */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE | BPF_F_BEFORE,
		.replace_prog_fd = fd3,
		.relative_fd = fd1,
		.expected_revision = 4,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	detach_fd = fd2;

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

	/* Replacing a program with itself must be rejected. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 2);

	/* REPLACE|AFTER pointing at the wrong slot: tc2 is before tc1,
	 * not after it, so the request is out of range.
	 */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE | BPF_F_AFTER,
		.replace_prog_fd = fd2,
		.relative_fd = fd1,
		.expected_revision = 5,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 2);

	/* BEFORE|AFTER|REPLACE together is contradictory as well. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE,
		.replace_prog_fd = fd2,
		.relative_fd = fd1,
		.expected_revision = 5,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 2);

	/* Positional detach: remove whatever sits right before tc1. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
		.expected_revision = 5,
	);

cleanup_target2:
	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	LIBBPF_OPTS_RESET(optd);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
866
test_ns_tc_opts_replace(void)867 void test_ns_tc_opts_replace(void)
868 {
869 test_tc_opts_replace_target(BPF_TCX_INGRESS);
870 test_tc_opts_replace_target(BPF_TCX_EGRESS);
871 }
872
/* Feed invalid flag/relative-target combinations into attach and verify
 * each is rejected with the precise errno (-ERANGE for contradictory
 * flags, -ENOENT for anchors that do not exist, -EINVAL for malformed
 * requests, -EEXIST for re-attaching an already attached program) and
 * that failed attempts never change the chain length.
 */
static void test_tc_opts_invalid_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	assert_mprog_count(target, 0);

	/* BEFORE and AFTER together with no anchor is contradictory. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 0);

	/* BPF_F_ID with relative_id == 0 names no existing program. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	/* relative_fd without BEFORE/AFTER is meaningless. */
	LIBBPF_OPTS_RESET(opta,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(target, 0);

	/* Contradictory flags with an anchor that is not attached. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	/* BPF_F_ID alone, without a BEFORE/AFTER direction, is invalid. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ID,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(target, 0);

	/* Anchoring on a program that is not attached yet. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	/* First valid attach; subsequent duplicates must fail. */
	LIBBPF_OPTS_RESET(opta);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	/* Same program cannot be attached again relative to itself. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	/* REPLACE requires replace_prog_fd, not relative_fd. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach_x1");
	assert_mprog_count(target, 1);

	/* Replacing a program with itself is rejected. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);
cleanup:
	test_tc_link__destroy(skel);
}
1019
test_ns_tc_opts_invalid(void)1020 void test_ns_tc_opts_invalid(void)
1021 {
1022 test_tc_opts_invalid_target(BPF_TCX_INGRESS);
1023 test_tc_opts_invalid_target(BPF_TCX_EGRESS);
1024 }
1025
/* Check BPF_F_BEFORE attach semantics on @target (BPF_TCX_INGRESS or
 * BPF_TCX_EGRESS): with no relative fd given, BPF_F_BEFORE must place
 * each newly attached program in front of all existing ones, so the
 * query order ends up newest-first. The cleanup labels unwind the
 * attachments in reverse order.
 */
static void test_tc_opts_prepend_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];	/* one spare slot to verify zero termination */
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	/* Sanity: the four programs must have distinct ids. */
	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* First attach with default flags establishes the baseline entry. */
	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	/* BPF_F_BEFORE without a relative fd: tc2 goes to the front. */
	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	/* Expect newest-first order (tc2 before tc1); revision bumps
	 * once per chain modification.
	 */
	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	/* Traffic over loopback must traverse both attached programs. */
	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	/* After two more prepends the order is fully reversed. */
	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

	/* Success path falls through: detach in reverse attach order,
	 * checking the count after each step.
	 */
cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
1159
test_ns_tc_opts_prepend(void)1160 void test_ns_tc_opts_prepend(void)
1161 {
1162 test_tc_opts_prepend_target(BPF_TCX_INGRESS);
1163 test_tc_opts_prepend_target(BPF_TCX_EGRESS);
1164 }
1165
/* Check BPF_F_AFTER attach semantics on @target: with no relative fd
 * given, BPF_F_AFTER must place each newly attached program behind all
 * existing ones, so the query order stays oldest-first. Mirror image
 * of test_tc_opts_prepend_target().
 */
static void test_tc_opts_append_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];	/* one spare slot to verify zero termination */
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	/* Sanity: the four programs must have distinct ids. */
	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* First attach with default flags establishes the baseline entry. */
	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	/* BPF_F_AFTER without a relative fd: tc2 goes to the back. */
	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	/* Expect oldest-first order (tc1 before tc2). */
	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	/* Traffic over loopback must traverse both attached programs. */
	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	/* After two more appends the chain keeps attach order tc1..tc4. */
	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

	/* Success path falls through: detach in reverse attach order,
	 * checking the count after each step.
	 */
cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
1299
test_ns_tc_opts_append(void)1300 void test_ns_tc_opts_append(void)
1301 {
1302 test_tc_opts_append_target(BPF_TCX_INGRESS);
1303 test_tc_opts_append_target(BPF_TCX_EGRESS);
1304 }
1305
test_tc_opts_dev_cleanup_target(int target)1306 static void test_tc_opts_dev_cleanup_target(int target)
1307 {
1308 LIBBPF_OPTS(bpf_prog_attach_opts, opta);
1309 LIBBPF_OPTS(bpf_prog_detach_opts, optd);
1310 LIBBPF_OPTS(bpf_prog_query_opts, optq);
1311 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
1312 struct test_tc_link *skel;
1313 int err, ifindex;
1314
1315 ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
1316 ifindex = if_nametoindex("tcx_opts1");
1317 ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
1318
1319 skel = test_tc_link__open_and_load();
1320 if (!ASSERT_OK_PTR(skel, "skel_load"))
1321 goto cleanup;
1322
1323 fd1 = bpf_program__fd(skel->progs.tc1);
1324 fd2 = bpf_program__fd(skel->progs.tc2);
1325 fd3 = bpf_program__fd(skel->progs.tc3);
1326 fd4 = bpf_program__fd(skel->progs.tc4);
1327
1328 id1 = id_from_prog_fd(fd1);
1329 id2 = id_from_prog_fd(fd2);
1330 id3 = id_from_prog_fd(fd3);
1331 id4 = id_from_prog_fd(fd4);
1332
1333 ASSERT_NEQ(id1, id2, "prog_ids_1_2");
1334 ASSERT_NEQ(id3, id4, "prog_ids_3_4");
1335 ASSERT_NEQ(id2, id3, "prog_ids_2_3");
1336
1337 assert_mprog_count_ifindex(ifindex, target, 0);
1338
1339 err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
1340 if (!ASSERT_EQ(err, 0, "prog_attach"))
1341 goto cleanup;
1342
1343 assert_mprog_count_ifindex(ifindex, target, 1);
1344
1345 err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
1346 if (!ASSERT_EQ(err, 0, "prog_attach"))
1347 goto cleanup1;
1348
1349 assert_mprog_count_ifindex(ifindex, target, 2);
1350
1351 err = bpf_prog_attach_opts(fd3, ifindex, target, &opta);
1352 if (!ASSERT_EQ(err, 0, "prog_attach"))
1353 goto cleanup2;
1354
1355 assert_mprog_count_ifindex(ifindex, target, 3);
1356
1357 err = bpf_prog_attach_opts(fd4, ifindex, target, &opta);
1358 if (!ASSERT_EQ(err, 0, "prog_attach"))
1359 goto cleanup3;
1360
1361 assert_mprog_count_ifindex(ifindex, target, 4);
1362
1363 goto cleanup;
1364
1365 cleanup3:
1366 err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
1367 ASSERT_OK(err, "prog_detach");
1368
1369 assert_mprog_count_ifindex(ifindex, target, 2);
1370 cleanup2:
1371 err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1372 ASSERT_OK(err, "prog_detach");
1373
1374 assert_mprog_count_ifindex(ifindex, target, 1);
1375 cleanup1:
1376 err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
1377 ASSERT_OK(err, "prog_detach");
1378
1379 assert_mprog_count_ifindex(ifindex, target, 0);
1380 cleanup:
1381 test_tc_link__destroy(skel);
1382
1383 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
1384 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
1385 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
1386 }
1387
test_ns_tc_opts_dev_cleanup(void)1388 void test_ns_tc_opts_dev_cleanup(void)
1389 {
1390 test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS);
1391 test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS);
1392 }
1393
/* Mix opts-based and link-based attachments on @target and exercise
 * BPF_F_REPLACE interactions between the two: replacement via the opts
 * API must neither re-attach a program that is already link-attached
 * nor replace a link-owned chain entry. detach_fd tracks whichever
 * program is currently opts-attached so cleanup can remove it.
 */
static void test_tc_opts_mixed_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	LIBBPF_OPTS(bpf_tcx_opts, optl);
	__u32 pid1, pid2, pid3, pid4, lid2, lid4;
	__u32 prog_flags[4], link_flags[4];
	__u32 prog_ids[4], link_ids[4];
	struct test_tc_link *skel;
	struct bpf_link *link;
	int err, detach_fd;

	skel = test_tc_link__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* Skeleton is opened but not yet loaded: point all four
	 * programs at the requested tcx attach type before loading.
	 */
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
		  0, "tc1_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
		  0, "tc2_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
		  0, "tc3_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
		  0, "tc4_attach_type");

	err = test_tc_link__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));

	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* tc1 goes in via the opts API ... */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	detach_fd = bpf_program__fd(skel->progs.tc1);

	assert_mprog_count(target, 1);

	/* ... tc2 via a tcx link. */
	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
	if (!ASSERT_OK_PTR(link, "link_attach"))
		goto cleanup1;
	skel->links.tc2 = link;

	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
	);

	/* Replacing tc1 with the already (link-)attached tc2 is
	 * rejected with -EEXIST.
	 */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
	);

	/* Replacing tc2 with the already (opts-)attached tc1: also
	 * -EEXIST.
	 */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
	);

	/* Replacing the link-owned tc2 entry with a fresh program is
	 * rejected with -EBUSY.
	 */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EBUSY, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
	);

	/* Replacing the opts-attached tc1 with fresh tc3 succeeds;
	 * chain count stays the same.
	 */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
				   loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	detach_fd = bpf_program__fd(skel->progs.tc3);

	assert_mprog_count(target, 2);

	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
	if (!ASSERT_OK_PTR(link, "link_attach"))
		goto cleanup1;
	skel->links.tc4 = link;

	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));

	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc4),
	);

	/* tc2 is still link-attached, so using it for replacement is
	 * again -EEXIST.
	 */
	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	/* Full query: ids plus per-entry attach flags and link ids, to
	 * distinguish opts entries (link id 0) from link entries.
	 */
	optq.prog_ids = prog_ids;
	optq.prog_attach_flags = prog_flags;
	optq.link_ids = link_ids;
	optq.link_attach_flags = link_flags;

	memset(prog_ids, 0, sizeof(prog_ids));
	memset(prog_flags, 0, sizeof(prog_flags));
	memset(link_ids, 0, sizeof(link_ids));
	memset(link_flags, 0, sizeof(link_flags));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup1;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
	ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]");
	ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]");
	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
	ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]");
	ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]");
	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
	ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]");
	ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
	ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]");
	ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]");
	ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]");

	ASSERT_OK(system(ping_cmd), ping_cmd);

cleanup1:
	/* Remove the opts-attached entry; the links are released by
	 * skeleton destruction below.
	 */
	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup:
	test_tc_link__destroy(skel);
	assert_mprog_count(target, 0);
}
1563
test_ns_tc_opts_mixed(void)1564 void test_ns_tc_opts_mixed(void)
1565 {
1566 test_tc_opts_mixed_target(BPF_TCX_INGRESS);
1567 test_tc_opts_mixed_target(BPF_TCX_EGRESS);
1568 }
1569
test_tc_opts_demixed_target(int target)1570 static void test_tc_opts_demixed_target(int target)
1571 {
1572 LIBBPF_OPTS(bpf_prog_attach_opts, opta);
1573 LIBBPF_OPTS(bpf_prog_detach_opts, optd);
1574 LIBBPF_OPTS(bpf_tcx_opts, optl);
1575 struct test_tc_link *skel;
1576 struct bpf_link *link;
1577 __u32 pid1, pid2;
1578 int err;
1579
1580 skel = test_tc_link__open();
1581 if (!ASSERT_OK_PTR(skel, "skel_open"))
1582 goto cleanup;
1583
1584 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
1585 0, "tc1_attach_type");
1586 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
1587 0, "tc2_attach_type");
1588
1589 err = test_tc_link__load(skel);
1590 if (!ASSERT_OK(err, "skel_load"))
1591 goto cleanup;
1592
1593 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
1594 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
1595 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
1596
1597 assert_mprog_count(target, 0);
1598
1599 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
1600 loopback, target, &opta);
1601 if (!ASSERT_EQ(err, 0, "prog_attach"))
1602 goto cleanup;
1603
1604 assert_mprog_count(target, 1);
1605
1606 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
1607 if (!ASSERT_OK_PTR(link, "link_attach"))
1608 goto cleanup1;
1609 skel->links.tc2 = link;
1610
1611 assert_mprog_count(target, 2);
1612
1613 LIBBPF_OPTS_RESET(optd,
1614 .flags = BPF_F_AFTER,
1615 );
1616
1617 err = bpf_prog_detach_opts(0, loopback, target, &optd);
1618 ASSERT_EQ(err, -EBUSY, "prog_detach");
1619
1620 assert_mprog_count(target, 2);
1621
1622 LIBBPF_OPTS_RESET(optd,
1623 .flags = BPF_F_BEFORE,
1624 );
1625
1626 err = bpf_prog_detach_opts(0, loopback, target, &optd);
1627 ASSERT_OK(err, "prog_detach");
1628
1629 assert_mprog_count(target, 1);
1630 goto cleanup;
1631
1632 cleanup1:
1633 err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1),
1634 loopback, target, &optd);
1635 ASSERT_OK(err, "prog_detach");
1636 assert_mprog_count(target, 2);
1637
1638 cleanup:
1639 test_tc_link__destroy(skel);
1640 assert_mprog_count(target, 0);
1641 }
1642
test_ns_tc_opts_demixed(void)1643 void test_ns_tc_opts_demixed(void)
1644 {
1645 test_tc_opts_demixed_target(BPF_TCX_INGRESS);
1646 test_tc_opts_demixed_target(BPF_TCX_EGRESS);
1647 }
1648
/* Positional detach on @target: with prog fd 0, BPF_F_BEFORE removes
 * the first chain entry and BPF_F_AFTER the last; on an empty chain
 * both must fail with -ENOENT. Query revision/order is checked after
 * every removal.
 */
static void test_tc_opts_detach_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];	/* one spare slot to verify zero termination */
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	/* Sanity: the four programs must have distinct ids. */
	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* Build a chain of four programs in attach order tc1..tc4. */
	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(target, 2);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(target, 3);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(target, 4);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
	);

	/* fd 0 + BPF_F_BEFORE: remove the first entry (tc1). */
	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 3);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 6, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
	);

	/* fd 0 + BPF_F_AFTER: remove the last entry (tc4). */
	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 7, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	/* Drop the remaining two by explicit fd. */
	LIBBPF_OPTS_RESET(optd);

	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
	);

	/* Positional detach on an empty chain: -ENOENT either way. */
	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_EQ(err, -ENOENT, "prog_detach");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_EQ(err, -ENOENT, "prog_detach");
	goto cleanup;

	/* Error-path unwind, entered only when an attach above failed. */
cleanup4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
1813
test_ns_tc_opts_detach(void)1814 void test_ns_tc_opts_detach(void)
1815 {
1816 test_tc_opts_detach_target(BPF_TCX_INGRESS);
1817 test_tc_opts_detach_target(BPF_TCX_EGRESS);
1818 }
1819
/* Relative detach with BPF_F_BEFORE + relative_fd on @target: removal
 * succeeds only when the program being detached sits directly in front
 * of relative_fd. Observed failure modes: -ENOENT when the detach fd
 * or the relative fd is not (or no longer) in the chain, -ERANGE when
 * both are present but not adjacent in the requested relation. With
 * prog fd 0, the entry directly before relative_fd (or, without a
 * relative fd, the first entry) is removed.
 */
static void test_tc_opts_detach_before_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];	/* one spare slot to verify zero termination */
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	/* Sanity: the four programs must have distinct ids. */
	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* Build a chain of four programs in attach order tc1..tc4. */
	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(target, 2);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(target, 3);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(target, 4);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd2,
	);

	/* tc1 sits directly before tc2: detach succeeds. */
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 3);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 6, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd2,
	);

	/* tc1 is no longer attached: -ENOENT. */
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_EQ(err, -ENOENT, "prog_detach");
	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd4,
	);

	/* tc2 is attached but not directly before tc4: -ERANGE. */
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_EQ(err, -ERANGE, "prog_detach");
	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	/* Relative fd tc1 is not in the chain anymore: -ENOENT. */
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_EQ(err, -ENOENT, "prog_detach");
	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd3,
	);

	/* tc2 sits directly before tc3: detach succeeds. */
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 7, "revision");
	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd4,
	);

	/* fd 0: remove whichever entry is directly before tc4 (tc3). */
	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 8, "revision");
	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
	);

	/* fd 0 and no relative fd: remove the first (only) entry. */
	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 0);
	goto cleanup;

	/* Error-path unwind, entered only when an attach above failed. */
cleanup4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
2020
test_ns_tc_opts_detach_before(void)2021 void test_ns_tc_opts_detach_before(void)
2022 {
2023 test_tc_opts_detach_before_target(BPF_TCX_INGRESS);
2024 test_tc_opts_detach_before_target(BPF_TCX_EGRESS);
2025 }
2026
/* Attach tc1..tc4 in order on the given tcx hook, then exercise detach
 * with the BPF_F_AFTER relative flag: detaching "the prog after X" must
 * only succeed when the given prog fd actually sits right after the
 * relative fd, and must fail with -ENOENT/-ERANGE otherwise.  Each
 * successful change bumps the queried revision by one.
 */
static void test_tc_opts_detach_after_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	/* Sanity: the four skeleton programs all have distinct ids. */
	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	/* Build up the chain fd1 -> fd2 -> fd3 -> fd4 via default appends. */
	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(target, 2);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(target, 3);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(target, 4);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* Four attaches on a fresh hook: revision went 1 -> 5. */
	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	/* fd2 sits directly after fd1, so this relative detach succeeds. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 3);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 6, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	/* fd2 is gone now, so detaching it again must report -ENOENT. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_EQ(err, -ENOENT, "prog_detach");
	assert_mprog_count(target, 3);

	/* Nothing comes after the tail prog fd4: rejected with -ERANGE. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd4,
	);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_EQ(err, -ERANGE, "prog_detach");
	assert_mprog_count(target, 3);

	/* fd1 is not the prog after fd3 (fd4 is): rejected with -ERANGE. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd3,
	);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_EQ(err, -ERANGE, "prog_detach");
	assert_mprog_count(target, 3);

	/* fd1 cannot be located after itself either: -ERANGE. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_EQ(err, -ERANGE, "prog_detach");
	assert_mprog_count(target, 3);

	/* After the first removal fd3 follows fd1, so this succeeds. */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 7, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	/* With prog fd 0 the kernel picks whatever follows fd1 (id4 here). */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 8, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	/* BPF_F_AFTER with no relative fd and prog fd 0 drops the last prog,
	 * emptying the hook.
	 */
	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 0);
	goto cleanup;

	/* Unwind partially-built chains on assertion failure, tail first. */
cleanup4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
2236
test_ns_tc_opts_detach_after(void)2237 void test_ns_tc_opts_detach_after(void)
2238 {
2239 test_tc_opts_detach_after_target(BPF_TCX_INGRESS);
2240 test_tc_opts_detach_after_target(BPF_TCX_EGRESS);
2241 }
2242
/* Detaching from a hook that holds no tcx programs must yield -ENOENT,
 * both with and without a legacy clsact-style qdisc chained behind it.
 */
static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	int ret;

	assert_mprog_count(target, 0);
	if (chain_tc_old) {
		/* Creating the legacy hook must not register any tcx progs. */
		if (target == BPF_TCX_INGRESS)
			tc_hook.attach_point = BPF_TC_INGRESS;
		else
			tc_hook.attach_point = BPF_TC_EGRESS;
		ret = bpf_tc_hook_create(&tc_hook);
		ASSERT_OK(ret, "bpf_tc_hook_create");
		assert_mprog_count(target, 0);
	}
	ret = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_EQ(ret, -ENOENT, "prog_detach");
	if (chain_tc_old) {
		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
		bpf_tc_hook_destroy(&tc_hook);
	}
	assert_mprog_count(target, 0);
}
2265
test_ns_tc_opts_delete_empty(void)2266 void test_ns_tc_opts_delete_empty(void)
2267 {
2268 test_tc_opts_delete_empty(BPF_TCX_INGRESS, false);
2269 test_tc_opts_delete_empty(BPF_TCX_EGRESS, false);
2270 test_tc_opts_delete_empty(BPF_TCX_INGRESS, true);
2271 test_tc_opts_delete_empty(BPF_TCX_EGRESS, true);
2272 }
2273
/* Mix the legacy tc (cls_bpf) datapath with tcx on the same hook:
 * attach tc5 via the old bpf_tc_attach() API and tc6 via tcx, verify
 * which programs see traffic, then BPF_F_REPLACE tc6 with tc4 in place
 * and re-verify.  Finally confirm the legacy filter keeps running once
 * the tcx program is detached.
 */
static void test_tc_chain_mixed(int target)
{
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	__u32 fd1, fd2, fd3, id1, id2, id3;
	struct test_tc_link *skel;
	int err, detach_fd;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	/* fd1=tc4 (tcx replacement), fd2=tc5 (legacy), fd3=tc6 (tcx). */
	fd1 = bpf_program__fd(skel->progs.tc4);
	fd2 = bpf_program__fd(skel->progs.tc5);
	fd3 = bpf_program__fd(skel->progs.tc6);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	tc_hook.attach_point = target == BPF_TCX_INGRESS ?
			       BPF_TC_INGRESS : BPF_TC_EGRESS;
	err = bpf_tc_hook_create(&tc_hook);
	/* A pre-existing qdisc is fine for this test. */
	err = err == -EEXIST ? 0 : err;
	if (!ASSERT_OK(err, "bpf_tc_hook_create"))
		goto cleanup;

	/* tc5 goes in through the legacy cls_bpf path... */
	tc_opts.prog_fd = fd2;
	err = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(err, "bpf_tc_attach"))
		goto cleanup_hook;

	/* ...and tc6 through tcx on the same direction. */
	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_filter;

	/* Track which tcx prog is currently attached for cleanup. */
	detach_fd = fd3;

	assert_mprog_count(target, 1);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* tc6 returns without passing to the legacy path, so only it fires. */
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");

	/* Atomically swap tc6 for tc4 at the same position. */
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd3,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_opts;

	detach_fd = fd1;

	/* Replacement must not change the prog count. */
	assert_mprog_count(target, 1);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* tc4 lets the packet continue, so the legacy tc5 now runs too. */
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");

cleanup_opts:
	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	/* With tcx empty, only the legacy cls_bpf filter remains active. */
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");

cleanup_filter:
	/* Clear fields bpf_tc_attach() filled in before detaching. */
	tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
	err = bpf_tc_detach(&tc_hook, &tc_opts);
	ASSERT_OK(err, "bpf_tc_detach");

cleanup_hook:
	tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
	bpf_tc_hook_destroy(&tc_hook);

cleanup:
	test_tc_link__destroy(skel);
}
2372
test_ns_tc_opts_chain_mixed(void)2373 void test_ns_tc_opts_chain_mixed(void)
2374 {
2375 test_tc_chain_mixed(BPF_TCX_INGRESS);
2376 test_tc_chain_mixed(BPF_TCX_EGRESS);
2377 }
2378
generate_dummy_prog(void)2379 static int generate_dummy_prog(void)
2380 {
2381 const struct bpf_insn prog_insns[] = {
2382 BPF_MOV64_IMM(BPF_REG_0, 0),
2383 BPF_EXIT_INSN(),
2384 };
2385 const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
2386 LIBBPF_OPTS(bpf_prog_load_opts, opts);
2387 const size_t log_buf_sz = 256;
2388 char log_buf[log_buf_sz];
2389 int fd = -1;
2390
2391 opts.log_buf = log_buf;
2392 opts.log_size = log_buf_sz;
2393
2394 log_buf[0] = '\0';
2395 opts.log_level = 0;
2396 fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL",
2397 prog_insns, prog_insn_cnt, &opts);
2398 ASSERT_STREQ(log_buf, "", "log_0");
2399 ASSERT_GE(fd, 0, "prog_fd");
2400 return fd;
2401 }
2402
/* Fill a fresh veth device's tcx hook with the maximum of 63 programs,
 * then verify that attaching one more fails with -ERANGE — also when
 * the extra attach is positioned relative to the last program via the
 * given BPF_F_BEFORE/BPF_F_AFTER flags.
 */
static void test_tc_opts_max_target(int target, int flags, bool relative)
{
	int err, ifindex, i, prog_fd, last_fd = -1;
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	const int max_progs = 63;

	/* Use a dedicated veth pair so loopback state is untouched. */
	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
	ifindex = if_nametoindex("tcx_opts1");
	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");

	assert_mprog_count_ifindex(ifindex, target, 0);

	for (i = 0; i < max_progs; i++) {
		prog_fd = generate_dummy_prog();
		if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
			goto cleanup;
		err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
		if (!ASSERT_EQ(err, 0, "prog_attach"))
			goto cleanup;
		assert_mprog_count_ifindex(ifindex, target, i + 1);
		/* Keep the final prog's fd if we need it as relative anchor;
		 * closing the others is fine since attachment pins them.
		 */
		if (i == max_progs - 1 && relative)
			last_fd = prog_fd;
		else
			close(prog_fd);
	}

	/* The 64th program must be rejected regardless of placement. */
	prog_fd = generate_dummy_prog();
	if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
		goto cleanup;
	opta.flags = flags;
	if (last_fd > 0)
		opta.relative_fd = last_fd;
	err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_64_attach");
	assert_mprog_count_ifindex(ifindex, target, max_progs);
	close(prog_fd);
cleanup:
	if (last_fd > 0)
		close(last_fd);
	/* Deleting the veth tears down all attached progs with it. */
	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
}
2446
test_ns_tc_opts_max(void)2447 void test_ns_tc_opts_max(void)
2448 {
2449 test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false);
2450 test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false);
2451
2452 test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false);
2453 test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true);
2454
2455 test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
2456 test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
2457 }
2458
/* Attach tc1..tc4 (checking expected_revision on each attach), then
 * exercise BPF_PROG_QUERY through both the libbpf wrapper and a raw
 * bpf(2) syscall: double-query to size the id array, too-small and
 * oversized arrays, NULL array with nonzero count, nonzero array with
 * zero count, and invalid flag combinations.
 */
static void test_tc_opts_query_target(int target)
{
	const size_t attr_size = offsetofend(union bpf_attr, query);
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	union bpf_attr attr;
	__u32 prog_ids[10];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	assert_mprog_count(target, 0);

	/* Each attach asserts the revision it expects, starting from 1. */
	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 3,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 4,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(target, 4);

	/* Test 1: Double query via libbpf API */
	/* First query with no id buffer only reports count + revision. */
	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
	ASSERT_EQ(optq.link_ids, NULL, "link_ids");

	/* Second query fills the caller-provided array in attach order. */
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.prog_ids = prog_ids;

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(optq.link_ids, NULL, "link_ids");

	/* Test 2: Double query via bpf_attr & bpf(2) directly */
	memset(&attr, 0, attr_size);
	attr.query.target_ifindex = loopback;
	attr.query.attach_type = target;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* The kernel must only write count/revision, nothing else. */
	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	memset(prog_ids, 0, sizeof(prog_ids));
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
	ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	/* Test 3: Query with smaller prog_ids array */
	memset(&attr, 0, attr_size);
	attr.query.target_ifindex = loopback;
	attr.query.attach_type = target;

	memset(prog_ids, 0, sizeof(prog_ids));
	attr.query.prog_ids = ptr_to_u64(prog_ids);
	attr.query.count = 2;

	/* Truncated output: errno ENOSPC, but the first two ids and the
	 * real total count are still reported.
	 */
	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	ASSERT_EQ(err, -1, "prog_query_should_fail");
	ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail");

	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
	ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
	ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	/* Test 4: Query with larger prog_ids array */
	memset(&attr, 0, attr_size);
	attr.query.target_ifindex = loopback;
	attr.query.attach_type = target;

	memset(prog_ids, 0, sizeof(prog_ids));
	attr.query.prog_ids = ptr_to_u64(prog_ids);
	attr.query.count = 10;

	/* Extra room succeeds; count is trimmed to the real total. */
	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
	ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	/* Test 5: Query with NULL prog_ids array but with count > 0 */
	memset(&attr, 0, attr_size);
	attr.query.target_ifindex = loopback;
	attr.query.attach_type = target;

	memset(prog_ids, 0, sizeof(prog_ids));
	/* Only "count > 0" matters here; the byte-size value is arbitrary. */
	attr.query.count = sizeof(prog_ids);

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* Local array must stay untouched since no pointer was passed. */
	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
	ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
	ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
	ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	/* Test 6: Query with non-NULL prog_ids array but with count == 0 */
	memset(&attr, 0, attr_size);
	attr.query.target_ifindex = loopback;
	attr.query.attach_type = target;

	memset(prog_ids, 0, sizeof(prog_ids));
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* count == 0 means the kernel writes no ids into the array. */
	ASSERT_EQ(attr.query.count, 4, "count");
	ASSERT_EQ(attr.query.revision, 5, "revision");
	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
	ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
	ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
	ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
	ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");

	/* Test 7: Query with invalid flags */
	/* Reuses the attr from test 6; either flag field nonzero -> EINVAL. */
	attr.query.attach_flags = 0;
	attr.query.query_flags = 1;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	ASSERT_EQ(err, -1, "prog_query_should_fail");
	ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");

	attr.query.attach_flags = 1;
	attr.query.query_flags = 0;

	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
	ASSERT_EQ(err, -1, "prog_query_should_fail");
	ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");

	/* Unwind the chain tail first; also the error-path cleanup. */
cleanup4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}
2748
test_ns_tc_opts_query(void)2749 void test_ns_tc_opts_query(void)
2750 {
2751 test_tc_opts_query_target(BPF_TCX_INGRESS);
2752 test_tc_opts_query_target(BPF_TCX_EGRESS);
2753 }
2754
test_tc_opts_query_attach_target(int target)2755 static void test_tc_opts_query_attach_target(int target)
2756 {
2757 LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2758 LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2759 LIBBPF_OPTS(bpf_prog_query_opts, optq);
2760 struct test_tc_link *skel;
2761 __u32 prog_ids[2];
2762 __u32 fd1, id1;
2763 int err;
2764
2765 skel = test_tc_link__open_and_load();
2766 if (!ASSERT_OK_PTR(skel, "skel_load"))
2767 goto cleanup;
2768
2769 fd1 = bpf_program__fd(skel->progs.tc1);
2770 id1 = id_from_prog_fd(fd1);
2771
2772 err = bpf_prog_query_opts(loopback, target, &optq);
2773 if (!ASSERT_OK(err, "prog_query"))
2774 goto cleanup;
2775
2776 ASSERT_EQ(optq.count, 0, "count");
2777 ASSERT_EQ(optq.revision, 1, "revision");
2778
2779 LIBBPF_OPTS_RESET(opta,
2780 .expected_revision = optq.revision,
2781 );
2782
2783 err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
2784 if (!ASSERT_EQ(err, 0, "prog_attach"))
2785 goto cleanup;
2786
2787 memset(prog_ids, 0, sizeof(prog_ids));
2788 optq.prog_ids = prog_ids;
2789 optq.count = ARRAY_SIZE(prog_ids);
2790
2791 err = bpf_prog_query_opts(loopback, target, &optq);
2792 if (!ASSERT_OK(err, "prog_query"))
2793 goto cleanup1;
2794
2795 ASSERT_EQ(optq.count, 1, "count");
2796 ASSERT_EQ(optq.revision, 2, "revision");
2797 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2798 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
2799
2800 cleanup1:
2801 err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2802 ASSERT_OK(err, "prog_detach");
2803 assert_mprog_count(target, 0);
2804 cleanup:
2805 test_tc_link__destroy(skel);
2806 }
2807
test_ns_tc_opts_query_attach(void)2808 void test_ns_tc_opts_query_attach(void)
2809 {
2810 test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
2811 test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
2812 }
2813