1 /* Copyright (c) 2017 Facebook 2 * 3 * This program is free software; you can redistribute it and/or 4 * modify it under the terms of version 2 of the GNU General Public 5 * License as published by the Free Software Foundation. 6 */ 7 #include <stdio.h> 8 #include <unistd.h> 9 #include <errno.h> 10 #include <string.h> 11 #include <assert.h> 12 #include <stdlib.h> 13 #include <time.h> 14 15 #include <linux/types.h> 16 typedef __u16 __sum16; 17 #include <arpa/inet.h> 18 #include <linux/if_ether.h> 19 #include <linux/if_packet.h> 20 #include <linux/ip.h> 21 #include <linux/ipv6.h> 22 #include <linux/tcp.h> 23 #include <linux/filter.h> 24 #include <linux/perf_event.h> 25 #include <linux/unistd.h> 26 27 #include <sys/ioctl.h> 28 #include <sys/wait.h> 29 #include <sys/types.h> 30 #include <fcntl.h> 31 32 #include <linux/bpf.h> 33 #include <linux/err.h> 34 #include <bpf/bpf.h> 35 #include <bpf/libbpf.h> 36 37 #include "test_iptunnel_common.h" 38 #include "bpf_util.h" 39 #include "bpf_endian.h" 40 #include "bpf_rlimit.h" 41 42 static int error_cnt, pass_cnt; 43 44 #define MAGIC_BYTES 123 45 46 /* ipv4 test vector */ 47 static struct { 48 struct ethhdr eth; 49 struct iphdr iph; 50 struct tcphdr tcp; 51 } __packed pkt_v4 = { 52 .eth.h_proto = bpf_htons(ETH_P_IP), 53 .iph.ihl = 5, 54 .iph.protocol = 6, 55 .iph.tot_len = bpf_htons(MAGIC_BYTES), 56 .tcp.urg_ptr = 123, 57 }; 58 59 /* ipv6 test vector */ 60 static struct { 61 struct ethhdr eth; 62 struct ipv6hdr iph; 63 struct tcphdr tcp; 64 } __packed pkt_v6 = { 65 .eth.h_proto = bpf_htons(ETH_P_IPV6), 66 .iph.nexthdr = 6, 67 .iph.payload_len = bpf_htons(MAGIC_BYTES), 68 .tcp.urg_ptr = 123, 69 }; 70 71 #define CHECK(condition, tag, format...) ({ \ 72 int __ret = !!(condition); \ 73 if (__ret) { \ 74 error_cnt++; \ 75 printf("%s:FAIL:%s ", __func__, tag); \ 76 printf(format); \ 77 } else { \ 78 pass_cnt++; \ 79 printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ 80 } \ 81 __ret; \ 82 }) 83 84 static int bpf_find_map(const char *test, struct bpf_object *obj, 85 const char *name) 86 { 87 struct bpf_map *map; 88 89 map = bpf_object__find_map_by_name(obj, name); 90 if (!map) { 91 printf("%s:FAIL:map '%s' not found\n", test, name); 92 error_cnt++; 93 return -1; 94 } 95 return bpf_map__fd(map); 96 } 97 98 static void test_pkt_access(void) 99 { 100 const char *file = "./test_pkt_access.o"; 101 struct bpf_object *obj; 102 __u32 duration, retval; 103 int err, prog_fd; 104 105 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 106 if (err) { 107 error_cnt++; 108 return; 109 } 110 111 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), 112 NULL, NULL, &retval, &duration); 113 CHECK(err || errno || retval, "ipv4", 114 "err %d errno %d retval %d duration %d\n", 115 err, errno, retval, duration); 116 117 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), 118 NULL, NULL, &retval, &duration); 119 CHECK(err || errno || retval, "ipv6", 120 "err %d errno %d retval %d duration %d\n", 121 err, errno, retval, duration); 122 bpf_object__close(obj); 123 } 124 125 static void test_xdp(void) 126 { 127 struct vip key4 = {.protocol = 6, .family = AF_INET}; 128 struct vip key6 = {.protocol = 6, .family = AF_INET6}; 129 struct iptnl_info value4 = {.family = AF_INET}; 130 struct iptnl_info value6 = {.family = AF_INET6}; 131 const char *file = "./test_xdp.o"; 132 struct bpf_object *obj; 133 char buf[128]; 134 struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); 135 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); 136 __u32 duration, retval, size; 137 int err, prog_fd, map_fd; 138 139 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 140 if (err) { 141 error_cnt++; 142 return; 143 } 144 145 map_fd = bpf_find_map(__func__, obj, "vip2tnl"); 146 if (map_fd < 0) 147 goto out; 148 bpf_map_update_elem(map_fd, &key4, &value4, 0); 149 bpf_map_update_elem(map_fd, &key6, &value6, 0); 150 151 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 152 buf, &size, &retval, &duration); 153 154 CHECK(err || errno || retval != XDP_TX || size != 74 || 155 iph->protocol != IPPROTO_IPIP, "ipv4", 156 "err %d errno %d retval %d size %d\n", 157 err, errno, retval, size); 158 159 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), 160 buf, &size, &retval, &duration); 161 CHECK(err || errno || retval != XDP_TX || size != 114 || 162 iph6->nexthdr != IPPROTO_IPV6, "ipv6", 163 "err %d errno %d retval %d size %d\n", 164 err, errno, retval, size); 165 out: 166 bpf_object__close(obj); 167 } 168 169 #define MAGIC_VAL 0x1234 170 #define NUM_ITER 100000 171 #define VIP_NUM 5 172 173 static void test_l4lb(const char *file) 174 { 175 unsigned int nr_cpus = bpf_num_possible_cpus(); 176 struct vip key = {.protocol = 6}; 177 struct vip_meta { 178 __u32 flags; 179 __u32 vip_num; 180 } value = {.vip_num = VIP_NUM}; 181 __u32 stats_key = VIP_NUM; 182 struct vip_stats { 183 __u64 bytes; 184 __u64 pkts; 185 } stats[nr_cpus]; 186 struct real_definition { 187 union { 188 __be32 dst; 189 __be32 dstv6[4]; 190 }; 191 __u8 flags; 192 } real_def = {.dst = MAGIC_VAL}; 193 __u32 ch_key = 11, real_num = 3; 194 __u32 duration, retval, size; 195 int err, i, prog_fd, map_fd; 196 __u64 bytes = 0, pkts = 0; 197 struct bpf_object *obj; 198 char buf[128]; 199 u32 *magic = (u32 *)buf; 200 201 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 202 if (err) { 203 error_cnt++; 204 return; 205 } 206 207 map_fd = bpf_find_map(__func__, obj, "vip_map"); 208 if (map_fd < 0) 209 goto out; 210 bpf_map_update_elem(map_fd, &key, &value, 0); 211 212 map_fd = bpf_find_map(__func__, obj, "ch_rings"); 213 if (map_fd < 0) 214 goto out; 215 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); 216 217 map_fd = bpf_find_map(__func__, obj, "reals"); 218 if (map_fd < 0) 219 goto out; 220 bpf_map_update_elem(map_fd, &real_num, &real_def, 0); 221 222 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), 223 buf, &size, &retval, &duration); 224 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || 225 *magic != MAGIC_VAL, "ipv4", 226 "err %d errno %d retval %d size %d magic %x\n", 227 err, errno, retval, size, *magic); 228 229 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), 230 buf, &size, &retval, &duration); 231 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || 232 *magic != MAGIC_VAL, "ipv6", 233 "err %d errno %d retval %d size %d magic %x\n", 234 err, errno, retval, size, *magic); 235 236 map_fd = bpf_find_map(__func__, obj, "stats"); 237 if (map_fd < 0) 238 goto out; 239 bpf_map_lookup_elem(map_fd, &stats_key, stats); 240 for (i = 0; i < nr_cpus; i++) { 241 bytes += stats[i].bytes; 242 pkts += stats[i].pkts; 243 } 244 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 245 error_cnt++; 246 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); 247 } 248 out: 249 bpf_object__close(obj); 250 } 251 252 static void test_l4lb_all(void) 253 { 254 const char *file1 = "./test_l4lb.o"; 255 const char *file2 = "./test_l4lb_noinline.o"; 256 257 test_l4lb(file1); 258 test_l4lb(file2); 259 } 260 261 static void test_xdp_noinline(void) 262 { 263 const char *file = "./test_xdp_noinline.o"; 264 unsigned int nr_cpus = bpf_num_possible_cpus(); 265 struct vip key = {.protocol = 6}; 266 struct vip_meta { 267 __u32 flags; 268 __u32 vip_num; 269 } value = {.vip_num = VIP_NUM}; 270 __u32 stats_key = VIP_NUM; 271 struct vip_stats { 272 __u64 bytes; 273 __u64 pkts; 274 } stats[nr_cpus]; 275 struct real_definition { 276 union { 277 __be32 dst; 278 __be32 dstv6[4]; 279 }; 280 __u8 flags; 281 } real_def = {.dst = MAGIC_VAL}; 282 __u32 ch_key = 11, real_num = 3; 283 __u32 duration, retval, size; 284 int err, i, prog_fd, map_fd; 285 __u64 bytes = 0, pkts = 0; 286 struct bpf_object *obj; 287 char buf[128]; 288 u32 *magic = (u32 *)buf; 289 290 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 291 if (err) { 292 error_cnt++; 293 return; 294 } 295 296 map_fd = bpf_find_map(__func__, obj, "vip_map"); 297 if (map_fd < 0) 298 goto out; 299 bpf_map_update_elem(map_fd, &key, &value, 0); 300 301 map_fd = bpf_find_map(__func__, obj, "ch_rings"); 302 if (map_fd < 0) 303 goto out; 304 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); 305 306 map_fd = bpf_find_map(__func__, obj, "reals"); 307 if (map_fd < 0) 308 goto out; 309 bpf_map_update_elem(map_fd, &real_num, &real_def, 0); 310 311 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), 312 buf, &size, &retval, &duration); 313 CHECK(err || errno || retval != 1 || size != 54 || 314 *magic != MAGIC_VAL, "ipv4", 315 "err %d errno %d retval %d size %d magic %x\n", 316 err, errno, retval, size, *magic); 317 318 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), 319 buf, &size, &retval, &duration); 320 CHECK(err || errno || retval != 1 || size != 74 || 321 *magic != MAGIC_VAL, "ipv6", 322 "err %d errno %d retval %d size %d magic %x\n", 323 err, errno, retval, size, *magic); 324 325 map_fd = bpf_find_map(__func__, obj, "stats"); 326 if (map_fd < 0) 327 goto out; 328 bpf_map_lookup_elem(map_fd, &stats_key, stats); 329 for (i = 0; i < nr_cpus; i++) { 330 bytes += stats[i].bytes; 331 pkts += stats[i].pkts; 332 } 333 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 334 error_cnt++; 335 printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); 336 } 337 out: 338 bpf_object__close(obj); 339 } 340 341 static void test_tcp_estats(void) 342 { 343 const char *file = "./test_tcp_estats.o"; 344 int err, prog_fd; 345 struct bpf_object *obj; 346 __u32 duration = 0; 347 348 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 349 CHECK(err, "", "err %d errno %d\n", err, errno); 350 if (err) { 351 error_cnt++; 352 return; 353 } 354 355 bpf_object__close(obj); 356 } 357 358 static inline __u64 ptr_to_u64(const void *ptr) 359 { 360 return (__u64) (unsigned long) ptr; 361 } 362 363 static void test_bpf_obj_id(void) 364 { 365 const __u64 array_magic_value = 0xfaceb00c; 366 const __u32 array_key = 0; 367 const int nr_iters = 2; 368 const char *file = "./test_obj_id.o"; 369 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; 370 const char *expected_prog_name = "test_obj_id"; 371 const char *expected_map_name = "test_map_id"; 372 const __u64 nsec_per_sec = 1000000000; 373 374 struct bpf_object *objs[nr_iters]; 375 int prog_fds[nr_iters], map_fds[nr_iters]; 376 /* +1 to test for the info_len returned by kernel */ 377 struct bpf_prog_info prog_infos[nr_iters + 1]; 378 struct bpf_map_info map_infos[nr_iters + 1]; 379 /* Each prog only uses one map. +1 to test nr_map_ids 380 * returned by kernel. 381 */ 382 __u32 map_ids[nr_iters + 1]; 383 char jited_insns[128], xlated_insns[128], zeros[128]; 384 __u32 i, next_id, info_len, nr_id_found, duration = 0; 385 struct timespec real_time_ts, boot_time_ts; 386 int sysctl_fd, jit_enabled = 0, err = 0; 387 __u64 array_value; 388 uid_t my_uid = getuid(); 389 time_t now, load_time; 390 391 sysctl_fd = open(jit_sysctl, 0, O_RDONLY); 392 if (sysctl_fd != -1) { 393 char tmpc; 394 395 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1) 396 jit_enabled = (tmpc != '0'); 397 close(sysctl_fd); 398 } 399 400 err = bpf_prog_get_fd_by_id(0); 401 CHECK(err >= 0 || errno != ENOENT, 402 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); 403 404 err = bpf_map_get_fd_by_id(0); 405 CHECK(err >= 0 || errno != ENOENT, 406 "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); 407 408 for (i = 0; i < nr_iters; i++) 409 objs[i] = NULL; 410 411 /* Check bpf_obj_get_info_by_fd() */ 412 bzero(zeros, sizeof(zeros)); 413 for (i = 0; i < nr_iters; i++) { 414 now = time(NULL); 415 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, 416 &objs[i], &prog_fds[i]); 417 /* test_obj_id.o is a dumb prog. It should never fail 418 * to load. 419 */ 420 if (err) 421 error_cnt++; 422 assert(!err); 423 424 /* Insert a magic value to the map */ 425 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); 426 assert(map_fds[i] >= 0); 427 err = bpf_map_update_elem(map_fds[i], &array_key, 428 &array_magic_value, 0); 429 assert(!err); 430 431 /* Check getting map info */ 432 info_len = sizeof(struct bpf_map_info) * 2; 433 bzero(&map_infos[i], info_len); 434 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], 435 &info_len); 436 if (CHECK(err || 437 map_infos[i].type != BPF_MAP_TYPE_ARRAY || 438 map_infos[i].key_size != sizeof(__u32) || 439 map_infos[i].value_size != sizeof(__u64) || 440 map_infos[i].max_entries != 1 || 441 map_infos[i].map_flags != 0 || 442 info_len != sizeof(struct bpf_map_info) || 443 strcmp((char *)map_infos[i].name, expected_map_name), 444 "get-map-info(fd)", 445 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", 446 err, errno, 447 map_infos[i].type, BPF_MAP_TYPE_ARRAY, 448 info_len, sizeof(struct bpf_map_info), 449 map_infos[i].key_size, 450 map_infos[i].value_size, 451 map_infos[i].max_entries, 452 map_infos[i].map_flags, 453 map_infos[i].name, expected_map_name)) 454 goto done; 455 456 /* Check getting prog info */ 457 info_len = sizeof(struct bpf_prog_info) * 2; 458 bzero(&prog_infos[i], info_len); 459 bzero(jited_insns, sizeof(jited_insns)); 460 bzero(xlated_insns, sizeof(xlated_insns)); 461 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); 462 prog_infos[i].jited_prog_len = sizeof(jited_insns); 463 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); 464 prog_infos[i].xlated_prog_len = sizeof(xlated_insns); 465 prog_infos[i].map_ids = ptr_to_u64(map_ids + i); 466 prog_infos[i].nr_map_ids = 2; 467 err = clock_gettime(CLOCK_REALTIME, &real_time_ts); 468 assert(!err); 469 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); 470 assert(!err); 471 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], 472 &info_len); 473 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) 474 + (prog_infos[i].load_time / nsec_per_sec); 475 if (CHECK(err || 476 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || 477 info_len != sizeof(struct bpf_prog_info) || 478 (jit_enabled && !prog_infos[i].jited_prog_len) || 479 (jit_enabled && 480 !memcmp(jited_insns, zeros, sizeof(zeros))) || 481 !prog_infos[i].xlated_prog_len || 482 !memcmp(xlated_insns, zeros, sizeof(zeros)) || 483 load_time < now - 60 || load_time > now + 60 || 484 prog_infos[i].created_by_uid != my_uid || 485 prog_infos[i].nr_map_ids != 1 || 486 *(int *)prog_infos[i].map_ids != map_infos[i].id || 487 strcmp((char *)prog_infos[i].name, expected_prog_name), 488 "get-prog-info(fd)", 489 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", 490 err, errno, i, 491 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 492 info_len, sizeof(struct bpf_prog_info), 493 jit_enabled, 494 prog_infos[i].jited_prog_len, 495 prog_infos[i].xlated_prog_len, 496 !!memcmp(jited_insns, zeros, sizeof(zeros)), 497 !!memcmp(xlated_insns, zeros, sizeof(zeros)), 498 load_time, now, 499 prog_infos[i].created_by_uid, my_uid, 500 prog_infos[i].nr_map_ids, 1, 501 *(int *)prog_infos[i].map_ids, map_infos[i].id, 502 prog_infos[i].name, expected_prog_name)) 503 goto done; 504 } 505 506 /* Check bpf_prog_get_next_id() */ 507 nr_id_found = 0; 508 next_id = 0; 509 while (!bpf_prog_get_next_id(next_id, &next_id)) { 510 struct bpf_prog_info prog_info = {}; 511 __u32 saved_map_id; 512 int prog_fd; 513 514 info_len = sizeof(prog_info); 515 516 prog_fd = bpf_prog_get_fd_by_id(next_id); 517 if (prog_fd < 0 && errno == ENOENT) 518 /* The bpf_prog is in the dead row */ 519 continue; 520 if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", 521 "prog_fd %d next_id %d errno %d\n", 522 prog_fd, next_id, errno)) 523 break; 524 525 for (i = 0; i < nr_iters; i++) 526 if (prog_infos[i].id == next_id) 527 break; 528 529 if (i == nr_iters) 530 continue; 531 532 nr_id_found++; 533 534 /* Negative test: 535 * prog_info.nr_map_ids = 1 536 * prog_info.map_ids = NULL 537 */ 538 prog_info.nr_map_ids = 1; 539 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 540 if (CHECK(!err || errno != EFAULT, 541 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", 542 err, errno, EFAULT)) 543 break; 544 bzero(&prog_info, sizeof(prog_info)); 545 info_len = sizeof(prog_info); 546 547 saved_map_id = *(int *)(prog_infos[i].map_ids); 548 prog_info.map_ids = prog_infos[i].map_ids; 549 prog_info.nr_map_ids = 2; 550 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 551 prog_infos[i].jited_prog_insns = 0; 552 prog_infos[i].xlated_prog_insns = 0; 553 CHECK(err || info_len != sizeof(struct bpf_prog_info) || 554 memcmp(&prog_info, &prog_infos[i], info_len) || 555 *(int *)prog_info.map_ids != saved_map_id, 556 "get-prog-info(next_id->fd)", 557 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n", 558 err, errno, info_len, sizeof(struct bpf_prog_info), 559 memcmp(&prog_info, &prog_infos[i], info_len), 560 *(int *)prog_info.map_ids, saved_map_id); 561 close(prog_fd); 562 } 563 CHECK(nr_id_found != nr_iters, 564 "check total prog id found by get_next_id", 565 "nr_id_found %u(%u)\n", 566 nr_id_found, nr_iters); 567 568 /* Check bpf_map_get_next_id() */ 569 nr_id_found = 0; 570 next_id = 0; 571 while (!bpf_map_get_next_id(next_id, &next_id)) { 572 struct bpf_map_info map_info = {}; 573 int map_fd; 574 575 info_len = sizeof(map_info); 576 577 map_fd = bpf_map_get_fd_by_id(next_id); 578 if (map_fd < 0 && errno == ENOENT) 579 /* The bpf_map is in the dead row */ 580 continue; 581 if (CHECK(map_fd < 0, "get-map-fd(next_id)", 582 "map_fd %d next_id %u errno %d\n", 583 map_fd, next_id, errno)) 584 break; 585 586 for (i = 0; i < nr_iters; i++) 587 if (map_infos[i].id == next_id) 588 break; 589 590 if (i == nr_iters) 591 continue; 592 593 nr_id_found++; 594 595 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); 596 assert(!err); 597 598 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); 599 CHECK(err || info_len != sizeof(struct bpf_map_info) || 600 memcmp(&map_info, &map_infos[i], info_len) || 601 array_value != array_magic_value, 602 "check get-map-info(next_id->fd)", 603 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n", 604 err, errno, info_len, sizeof(struct bpf_map_info), 605 memcmp(&map_info, &map_infos[i], info_len), 606 array_value, array_magic_value); 607 608 close(map_fd); 609 } 610 CHECK(nr_id_found != nr_iters, 611 "check total map id found by get_next_id", 612 "nr_id_found %u(%u)\n", 613 nr_id_found, nr_iters); 614 615 done: 616 for (i = 0; i < nr_iters; i++) 617 bpf_object__close(objs[i]); 618 } 619 620 static void test_pkt_md_access(void) 621 { 622 const char *file = "./test_pkt_md_access.o"; 623 struct bpf_object *obj; 624 __u32 duration, retval; 625 int err, prog_fd; 626 627 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 628 if (err) { 629 error_cnt++; 630 return; 631 } 632 633 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), 634 NULL, NULL, &retval, &duration); 635 CHECK(err || retval, "", 636 "err %d errno %d retval %d duration %d\n", 637 err, errno, retval, duration); 638 639 bpf_object__close(obj); 640 } 641 642 static void test_obj_name(void) 643 { 644 struct { 645 const char *name; 646 int success; 647 int expected_errno; 648 } tests[] = { 649 { "", 1, 0 }, 650 { "_123456789ABCDE", 1, 0 }, 651 { "_123456789ABCDEF", 0, EINVAL }, 652 { "_123456789ABCD\n", 0, EINVAL }, 653 }; 654 struct bpf_insn prog[] = { 655 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 656 BPF_EXIT_INSN(), 657 }; 658 __u32 duration = 0; 659 int i; 660 661 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { 662 size_t name_len = strlen(tests[i].name) + 1; 663 union bpf_attr attr; 664 size_t ncopy; 665 int fd; 666 667 /* test different attr.prog_name during BPF_PROG_LOAD */ 668 ncopy = name_len < sizeof(attr.prog_name) ? 669 name_len : sizeof(attr.prog_name); 670 bzero(&attr, sizeof(attr)); 671 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; 672 attr.insn_cnt = 2; 673 attr.insns = ptr_to_u64(prog); 674 attr.license = ptr_to_u64(""); 675 memcpy(attr.prog_name, tests[i].name, ncopy); 676 677 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); 678 CHECK((tests[i].success && fd < 0) || 679 (!tests[i].success && fd != -1) || 680 (!tests[i].success && errno != tests[i].expected_errno), 681 "check-bpf-prog-name", 682 "fd %d(%d) errno %d(%d)\n", 683 fd, tests[i].success, errno, tests[i].expected_errno); 684 685 if (fd != -1) 686 close(fd); 687 688 /* test different attr.map_name during BPF_MAP_CREATE */ 689 ncopy = name_len < sizeof(attr.map_name) ? 690 name_len : sizeof(attr.map_name); 691 bzero(&attr, sizeof(attr)); 692 attr.map_type = BPF_MAP_TYPE_ARRAY; 693 attr.key_size = 4; 694 attr.value_size = 4; 695 attr.max_entries = 1; 696 attr.map_flags = 0; 697 memcpy(attr.map_name, tests[i].name, ncopy); 698 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); 699 CHECK((tests[i].success && fd < 0) || 700 (!tests[i].success && fd != -1) || 701 (!tests[i].success && errno != tests[i].expected_errno), 702 "check-bpf-map-name", 703 "fd %d(%d) errno %d(%d)\n", 704 fd, tests[i].success, errno, tests[i].expected_errno); 705 706 if (fd != -1) 707 close(fd); 708 } 709 } 710 711 static void test_tp_attach_query(void) 712 { 713 const int num_progs = 3; 714 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; 715 __u32 duration = 0, info_len, saved_prog_ids[num_progs]; 716 const char *file = "./test_tracepoint.o"; 717 struct perf_event_query_bpf *query; 718 struct perf_event_attr attr = {}; 719 struct bpf_object *obj[num_progs]; 720 struct bpf_prog_info prog_info; 721 char buf[256]; 722 723 snprintf(buf, sizeof(buf), 724 "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); 725 efd = open(buf, O_RDONLY, 0); 726 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 727 return; 728 bytes = read(efd, buf, sizeof(buf)); 729 close(efd); 730 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), 731 "read", "bytes %d errno %d\n", bytes, errno)) 732 return; 733 734 attr.config = strtol(buf, NULL, 0); 735 attr.type = PERF_TYPE_TRACEPOINT; 736 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; 737 attr.sample_period = 1; 738 attr.wakeup_events = 1; 739 740 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); 741 for (i = 0; i < num_progs; i++) { 742 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], 743 &prog_fd[i]); 744 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 745 goto cleanup1; 746 747 bzero(&prog_info, sizeof(prog_info)); 748 prog_info.jited_prog_len = 0; 749 prog_info.xlated_prog_len = 0; 750 prog_info.nr_map_ids = 0; 751 info_len = sizeof(prog_info); 752 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); 753 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", 754 err, errno)) 755 goto cleanup1; 756 saved_prog_ids[i] = prog_info.id; 757 758 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 759 0 /* cpu 0 */, -1 /* group id */, 760 0 /* flags */); 761 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n", 762 pmu_fd[i], errno)) 763 goto cleanup2; 764 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0); 765 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", 766 err, errno)) 767 goto cleanup3; 768 769 if (i == 0) { 770 /* check NULL prog array query */ 771 query->ids_len = num_progs; 772 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 773 if (CHECK(err || query->prog_cnt != 0, 774 "perf_event_ioc_query_bpf", 775 "err %d errno %d query->prog_cnt %u\n", 776 err, errno, query->prog_cnt)) 777 goto cleanup3; 778 } 779 780 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]); 781 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", 782 err, errno)) 783 goto cleanup3; 784 785 if (i == 1) { 786 /* try to get # of programs only */ 787 query->ids_len = 0; 788 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 789 if (CHECK(err || query->prog_cnt != 2, 790 "perf_event_ioc_query_bpf", 791 "err %d errno %d query->prog_cnt %u\n", 792 err, errno, query->prog_cnt)) 793 goto cleanup3; 794 795 /* try a few negative tests */ 796 /* invalid query pointer */ 797 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, 798 (struct perf_event_query_bpf *)0x1); 799 if (CHECK(!err || errno != EFAULT, 800 "perf_event_ioc_query_bpf", 801 "err %d errno %d\n", err, errno)) 802 goto cleanup3; 803 804 /* no enough space */ 805 query->ids_len = 1; 806 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 807 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2, 808 "perf_event_ioc_query_bpf", 809 "err %d errno %d query->prog_cnt %u\n", 810 err, errno, query->prog_cnt)) 811 goto cleanup3; 812 } 813 814 query->ids_len = num_progs; 815 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 816 if (CHECK(err || query->prog_cnt != (i + 1), 817 "perf_event_ioc_query_bpf", 818 "err %d errno %d query->prog_cnt %u\n", 819 err, errno, query->prog_cnt)) 820 goto cleanup3; 821 for (j = 0; j < i + 1; j++) 822 if (CHECK(saved_prog_ids[j] != query->ids[j], 823 "perf_event_ioc_query_bpf", 824 "#%d saved_prog_id %x query prog_id %x\n", 825 j, saved_prog_ids[j], query->ids[j])) 826 goto cleanup3; 827 } 828 829 i = num_progs - 1; 830 for (; i >= 0; i--) { 831 cleanup3: 832 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE); 833 cleanup2: 834 close(pmu_fd[i]); 835 cleanup1: 836 bpf_object__close(obj[i]); 837 } 838 free(query); 839 } 840 841 static int compare_map_keys(int map1_fd, int map2_fd) 842 { 843 __u32 key, next_key; 844 char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)]; 845 int err; 846 847 err = bpf_map_get_next_key(map1_fd, NULL, &key); 848 if (err) 849 return err; 850 err = bpf_map_lookup_elem(map2_fd, &key, val_buf); 851 if (err) 852 return err; 853 854 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) { 855 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf); 856 if (err) 857 return err; 858 859 key = next_key; 860 } 861 if (errno != ENOENT) 862 return -1; 863 864 return 0; 865 } 866 867 static void test_stacktrace_map() 868 { 869 int control_map_fd, stackid_hmap_fd, stackmap_fd; 870 const char *file = "./test_stacktrace_map.o"; 871 int bytes, efd, err, pmu_fd, prog_fd; 872 struct perf_event_attr attr = {}; 873 __u32 key, val, duration = 0; 874 struct bpf_object *obj; 875 char buf[256]; 876 877 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 878 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 879 goto out; 880 881 /* Get the ID for the sched/sched_switch tracepoint */ 882 snprintf(buf, sizeof(buf), 883 "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); 884 efd = open(buf, O_RDONLY, 0); 885 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 886 goto close_prog; 887 888 bytes = read(efd, buf, sizeof(buf)); 889 close(efd); 890 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), 891 "read", "bytes %d errno %d\n", bytes, errno)) 892 goto close_prog; 893 894 /* Open the perf event and attach bpf progrram */ 895 attr.config = strtol(buf, NULL, 0); 896 attr.type = PERF_TYPE_TRACEPOINT; 897 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; 898 attr.sample_period = 1; 899 attr.wakeup_events = 1; 900 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 901 0 /* cpu 0 */, -1 /* group id */, 902 0 /* flags */); 903 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", 904 pmu_fd, errno)) 905 goto close_prog; 906 907 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 908 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", 909 err, errno)) 910 goto close_pmu; 911 912 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 913 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", 914 err, errno)) 915 goto disable_pmu; 916 917 /* find map fds */ 918 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 919 if (CHECK(control_map_fd < 0, "bpf_find_map control_map", 920 "err %d errno %d\n", err, errno)) 921 goto disable_pmu; 922 923 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 924 if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", 925 "err %d errno %d\n", err, errno)) 926 goto disable_pmu; 927 928 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 929 if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", 930 err, errno)) 931 goto disable_pmu; 932 933 /* give some time for bpf program run */ 934 sleep(1); 935 936 /* disable stack trace collection */ 937 key = 0; 938 val = 1; 939 bpf_map_update_elem(control_map_fd, &key, &val, 0); 940 941 /* for every element in stackid_hmap, we can find a corresponding one 942 * in stackmap, and vise versa. 943 */ 944 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 945 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 946 "err %d errno %d\n", err, errno)) 947 goto disable_pmu; 948 949 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 950 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 951 "err %d errno %d\n", err, errno)) 952 ; /* fall through */ 953 954 disable_pmu: 955 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 956 957 close_pmu: 958 close(pmu_fd); 959 960 close_prog: 961 bpf_object__close(obj); 962 963 out: 964 return; 965 } 966 967 int main(void) 968 { 969 test_pkt_access(); 970 test_xdp(); 971 test_l4lb_all(); 972 test_xdp_noinline(); 973 test_tcp_estats(); 974 test_bpf_obj_id(); 975 test_pkt_md_access(); 976 test_obj_name(); 977 test_tp_attach_query(); 978 test_stacktrace_map(); 979 980 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); 981 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 982 } 983