xref: /linux/tools/testing/selftests/bpf/test_progs.c (revision 064223c1231ce508efaded6576ffdb07de9307b5)
1 /* Copyright (c) 2017 Facebook
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  */
7 #include <stdio.h>
8 #include <unistd.h>
9 #include <errno.h>
10 #include <string.h>
11 #include <assert.h>
12 #include <stdlib.h>
13 #include <time.h>
14 
15 #include <linux/types.h>
16 typedef __u16 __sum16;
17 #include <arpa/inet.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
20 #include <linux/ip.h>
21 #include <linux/ipv6.h>
22 #include <linux/tcp.h>
23 #include <linux/filter.h>
24 #include <linux/perf_event.h>
25 #include <linux/unistd.h>
26 
27 #include <sys/ioctl.h>
28 #include <sys/wait.h>
29 #include <sys/types.h>
30 #include <fcntl.h>
31 
32 #include <linux/bpf.h>
33 #include <linux/err.h>
34 #include <bpf/bpf.h>
35 #include <bpf/libbpf.h>
36 
37 #include "test_iptunnel_common.h"
38 #include "bpf_util.h"
39 #include "bpf_endian.h"
40 #include "bpf_rlimit.h"
41 
/* Global pass/fail counters, bumped by CHECK(), bpf_find_map() and the
 * individual tests; presumably summarized when the harness exits — the
 * reporting code is outside this chunk.
 */
static int error_cnt, pass_cnt;
43 
/* Payload length advertised in both test headers; the l4lb stats check
 * below multiplies it by the iteration count.
 */
#define MAGIC_BYTES 123

/* ipv4 test vector: minimal packed Ethernet/IPv4/TCP frame fed to
 * bpf_prog_test_run() by the tests below.
 */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,	/* TCP */
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};
58 
/* ipv6 test vector: packed Ethernet/IPv6/TCP counterpart of pkt_v4 */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,	/* TCP */
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};
70 
/*
 * Test assertion helper.  "condition" is a FAILURE predicate: nonzero
 * means the check failed, in which case error_cnt is bumped and the
 * printf-style diagnostic is printed; otherwise pass_cnt is bumped and a
 * PASS line is printed.  The whole expression evaluates to !!(condition)
 * so callers can branch on it.  NOTE: the PASS branch prints a variable
 * literally named "duration", so every caller must have one in scope.
 */
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})
83 
84 static int bpf_find_map(const char *test, struct bpf_object *obj,
85 			const char *name)
86 {
87 	struct bpf_map *map;
88 
89 	map = bpf_object__find_map_by_name(obj, name);
90 	if (!map) {
91 		printf("%s:FAIL:map '%s' not found\n", test, name);
92 		error_cnt++;
93 		return -1;
94 	}
95 	return bpf_map__fd(map);
96 }
97 
98 static void test_pkt_access(void)
99 {
100 	const char *file = "./test_pkt_access.o";
101 	struct bpf_object *obj;
102 	__u32 duration, retval;
103 	int err, prog_fd;
104 
105 	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
106 	if (err) {
107 		error_cnt++;
108 		return;
109 	}
110 
111 	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
112 				NULL, NULL, &retval, &duration);
113 	CHECK(err || errno || retval, "ipv4",
114 	      "err %d errno %d retval %d duration %d\n",
115 	      err, errno, retval, duration);
116 
117 	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
118 				NULL, NULL, &retval, &duration);
119 	CHECK(err || errno || retval, "ipv6",
120 	      "err %d errno %d retval %d duration %d\n",
121 	      err, errno, retval, duration);
122 	bpf_object__close(obj);
123 }
124 
125 static void test_xdp(void)
126 {
127 	struct vip key4 = {.protocol = 6, .family = AF_INET};
128 	struct vip key6 = {.protocol = 6, .family = AF_INET6};
129 	struct iptnl_info value4 = {.family = AF_INET};
130 	struct iptnl_info value6 = {.family = AF_INET6};
131 	const char *file = "./test_xdp.o";
132 	struct bpf_object *obj;
133 	char buf[128];
134 	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
135 	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
136 	__u32 duration, retval, size;
137 	int err, prog_fd, map_fd;
138 
139 	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
140 	if (err) {
141 		error_cnt++;
142 		return;
143 	}
144 
145 	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
146 	if (map_fd < 0)
147 		goto out;
148 	bpf_map_update_elem(map_fd, &key4, &value4, 0);
149 	bpf_map_update_elem(map_fd, &key6, &value6, 0);
150 
151 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
152 				buf, &size, &retval, &duration);
153 
154 	CHECK(err || errno || retval != XDP_TX || size != 74 ||
155 	      iph->protocol != IPPROTO_IPIP, "ipv4",
156 	      "err %d errno %d retval %d size %d\n",
157 	      err, errno, retval, size);
158 
159 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
160 				buf, &size, &retval, &duration);
161 	CHECK(err || errno || retval != XDP_TX || size != 114 ||
162 	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
163 	      "err %d errno %d retval %d size %d\n",
164 	      err, errno, retval, size);
165 out:
166 	bpf_object__close(obj);
167 }
168 
169 static void test_xdp_adjust_tail(void)
170 {
171 	const char *file = "./test_adjust_tail.o";
172 	struct bpf_object *obj;
173 	char buf[128];
174 	__u32 duration, retval, size;
175 	int err, prog_fd;
176 
177 	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
178 	if (err) {
179 		error_cnt++;
180 		return;
181 	}
182 
183 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
184 				buf, &size, &retval, &duration);
185 
186 	CHECK(err || errno || retval != XDP_DROP,
187 	      "ipv4", "err %d errno %d retval %d size %d\n",
188 	      err, errno, retval, size);
189 
190 	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
191 				buf, &size, &retval, &duration);
192 	CHECK(err || errno || retval != XDP_TX || size != 54,
193 	      "ipv6", "err %d errno %d retval %d size %d\n",
194 	      err, errno, retval, size);
195 	bpf_object__close(obj);
196 }
197 
198 
199 
/* Marker value the l4lb progs write into the output packet */
#define MAGIC_VAL 0x1234
/* bpf_prog_test_run() repeat count for the l4lb/xdp_noinline tests */
#define NUM_ITER 100000
/* vip_num used both as the vip_meta value and as the stats map key */
#define VIP_NUM 5
203 
/* Load an L4 load-balancer prog (flavor selected by "file"), seed its
 * vip_map/ch_rings/reals maps with a single backend, run it NUM_ITER
 * times over both test vectors, and verify the redirect verdict, the
 * rewritten packet's leading MAGIC_VAL word, and the summed per-cpu
 * stats totals.
 */
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];	/* VLA: one slot per possible cpu */
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;	/* first word of the output packet */

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	/* one vip entry, one consistent-hash ring slot, one real backend */
	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	/* stats are per-cpu: sum all slots and compare with what the
	 * 2 * NUM_ITER runs above must have accumulated.
	 */
	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}
282 
/* Exercise both flavors of the l4lb program object. */
static void test_l4lb_all(void)
{
	static const char * const files[] = {
		"./test_l4lb.o",
		"./test_l4lb_noinline.o",
	};
	int i;

	for (i = 0; i < 2; i++)
		test_l4lb(files[i]);
}
291 
/* Same flow as test_l4lb(), but for the noinline XDP flavor: seed the
 * maps, run both test vectors, and check verdict (XDP_TX == 1), output
 * size, MAGIC_VAL marker, and the summed per-cpu stats.
 */
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];	/* VLA: one slot per possible cpu */
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;	/* first word of the output packet */

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	/* one vip entry, one consistent-hash ring slot, one real backend */
	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	/* stats are per-cpu: sum all slots and compare with what the
	 * 2 * NUM_ITER runs above must have accumulated.
	 */
	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}
371 
372 static void test_tcp_estats(void)
373 {
374 	const char *file = "./test_tcp_estats.o";
375 	int err, prog_fd;
376 	struct bpf_object *obj;
377 	__u32 duration = 0;
378 
379 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
380 	CHECK(err, "", "err %d errno %d\n", err, errno);
381 	if (err) {
382 		error_cnt++;
383 		return;
384 	}
385 
386 	bpf_object__close(obj);
387 }
388 
/* Convert a pointer into the 64-bit integer form the bpf(2) uapi uses.
 * Going through unsigned long makes 32-bit pointers zero-extend.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
393 
394 static void test_bpf_obj_id(void)
395 {
396 	const __u64 array_magic_value = 0xfaceb00c;
397 	const __u32 array_key = 0;
398 	const int nr_iters = 2;
399 	const char *file = "./test_obj_id.o";
400 	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
401 	const char *expected_prog_name = "test_obj_id";
402 	const char *expected_map_name = "test_map_id";
403 	const __u64 nsec_per_sec = 1000000000;
404 
405 	struct bpf_object *objs[nr_iters];
406 	int prog_fds[nr_iters], map_fds[nr_iters];
407 	/* +1 to test for the info_len returned by kernel */
408 	struct bpf_prog_info prog_infos[nr_iters + 1];
409 	struct bpf_map_info map_infos[nr_iters + 1];
410 	/* Each prog only uses one map. +1 to test nr_map_ids
411 	 * returned by kernel.
412 	 */
413 	__u32 map_ids[nr_iters + 1];
414 	char jited_insns[128], xlated_insns[128], zeros[128];
415 	__u32 i, next_id, info_len, nr_id_found, duration = 0;
416 	struct timespec real_time_ts, boot_time_ts;
417 	int sysctl_fd, jit_enabled = 0, err = 0;
418 	__u64 array_value;
419 	uid_t my_uid = getuid();
420 	time_t now, load_time;
421 
422 	sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
423 	if (sysctl_fd != -1) {
424 		char tmpc;
425 
426 		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
427 			jit_enabled = (tmpc != '0');
428 		close(sysctl_fd);
429 	}
430 
431 	err = bpf_prog_get_fd_by_id(0);
432 	CHECK(err >= 0 || errno != ENOENT,
433 	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
434 
435 	err = bpf_map_get_fd_by_id(0);
436 	CHECK(err >= 0 || errno != ENOENT,
437 	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
438 
439 	for (i = 0; i < nr_iters; i++)
440 		objs[i] = NULL;
441 
442 	/* Check bpf_obj_get_info_by_fd() */
443 	bzero(zeros, sizeof(zeros));
444 	for (i = 0; i < nr_iters; i++) {
445 		now = time(NULL);
446 		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
447 				    &objs[i], &prog_fds[i]);
448 		/* test_obj_id.o is a dumb prog. It should never fail
449 		 * to load.
450 		 */
451 		if (err)
452 			error_cnt++;
453 		assert(!err);
454 
455 		/* Insert a magic value to the map */
456 		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
457 		assert(map_fds[i] >= 0);
458 		err = bpf_map_update_elem(map_fds[i], &array_key,
459 					  &array_magic_value, 0);
460 		assert(!err);
461 
462 		/* Check getting map info */
463 		info_len = sizeof(struct bpf_map_info) * 2;
464 		bzero(&map_infos[i], info_len);
465 		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
466 					     &info_len);
467 		if (CHECK(err ||
468 			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
469 			  map_infos[i].key_size != sizeof(__u32) ||
470 			  map_infos[i].value_size != sizeof(__u64) ||
471 			  map_infos[i].max_entries != 1 ||
472 			  map_infos[i].map_flags != 0 ||
473 			  info_len != sizeof(struct bpf_map_info) ||
474 			  strcmp((char *)map_infos[i].name, expected_map_name),
475 			  "get-map-info(fd)",
476 			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
477 			  err, errno,
478 			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
479 			  info_len, sizeof(struct bpf_map_info),
480 			  map_infos[i].key_size,
481 			  map_infos[i].value_size,
482 			  map_infos[i].max_entries,
483 			  map_infos[i].map_flags,
484 			  map_infos[i].name, expected_map_name))
485 			goto done;
486 
487 		/* Check getting prog info */
488 		info_len = sizeof(struct bpf_prog_info) * 2;
489 		bzero(&prog_infos[i], info_len);
490 		bzero(jited_insns, sizeof(jited_insns));
491 		bzero(xlated_insns, sizeof(xlated_insns));
492 		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
493 		prog_infos[i].jited_prog_len = sizeof(jited_insns);
494 		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
495 		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
496 		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
497 		prog_infos[i].nr_map_ids = 2;
498 		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
499 		assert(!err);
500 		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
501 		assert(!err);
502 		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
503 					     &info_len);
504 		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
505 			+ (prog_infos[i].load_time / nsec_per_sec);
506 		if (CHECK(err ||
507 			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
508 			  info_len != sizeof(struct bpf_prog_info) ||
509 			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
510 			  (jit_enabled &&
511 			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
512 			  !prog_infos[i].xlated_prog_len ||
513 			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
514 			  load_time < now - 60 || load_time > now + 60 ||
515 			  prog_infos[i].created_by_uid != my_uid ||
516 			  prog_infos[i].nr_map_ids != 1 ||
517 			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
518 			  strcmp((char *)prog_infos[i].name, expected_prog_name),
519 			  "get-prog-info(fd)",
520 			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
521 			  err, errno, i,
522 			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
523 			  info_len, sizeof(struct bpf_prog_info),
524 			  jit_enabled,
525 			  prog_infos[i].jited_prog_len,
526 			  prog_infos[i].xlated_prog_len,
527 			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
528 			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
529 			  load_time, now,
530 			  prog_infos[i].created_by_uid, my_uid,
531 			  prog_infos[i].nr_map_ids, 1,
532 			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
533 			  prog_infos[i].name, expected_prog_name))
534 			goto done;
535 	}
536 
537 	/* Check bpf_prog_get_next_id() */
538 	nr_id_found = 0;
539 	next_id = 0;
540 	while (!bpf_prog_get_next_id(next_id, &next_id)) {
541 		struct bpf_prog_info prog_info = {};
542 		__u32 saved_map_id;
543 		int prog_fd;
544 
545 		info_len = sizeof(prog_info);
546 
547 		prog_fd = bpf_prog_get_fd_by_id(next_id);
548 		if (prog_fd < 0 && errno == ENOENT)
549 			/* The bpf_prog is in the dead row */
550 			continue;
551 		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
552 			  "prog_fd %d next_id %d errno %d\n",
553 			  prog_fd, next_id, errno))
554 			break;
555 
556 		for (i = 0; i < nr_iters; i++)
557 			if (prog_infos[i].id == next_id)
558 				break;
559 
560 		if (i == nr_iters)
561 			continue;
562 
563 		nr_id_found++;
564 
565 		/* Negative test:
566 		 * prog_info.nr_map_ids = 1
567 		 * prog_info.map_ids = NULL
568 		 */
569 		prog_info.nr_map_ids = 1;
570 		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
571 		if (CHECK(!err || errno != EFAULT,
572 			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
573 			  err, errno, EFAULT))
574 			break;
575 		bzero(&prog_info, sizeof(prog_info));
576 		info_len = sizeof(prog_info);
577 
578 		saved_map_id = *(int *)(prog_infos[i].map_ids);
579 		prog_info.map_ids = prog_infos[i].map_ids;
580 		prog_info.nr_map_ids = 2;
581 		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
582 		prog_infos[i].jited_prog_insns = 0;
583 		prog_infos[i].xlated_prog_insns = 0;
584 		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
585 		      memcmp(&prog_info, &prog_infos[i], info_len) ||
586 		      *(int *)prog_info.map_ids != saved_map_id,
587 		      "get-prog-info(next_id->fd)",
588 		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
589 		      err, errno, info_len, sizeof(struct bpf_prog_info),
590 		      memcmp(&prog_info, &prog_infos[i], info_len),
591 		      *(int *)prog_info.map_ids, saved_map_id);
592 		close(prog_fd);
593 	}
594 	CHECK(nr_id_found != nr_iters,
595 	      "check total prog id found by get_next_id",
596 	      "nr_id_found %u(%u)\n",
597 	      nr_id_found, nr_iters);
598 
599 	/* Check bpf_map_get_next_id() */
600 	nr_id_found = 0;
601 	next_id = 0;
602 	while (!bpf_map_get_next_id(next_id, &next_id)) {
603 		struct bpf_map_info map_info = {};
604 		int map_fd;
605 
606 		info_len = sizeof(map_info);
607 
608 		map_fd = bpf_map_get_fd_by_id(next_id);
609 		if (map_fd < 0 && errno == ENOENT)
610 			/* The bpf_map is in the dead row */
611 			continue;
612 		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
613 			  "map_fd %d next_id %u errno %d\n",
614 			  map_fd, next_id, errno))
615 			break;
616 
617 		for (i = 0; i < nr_iters; i++)
618 			if (map_infos[i].id == next_id)
619 				break;
620 
621 		if (i == nr_iters)
622 			continue;
623 
624 		nr_id_found++;
625 
626 		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
627 		assert(!err);
628 
629 		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
630 		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
631 		      memcmp(&map_info, &map_infos[i], info_len) ||
632 		      array_value != array_magic_value,
633 		      "check get-map-info(next_id->fd)",
634 		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
635 		      err, errno, info_len, sizeof(struct bpf_map_info),
636 		      memcmp(&map_info, &map_infos[i], info_len),
637 		      array_value, array_magic_value);
638 
639 		close(map_fd);
640 	}
641 	CHECK(nr_id_found != nr_iters,
642 	      "check total map id found by get_next_id",
643 	      "nr_id_found %u(%u)\n",
644 	      nr_id_found, nr_iters);
645 
646 done:
647 	for (i = 0; i < nr_iters; i++)
648 		bpf_object__close(objs[i]);
649 }
650 
651 static void test_pkt_md_access(void)
652 {
653 	const char *file = "./test_pkt_md_access.o";
654 	struct bpf_object *obj;
655 	__u32 duration, retval;
656 	int err, prog_fd;
657 
658 	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
659 	if (err) {
660 		error_cnt++;
661 		return;
662 	}
663 
664 	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
665 				NULL, NULL, &retval, &duration);
666 	CHECK(err || retval, "",
667 	      "err %d errno %d retval %d duration %d\n",
668 	      err, errno, retval, duration);
669 
670 	bpf_object__close(obj);
671 }
672 
/* Validate kernel-side name checking for BPF_PROG_LOAD's prog_name and
 * BPF_MAP_CREATE's map_name: names up to 15 chars of [A-Za-z0-9_.] are
 * accepted, longer or otherwise invalid names get EINVAL.  Uses raw
 * bpf(2) syscalls so the exact attr contents are under test control.
 */
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;		/* 1 if the kernel must accept it */
		int expected_errno;	/* errno expected when rejected */
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },	/* 15 chars: max length */
		{ "_123456789ABCDEF", 0, EINVAL },	/* 16 chars: too long */
		{ "_123456789ABCD\n", 0, EINVAL },	/* invalid character */
	};
	/* minimal valid prog: return 0 */
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		       fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}
741 
742 static void test_tp_attach_query(void)
743 {
744 	const int num_progs = 3;
745 	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
746 	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
747 	const char *file = "./test_tracepoint.o";
748 	struct perf_event_query_bpf *query;
749 	struct perf_event_attr attr = {};
750 	struct bpf_object *obj[num_progs];
751 	struct bpf_prog_info prog_info;
752 	char buf[256];
753 
754 	snprintf(buf, sizeof(buf),
755 		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
756 	efd = open(buf, O_RDONLY, 0);
757 	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
758 		return;
759 	bytes = read(efd, buf, sizeof(buf));
760 	close(efd);
761 	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
762 		  "read", "bytes %d errno %d\n", bytes, errno))
763 		return;
764 
765 	attr.config = strtol(buf, NULL, 0);
766 	attr.type = PERF_TYPE_TRACEPOINT;
767 	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
768 	attr.sample_period = 1;
769 	attr.wakeup_events = 1;
770 
771 	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
772 	for (i = 0; i < num_progs; i++) {
773 		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
774 				    &prog_fd[i]);
775 		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
776 			goto cleanup1;
777 
778 		bzero(&prog_info, sizeof(prog_info));
779 		prog_info.jited_prog_len = 0;
780 		prog_info.xlated_prog_len = 0;
781 		prog_info.nr_map_ids = 0;
782 		info_len = sizeof(prog_info);
783 		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
784 		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
785 			  err, errno))
786 			goto cleanup1;
787 		saved_prog_ids[i] = prog_info.id;
788 
789 		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
790 				    0 /* cpu 0 */, -1 /* group id */,
791 				    0 /* flags */);
792 		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
793 			  pmu_fd[i], errno))
794 			goto cleanup2;
795 		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
796 		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
797 			  err, errno))
798 			goto cleanup3;
799 
800 		if (i == 0) {
801 			/* check NULL prog array query */
802 			query->ids_len = num_progs;
803 			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
804 			if (CHECK(err || query->prog_cnt != 0,
805 				  "perf_event_ioc_query_bpf",
806 				  "err %d errno %d query->prog_cnt %u\n",
807 				  err, errno, query->prog_cnt))
808 				goto cleanup3;
809 		}
810 
811 		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
812 		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
813 			  err, errno))
814 			goto cleanup3;
815 
816 		if (i == 1) {
817 			/* try to get # of programs only */
818 			query->ids_len = 0;
819 			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
820 			if (CHECK(err || query->prog_cnt != 2,
821 				  "perf_event_ioc_query_bpf",
822 				  "err %d errno %d query->prog_cnt %u\n",
823 				  err, errno, query->prog_cnt))
824 				goto cleanup3;
825 
826 			/* try a few negative tests */
827 			/* invalid query pointer */
828 			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
829 				    (struct perf_event_query_bpf *)0x1);
830 			if (CHECK(!err || errno != EFAULT,
831 				  "perf_event_ioc_query_bpf",
832 				  "err %d errno %d\n", err, errno))
833 				goto cleanup3;
834 
835 			/* no enough space */
836 			query->ids_len = 1;
837 			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
838 			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
839 				  "perf_event_ioc_query_bpf",
840 				  "err %d errno %d query->prog_cnt %u\n",
841 				  err, errno, query->prog_cnt))
842 				goto cleanup3;
843 		}
844 
845 		query->ids_len = num_progs;
846 		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
847 		if (CHECK(err || query->prog_cnt != (i + 1),
848 			  "perf_event_ioc_query_bpf",
849 			  "err %d errno %d query->prog_cnt %u\n",
850 			  err, errno, query->prog_cnt))
851 			goto cleanup3;
852 		for (j = 0; j < i + 1; j++)
853 			if (CHECK(saved_prog_ids[j] != query->ids[j],
854 				  "perf_event_ioc_query_bpf",
855 				  "#%d saved_prog_id %x query prog_id %x\n",
856 				  j, saved_prog_ids[j], query->ids[j]))
857 				goto cleanup3;
858 	}
859 
860 	i = num_progs - 1;
861 	for (; i >= 0; i--) {
862  cleanup3:
863 		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
864  cleanup2:
865 		close(pmu_fd[i]);
866  cleanup1:
867 		bpf_object__close(obj[i]);
868 	}
869 	free(query);
870 }
871 
872 static int compare_map_keys(int map1_fd, int map2_fd)
873 {
874 	__u32 key, next_key;
875 	char val_buf[PERF_MAX_STACK_DEPTH *
876 		     sizeof(struct bpf_stack_build_id)];
877 	int err;
878 
879 	err = bpf_map_get_next_key(map1_fd, NULL, &key);
880 	if (err)
881 		return err;
882 	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
883 	if (err)
884 		return err;
885 
886 	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
887 		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
888 		if (err)
889 			return err;
890 
891 		key = next_key;
892 	}
893 	if (errno != ENOENT)
894 		return -1;
895 
896 	return 0;
897 }
898 
/* Attach the stacktrace prog to the sched_switch tracepoint via a perf
 * event, let it collect stack ids for a second, then check that the
 * stackid_hmap and stackmap key sets mirror each other.
 */
static void test_stacktrace_map()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	/* NOTE(review): this failure is silent — it neither prints nor
	 * bumps error_cnt; presumably intentional best-effort, verify.
	 */
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;

	/* find map fds.  NOTE(review): bpf_find_map() already bumps
	 * error_cnt on failure, and "disable_pmu" bumps it again —
	 * these failures are double-counted; confirm if intended.
	 */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	/* give some time for bpf program run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	/* plain (non-CHECK) failures above land here to be counted */
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}
989 
990 static void test_stacktrace_map_raw_tp()
991 {
992 	int control_map_fd, stackid_hmap_fd, stackmap_fd;
993 	const char *file = "./test_stacktrace_map.o";
994 	int efd, err, prog_fd;
995 	__u32 key, val, duration = 0;
996 	struct bpf_object *obj;
997 
998 	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
999 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
1000 		return;
1001 
1002 	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
1003 	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
1004 		goto close_prog;
1005 
1006 	/* find map fds */
1007 	control_map_fd = bpf_find_map(__func__, obj, "control_map");
1008 	if (control_map_fd < 0)
1009 		goto close_prog;
1010 
1011 	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1012 	if (stackid_hmap_fd < 0)
1013 		goto close_prog;
1014 
1015 	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1016 	if (stackmap_fd < 0)
1017 		goto close_prog;
1018 
1019 	/* give some time for bpf program run */
1020 	sleep(1);
1021 
1022 	/* disable stack trace collection */
1023 	key = 0;
1024 	val = 1;
1025 	bpf_map_update_elem(control_map_fd, &key, &val, 0);
1026 
1027 	/* for every element in stackid_hmap, we can find a corresponding one
1028 	 * in stackmap, and vise versa.
1029 	 */
1030 	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1031 	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1032 		  "err %d errno %d\n", err, errno))
1033 		goto close_prog;
1034 
1035 	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1036 	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1037 		  "err %d errno %d\n", err, errno))
1038 		goto close_prog;
1039 
1040 	goto close_prog_noerr;
1041 close_prog:
1042 	error_cnt++;
1043 close_prog_noerr:
1044 	bpf_object__close(obj);
1045 }
1046 
1047 static int extract_build_id(char *build_id, size_t size)
1048 {
1049 	FILE *fp;
1050 	char *line = NULL;
1051 	size_t len = 0;
1052 
1053 	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
1054 	if (fp == NULL)
1055 		return -1;
1056 
1057 	if (getline(&line, &len, fp) == -1)
1058 		goto err;
1059 	fclose(fp);
1060 
1061 	if (len > size)
1062 		len = size;
1063 	memcpy(build_id, line, len);
1064 	build_id[len] = '\0';
1065 	return 0;
1066 err:
1067 	fclose(fp);
1068 	return -1;
1069 }
1070 
1071 static void test_stacktrace_build_id(void)
1072 {
1073 	int control_map_fd, stackid_hmap_fd, stackmap_fd;
1074 	const char *file = "./test_stacktrace_build_id.o";
1075 	int bytes, efd, err, pmu_fd, prog_fd;
1076 	struct perf_event_attr attr = {};
1077 	__u32 key, previous_key, val, duration = 0;
1078 	struct bpf_object *obj;
1079 	char buf[256];
1080 	int i, j;
1081 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1082 	int build_id_matches = 0;
1083 
1084 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
1085 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1086 		goto out;
1087 
1088 	/* Get the ID for the sched/sched_switch tracepoint */
1089 	snprintf(buf, sizeof(buf),
1090 		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
1091 	efd = open(buf, O_RDONLY, 0);
1092 	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
1093 		goto close_prog;
1094 
1095 	bytes = read(efd, buf, sizeof(buf));
1096 	close(efd);
1097 	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
1098 		  "read", "bytes %d errno %d\n", bytes, errno))
1099 		goto close_prog;
1100 
1101 	/* Open the perf event and attach bpf progrram */
1102 	attr.config = strtol(buf, NULL, 0);
1103 	attr.type = PERF_TYPE_TRACEPOINT;
1104 	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
1105 	attr.sample_period = 1;
1106 	attr.wakeup_events = 1;
1107 	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1108 			 0 /* cpu 0 */, -1 /* group id */,
1109 			 0 /* flags */);
1110 	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
1111 		  pmu_fd, errno))
1112 		goto close_prog;
1113 
1114 	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1115 	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1116 		  err, errno))
1117 		goto close_pmu;
1118 
1119 	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1120 	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
1121 		  err, errno))
1122 		goto disable_pmu;
1123 
1124 	/* find map fds */
1125 	control_map_fd = bpf_find_map(__func__, obj, "control_map");
1126 	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1127 		  "err %d errno %d\n", err, errno))
1128 		goto disable_pmu;
1129 
1130 	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1131 	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1132 		  "err %d errno %d\n", err, errno))
1133 		goto disable_pmu;
1134 
1135 	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1136 	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
1137 		  err, errno))
1138 		goto disable_pmu;
1139 
1140 	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1141 	       == 0);
1142 	assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
1143 	/* disable stack trace collection */
1144 	key = 0;
1145 	val = 1;
1146 	bpf_map_update_elem(control_map_fd, &key, &val, 0);
1147 
1148 	/* for every element in stackid_hmap, we can find a corresponding one
1149 	 * in stackmap, and vise versa.
1150 	 */
1151 	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1152 	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1153 		  "err %d errno %d\n", err, errno))
1154 		goto disable_pmu;
1155 
1156 	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1157 	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1158 		  "err %d errno %d\n", err, errno))
1159 		goto disable_pmu;
1160 
1161 	err = extract_build_id(buf, 256);
1162 
1163 	if (CHECK(err, "get build_id with readelf",
1164 		  "err %d errno %d\n", err, errno))
1165 		goto disable_pmu;
1166 
1167 	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
1168 	if (CHECK(err, "get_next_key from stackmap",
1169 		  "err %d, errno %d\n", err, errno))
1170 		goto disable_pmu;
1171 
1172 	do {
1173 		char build_id[64];
1174 
1175 		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
1176 		if (CHECK(err, "lookup_elem from stackmap",
1177 			  "err %d, errno %d\n", err, errno))
1178 			goto disable_pmu;
1179 		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
1180 			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
1181 			    id_offs[i].offset != 0) {
1182 				for (j = 0; j < 20; ++j)
1183 					sprintf(build_id + 2 * j, "%02x",
1184 						id_offs[i].build_id[j] & 0xff);
1185 				if (strstr(buf, build_id) != NULL)
1186 					build_id_matches = 1;
1187 			}
1188 		previous_key = key;
1189 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1190 
1191 	CHECK(build_id_matches < 1, "build id match",
1192 	      "Didn't find expected build ID from the map");
1193 
1194 disable_pmu:
1195 	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1196 
1197 close_pmu:
1198 	close(pmu_fd);
1199 
1200 close_prog:
1201 	bpf_object__close(obj);
1202 
1203 out:
1204 	return;
1205 }
1206 
1207 int main(void)
1208 {
1209 	test_pkt_access();
1210 	test_xdp();
1211 	test_xdp_adjust_tail();
1212 	test_l4lb_all();
1213 	test_xdp_noinline();
1214 	test_tcp_estats();
1215 	test_bpf_obj_id();
1216 	test_pkt_md_access();
1217 	test_obj_name();
1218 	test_tp_attach_query();
1219 	test_stacktrace_map();
1220 	test_stacktrace_build_id();
1221 	test_stacktrace_map_raw_tp();
1222 
1223 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
1224 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
1225 }
1226