xref: /linux/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2020 Facebook
3 #define _GNU_SOURCE
4 #include <pthread.h>
5 #include <sched.h>
6 #include <test_progs.h>
7 #include "perf_event_stackmap.skel.h"
8 
9 #ifndef noinline
10 #define noinline __attribute__((noinline))
11 #endif
12 
noinline int func_1(void)
{
	/*
	 * Leaf of the func_6 -> ... -> func_1 call chain.  The counter
	 * persists across calls and the short sleep keeps this frame live
	 * so perf samples can land while the chain is on the stack.
	 */
	static int counter = 1;

	++counter;
	usleep(100);

	return counter;
}
22 
noinline int func_2(void)
{
	/* One extra frame for the unwinder to record. */
	int ret = func_1();

	return ret;
}
27 
noinline int func_3(void)
{
	/* One extra frame for the unwinder to record. */
	int ret = func_2();

	return ret;
}
32 
noinline int func_4(void)
{
	/* One extra frame for the unwinder to record. */
	int ret = func_3();

	return ret;
}
37 
noinline int func_5(void)
{
	/* One extra frame for the unwinder to record. */
	int ret = func_4();

	return ret;
}
42 
noinline int func_6(void)
{
	/*
	 * Drive the whole call chain repeatedly so enough perf samples
	 * are taken while the func_6..func_1 frames are on the stack.
	 */
	int round, total = 1;

	for (round = 0; round < 100; round++)
		total += func_5();

	return total;
}
52 
53 void test_perf_event_stackmap(void)
54 {
55 	struct perf_event_attr attr = {
56 		/* .type = PERF_TYPE_SOFTWARE, */
57 		.type = PERF_TYPE_HARDWARE,
58 		.config = PERF_COUNT_HW_CPU_CYCLES,
59 		.precise_ip = 2,
60 		.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
61 			PERF_SAMPLE_CALLCHAIN,
62 		.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
63 			PERF_SAMPLE_BRANCH_NO_FLAGS |
64 			PERF_SAMPLE_BRANCH_NO_CYCLES |
65 			PERF_SAMPLE_BRANCH_CALL_STACK,
66 		.sample_period = 5000,
67 		.size = sizeof(struct perf_event_attr),
68 	};
69 	struct perf_event_stackmap *skel;
70 	__u32 duration = 0;
71 	cpu_set_t cpu_set;
72 	int pmu_fd, err;
73 
74 	skel = perf_event_stackmap__open();
75 
76 	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
77 		return;
78 
79 	err = perf_event_stackmap__load(skel);
80 	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
81 		goto cleanup;
82 
83 	CPU_ZERO(&cpu_set);
84 	CPU_SET(0, &cpu_set);
85 	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
86 	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
87 		goto cleanup;
88 
89 	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
90 			 0 /* cpu 0 */, -1 /* group id */,
91 			 0 /* flags */);
92 	if (pmu_fd < 0) {
93 		printf("%s:SKIP:cpu doesn't support the event\n", __func__);
94 		test__skip();
95 		goto cleanup;
96 	}
97 
98 	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
99 							   pmu_fd);
100 	if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
101 		close(pmu_fd);
102 		goto cleanup;
103 	}
104 
105 	/* create kernel and user stack traces for testing */
106 	func_6();
107 
108 	CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
109 	CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
110 	CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
111 	CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");
112 
113 cleanup:
114 	perf_event_stackmap__destroy(skel);
115 }
116