xref: /linux/tools/perf/tests/backward-ring-buffer.c (revision 078c33862e042b3778dce3bcc8eaef84ab40715c)
/*
 * Test backward bit in event attribute, read ring buffer from end to
 * beginning
 */

#include <perf.h>
#include <evlist.h>
#include <sys/prctl.h>
#include "tests.h"
#include "debug.h"

#define NR_ITERS 111

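/*
 * Workload: rename the current thread NR_ITERS times via prctl().
 * Each call should hit the syscalls:sys_enter_prctl tracepoint (one
 * PERF_RECORD_SAMPLE) and generate one PERF_RECORD_COMM event.
 */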
static void testcase(void)
{
	int i;

	for (i = 0; i < NR_ITERS; i++) {
		char proc_name[10];

		snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
		prctl(PR_SET_NAME, proc_name);
	}
}

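/*
 * Walk every backward mmap: sync to the kernel's current write head,
 * then read records back, counting PERF_RECORD_SAMPLE and
 * PERF_RECORD_COMM events.  Any other record type fails the test.
 */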
static int count_samples(struct perf_evlist *evlist, int *sample_count,
			 int *comm_count)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
			const u32 type = event->header.type;

			switch (type) {
			case PERF_RECORD_SAMPLE:
				(*sample_count)++;
				break;
			case PERF_RECORD_COMM:
				(*comm_count)++;
				break;
			default:
				pr_err("Unexpected record of type %d\n", type);
				return TEST_FAIL;
			}
		}
	}
	return TEST_OK;
}

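/*
 * Map the ring buffer with the requested number of pages, run the
 * workload between enable/disable, then count the recorded events.
 */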
static int do_test(struct perf_evlist *evlist, int mmap_pages,
		   int *sample_count, int *comm_count)
{
	int err;
	char sbuf[STRERR_BUFSIZE];

	err = perf_evlist__mmap(evlist, mmap_pages, true);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		return TEST_FAIL;
	}

	perf_evlist__enable(evlist);
	testcase();
	perf_evlist__disable(evlist);

	err = count_samples(evlist, sample_count, comm_count);
	perf_evlist__munmap(evlist);
	return err;
}


int test__backward_ring_buffer(int subtest __maybe_unused)
{
	int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
	char pid[16], sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	struct perf_evsel *evsel __maybe_unused;
	struct parse_events_error parse_error;
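	/*
	 * freq == 0 with default_interval == 1 should give a sample
	 * period of 1, i.e. one sample per tracepoint hit, so
	 * sample_count can be compared against NR_ITERS.
	 */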
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		.default_interval = 1,
	};

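	/* Monitor only this test process itself */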
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	evlist->backward = true;
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

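	/*
	 * The test needs the syscalls:sys_enter_prctl tracepoint;
	 * parsing it typically requires root (tracefs access), so a
	 * failure here skips the test instead of failing it.
	 */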
	bzero(&parse_error, sizeof(parse_error));
	err = parse_events(evlist, "syscalls:sys_enter_prctl", &parse_error);
	if (err) {
		pr_debug("Failed to parse tracepoint event, try running as root\n");
		ret = TEST_SKIP;
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &opts, NULL);

	/* Set the backward bit, the ring buffer should be written from the end */
	evlist__for_each_entry(evlist, evsel)
		evsel->attr.write_backward = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	ret = TEST_FAIL;
	err = do_test(evlist, opts.mmap_pages, &sample_count,
		      &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
		pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
		       sample_count, comm_count);
		goto out_delete_evlist;
	}

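	/*
	 * Run again with a single-page ring buffer: the records are
	 * expected to overflow it, so the counters are not re-checked;
	 * only verify that the buffer can still be read back cleanly.
	 */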
	err = do_test(evlist, 1, &sample_count, &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	ret = TEST_OK;
out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}