// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include <stdio.h>
#include "cpumap.h"
#include "event.h"
#include "util/synthetic-events.h"
#include <string.h>
#include <linux/bitops.h>
#include <internal/cpumap.h>
#include "debug.h"

struct machine;

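/*
 * Callback for the mask-encoded synthesized event: the map built from
 * "0,2-20" below is expected to arrive as a PERF_CPU_MAP__MASK record,
 * so check the raw bitmap and the perf_cpu_map rebuilt from it.
 */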
static int process_event_mask(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_record_cpu_map *map_event = &event->cpu_map;
	struct perf_record_cpu_map_data *data;
	struct perf_cpu_map *map;
	unsigned int long_size;

	data = &map_event->data;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);

	long_size = data->mask32_data.long_size;

	TEST_ASSERT_VAL("wrong long_size", long_size == 4 || long_size == 8);

	TEST_ASSERT_VAL("wrong nr", data->mask32_data.nr == 1);

	TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(0, data));
	TEST_ASSERT_VAL("wrong cpu", !perf_record_cpu_map_data__test_bit(1, data));
	for (int i = 2; i <= 20; i++)
		TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(i, data));

	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 20);

	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 0);
	for (int i = 2; i <= 20; i++)
		TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, i - 1).cpu == i);

	perf_cpu_map__put(map);
	return 0;
}

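/*
 * Callback for the CPU-value-encoded synthesized event: "1,256" below is
 * expected to arrive as a PERF_CPU_MAP__CPUS record listing each CPU
 * explicitly.
 */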
static int process_event_cpus(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_record_cpu_map *map_event = &event->cpu_map;
	struct perf_record_cpu_map_data *data;
	struct perf_cpu_map *map;

	data = &map_event->data;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);

	TEST_ASSERT_VAL("wrong nr", data->cpus_data.nr == 2);
	TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[0] == 1);
	TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[1] == 256);

	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 2);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 1).cpu == 256);
	TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
	perf_cpu_map__put(map);
	return 0;
}

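/*
 * Callback for the range-encoded synthesized event: the contiguous map
 * "1-256" below is expected to arrive as a PERF_CPU_MAP__RANGE_CPUS
 * record carrying only the start and end CPU.
 */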
static int process_event_range_cpus(const struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample __maybe_unused,
				    struct machine *machine __maybe_unused)
{
	struct perf_record_cpu_map *map_event = &event->cpu_map;
	struct perf_record_cpu_map_data *data;
	struct perf_cpu_map *map;

	data = &map_event->data;

	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__RANGE_CPUS);

	TEST_ASSERT_VAL("wrong any_cpu", data->range_cpu_data.any_cpu == 0);
	TEST_ASSERT_VAL("wrong start_cpu", data->range_cpu_data.start_cpu == 1);
	TEST_ASSERT_VAL("wrong end_cpu", data->range_cpu_data.end_cpu == 256);

	map = cpu_map__new_data(data);
	TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 256);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
	TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__max(map).cpu == 256);
	TEST_ASSERT_VAL("wrong refcnt", refcount_read(perf_cpu_map__refcnt(map)) == 1);
	perf_cpu_map__put(map);
	return 0;
}

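/*
 * Synthesize three maps that should each pick a different encoding and
 * verify the resulting events with the callbacks above.
 */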
static int test__cpu_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *cpus;

	/* This one is better stored in a mask. */
	cpus = perf_cpu_map__new("0,2-20");

	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));

	perf_cpu_map__put(cpus);

	/* This one is better stored in cpu values. */
	cpus = perf_cpu_map__new("1,256");

	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));

	perf_cpu_map__put(cpus);

	/* This one is better stored as a range. */
	cpus = perf_cpu_map__new("1-256");

	TEST_ASSERT_VAL("failed to synthesize map",
			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_range_cpus, NULL));

	perf_cpu_map__put(cpus);
	return 0;
}

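/*
 * Round-trip helper: parse @str into a map, print it back with
 * cpu_map__snprint() and compare.  Returns 1 when the printed form matches
 * the input, 0 when it does not, and -1 if the map cannot be created.
 */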
static int cpu_map_print(const char *str)
{
	struct perf_cpu_map *map = perf_cpu_map__new(str);
	char buf[100];

	if (!map)
		return -1;

	cpu_map__snprint(map, buf, sizeof(buf));
	perf_cpu_map__put(map);

	return !strcmp(buf, str);
}

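/* Check that a selection of list and range strings survive the round trip. */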
static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,5"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3,5,7,9,11,13,15,17,19,21-40"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("2-5"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
	TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40"));
	return 0;
}

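/*
 * Merge two overlapping maps and check the result is sorted and
 * de-duplicated.  Note that 'a' is not put here: perf_cpu_map__merge()
 * appears to take ownership of its first argument (see the extra
 * perf_cpu_map__get() before the merge in test__cpu_map_equal() below).
 */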
static int test__cpu_map_merge(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
	struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
	struct perf_cpu_map *c = perf_cpu_map__merge(a, b);
	char buf[100];

	TEST_ASSERT_VAL("failed to merge map: bad nr", perf_cpu_map__nr(c) == 5);
	cpu_map__snprint(c, buf, sizeof(buf));
	TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
	perf_cpu_map__put(b);
	perf_cpu_map__put(c);
	return 0;
}

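/*
 * Intersect @lhs and @rhs and check both the resulting size and the
 * printed form against the expected values.
 */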
static int __test__cpu_map_intersect(const char *lhs, const char *rhs, int nr, const char *expected)
{
	struct perf_cpu_map *a = perf_cpu_map__new(lhs);
	struct perf_cpu_map *b = perf_cpu_map__new(rhs);
	struct perf_cpu_map *c = perf_cpu_map__intersect(a, b);
	char buf[100];

	TEST_ASSERT_EQUAL("failed to intersect map: bad nr", perf_cpu_map__nr(c), nr);
	cpu_map__snprint(c, buf, sizeof(buf));
	TEST_ASSERT_VAL("failed to intersect map: bad result", !strcmp(buf, expected));
	perf_cpu_map__put(a);
	perf_cpu_map__put(b);
	perf_cpu_map__put(c);
	return 0;
}

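/* Exercise intersections of overlapping, nested and identical maps. */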
static int test__cpu_map_intersect(struct test_suite *test __maybe_unused,
				   int subtest __maybe_unused)
{
	int ret;

	ret = __test__cpu_map_intersect("4,2,1", "4,5,7", 1, "4");
	if (ret)
		return ret;
	ret = __test__cpu_map_intersect("1-8", "6-9", 3, "6-8");
	if (ret)
		return ret;
	ret = __test__cpu_map_intersect("1-8,12-20", "6-9,15", 4, "6-8,15");
	if (ret)
		return ret;
	ret = __test__cpu_map_intersect("4,2,1", "1", 1, "1");
	if (ret)
		return ret;
	ret = __test__cpu_map_intersect("1", "4,2,1", 1, "1");
	if (ret)
		return ret;
	ret = __test__cpu_map_intersect("1", "1", 1, "1");
	return ret;
}

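/*
 * Equality checks: each distinct map only equals itself, and maps built
 * via merge/intersect equal their hand-written counterparts.
 */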
static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
	struct perf_cpu_map *one = perf_cpu_map__new("1");
	struct perf_cpu_map *two = perf_cpu_map__new("2");
	struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two);
	struct perf_cpu_map *pair = perf_cpu_map__new("1-2");
	struct perf_cpu_map *tmp;
	struct perf_cpu_map *maps[] = {empty, any, one, two, pair};

	for (size_t i = 0; i < ARRAY_SIZE(maps); i++) {
		/* Maps equal themselves. */
		TEST_ASSERT_VAL("equal", perf_cpu_map__equal(maps[i], maps[i]));
		for (size_t j = 0; j < ARRAY_SIZE(maps); j++) {
			/* Maps don't equal each other. */
			if (i == j)
				continue;
			TEST_ASSERT_VAL("not equal", !perf_cpu_map__equal(maps[i], maps[j]));
		}
	}

	/* Derived maps equal their hand-written equivalents. */
	tmp = perf_cpu_map__merge(perf_cpu_map__get(one), two);
	TEST_ASSERT_VAL("pair", perf_cpu_map__equal(pair, tmp));
	perf_cpu_map__put(tmp);

	tmp = perf_cpu_map__intersect(pair, one);
	TEST_ASSERT_VAL("one", perf_cpu_map__equal(one, tmp));
	perf_cpu_map__put(tmp);

	for (size_t i = 0; i < ARRAY_SIZE(maps); i++)
		perf_cpu_map__put(maps[i]);

	return TEST_OK;
}

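/*
 * Test table: TEST_CASE() (from tests.h) pastes the "test__" prefix onto
 * each name to resolve the test functions defined above.
 */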
static struct test_case tests__cpu_map[] = {
	TEST_CASE("Synthesize cpu map", cpu_map_synthesize),
	TEST_CASE("Print cpu map", cpu_map_print),
	TEST_CASE("Merge cpu map", cpu_map_merge),
	TEST_CASE("Intersect cpu map", cpu_map_intersect),
	TEST_CASE("Equal cpu map", cpu_map_equal),
	{ .name = NULL, }
};

struct test_suite suite__cpu_map = {
	.desc = "CPU map",
	.test_cases = tests__cpu_map,
};