/* /linux/tools/perf/util/cputopo.c (revision c9dc580c43b8d83de0c14158e826f79e41098822) */
// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <api/fs/fs.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>

#include "cputopo.h"
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "pmu-hybrid.h"

#define PACKAGE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
#define PACKAGE_CPUS_FMT_OLD \
	"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define DIE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
#define CORE_CPUS_FMT \
	"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
#define CORE_CPUS_FMT_OLD \
	"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
	"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
	"%s/devices/system/node/node%d/meminfo"
#define NODE_CPULIST_FMT \
	"%s/devices/system/node/node%d/cpulist"

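/*
 * Read the package, die and core sibling CPU lists for @cpu from sysfs and
 * record each list in @tp if it has not been seen before. Returns 0 if at
 * least one of the lists could be read, -1 otherwise.
 */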
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		/* Fall back to the legacy sysfs name on older kernels. */
		scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
			sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto try_dies;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_dies;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->package_cpus_lists; i++) {
		if (!strcmp(buf, tp->package_cpus_list[i]))
			break;
	}
	if (i == tp->package_cpus_lists) {
		/* A list not seen before: hand ownership of buf to tp. */
		tp->package_cpus_list[i] = buf;
		tp->package_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_dies:
	if (!tp->die_cpus_list)
		goto try_threads;

	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->die_cpus_lists; i++) {
		if (!strcmp(buf, tp->die_cpus_list[i]))
			break;
	}
	if (i == tp->die_cpus_lists) {
		tp->die_cpus_list[i] = buf;
		tp->die_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
			  sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_cpus_lists; i++) {
		if (!strcmp(buf, tp->core_cpus_list[i]))
			break;
	}
	if (i == tp->core_cpus_lists) {
		tp->core_cpus_list[i] = buf;
		tp->core_cpus_lists++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

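/* Release the per-package, per-die and per-core CPU list strings and @tp itself. */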
void cpu_topology__delete(struct cpu_topology *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->package_cpus_lists; i++)
		zfree(&tp->package_cpus_list[i]);

	for (i = 0 ; i < tp->die_cpus_lists; i++)
		zfree(&tp->die_cpus_list[i]);

	for (i = 0 ; i < tp->core_cpus_lists; i++)
		zfree(&tp->core_cpus_list[i]);

	free(tp);
}

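/*
 * SMT is considered on if any core has more than one CPU in its sibling
 * list, i.e. the list needs a ',' or '-' to describe it.
 */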
bool cpu_topology__smt_on(const struct cpu_topology *topology)
{
	for (u32 i = 0; i < topology->core_cpus_lists; i++) {
		const char *cpu_list = topology->core_cpus_list[i];

		/*
		 * If there is a need to separate siblings in a core then SMT is
		 * enabled.
		 */
		if (strchr(cpu_list, ',') || strchr(cpu_list, '-'))
			return true;
	}
	return false;
}

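/*
 * Returns true when the user requested CPU list covers whole cores only:
 * for every core either all of its SMT siblings are requested or none are.
 * An empty request means all CPUs and is trivially core wide.
 */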
bool cpu_topology__core_wide(const struct cpu_topology *topology,
			     const char *user_requested_cpu_list)
{
	struct perf_cpu_map *user_requested_cpus;

	/*
	 * If user_requested_cpu_list is empty then all CPUs are recorded and so
	 * core_wide is true.
	 */
	if (!user_requested_cpu_list)
		return true;

	user_requested_cpus = perf_cpu_map__new(user_requested_cpu_list);
	/* Check that every user requested CPU is the complete set of SMT threads on a core. */
	for (u32 i = 0; i < topology->core_cpus_lists; i++) {
		const char *core_cpu_list = topology->core_cpus_list[i];
		struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
		struct perf_cpu cpu;
		int idx;
		bool has_first, first = true;

		perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
			if (first) {
				has_first = perf_cpu_map__has(user_requested_cpus, cpu);
				first = false;
			} else {
				/*
				 * If the first core CPU is user requested then
				 * all subsequent CPUs in the core must be user
				 * requested too. If the first CPU isn't user
				 * requested then none of the others must be
				 * too.
				 */
				if (perf_cpu_map__has(user_requested_cpus, cpu) != has_first) {
					perf_cpu_map__put(core_cpus);
					perf_cpu_map__put(user_requested_cpus);
					return false;
				}
			}
		}
		perf_cpu_map__put(core_cpus);
	}
	perf_cpu_map__put(user_requested_cpus);
	return true;
}

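/*
 * Die information is only reported for x86_64 and s390x, and only when
 * cpu0 exposes a die_cpus_list file in sysfs.
 */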
static bool has_die_topology(void)
{
	char filename[MAXPATHLEN];
	struct utsname uts;

	if (uname(&uts) < 0)
		return false;

	if (strncmp(uts.machine, "x86_64", 6) &&
	    strncmp(uts.machine, "s390x", 5))
		return false;

	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
		  sysfs__mountpoint(), 0);
	if (access(filename, F_OK) == -1)
		return false;

	return true;
}

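/*
 * Lazily create and cache the host CPU topology. The cached value is never
 * freed; failure to build it is treated as fatal.
 */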
const struct cpu_topology *online_topology(void)
{
	static const struct cpu_topology *topology;

	if (!topology) {
		topology = cpu_topology__new();
		if (!topology) {
			pr_err("Error creating CPU topology");
			abort();
		}
	}
	return topology;
}

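/*
 * Build a cpu_topology sized to cover every present CPU. The struct and the
 * three (or two, without die information) arrays of list pointers are carved
 * out of a single allocation; topology is only read for CPUs that are online.
 */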
struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i, nr_addr;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct perf_cpu_map *map;
	bool has_die = has_die_topology();

	ncpus = cpu__max_present_cpu().cpu;

	/* build online CPU map */
	map = perf_cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	if (has_die)
		nr_addr = 3;
	else
		nr_addr = 2;
	/* Allocate the struct and the per-level string arrays in one block. */
	addr = calloc(1, sizeof(*tp) + nr_addr * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	addr += sizeof(*tp);
	tp->package_cpus_list = addr;
	addr += sz;
	if (has_die) {
		tp->die_cpus_list = addr;
		addr += sz;
	}
	tp->core_cpus_list = addr;

	for (i = 0; i < nr; i++) {
		if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	perf_cpu_map__put(map);
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}

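/*
 * Fill one numa_topology_node: total and free memory are parsed from the
 * node's meminfo file and the CPU list string is read from its cpulist file.
 */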
static int load_numa_node(struct numa_topology_node *node, int nr)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	int ret = -1;
	FILE *fp;
	u64 mem;

	node->node = (u32) nr;

	scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
		  sysfs__mountpoint(), nr);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto err;
		if (!strcmp(field, "MemTotal:"))
			node->mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			node->mem_free = mem;
		if (node->mem_total && node->mem_free)
			break;
	}

	fclose(fp);
	fp = NULL;

	scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
		  sysfs__mountpoint(), nr);

	fp = fopen(str, "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto err;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	node->cpus = buf;
	fclose(fp);
	return 0;

err:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

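/*
 * Build the NUMA topology from the set of online nodes listed in sysfs. The
 * node list string is parsed with perf_cpu_map__new() since node and CPU
 * lists share the same syntax.
 */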
struct numa_topology *numa_topology__new(void)
{
	struct perf_cpu_map *node_map = NULL;
	struct numa_topology *tp = NULL;
	char path[MAXPATHLEN];
	char *buf = NULL;
	size_t len = 0;
	u32 nr, i;
	FILE *fp;
	char *c;

	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
		  sysfs__mountpoint());

	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	if (getline(&buf, &len, fp) <= 0)
		goto out;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = perf_cpu_map__new(buf);
	if (!node_map)
		goto out;

	nr = (u32) perf_cpu_map__nr(node_map);

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
	if (!tp)
		goto out;

	tp->nr = nr;

	for (i = 0; i < nr; i++) {
		if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
			numa_topology__delete(tp);
			tp = NULL;
			break;
		}
	}

out:
	free(buf);
	fclose(fp);
	perf_cpu_map__put(node_map);
	return tp;
}

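/* Release the per-node CPU list strings and @tp itself. */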
void numa_topology__delete(struct numa_topology *tp)
{
	u32 i;

	for (i = 0; i < tp->nr; i++)
		zfree(&tp->nodes[i].cpus);

	free(tp);
}

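/*
 * Record the name of a hybrid PMU and the CPU list from its sysfs "cpus"
 * file in @node. Both strings are owned by @node on success.
 */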
static int load_hybrid_node(struct hybrid_topology_node *node,
			    struct perf_pmu *pmu)
{
	char *buf = NULL, *p;
	FILE *fp;
	size_t len = 0;

	node->pmu_name = strdup(pmu->name);
	if (!node->pmu_name)
		return -1;

	fp = perf_pmu__open_file(pmu, "cpus");
	if (!fp)
		goto err;

	if (getline(&buf, &len, fp) <= 0) {
		fclose(fp);
		goto err;
	}

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	fclose(fp);
	node->cpus = buf;
	return 0;

err:
	zfree(&node->pmu_name);
	free(buf);
	return -1;
}

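/*
 * Build the hybrid topology with one node per hybrid core PMU (e.g. cpu_core
 * and cpu_atom). Returns NULL if the system has no hybrid PMUs.
 */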
struct hybrid_topology *hybrid_topology__new(void)
{
	struct perf_pmu *pmu;
	struct hybrid_topology *tp = NULL;
	u32 nr, i = 0;

	nr = perf_pmu__hybrid_pmu_num();
	if (nr == 0)
		return NULL;

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
	if (!tp)
		return NULL;

	tp->nr = nr;
	perf_pmu__for_each_hybrid_pmu(pmu) {
		if (load_hybrid_node(&tp->nodes[i], pmu)) {
			hybrid_topology__delete(tp);
			return NULL;
		}
		i++;
	}

	return tp;
}

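/* Release the PMU name and CPU list strings of each node and @tp itself. */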
void hybrid_topology__delete(struct hybrid_topology *tp)
{
	u32 i;

	for (i = 0; i < tp->nr; i++) {
		zfree(&tp->nodes[i].pmu_name);
		zfree(&tp->nodes[i].cpus);
	}

	free(tp);
}