#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "asm/bug.h"

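/*
 * Default map: one entry per online CPU (sysconf(_SC_NPROCESSORS_ONLN)),
 * assuming the ids 0..nr_cpus-1.
 */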
static struct cpu_map *cpu_map__default_new(void)
{
	struct cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;
		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

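/* Wrap the first nr_cpus entries of tmp_cpus in a newly allocated cpu_map. */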
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

	if (cpus != NULL) {
		cpus->nr = nr_cpus;
		memcpy(cpus->map, tmp_cpus, payload_size);
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

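/*
 * Parse a CPU list in the sysfs range format used by e.g.
 * /sys/devices/system/cpu/online ("0-3,7"), one map entry per CPU.
 * Falls back to the default map if nothing could be parsed.
 */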
struct cpu_map *cpu_map__read(FILE *file)
{
	struct cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

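/* Read the online CPU map from sysfs, or fall back to the default map. */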
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

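/*
 * Build a cpu_map from a user supplied list such as "0,2-4", rejecting
 * duplicates and malformed input.  A NULL list means "all online CPUs".
 */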
struct cpu_map *cpu_map__new(const char *cpu_list)
{
	struct cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	if (!isdigit(*cpu_list))
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

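/* Print a cpu_map in human readable form, e.g. "4 cpus: 0, 1, 2, 3". */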
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d cpu%s: ",
				 map->nr, map->nr > 1 ? "s" : "");
	for (i = 0; i < map->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);

	return printed + fprintf(fp, "\n");
}

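/*
 * Map with a single "any CPU" entry (-1), used when events are not bound
 * to particular CPUs, e.g. for per-thread monitoring.
 */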
struct cpu_map *cpu_map__dummy_new(void)
{
	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

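/*
 * Reference counting: maps are created with refcnt == 1, cpu_map__get()
 * takes an extra reference and cpu_map__put() drops one, deleting the map
 * once the count reaches zero.
 */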
static void cpu_map__delete(struct cpu_map *map)
{
	if (map) {
		WARN_ONCE(atomic_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}

void cpu_map__put(struct cpu_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}

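/* Read the socket (physical package) id of a CPU from sysfs topology. */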
int cpu_map__get_socket_id(int cpu)
{
	FILE *fp;
	const char *mnt;
	char path[PATH_MAX];
	int socket_id, ret;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return -1;

	snprintf(path, PATH_MAX,
		"%s/devices/system/cpu/cpu%d/topology/physical_package_id",
		mnt, cpu);

	fp = fopen(path, "r");
	if (!fp)
		return -1;
	ret = fscanf(fp, "%d", &socket_id);
	fclose(fp);

	return ret == 1 ? socket_id : -1;
}

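/* Socket id of the idx-th CPU in the map, or -1 on error. */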
int cpu_map__get_socket(struct cpu_map *map, int idx)
{
	int cpu;

	if (idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

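/*
 * Build a map of the distinct ids returned by f() (e.g. socket or core id)
 * for every CPU in 'cpus', sorted in increasing order.
 */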
static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
			      int (*f)(struct cpu_map *map, int cpu))
{
	struct cpu_map *c;
	int nr = cpus->nr;
	int cpu, s1, s2;

	/* allocate as much as possible: at most one id per cpu */
	c = calloc(1, sizeof(*c) + nr * sizeof(int));
	if (!c)
		return -1;

	for (cpu = 0; cpu < nr; cpu++) {
		s1 = f(cpus, cpu);
		for (s2 = 0; s2 < c->nr; s2++) {
			if (s1 == c->map[s2])
				break;
		}
		if (s2 == c->nr) {
			c->map[c->nr] = s1;
			c->nr++;
		}
	}
	/* ensure we process ids in increasing order */
	qsort(c->map, c->nr, sizeof(int), cmp_ids);

	atomic_set(&c->refcnt, 1);
	*res = c;
	return 0;
}

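/* Read the core id of a CPU from sysfs topology. */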
int cpu_map__get_core_id(int cpu)
{
	FILE *fp;
	const char *mnt;
	char path[PATH_MAX];
	int core_id, ret;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return -1;

	snprintf(path, PATH_MAX,
		"%s/devices/system/cpu/cpu%d/topology/core_id",
		mnt, cpu);

	fp = fopen(path, "r");
	if (!fp)
		return -1;
	ret = fscanf(fp, "%d", &core_id);
	fclose(fp);

	return ret == 1 ? core_id : -1;
}

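/* Globally unique (socket + core) id of the idx-th CPU in the map. */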
int cpu_map__get_core(struct cpu_map *map, int idx)
{
	int cpu, s;

	if (idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	cpu = cpu_map__get_core_id(cpu);

	s = cpu_map__get_socket(map, idx);
	if (s == -1)
		return -1;

	/*
	 * Encode the socket in the upper 16 bits: core_id is relative to
	 * its socket and we need a global id, so combine socket + core id.
	 */
	return (s << 16) | (cpu & 0xffff);
}

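/* Build maps of the distinct socket and core ids covered by 'cpus'. */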
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, cpu_map__get_core);
}

/* Set up simple routines to easily access node numbers given a cpu number. */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find the highest id in the list */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest 0-based id into a count */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

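/*
 * Allocate cpunode_map[] sized for the highest possible CPU and mark
 * every entry as "node unknown" (-1).
 */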
static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

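/*
 * Walk the sysfs node directories (node<N>/cpu<M>) and record, for each
 * CPU, the NUMA node it belongs to in cpunode_map[].
 */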
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n == PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
517