xref: /linux/tools/perf/util/thread_map.c (revision 9749b90e566ca1a235fc8e2118f99c5690969342)
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "string2.h"
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "debug.h"
#include "event.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

static void thread_map__reset(struct perf_thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
	map->err_thread = -1;
}

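/*
 * Grow 'map' so it can hold 'nr' entries. The map only ever grows, so the
 * slots beyond the previous size are zeroed (and err_thread reset). Returns
 * NULL if realloc() fails, in which case the old map is left untouched.
 */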
static struct perf_thread_map *thread_map__realloc(struct perf_thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)

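/*
 * Build a map with one entry per task of 'pid', as listed under
 * /proc/<pid>/task. Returns NULL if the directory cannot be scanned
 * or allocation fails.
 */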
struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct perf_thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		refcount_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

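/* Build a single-entry map containing just 'tid'. */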
struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct perf_thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}

	return threads;
}

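/*
 * Walk /proc and collect every task of every process on the system.
 * If 'uid' is not UINT_MAX, only processes owned by that uid are kept.
 * The map starts with 32 slots and is doubled whenever it fills up.
 */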
static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent *dirent, **namelist = NULL;
	struct perf_thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	refcount_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);

		if (uid != UINT_MAX) {
			struct stat st;

			if (stat(path, &st) != 0 || st.st_uid != uid)
				continue;
		}

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct perf_thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

struct perf_thread_map *thread_map__new_all_cpus(void)
{
	return __thread_map__new_all_cpus(UINT_MAX);
}

struct perf_thread_map *thread_map__new_by_uid(uid_t uid)
{
	return __thread_map__new_all_cpus(uid);
}

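/*
 * Pick a constructor based on which target was given: an explicit pid
 * wins, then a uid (when no tid was given), and finally the tid, where
 * tid == -1 yields a single-entry map holding -1.
 */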
struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

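/*
 * Parse a comma-separated list of pids and build a map containing
 * every task of each listed pid.
 */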
static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct perf_thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

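/*
 * Single-entry map holding pid -1, for callers that need a thread map
 * even though no specific thread is being targeted.
 */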
struct perf_thread_map *thread_map__new_dummy(void)
{
	struct perf_thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}
	return threads;
}

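/*
 * Like thread_map__new_by_pid_str(), but each comma-separated entry is
 * taken as a tid and added directly, without expanding it into the tasks
 * of a process. A NULL string yields the dummy map.
 */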
struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct perf_thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	goto out;
}

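/*
 * String-based front end: a pid list takes precedence, then a uid target,
 * then 'all_threads' (every task on the system), and finally the tid list.
 * For example, a hypothetical caller could pass ("1234,1235", NULL,
 * UINT_MAX, false) to map every task of pids 1234 and 1235.
 */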
struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid, bool all_threads)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	if (all_threads)
		return thread_map__new_all_cpus();

	return thread_map__new_by_tid_str(tid);
}

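/*
 * Free the map and its per-thread comm strings. Only reached via
 * thread_map__put() once the refcount has dropped to zero.
 */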
static void thread_map__delete(struct perf_thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

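/*
 * Reference counting: thread_map__get() takes an extra reference,
 * thread_map__put() drops one and deletes the map when the last
 * reference goes away.
 */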
struct perf_thread_map *thread_map__get(struct perf_thread_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void thread_map__put(struct perf_thread_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}

size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

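/*
 * Read the task name from <procfs>/<pid>/comm into a freshly allocated,
 * trimmed string. Returns 0 on success, non-zero on failure.
 */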
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We read at most 16 bytes, while filename__read_str()
		 * allocates data in BUFSIZ-sized chunks, so we can safely
		 * mark the end of the string at 'size'.
		 */
		(*comm)[size] = 0;
		strim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct perf_thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is just a nice extra, so only warn
	 * if we fail to get it for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct perf_thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}

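/*
 * Fill 'threads' from a struct thread_map_event: copy each pid and
 * duplicate its comm string (at most 16 bytes), then initialize the
 * refcount.
 */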
static void thread_map__copy_event(struct perf_thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	refcount_set(&threads->refcnt, 1);
}

struct perf_thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct perf_thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}

bool thread_map__has(struct perf_thread_map *threads, pid_t pid)
{
	int i;

	for (i = 0; i < threads->nr; ++i) {
		if (threads->map[i].pid == pid)
			return true;
	}

	return false;
}

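/*
 * Drop entry 'idx' from the map: free its comm string and shift the
 * remaining entries down by one. Returns 0 on success, -EINVAL if 'idx'
 * is out of range.
 */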
int thread_map__remove(struct perf_thread_map *threads, int idx)
{
	int i;

	if (threads->nr < 1)
		return -EINVAL;

	if (idx >= threads->nr)
		return -EINVAL;

	/*
	 * Free the 'idx' item and shift the rest up.
	 */
	zfree(&threads->map[idx].comm);

	for (i = idx; i < threads->nr - 1; i++)
		threads->map[i] = threads->map[i + 1];

	threads->nr--;
	return 0;
}
491