/* xref: /linux/tools/perf/util/thread_map.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe) */
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "string2.h"
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"
#include "event.h"

/* Skip ".", ".." and any other entry whose name starts with a dot */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
	map->err_thread = -1;
}

static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
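
/*
 * struct thread_map ends in a flexible array member, so the helpers above
 * size the whole object as sizeof(*map) + nr * sizeof(map->map[0]) and
 * zero only the newly added tail entries; callers keep map->nr up to date
 * themselves. A minimal usage sketch, assuming a caller inside this file
 * (illustrative only, not part of the perf build):
 *
 *	struct thread_map *map = thread_map__alloc(4);
 *
 *	if (map) {
 *		map->nr = 0;			// callers initialize nr themselves
 *		refcount_set(&map->refcnt, 1);
 *		map = thread_map__realloc(map, 8);	// entries below map->nr are preserved
 *		thread_map__put(map);		// drop the reference when done
 *	}
 */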

struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		refcount_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}
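
/*
 * A minimal usage sketch for thread_map__new_by_pid(), assuming the
 * thread_map__nr()/thread_map__pid() accessors from thread_map.h
 * (illustrative only, not part of the perf build):
 *
 *	struct thread_map *threads = thread_map__new_by_pid(getpid());
 *	int i;
 *
 *	if (threads != NULL) {
 *		for (i = 0; i < thread_map__nr(threads); i++)
 *			printf("tid %d\n", thread_map__pid(threads, i));
 *		thread_map__put(threads);
 *	}
 */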

struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}

	return threads;
}

static struct thread_map *__thread_map__new_all_cpus(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent *dirent, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	refcount_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);

		if (uid != UINT_MAX) {
			struct stat st;

			if (stat(path, &st) != 0 || st.st_uid != uid)
				continue;
		}

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

struct thread_map *thread_map__new_all_cpus(void)
{
	return __thread_map__new_all_cpus(UINT_MAX);
}

struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	return __thread_map__new_all_cpus(uid);
}
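
/*
 * __thread_map__new_all_cpus() walks every numeric entry under /proc,
 * optionally filtering by the owning uid (UINT_MAX means "no filter"),
 * and doubles max_threads whenever the next batch of tids would not fit.
 * A usage sketch for the two wrappers (illustrative only):
 *
 *	struct thread_map *all = thread_map__new_all_cpus();
 *	struct thread_map *mine = thread_map__new_by_uid(getuid());
 *
 *	thread_map__put(all);
 *	thread_map__put(mine);
 */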

struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}
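
/*
 * thread_map__new() picks a constructor by precedence: an explicit pid
 * wins, then a uid filter (when no tid was given), then the tid itself
 * (a tid of -1 ends up as a single -1 entry). For example (illustrative
 * only):
 *
 *	thread_map__new(1234, -1, UINT_MAX);	// all threads of pid 1234
 *	thread_map__new(-1, -1, getuid());	// threads owned by this uid
 *	thread_map__new(-1, 5678, UINT_MAX);	// just tid 5678
 */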

static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}
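
/*
 * thread_map__new_by_pid_str() takes a comma separated pid list such as
 * "1234,5678", expands each pid into all of its /proc/<pid>/task entries
 * and returns one merged map (consecutive duplicate pids are skipped).
 * Illustrative only:
 *
 *	struct thread_map *threads = thread_map__new_by_pid_str("1234,5678");
 *
 *	if (threads)
 *		thread_map__put(threads);
 */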

struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}
	return threads;
}

struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	/* Delete the strlist on the success path as well, not just on error */
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	goto out;
}
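
/*
 * thread_map__new_by_tid_str() is the tid counterpart of the pid variant
 * above: each tid in the comma separated list becomes exactly one entry,
 * without expanding /proc/<tid>/task, and a NULL string yields the dummy
 * map so callers such as perf-stat always get a map back. Illustrative
 * only:
 *
 *	struct thread_map *threads = thread_map__new_by_tid_str("1234,5678");
 *
 *	if (threads)
 *		thread_map__put(threads);
 */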

struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid, bool all_threads)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	if (all_threads)
		return thread_map__new_all_cpus();

	return thread_map__new_by_tid_str(tid);
}
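
/*
 * thread_map__new_str() is the string-based front end: a pid string wins,
 * then a uid filter, then all_threads (every thread on the system), and
 * finally the tid string. A sketch of monitoring a single tid
 * (illustrative only):
 *
 *	struct thread_map *threads =
 *		thread_map__new_str(NULL, "1234", UINT_MAX, false);
 *
 *	if (threads)
 *		thread_map__put(threads);
 */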

static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void thread_map__put(struct thread_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}
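
/*
 * Thread maps are reference counted: the constructors above return a map
 * with refcnt == 1, thread_map__get() takes an extra reference and
 * thread_map__put() drops one, freeing the map and its cached comm
 * strings when the count reaches zero. Illustrative only:
 *
 *	struct thread_map *map = thread_map__new_dummy();	// refcnt == 1
 *	struct thread_map *ref = thread_map__get(map);		// refcnt == 2
 *
 *	thread_map__put(ref);					// refcnt == 1
 *	thread_map__put(map);					// freed here
 */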

size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}
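
/*
 * thread_map__fprintf() prints a single line such as
 * "2 threads: 1234, 1235" and returns the number of characters written,
 * e.g. thread_map__fprintf(threads, stdout); (illustrative only).
 */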

static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}
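
/*
 * Comm names are filled in lazily: callers that need them invoke
 * thread_map__read_comms() once and then read them back through
 * thread_map__comm() from thread_map.h. A sketch (illustrative only):
 *
 *	int i;
 *
 *	thread_map__read_comms(threads);
 *	for (i = 0; i < thread_map__nr(threads); i++)
 *		printf("%d %s\n", thread_map__pid(threads, i),
 *		       thread_map__comm(threads, i) ?: "<unknown>");
 */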

static void thread_map__copy_event(struct thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	refcount_set(&threads->refcnt, 1);
}

struct thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}

bool thread_map__has(struct thread_map *threads, pid_t pid)
{
	int i;

	for (i = 0; i < threads->nr; ++i) {
		if (threads->map[i].pid == pid)
			return true;
	}

	return false;
}

int thread_map__remove(struct thread_map *threads, int idx)
{
	int i;

	if (threads->nr < 1)
		return -EINVAL;

	if (idx >= threads->nr)
		return -EINVAL;

	/*
	 * Free the 'idx' item and shift the rest up.
	 */
	free(threads->map[idx].comm);

	for (i = idx; i < threads->nr - 1; i++)
		threads->map[i] = threads->map[i + 1];

	threads->nr--;
	return 0;
}
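
/*
 * thread_map__remove() frees the removed entry's comm and shifts the
 * following entries down by one, so indices past 'idx' change. A sketch
 * for dropping threads that have exited, where thread_is_gone() is a
 * hypothetical liveness check supplied by the caller (illustrative only):
 *
 *	int i = 0;
 *
 *	while (i < thread_map__nr(threads)) {
 *		if (thread_is_gone(thread_map__pid(threads, i)))
 *			thread_map__remove(threads, i);
 *		else
 *			i++;
 *	}
 */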
490