xref: /linux/tools/perf/util/thread_map.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
#include <dirent.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

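/* Zero the map entries in [start, nr) so newly added slots start out clean */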
static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
}

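/*
 * Grow the map to hold @nr entries; the struct and its flexible map[]
 * array live in a single allocation, so only the entries beyond the old
 * size need to be reset.
 */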
static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)

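/*
 * Build a map containing every task listed under /proc/<pid>/task,
 * i.e. all threads of the given process.
 */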
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		atomic_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

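/* Build a single-entry map holding just the given thread id */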
struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}

	return threads;
}

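/*
 * Walk /proc and, for every process owned by @uid, add all of its tasks
 * to the map, doubling the allocation whenever it runs out of room.
 */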
struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[256];
	struct dirent dirent, *next, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	atomic_set(&threads->refcnt, 1);

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		bool grow = false;
		struct stat st;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);

		if (stat(path, &st) != 0)
			continue;

		if (st.st_uid != uid)
			continue;

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

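/*
 * Dispatch on the numeric target: a valid pid wins, then a uid when no
 * tid was given, and a single-tid map is built otherwise.
 */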
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

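/*
 * Parse a comma-separated list of pids (e.g. "1234,5678") and collect
 * the tasks of each listed process into one map.
 */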
static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

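/* A single-entry map holding pid -1, used when no specific target was given */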
struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}
	return threads;
}

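/*
 * Parse a comma-separated list of tids (e.g. "1234,1235") into a map with
 * one entry per tid; with no string at all, fall back to the dummy map.
 */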
static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	goto out;
}

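/*
 * String flavour of thread_map__new(): a pid list takes precedence, then
 * a uid, then a tid list (a NULL tid string yields the dummy map).
 */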
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid_str(tid);
}

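/*
 * Free the map and the comm strings cached in it.  Only reached via
 * thread_map__put() once the reference count has dropped to zero.
 */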
static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(atomic_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

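/* Grab a reference on the map (NULL is tolerated) */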
struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}

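/* Drop a reference; the map is freed when the last one goes away */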
void thread_map__put(struct thread_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}

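/* Print the map as e.g. "2 threads: 1234, 1235", followed by a newline */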
size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

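/*
 * Read the comm name for @pid from <procfs>/<pid>/comm into a freshly
 * allocated, trimmed string; returns 0 on success.
 */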
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}

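/* Resolve and cache the comm string for entry @i of the map */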
static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is an extra bonus, so just warn if we fail
	 * to get it for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

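/* Fill in the comm name for every entry in the map */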
void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}