1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <limits.h>
5 #include <stdbool.h>
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include "string2.h"
12 #include "strlist.h"
13 #include <string.h>
14 #include <api/fs/fs.h>
15 #include <linux/string.h>
16 #include <linux/zalloc.h>
17 #include "asm/bug.h"
18 #include "thread_map.h"
19 #include "debug.h"
20 #include "event.h"
21 #include <internal/threadmap.h>
22
23 /* Skip "." and ".." directories */
/* scandir() filter: skip the "." and ".." directories (and other dotfiles) */
static int filter(const struct dirent *dir)
{
	/* Keep only entries whose name does not begin with a dot. */
	return dir->d_name[0] != '.';
}
31
32 #define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
33
thread_map__new_by_pid(pid_t pid)34 struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
35 {
36 struct perf_thread_map *threads;
37 char name[256];
38 int items;
39 struct dirent **namelist = NULL;
40 int i;
41
42 sprintf(name, "/proc/%d/task", pid);
43 items = scandir(name, &namelist, filter, NULL);
44 if (items <= 0)
45 return NULL;
46
47 threads = thread_map__alloc(items);
48 if (threads != NULL) {
49 for (i = 0; i < items; i++)
50 perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
51 threads->nr = items;
52 refcount_set(&threads->refcnt, 1);
53 }
54
55 for (i=0; i<items; i++)
56 zfree(&namelist[i]);
57 free(namelist);
58
59 return threads;
60 }
61
thread_map__new_by_tid(pid_t tid)62 struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
63 {
64 struct perf_thread_map *threads = thread_map__alloc(1);
65
66 if (threads != NULL) {
67 perf_thread_map__set_pid(threads, 0, tid);
68 threads->nr = 1;
69 refcount_set(&threads->refcnt, 1);
70 }
71
72 return threads;
73 }
74
/*
 * Build a thread map covering every task of every process listed in
 * /proc.  The map starts at 32 slots and doubles as needed.  Returns a
 * new map with refcount 1, or NULL on open/allocation failure.
 */
static struct perf_thread_map *thread_map__new_all_cpus(void)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent *dirent, **namelist = NULL;
	struct perf_thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	refcount_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		/* filter() drops "." and ".."; items is the tid count */
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0) {
			/* process may have exited between readdir and scandir */
			pr_debug("scandir for %d returned empty, skipping\n", pid);
			continue;
		}
		/* Double the capacity until the new batch of tids fits. */
		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct perf_thread_map *tmp;

			/* keep 'threads' valid if realloc fails */
			tmp = perf_thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		/* Append this process's tids after the existing entries. */
		for (i = 0; i < items; i++) {
			perf_thread_map__set_pid(threads, threads->nr + i,
						 atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	/* refcnt not initialized yet, plain free is correct here */
	free(threads);
	return NULL;

out_free_namelist:
	/* release the un-consumed scandir() result, then the map */
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);
	/* zfree() NULLs 'threads', so the 'out' path returns NULL */
	zfree(&threads);
	goto out_closedir;
}
150
/*
 * Convenience constructor: a concrete pid selects all tasks of that
 * process, otherwise fall back to the single-tid map.
 */
struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid)
{
	return pid != -1 ? thread_map__new_by_pid(pid)
			 : thread_map__new_by_tid(tid);
}
158
thread_map__new_by_pid_str(const char * pid_str)159 static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str)
160 {
161 struct perf_thread_map *threads = NULL, *nt;
162 char name[256];
163 int items, total_tasks = 0;
164 struct dirent **namelist = NULL;
165 int i, j = 0;
166 pid_t pid, prev_pid = INT_MAX;
167 struct str_node *pos;
168 struct strlist *slist = strlist__new(pid_str, NULL);
169
170 if (!slist)
171 return NULL;
172
173 strlist__for_each_entry(pos, slist) {
174 pid = strtol(pos->s, NULL, 10);
175
176 if (pid == INT_MIN || pid == INT_MAX)
177 goto out_free_threads;
178
179 if (pid == prev_pid)
180 continue;
181
182 sprintf(name, "/proc/%d/task", pid);
183 items = scandir(name, &namelist, filter, NULL);
184 if (items <= 0)
185 goto out_free_threads;
186
187 total_tasks += items;
188 nt = perf_thread_map__realloc(threads, total_tasks);
189 if (nt == NULL)
190 goto out_free_namelist;
191
192 threads = nt;
193
194 for (i = 0; i < items; i++) {
195 perf_thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
196 zfree(&namelist[i]);
197 }
198 threads->nr = total_tasks;
199 free(namelist);
200 }
201
202 out:
203 strlist__delete(slist);
204 if (threads)
205 refcount_set(&threads->refcnt, 1);
206 return threads;
207
208 out_free_namelist:
209 for (i = 0; i < items; i++)
210 zfree(&namelist[i]);
211 free(namelist);
212
213 out_free_threads:
214 zfree(&threads);
215 goto out;
216 }
217
thread_map__new_by_tid_str(const char * tid_str)218 struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
219 {
220 struct perf_thread_map *threads = NULL, *nt;
221 int ntasks = 0;
222 pid_t tid, prev_tid = INT_MAX;
223 struct str_node *pos;
224 struct strlist *slist;
225
226 /* perf-stat expects threads to be generated even if tid not given */
227 if (!tid_str)
228 return perf_thread_map__new_dummy();
229
230 slist = strlist__new(tid_str, NULL);
231 if (!slist)
232 return NULL;
233
234 strlist__for_each_entry(pos, slist) {
235 tid = strtol(pos->s, NULL, 10);
236
237 if (tid == INT_MIN || tid == INT_MAX)
238 goto out_free_threads;
239
240 if (tid == prev_tid)
241 continue;
242
243 ntasks++;
244 nt = perf_thread_map__realloc(threads, ntasks);
245
246 if (nt == NULL)
247 goto out_free_threads;
248
249 threads = nt;
250 perf_thread_map__set_pid(threads, ntasks - 1, tid);
251 threads->nr = ntasks;
252 }
253 out:
254 strlist__delete(slist);
255 if (threads)
256 refcount_set(&threads->refcnt, 1);
257 return threads;
258
259 out_free_threads:
260 zfree(&threads);
261 goto out;
262 }
263
thread_map__new_str(const char * pid,const char * tid,bool all_threads)264 struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid, bool all_threads)
265 {
266 if (pid)
267 return thread_map__new_by_pid_str(pid);
268
269 if (all_threads)
270 return thread_map__new_all_cpus();
271
272 return thread_map__new_by_tid_str(tid);
273 }
274
thread_map__fprintf(struct perf_thread_map * threads,FILE * fp)275 size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
276 {
277 int i;
278 size_t printed = fprintf(fp, "%d thread%s: ",
279 threads->nr, threads->nr > 1 ? "s" : "");
280 for (i = 0; i < threads->nr; ++i)
281 printed += fprintf(fp, "%s%d", i ? ", " : "", perf_thread_map__pid(threads, i));
282
283 return printed + fprintf(fp, "\n");
284 }
285
/*
 * Read the comm name of 'pid' from <procfs>/<pid>/comm into a freshly
 * allocated, trimmed string (*comm).  Returns 0 on success or a
 * negative error; the caller owns *comm on success.
 */
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t len;
	int ret;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	ret = filename__read_str(path, comm, &len);
	if (ret == 0) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[len] = 0;
		strim(*comm);
	}

	free(path);
	return ret;
}
309
comm_init(struct perf_thread_map * map,int i)310 static void comm_init(struct perf_thread_map *map, int i)
311 {
312 pid_t pid = perf_thread_map__pid(map, i);
313 char *comm = NULL;
314
315 /* dummy pid comm initialization */
316 if (pid == -1) {
317 map->map[i].comm = strdup("dummy");
318 return;
319 }
320
321 /*
322 * The comm name is like extra bonus ;-),
323 * so just warn if we fail for any reason.
324 */
325 if (get_comm(&comm, pid))
326 pr_warning("Couldn't resolve comm name for pid %d\n", pid);
327
328 map->map[i].comm = comm;
329 }
330
thread_map__read_comms(struct perf_thread_map * threads)331 void thread_map__read_comms(struct perf_thread_map *threads)
332 {
333 int i;
334
335 for (i = 0; i < threads->nr; ++i)
336 comm_init(threads, i);
337 }
338
thread_map__copy_event(struct perf_thread_map * threads,struct perf_record_thread_map * event)339 static void thread_map__copy_event(struct perf_thread_map *threads,
340 struct perf_record_thread_map *event)
341 {
342 unsigned i;
343
344 threads->nr = (int) event->nr;
345
346 for (i = 0; i < event->nr; i++) {
347 perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
348 threads->map[i].comm = strndup(event->entries[i].comm, 16);
349 }
350
351 refcount_set(&threads->refcnt, 1);
352 }
353
thread_map__new_event(struct perf_record_thread_map * event)354 struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event)
355 {
356 struct perf_thread_map *threads;
357
358 threads = thread_map__alloc(event->nr);
359 if (threads)
360 thread_map__copy_event(threads, event);
361
362 return threads;
363 }
364
thread_map__has(struct perf_thread_map * threads,pid_t pid)365 bool thread_map__has(struct perf_thread_map *threads, pid_t pid)
366 {
367 int i;
368
369 for (i = 0; i < threads->nr; ++i) {
370 if (threads->map[i].pid == pid)
371 return true;
372 }
373
374 return false;
375 }
376
thread_map__remove(struct perf_thread_map * threads,int idx)377 int thread_map__remove(struct perf_thread_map *threads, int idx)
378 {
379 int i;
380
381 if (threads->nr < 1)
382 return -EINVAL;
383
384 if (idx >= threads->nr)
385 return -EINVAL;
386
387 /*
388 * Free the 'idx' item and shift the rest up.
389 */
390 zfree(&threads->map[idx].comm);
391
392 for (i = idx; i < threads->nr - 1; i++)
393 threads->map[i] = threads->map[i + 1];
394
395 threads->nr--;
396 return 0;
397 }
398