#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "unwind.h"

#include <api/fs/fs.h>

int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del(&namespaces->list);
		namespaces__free(namespaces);
	}
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	unwind__finish_access(thread);

	free(thread);
}

struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead_threads list, as the last
		 * reference is gone.
		 */
		list_del_init(&thread->node);
		thread__delete(thread);
	}
}

struct namespaces *thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct namespaces_event *event)
{
	struct namespaces *new, *curr = thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * The setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}
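/*
 * The comm_list is kept newest-first (entries are list_add()ed at the
 * head), so thread__comm() returns the most recently set comm, and
 * thread__exec_comm() returns the most recent comm installed by an
 * exec, falling back to the oldest entry when no exec comm exists.
 */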
struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		last = comm;
	}

	return last;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

const char *thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

/* CHECKME: this should probably return the max comm len from its comm list instead */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int i, err = 0;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct maps *maps = &thread->mg->maps[i];
		struct map *map;

		pthread_rwlock_rdlock(&maps->lock);

		for (map = maps__first(maps); map; map = map__next(map)) {
			err = unwind__prepare_access(thread, map, &initialized);
			if (err || initialized)
				break;
		}

		pthread_rwlock_unlock(&maps->lock);
	}

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (symbol_conf.use_callchain)
		err = __thread__prepare_access(thread);

	return err;
}
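/*
 * Called on fork: a child thread of the same process keeps sharing the
 * parent's map groups and only needs its unwind state prepared, while a
 * new process gets its own copy of the parent's maps, one map type at a
 * time.
 */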
static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent)
{
	int i;

	/* This is a new thread of the same process, so share its map groups. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}

	/* But this one is a new process, so copy the maps. */
	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(thread, parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent);
}

void thread__find_cpumode_addr_location(struct thread *thread,
					enum map_type type, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_addr_location(thread, cpumodes[i], type, addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}
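/*
 * Usage sketch (illustrative only, not called from this file): callers of
 * thread__main_thread() are expected to drop the reference they get back,
 * assuming machine__find_thread() returns a counted reference just as the
 * thread__get() path above does:
 *
 *	struct thread *leader = thread__main_thread(machine, thread);
 *
 *	if (leader != NULL) {
 *		pr_debug("leader comm: %s\n", thread__comm_str(leader));
 *		thread__put(leader);
 *	}
 */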