#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

void map_groups__init(struct map_groups *self)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		self->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&self->removed_maps[i]);
	}
}

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

static void map_groups__flush(struct map_groups *self)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &self->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
		}
	}
}

int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

static size_t __map_groups__fprintf_maps(struct map_groups *self,
					 enum map_type type, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(self, i, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
						 enum map_type type, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &self->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(self, i, fp);
	return printed;
}

static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(self, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(self, fp);
}

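/* Print one thread: its pid and comm, followed by all of its map groups. */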
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, fp);
}

struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

static void map_groups__remove_overlappings(struct map_groups *self,
					    struct map *map)
{
	struct rb_root *root = &self->maps[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", stderr);
			map__fprintf(map, stderr);
			map__fprintf(pos, stderr);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps[map->type]);
	}
}

void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}

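/*
 * Insert a map into the thread's map groups, first moving any overlapping
 * maps to the removed list.
 */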
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__remove_overlappings(&self->mg, map);
	map_groups__insert(&self->mg, map);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
static int map_groups__clone(struct map_groups *self,
			     struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(self, new);
	}
	return 0;
}

int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;
	return 0;
}

size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

struct symbol *map_groups__find_symbol(struct map_groups *self,
				       enum map_type type, u64 addr,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(self, type, addr);

	if (map != NULL)
		return map__find_symbol(map, map->map_ip(map, addr), filter);

	return NULL;
}