1 /*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/types.h>
28 #include <sys/cpuset.h>
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/stat.h>
32 #include <sys/pmc.h>
33
34 #include <assert.h>
35 #include <ctype.h>
36 #include <err.h>
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <limits.h>
40 #include <netdb.h>
41 #include <pmc.h>
42 #include <pmclog.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <strings.h>
47 #include <sysexits.h>
48 #include <unistd.h>
49
50 #include "libpmcstat.h"
51
52 /*
53 * Get PMC record by id, apply merge policy.
54 */
55
56 static struct pmcstat_pmcrecord *
pmcstat_lookup_pmcid(pmc_id_t pmcid,int pmcstat_mergepmc)57 pmcstat_lookup_pmcid(pmc_id_t pmcid, int pmcstat_mergepmc)
58 {
59 struct pmcstat_pmcrecord *pr;
60
61 LIST_FOREACH(pr, &pmcstat_pmcs, pr_next) {
62 if (pr->pr_pmcid == pmcid) {
63 if (pmcstat_mergepmc)
64 return pr->pr_merge;
65 return pr;
66 }
67 }
68
69 return NULL;
70 }
71
72 /*
73 * Add a {pmcid,name} mapping.
74 */
75
76 static void
pmcstat_pmcid_add(pmc_id_t pmcid,pmcstat_interned_string ps,struct pmcstat_args * args,struct pmc_plugins * plugins,int * pmcstat_npmcs)77 pmcstat_pmcid_add(pmc_id_t pmcid, pmcstat_interned_string ps,
78 struct pmcstat_args *args, struct pmc_plugins *plugins,
79 int *pmcstat_npmcs)
80 {
81 struct pmcstat_pmcrecord *pr, *prm;
82
83 /* Replace an existing name for the PMC. */
84 prm = NULL;
85 LIST_FOREACH(pr, &pmcstat_pmcs, pr_next)
86 if (pr->pr_pmcid == pmcid) {
87 pr->pr_pmcname = ps;
88 return;
89 } else if (pr->pr_pmcname == ps)
90 prm = pr;
91
92 /*
93 * Otherwise, allocate a new descriptor and call the
94 * plugins hook.
95 */
96 if ((pr = malloc(sizeof(*pr))) == NULL)
97 err(EX_OSERR, "ERROR: Cannot allocate pmc record");
98
99 pr->pr_pmcid = pmcid;
100 pr->pr_pmcname = ps;
101 pr->pr_pmcin = (*pmcstat_npmcs)++;
102 pr->pr_samples = 0;
103 pr->pr_dubious_frames = 0;
104 pr->pr_merge = prm == NULL ? pr : prm;
105
106 LIST_INSERT_HEAD(&pmcstat_pmcs, pr, pr_next);
107
108 if (plugins[args->pa_pplugin].pl_newpmc != NULL)
109 plugins[args->pa_pplugin].pl_newpmc(ps, pr);
110 if (plugins[args->pa_plugin].pl_newpmc != NULL)
111 plugins[args->pa_plugin].pl_newpmc(ps, pr);
112 }
113
114 /*
115 * Unmap images in the range [start..end) associated with process
116 * 'pp'.
117 */
118
static void
pmcstat_image_unmap(struct pmcstat_process *pp, uintfptr_t start,
    uintfptr_t end)
{
	struct pmcstat_pcmap *pcm, *pcmtmp, *pcmnew;

	assert(pp != NULL);
	assert(start < end);

	/*
	 * Map entries are half-open ranges [ppm_lowpc, ppm_highpc),
	 * kept sorted in the process' pp_map tail queue.
	 *
	 * Cases:
	 * - we could have the range completely in the middle of an
	 *   existing pcmap; in this case we have to split the pcmap
	 *   structure into two (i.e., generate a 'hole').
	 * - we could have the range covering multiple pcmaps; these
	 *   will have to be removed.
	 * - we could have either 'start' or 'end' falling in the
	 *   middle of a pcmap; in this case shorten the entry.
	 */
	TAILQ_FOREACH_SAFE(pcm, &pp->pp_map, ppm_next, pcmtmp) {
		assert(pcm->ppm_lowpc < pcm->ppm_highpc);
		/* Entirely below the unmapped range: keep looking. */
		if (pcm->ppm_highpc <= start)
			continue;
		/* Entirely above: the sorted list holds nothing else. */
		if (pcm->ppm_lowpc >= end)
			return;
		if (pcm->ppm_lowpc >= start && pcm->ppm_highpc <= end) {
			/*
			 * The current pcmap is completely inside the
			 * unmapped range: remove it entirely.
			 */
			TAILQ_REMOVE(&pp->pp_map, pcm, ppm_next);
			free(pcm);
		} else if (pcm->ppm_lowpc < start && pcm->ppm_highpc > end) {
			/*
			 * Split this pcmap into two; curtail the
			 * current map to end at [start-1], and start
			 * the new one at [end].
			 */
			if ((pcmnew = malloc(sizeof(*pcmnew))) == NULL)
				err(EX_OSERR,
				    "ERROR: Cannot split a map entry");

			pcmnew->ppm_image = pcm->ppm_image;

			pcmnew->ppm_lowpc = end;
			pcmnew->ppm_highpc = pcm->ppm_highpc;

			pcm->ppm_highpc = start;

			TAILQ_INSERT_AFTER(&pp->pp_map, pcm, pcmnew, ppm_next);

			/* A strict superset can overlap no other entry. */
			return;
		} else if (pcm->ppm_lowpc < start && pcm->ppm_highpc <= end)
			/* Overlap at the top of the entry: shorten it. */
			pcm->ppm_highpc = start;
		else if (pcm->ppm_lowpc >= start && pcm->ppm_highpc > end)
			/* Overlap at the bottom of the entry: shorten it. */
			pcm->ppm_lowpc = end;
		else
			assert(0);	/* the cases above are exhaustive */
	}
}
179
180 /*
181 * Convert a hwpmc(4) log to profile information. A system-wide
182 * callgraph is generated if FLAG_DO_CALLGRAPHS is set. gmon.out
183 * files usable by gprof(1) are created if FLAG_DO_GPROF is set.
184 */
int
pmcstat_analyze_log(struct pmcstat_args *args,
    struct pmc_plugins *plugins,
    struct pmcstat_stats *pmcstat_stats,
    struct pmcstat_process *pmcstat_kernproc,
    int pmcstat_mergepmc,
    int *pmcstat_npmcs,
    int *ps_samples_period)
{
	uint32_t cpu, cpuflags;
	pid_t pid;
	struct pmcstat_image *image;
	struct pmcstat_process *pp, *ppnew;
	struct pmcstat_pcmap *ppm, *ppmtmp;
	struct pmclog_ev ev;
	struct pmcstat_pmcrecord *pmcr;
	pmcstat_interned_string image_path;

	assert(args->pa_flags & FLAG_DO_ANALYSIS);

	if (elf_version(EV_CURRENT) == EV_NONE)
		err(EX_UNAVAILABLE, "Elf library initialization failed");

	/* Consume log records until the parser stops yielding them. */
	while (pmclog_read(args->pa_logparser, &ev) == 0) {
		assert(ev.pl_state == PMCLOG_OK);

		switch (ev.pl_type) {
		case PMCLOG_TYPE_INITIALIZE:
			/*
			 * Warn (at non-zero verbosity) when the log's
			 * major version byte disagrees with the one
			 * this binary was compiled against.
			 */
			if ((ev.pl_u.pl_i.pl_version & 0xFF000000) !=
			    PMC_VERSION_MAJOR << 24 && args->pa_verbosity > 0)
				warnx(
"WARNING: Log version 0x%x does not match compiled version 0x%x.",
				    ev.pl_u.pl_i.pl_version, PMC_VERSION_MAJOR);
			break;

		case PMCLOG_TYPE_MAP_IN:
			/*
			 * Introduce an address range mapping for a
			 * userland process or the kernel (pid == -1).
			 *
			 * We always allocate a process descriptor so
			 * that subsequent samples seen for this
			 * address range are mapped to the current
			 * object being mapped in.
			 */
			pid = ev.pl_u.pl_mi.pl_pid;
			if (pid == -1)
				pp = pmcstat_kernproc;
			else
				pp = pmcstat_process_lookup(pid,
				    PMCSTAT_ALLOCATE);

			assert(pp != NULL);

			image_path = pmcstat_string_intern(ev.pl_u.pl_mi.
			    pl_pathname);
			image = pmcstat_image_from_path(image_path, pid == -1,
			    args, plugins);
			if (image->pi_type == PMCSTAT_IMAGE_UNKNOWN)
				pmcstat_image_determine_type(image, args);
			/* Only link images whose type could be determined. */
			if (image->pi_type != PMCSTAT_IMAGE_INDETERMINABLE)
				pmcstat_image_link(pp, image,
				    ev.pl_u.pl_mi.pl_start);
			break;

		case PMCLOG_TYPE_MAP_OUT:
			/*
			 * Remove an address map.
			 */
			pid = ev.pl_u.pl_mo.pl_pid;
			if (pid == -1)
				pp = pmcstat_kernproc;
			else
				pp = pmcstat_process_lookup(pid, 0);

			if (pp == NULL)	/* unknown process */
				break;

			pmcstat_image_unmap(pp, ev.pl_u.pl_mo.pl_start,
			    ev.pl_u.pl_mo.pl_end);
			break;

		case PMCLOG_TYPE_CALLCHAIN:
			/* Count every sample, even ones filtered below. */
			pmcstat_stats->ps_samples_total++;
			*ps_samples_period += 1;

			cpuflags = ev.pl_u.pl_cc.pl_cpuflags;
			cpu = PMC_CALLCHAIN_CPUFLAGS_TO_CPU(cpuflags);

			/* Filter on the thread id, when requested. */
			if ((args->pa_flags & FLAG_FILTER_THREAD_ID) &&
			    args->pa_tid != ev.pl_u.pl_cc.pl_tid) {
				pmcstat_stats->ps_samples_skipped++;
				break;
			}
			/* Filter on the CPU id. */
			if (!CPU_ISSET(cpu, &(args->pa_cpumask))) {
				pmcstat_stats->ps_samples_skipped++;
				break;
			}

			pp = pmcstat_process_lookup(ev.pl_u.pl_cc.pl_pid,
			    PMCSTAT_ALLOCATE);

			/* Get PMC record. */
			pmcr = pmcstat_lookup_pmcid(ev.pl_u.pl_cc.pl_pmcid, pmcstat_mergepmc);
			assert(pmcr != NULL);
			pmcr->pr_samples++;

			/*
			 * Call the plugins processing
			 */

			/* The 'p'-plugin hook is optional ... */
			if (plugins[args->pa_pplugin].pl_process != NULL)
				plugins[args->pa_pplugin].pl_process(
				    pp, pmcr,
				    ev.pl_u.pl_cc.pl_npc,
				    ev.pl_u.pl_cc.pl_pc,
				    PMC_CALLCHAIN_CPUFLAGS_TO_USERMODE(cpuflags),
				    cpu);
			/* ... while the main plugin hook is mandatory. */
			plugins[args->pa_plugin].pl_process(
			    pp, pmcr,
			    ev.pl_u.pl_cc.pl_npc,
			    ev.pl_u.pl_cc.pl_pc,
			    PMC_CALLCHAIN_CPUFLAGS_TO_USERMODE(cpuflags),
			    cpu);
			break;

		case PMCLOG_TYPE_PMCALLOCATE:
			/*
			 * Record the association pmc id between this
			 * PMC and its name.
			 */
			pmcstat_pmcid_add(ev.pl_u.pl_a.pl_pmcid,
			    pmcstat_string_intern(ev.pl_u.pl_a.pl_evname),
			    args, plugins, pmcstat_npmcs);
			break;

		case PMCLOG_TYPE_PMCALLOCATEDYN:
			/*
			 * Record the association pmc id between this
			 * PMC and its name.
			 */
			pmcstat_pmcid_add(ev.pl_u.pl_ad.pl_pmcid,
			    pmcstat_string_intern(ev.pl_u.pl_ad.pl_evname),
			    args, plugins, pmcstat_npmcs);
			break;

		case PMCLOG_TYPE_PROCEXEC:
			/*
			 * Change the executable image associated with
			 * a process.
			 */
			pp = pmcstat_process_lookup(ev.pl_u.pl_x.pl_pid,
			    PMCSTAT_ALLOCATE);

			/* delete the current process map */
			TAILQ_FOREACH_SAFE(ppm, &pp->pp_map, ppm_next, ppmtmp) {
				TAILQ_REMOVE(&pp->pp_map, ppm, ppm_next);
				free(ppm);
			}

			/*
			 * Associate this process image.
			 */
			image_path = pmcstat_string_intern(
				ev.pl_u.pl_x.pl_pathname);
			assert(image_path != NULL);
			pmcstat_process_exec(pp, image_path,
			    ev.pl_u.pl_x.pl_baseaddr, ev.pl_u.pl_x.pl_dynaddr,
			    args, plugins, pmcstat_stats);
			break;

		case PMCLOG_TYPE_PROCEXIT:

			/*
			 * Due to the way the log is generated, the
			 * last few samples corresponding to a process
			 * may appear in the log after the process
			 * exit event is recorded.  Thus we keep the
			 * process' descriptor and associated data
			 * structures around, but mark the process as
			 * having exited.
			 */
			pp = pmcstat_process_lookup(ev.pl_u.pl_e.pl_pid, 0);
			if (pp == NULL)
				break;
			pp->pp_isactive = 0;	/* mark as a zombie */
			break;

		case PMCLOG_TYPE_SYSEXIT:
			pp = pmcstat_process_lookup(ev.pl_u.pl_se.pl_pid, 0);
			if (pp == NULL)
				break;
			pp->pp_isactive = 0;	/* make a zombie */
			break;

		case PMCLOG_TYPE_PROCFORK:

			/*
			 * Allocate a process descriptor for the new
			 * (child) process.
			 */
			ppnew =
			    pmcstat_process_lookup(ev.pl_u.pl_f.pl_newpid,
				PMCSTAT_ALLOCATE);

			/*
			 * If we had been tracking the parent, clone
			 * its address maps.
			 */
			pp = pmcstat_process_lookup(ev.pl_u.pl_f.pl_oldpid, 0);
			if (pp == NULL)
				break;
			TAILQ_FOREACH(ppm, &pp->pp_map, ppm_next)
			    pmcstat_image_link(ppnew, ppm->ppm_image,
				ppm->ppm_lowpc);
			break;

		default:	/* other types of entries are not relevant */
			break;
		}
	}

	/*
	 * The read loop ends either at end-of-file (a complete log),
	 * when the parser needs more data (a live pipe), or on a
	 * malformed record (fatal).
	 */
	if (ev.pl_state == PMCLOG_EOF)
		return (PMCSTAT_FINISHED);
	else if (ev.pl_state == PMCLOG_REQUIRE_DATA)
		return (PMCSTAT_RUNNING);

	err(EX_DATAERR,
"ERROR: event parsing failed state: %d type: %d (record %jd, offset 0x%jx)",
	    ev.pl_state, ev.pl_type, (uintmax_t) ev.pl_count + 1, ev.pl_offset);
}
417
418 /*
419 * Open a log file, for reading or writing.
420 *
421 * The function returns the fd of a successfully opened log or -1 in
422 * case of failure.
423 */
424
int
pmcstat_open_log(const char *path, int mode)
{
	int error, fd, cfd;
	size_t hlen;
	const char *p, *errstr;
	struct addrinfo hints, *res, *res0;
	char hostname[MAXHOSTNAMELEN];

	errstr = NULL;
	fd = -1;

	/*
	 * If 'path' is "-" then open one of stdin or stdout depending
	 * on the value of 'mode'.
	 *
	 * If 'path' contains a ':' and does not start with a '/' or '.',
	 * and is being opened for writing, treat it as a "host:port"
	 * specification and open a network socket.
	 *
	 * Otherwise, treat 'path' as a file name and open that.
	 */
	if (path[0] == '-' && path[1] == '\0')
		fd = (mode == PMCSTAT_OPEN_FOR_READ) ? 0 : 1;
	else if (path[0] != '/' &&
	    path[0] != '.' && strchr(path, ':') != NULL) {

		/* Split "host:port" at the last ':'. */
		p = strrchr(path, ':');
		hlen = p - path;
		if (p == path || hlen >= sizeof(hostname)) {
			errstr = strerror(EINVAL);
			goto done;
		}

		/* 'hlen' is validated above; copy and NUL-terminate. */
		(void) memcpy(hostname, path, hlen);
		hostname[hlen] = '\0';

		(void) memset(&hints, 0, sizeof(hints));
		hints.ai_family = AF_UNSPEC;
		hints.ai_socktype = SOCK_STREAM;
		if ((error = getaddrinfo(hostname, p+1, &hints, &res0)) != 0) {
			errstr = gai_strerror(error);
			goto done;
		}

		/* Try each resolved address until one works. */
		fd = -1;
		for (res = res0; res; res = res->ai_next) {
			if ((fd = socket(res->ai_family, res->ai_socktype,
			    res->ai_protocol)) < 0) {
				errstr = strerror(errno);
				continue;
			}
			if (mode == PMCSTAT_OPEN_FOR_READ) {
				/* Reading: accept a single connection. */
				if (bind(fd, res->ai_addr, res->ai_addrlen) < 0) {
					errstr = strerror(errno);
					(void) close(fd);
					fd = -1;
					continue;
				}
				if (listen(fd, 1) < 0) {
					errstr = strerror(errno);
					(void) close(fd);
					fd = -1;
					continue;
				}
				cfd = accept(fd, NULL, NULL);
				(void) close(fd);
				if (cfd < 0) {
					errstr = strerror(errno);
					fd = -1;
					break;
				}
				fd = cfd;
			} else {
				/* Writing: connect out to the collector. */
				if (connect(fd, res->ai_addr, res->ai_addrlen) < 0) {
					errstr = strerror(errno);
					(void) close(fd);
					fd = -1;
					continue;
				}
			}
			errstr = NULL;
			break;
		}
		freeaddrinfo(res0);

	} else if ((fd = open(path, mode == PMCSTAT_OPEN_FOR_READ ?
	    O_RDONLY : (O_WRONLY|O_CREAT|O_TRUNC),
	    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
			errstr = strerror(errno);

  done:
	/* Any recorded error is fatal. */
	if (errstr)
		errx(EX_OSERR, "ERROR: Cannot open \"%s\" for %s: %s.", path,
		    (mode == PMCSTAT_OPEN_FOR_READ ? "reading" : "writing"),
		    errstr);

	return (fd);
}
520
521 /*
522 * Close a logfile, after first flushing all in-module queued data.
523 */
524
525 int
pmcstat_close_log(struct pmcstat_args * args)526 pmcstat_close_log(struct pmcstat_args *args)
527 {
528 /* If a local logfile is configured ask the kernel to stop
529 * and flush data. Kernel will close the file when data is flushed
530 * so keep the status to EXITING.
531 */
532 if (args->pa_logfd != -1) {
533 if (pmc_close_logfile() < 0)
534 err(EX_OSERR, "ERROR: logging failed");
535 }
536
537 return (args->pa_flags & FLAG_HAS_PIPE ? PMCSTAT_EXITING :
538 PMCSTAT_FINISHED);
539 }
540
541 /*
542 * Initialize module.
543 */
544
545 void
pmcstat_initialize_logging(struct pmcstat_process ** pmcstat_kernproc,struct pmcstat_args * args,struct pmc_plugins * plugins,int * pmcstat_npmcs,int * pmcstat_mergepmc)546 pmcstat_initialize_logging(struct pmcstat_process **pmcstat_kernproc,
547 struct pmcstat_args *args, struct pmc_plugins *plugins,
548 int *pmcstat_npmcs, int *pmcstat_mergepmc)
549 {
550 struct pmcstat_process *pmcstat_kp;
551 int i;
552
553 /* use a convenient format for 'ldd' output */
554 if (setenv("LD_TRACE_LOADED_OBJECTS_FMT1","%o \"%p\" %x\n",1) != 0)
555 err(EX_OSERR, "ERROR: Cannot setenv");
556
557 /* Initialize hash tables */
558 pmcstat_string_initialize();
559 for (i = 0; i < PMCSTAT_NHASH; i++) {
560 LIST_INIT(&pmcstat_image_hash[i]);
561 LIST_INIT(&pmcstat_process_hash[i]);
562 }
563
564 /*
565 * Create a fake 'process' entry for the kernel with pid -1.
566 * hwpmc(4) will subsequently inform us about where the kernel
567 * and any loaded kernel modules are mapped.
568 */
569 if ((pmcstat_kp = pmcstat_process_lookup((pid_t) -1,
570 PMCSTAT_ALLOCATE)) == NULL)
571 err(EX_OSERR, "ERROR: Cannot initialize logging");
572
573 *pmcstat_kernproc = pmcstat_kp;
574
575 /* PMC count. */
576 *pmcstat_npmcs = 0;
577
578 /* Merge PMC with same name. */
579 *pmcstat_mergepmc = args->pa_mergepmc;
580
581 /*
582 * Initialize plugins
583 */
584
585 if (plugins[args->pa_pplugin].pl_init != NULL)
586 plugins[args->pa_pplugin].pl_init();
587 if (plugins[args->pa_plugin].pl_init != NULL)
588 plugins[args->pa_plugin].pl_init();
589 }
590
591 /*
592 * Shutdown module.
593 */
594
595 void
pmcstat_shutdown_logging(struct pmcstat_args * args,struct pmc_plugins * plugins,struct pmcstat_stats * pmcstat_stats)596 pmcstat_shutdown_logging(struct pmcstat_args *args,
597 struct pmc_plugins *plugins,
598 struct pmcstat_stats *pmcstat_stats)
599 {
600 struct pmcstat_image *pi, *pitmp;
601 struct pmcstat_process *pp, *pptmp;
602 struct pmcstat_pcmap *ppm, *ppmtmp;
603 FILE *mf;
604 int i;
605
606 /* determine where to send the map file */
607 mf = NULL;
608 if (args->pa_mapfilename != NULL)
609 mf = (strcmp(args->pa_mapfilename, "-") == 0) ?
610 args->pa_printfile : fopen(args->pa_mapfilename, "w");
611
612 if (mf == NULL && args->pa_flags & FLAG_DO_GPROF &&
613 args->pa_verbosity >= 2)
614 mf = args->pa_printfile;
615
616 if (mf)
617 (void) fprintf(mf, "MAP:\n");
618
619 /*
620 * Shutdown the plugins
621 */
622
623 if (plugins[args->pa_plugin].pl_shutdown != NULL)
624 plugins[args->pa_plugin].pl_shutdown(mf);
625 if (plugins[args->pa_pplugin].pl_shutdown != NULL)
626 plugins[args->pa_pplugin].pl_shutdown(mf);
627
628 for (i = 0; i < PMCSTAT_NHASH; i++) {
629 LIST_FOREACH_SAFE(pi, &pmcstat_image_hash[i], pi_next,
630 pitmp) {
631 if (plugins[args->pa_plugin].pl_shutdownimage != NULL)
632 plugins[args->pa_plugin].pl_shutdownimage(pi);
633 if (plugins[args->pa_pplugin].pl_shutdownimage != NULL)
634 plugins[args->pa_pplugin].pl_shutdownimage(pi);
635
636 free(pi->pi_symbols);
637 if (pi->pi_addr2line != NULL)
638 pclose(pi->pi_addr2line);
639 LIST_REMOVE(pi, pi_next);
640 free(pi);
641 }
642
643 LIST_FOREACH_SAFE(pp, &pmcstat_process_hash[i], pp_next,
644 pptmp) {
645 TAILQ_FOREACH_SAFE(ppm, &pp->pp_map, ppm_next, ppmtmp) {
646 TAILQ_REMOVE(&pp->pp_map, ppm, ppm_next);
647 free(ppm);
648 }
649 LIST_REMOVE(pp, pp_next);
650 free(pp);
651 }
652 }
653
654 pmcstat_string_shutdown();
655
656 /*
657 * Print errors unless -q was specified. Print all statistics
658 * if verbosity > 1.
659 */
660 #define PRINT(N,V) do { \
661 if (pmcstat_stats->ps_##V || args->pa_verbosity >= 2) \
662 (void) fprintf(args->pa_printfile, " %-40s %d\n",\
663 N, pmcstat_stats->ps_##V); \
664 } while (0)
665
666 if (args->pa_verbosity >= 1 && (args->pa_flags & FLAG_DO_ANALYSIS)) {
667 (void) fprintf(args->pa_printfile, "CONVERSION STATISTICS:\n");
668 PRINT("#exec/a.out", exec_aout);
669 PRINT("#exec/elf", exec_elf);
670 PRINT("#exec/unknown", exec_indeterminable);
671 PRINT("#exec handling errors", exec_errors);
672 PRINT("#samples/total", samples_total);
673 PRINT("#samples/unclaimed", samples_unknown_offset);
674 PRINT("#samples/unknown-object", samples_indeterminable);
675 PRINT("#samples/unknown-function", samples_unknown_function);
676 PRINT("#callchain/dubious-frames", callchain_dubious_frames);
677 }
678
679 if (mf)
680 (void) fclose(mf);
681 }
682