xref: /linux/tools/perf/util/stat.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta * (val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

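/*
 * update_stats() is Welford's online algorithm: mean and M2 (the sum
 * of squared deviations from the current mean) are maintained
 * incrementally, so no history of samples is kept. An illustrative
 * sketch (not part of this file) of a caller accumulating three
 * measurements:
 *
 *	struct stats st;
 *
 *	init_stats(&st);
 *	update_stats(&st, 100);		// n=1 mean=100.0 M2=0.0
 *	update_stats(&st, 110);		// n=2 mean=105.0 M2=50.0
 *	update_stats(&st, 120);		// n=3 mean=110.0 M2=200.0
 */
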
double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

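/*
 * Note that stddev_stats() returns the standard deviation of the
 * *mean* (the standard error), not of the samples: the sample
 * variance is divided by n once more before the square root.
 * Continuing the sketch above (n=3, M2=200.0):
 *
 *	sample variance  = 200.0 / (3 - 1) = 100.0
 *	variance of mean = 100.0 / 3      ~= 33.33
 *	stddev_stats()   = sqrt(33.33)    ~= 5.77
 */
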
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev / avg;

	return pct;
}

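/*
 * rel_stddev_stats() is the coefficient of variation in percent:
 * stddev ~5.77 against an average of 110.0 gives ~5.25%, the kind of
 * "+- N.NN%" noise figure printed for repeated runs (perf stat -r).
 */
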
bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

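/*
 * For reference, the ID() macro expands each entry above to an
 * index/string pair, e.g.:
 *
 *	ID(SMI_NUM, msr/smi/)
 *
 * becomes
 *
 *	[PERF_STAT_EVSEL_ID__SMI_NUM] = "msr/smi/",
 *
 * so id_str[] maps every perf_stat_evsel_id to the event name it is
 * matched against.
 */
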
static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

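/*
 * E.g. an evsel whose evsel__name() is "msr/aperf/" ends up with
 * ps->id == PERF_STAT_EVSEL_ID__APERF, after which
 * __perf_stat_evsel__is() serves as a cheap identity check in place
 * of repeated strcmp() calls.
 */
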
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	/* One stats struct each for the value, enabled and running times. */
	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

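/*
 * A minimal usage sketch, with names assumed from the callers in
 * builtin-stat.c: allocate once up front, process after every read,
 * free on exit.
 *
 *	if (evlist__alloc_stats(evlist, false) < 0)
 *		return -ENOMEM;
 *	...
 *	evlist__for_each_entry(evlist, counter)
 *		perf_stat_process_counter(&stat_config, counter);
 *	...
 *	evlist__free_stats(evlist);
 */
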
void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		for (int cpu = 0; cpu < ncpus; cpu++) {
			*perf_counts(evsel->counts, cpu, thread) =
				*perf_counts(evsel->prev_raw_counts, cpu,
					     thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. perf_stat_process_counter() creates
	 * aggr values from per-cpu values, but the per-cpu values
	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
	 * previous aggr value to the first member of perf_counts,
	 * so that the aggr calculation in process_counter_values()
	 * can work correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}

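/*
 * Sketch of the interval-mode summary sequence this enables (call
 * order assumed from builtin-stat.c, AGGR_GLOBAL case):
 *
 *	evlist__save_aggr_prev_raw_counts(evlist); // stash aggr in (0, 0)
 *	evlist__copy_prev_raw_counts(evlist);      // prev_raw -> counts
 *	evlist__for_each_entry(evlist, counter)
 *		perf_stat_process_counter(&stat_config, counter);
 *
 * so the overall totals are re-derived from the saved aggregates.
 */
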
static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(const void *__key1, const void *__key2,
			 void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0. On a non-die system, die_id = 0.
	 * We use the hashmap(socket, die) to check the used socket+die pair.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, (void *)key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, (void *)key, (void *)1);

	return ret;
}

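/*
 * The 64-bit hashmap key packs the die id into the upper and the
 * socket id into the lower 32 bits, so pkg_id_hash() buckets by
 * socket via "*key & 0xffffffff". E.g. socket 1, die 2:
 *
 *	*key = (uint64_t)2 << 32 | 1;	// 0x0000000200000001
 *
 * The first counted CPU on a given socket+die pair claims it; every
 * later CPU on the same pair comes back with *skip = true.
 */
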
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu_map_idx, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
		/* fall through: AGGR_GLOBAL deltas/scaling are applied to the sum. */
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

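/*
 * perf_counts_values__scale() (libperf) compensates for event
 * multiplexing: when run < ena, the raw value is extrapolated as
 * val * ena / run. With illustrative numbers for a counter that was
 * scheduled in for half of its enabled time:
 *
 *	count->val = 100, count->ena = 1000000, count->run = 500000
 *
 * the scaled value becomes 200 (and run == 0 zeroes the value).
 */
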
static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;	/* { val, ena, run } */
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

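/*
 * Feeding count[0..2] through update_stats() is what makes repeated
 * runs (perf stat -r N) work: each run contributes one sample per
 * field, and the printout later reports avg_stats() of the value
 * alongside its stddev_stats()-based "+- %" noise estimate.
 */
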
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

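/*
 * This is the `perf stat report` path: counts captured by
 * `perf stat record` arrive as PERF_RECORD_STAT events, are matched
 * back to their evsel by sample id, and land in the same
 * counter->counts table that live counting fills via read().
 */
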
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

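/*
 * With raw event dumping enabled, the two helpers above render the
 * stat and stat-round payloads like (illustrative values):
 *
 *	... id 1234, cpu 0, thread 0
 *	... value 1000000, enabled 500000, running 500000
 *	... time 1000000000, type INTERVAL
 */
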
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * If the event is part of a non-trivial group, enable the
	 * group read (for the leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID | PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled
	 * either manually by us or by the kernel via enable_on_exec
	 * set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
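
/*
 * For reference (see perf_event_open(2)): with the read_format set up
 * above, a read() on a group leader returns
 *
 *	struct read_format {
 *		u64 nr;			// number of events in the group
 *		u64 time_enabled;
 *		u64 time_running;
 *		struct { u64 value; u64 id; } cnt[nr];
 *	};
 *
 * while a lone counter returns just { value, time_enabled, time_running }.
 */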
593