/*
 * tools/perf/util/stat.c — reconstructed from a side-by-side diff between
 * commits 002c6ca75289a4ac4f6738213dd2d258704886e4 (old) and
 * e5f4afbe395f1248e7501d470118a2a947fe87e6 (new).
 */
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <linux/err.h>
4#include <inttypes.h>
5#include <math.h>
6#include <string.h>
7#include "counts.h"
8#include "cpumap.h"

--- 116 unchanged lines hidden (view full) ---

125 (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
126 && strstr(evsel__name(evsel), evsel->pmu_name))) {
127 ps->id = i;
128 break;
129 }
130 }
131}
132
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <linux/err.h>
4#include <inttypes.h>
5#include <math.h>
6#include <string.h>
7#include "counts.h"
8#include "cpumap.h"

--- 116 unchanged lines hidden (view full) ---

125 (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
126 && strstr(evsel__name(evsel), evsel->pmu_name))) {
127 ps->id = i;
128 break;
129 }
130 }
131}
132
133static void evsel__reset_aggr_stats(struct evsel *evsel)
134{
135 struct perf_stat_evsel *ps = evsel->stats;
136 struct perf_stat_aggr *aggr = ps->aggr;
137
138 if (aggr)
139 memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
140}
141
133static void evsel__reset_stat_priv(struct evsel *evsel)
134{
135 struct perf_stat_evsel *ps = evsel->stats;
136
137 init_stats(&ps->res_stats);
142static void evsel__reset_stat_priv(struct evsel *evsel)
143{
144 struct perf_stat_evsel *ps = evsel->stats;
145
146 init_stats(&ps->res_stats);
147 evsel__reset_aggr_stats(evsel);
138}
139
148}
149
140static int evsel__alloc_stat_priv(struct evsel *evsel)
150static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
141{
151{
142 evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
143 if (evsel->stats == NULL)
152 struct perf_stat_evsel *ps = evsel->stats;
153
154 if (ps == NULL)
155 return 0;
156
157 ps->nr_aggr = nr_aggr;
158 ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
159 if (ps->aggr == NULL)
144 return -ENOMEM;
160 return -ENOMEM;
161
162 return 0;
163}
164
165int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
166{
167 struct evsel *evsel;
168
169 evlist__for_each_entry(evlist, evsel) {
170 if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
171 return -1;
172 }
173 return 0;
174}
175
176static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
177{
178 struct perf_stat_evsel *ps;
179
180 ps = zalloc(sizeof(*ps));
181 if (ps == NULL)
182 return -ENOMEM;
183
184 evsel->stats = ps;
185
186 if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
187 evsel->stats = NULL;
188 free(ps);
189 return -ENOMEM;
190 }
191
145 perf_stat_evsel_id_init(evsel);
146 evsel__reset_stat_priv(evsel);
147 return 0;
148}
149
150static void evsel__free_stat_priv(struct evsel *evsel)
151{
152 struct perf_stat_evsel *ps = evsel->stats;
153
192 perf_stat_evsel_id_init(evsel);
193 evsel__reset_stat_priv(evsel);
194 return 0;
195}
196
197static void evsel__free_stat_priv(struct evsel *evsel)
198{
199 struct perf_stat_evsel *ps = evsel->stats;
200
154 if (ps)
201 if (ps) {
202 zfree(&ps->aggr);
155 zfree(&ps->group_data);
203 zfree(&ps->group_data);
204 }
156 zfree(&evsel->stats);
157}
158
159static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
160{
161 int cpu_map_nr = evsel__nr_cpus(evsel);
162 int nthreads = perf_thread_map__nr(evsel->core.threads);
163 struct perf_counts *counts;

--- 12 unchanged lines hidden (view full) ---

176}
177
178static void evsel__reset_prev_raw_counts(struct evsel *evsel)
179{
180 if (evsel->prev_raw_counts)
181 perf_counts__reset(evsel->prev_raw_counts);
182}
183
205 zfree(&evsel->stats);
206}
207
208static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
209{
210 int cpu_map_nr = evsel__nr_cpus(evsel);
211 int nthreads = perf_thread_map__nr(evsel->core.threads);
212 struct perf_counts *counts;

--- 12 unchanged lines hidden (view full) ---

225}
226
227static void evsel__reset_prev_raw_counts(struct evsel *evsel)
228{
229 if (evsel->prev_raw_counts)
230 perf_counts__reset(evsel->prev_raw_counts);
231}
232
184static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
233static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
185{
234{
186 if (evsel__alloc_stat_priv(evsel) < 0 ||
235 if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
187 evsel__alloc_counts(evsel) < 0 ||
188 (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
189 return -ENOMEM;
190
191 return 0;
192}
193
236 evsel__alloc_counts(evsel) < 0 ||
237 (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
238 return -ENOMEM;
239
240 return 0;
241}
242
194int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
243int evlist__alloc_stats(struct perf_stat_config *config,
244 struct evlist *evlist, bool alloc_raw)
195{
196 struct evsel *evsel;
245{
246 struct evsel *evsel;
247 int nr_aggr = 0;
197
248
249 if (config && config->aggr_map)
250 nr_aggr = config->aggr_map->nr;
251
198 evlist__for_each_entry(evlist, evsel) {
252 evlist__for_each_entry(evlist, evsel) {
199 if (evsel__alloc_stats(evsel, alloc_raw))
253 if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
200 goto out_free;
201 }
202
203 return 0;
204
205out_free:
206 evlist__free_stats(evlist);
207 return -1;

--- 15 unchanged lines hidden (view full) ---

223 struct evsel *evsel;
224
225 evlist__for_each_entry(evlist, evsel) {
226 evsel__reset_stat_priv(evsel);
227 evsel__reset_counts(evsel);
228 }
229}
230
254 goto out_free;
255 }
256
257 return 0;
258
259out_free:
260 evlist__free_stats(evlist);
261 return -1;

--- 15 unchanged lines hidden (view full) ---

277 struct evsel *evsel;
278
279 evlist__for_each_entry(evlist, evsel) {
280 evsel__reset_stat_priv(evsel);
281 evsel__reset_counts(evsel);
282 }
283}
284
285void evlist__reset_aggr_stats(struct evlist *evlist)
286{
287 struct evsel *evsel;
288
289 evlist__for_each_entry(evlist, evsel)
290 evsel__reset_aggr_stats(evsel);
291}
292
231void evlist__reset_prev_raw_counts(struct evlist *evlist)
232{
233 struct evsel *evsel;
234
235 evlist__for_each_entry(evlist, evsel)
236 evsel__reset_prev_raw_counts(evsel);
237}
238
239static void evsel__copy_prev_raw_counts(struct evsel *evsel)
240{
241 int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
242
243 for (int thread = 0; thread < nthreads; thread++) {
244 perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
245 *perf_counts(evsel->counts, idx, thread) =
246 *perf_counts(evsel->prev_raw_counts, idx, thread);
247 }
248 }
293void evlist__reset_prev_raw_counts(struct evlist *evlist)
294{
295 struct evsel *evsel;
296
297 evlist__for_each_entry(evlist, evsel)
298 evsel__reset_prev_raw_counts(evsel);
299}
300
301static void evsel__copy_prev_raw_counts(struct evsel *evsel)
302{
303 int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
304
305 for (int thread = 0; thread < nthreads; thread++) {
306 perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
307 *perf_counts(evsel->counts, idx, thread) =
308 *perf_counts(evsel->prev_raw_counts, idx, thread);
309 }
310 }
249
250 evsel->counts->aggr = evsel->prev_raw_counts->aggr;
251}
252
253void evlist__copy_prev_raw_counts(struct evlist *evlist)
254{
255 struct evsel *evsel;
256
257 evlist__for_each_entry(evlist, evsel)
258 evsel__copy_prev_raw_counts(evsel);
259}
260
311}
312
313void evlist__copy_prev_raw_counts(struct evlist *evlist)
314{
315 struct evsel *evsel;
316
317 evlist__for_each_entry(evlist, evsel)
318 evsel__copy_prev_raw_counts(evsel);
319}
320
261void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
262{
263 struct evsel *evsel;
264
265 /*
266 * To collect the overall statistics for interval mode,
267 * we copy the counts from evsel->prev_raw_counts to
268 * evsel->counts. The perf_stat_process_counter creates
269 * aggr values from per cpu values, but the per cpu values
270 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
271 * previous aggr value to the first member of perf_counts,
272 * then aggr calculation in process_counter_values can work
273 * correctly.
274 */
275 evlist__for_each_entry(evlist, evsel) {
276 *perf_counts(evsel->prev_raw_counts, 0, 0) =
277 evsel->prev_raw_counts->aggr;
278 }
279}
280
281static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
282{
283 uint64_t *key = (uint64_t *) __key;
284
285 return *key & 0xffffffff;
286}
287
288static bool pkg_id_equal(const void *__key1, const void *__key2,

--- 62 unchanged lines hidden (view full) ---

351 *skip = true;
352 free(key);
353 } else
354 ret = hashmap__add(mask, (void *)key, (void *)1);
355
356 return ret;
357}
358
321static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
322{
323 uint64_t *key = (uint64_t *) __key;
324
325 return *key & 0xffffffff;
326}
327
328static bool pkg_id_equal(const void *__key1, const void *__key2,

--- 62 unchanged lines hidden (view full) ---

391 *skip = true;
392 free(key);
393 } else
394 ret = hashmap__add(mask, (void *)key, (void *)1);
395
396 return ret;
397}
398
399static bool evsel__count_has_error(struct evsel *evsel,
400 struct perf_counts_values *count,
401 struct perf_stat_config *config)
402{
403 /* the evsel was failed already */
404 if (evsel->err || evsel->counts->scaled == -1)
405 return true;
406
407 /* this is meaningful for CPU aggregation modes only */
408 if (config->aggr_mode == AGGR_GLOBAL)
409 return false;
410
411 /* it's considered ok when it actually ran */
412 if (count->ena != 0 && count->run != 0)
413 return false;
414
415 return true;
416}
417
359static int
360process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
361 int cpu_map_idx, int thread,
362 struct perf_counts_values *count)
363{
418static int
419process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
420 int cpu_map_idx, int thread,
421 struct perf_counts_values *count)
422{
364 struct perf_counts_values *aggr = &evsel->counts->aggr;
423 struct perf_stat_evsel *ps = evsel->stats;
365 static struct perf_counts_values zero;
366 bool skip = false;
367
368 if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
369 pr_err("failed to read per-pkg counter\n");
370 return -1;
371 }
372
373 if (skip)
374 count = &zero;
375
424 static struct perf_counts_values zero;
425 bool skip = false;
426
427 if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
428 pr_err("failed to read per-pkg counter\n");
429 return -1;
430 }
431
432 if (skip)
433 count = &zero;
434
376 switch (config->aggr_mode) {
377 case AGGR_THREAD:
378 case AGGR_CORE:
379 case AGGR_DIE:
380 case AGGR_SOCKET:
381 case AGGR_NODE:
382 case AGGR_NONE:
383 if (!evsel->snapshot)
384 evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
385 perf_counts_values__scale(count, config->scale, NULL);
386 if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
387 perf_stat__update_shadow_stats(evsel, count->val,
388 cpu_map_idx, &rt_stat);
389 }
435 if (!evsel->snapshot)
436 evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
437 perf_counts_values__scale(count, config->scale, NULL);
390
438
391 if (config->aggr_mode == AGGR_THREAD) {
392 perf_stat__update_shadow_stats(evsel, count->val,
393 thread, &rt_stat);
439 if (config->aggr_mode == AGGR_THREAD) {
440 struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;
441
442 /*
443 * Skip value 0 when enabling --per-thread globally,
444 * otherwise too many 0 output.
445 */
446 if (count->val == 0 && config->system_wide)
447 return 0;
448
449 ps->aggr[thread].nr++;
450
451 aggr_counts->val += count->val;
452 aggr_counts->ena += count->ena;
453 aggr_counts->run += count->run;
454 return 0;
455 }
456
457 if (ps->aggr) {
458 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
459 struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
460 struct perf_stat_aggr *ps_aggr;
461 int i;
462
463 for (i = 0; i < ps->nr_aggr; i++) {
464 if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
465 continue;
466
467 ps_aggr = &ps->aggr[i];
468 ps_aggr->nr++;
469
470 /*
471 * When any result is bad, make them all to give consistent output
472 * in interval mode. But per-task counters can have 0 enabled time
473 * when some tasks are idle.
474 */
475 if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
476 ps_aggr->counts.val = 0;
477 ps_aggr->counts.ena = 0;
478 ps_aggr->counts.run = 0;
479 ps_aggr->failed = true;
480 }
481
482 if (!ps_aggr->failed) {
483 ps_aggr->counts.val += count->val;
484 ps_aggr->counts.ena += count->ena;
485 ps_aggr->counts.run += count->run;
486 }
487 break;
394 }
488 }
395 break;
396 case AGGR_GLOBAL:
397 aggr->val += count->val;
398 aggr->ena += count->ena;
399 aggr->run += count->run;
400 case AGGR_UNSET:
401 case AGGR_MAX:
402 default:
403 break;
404 }
405
406 return 0;
407}
408
409static int process_counter_maps(struct perf_stat_config *config,
410 struct evsel *counter)
411{

--- 10 unchanged lines hidden (view full) ---

422 }
423
424 return 0;
425}
426
427int perf_stat_process_counter(struct perf_stat_config *config,
428 struct evsel *counter)
429{
489 }
490
491 return 0;
492}
493
494static int process_counter_maps(struct perf_stat_config *config,
495 struct evsel *counter)
496{

--- 10 unchanged lines hidden (view full) ---

507 }
508
509 return 0;
510}
511
512int perf_stat_process_counter(struct perf_stat_config *config,
513 struct evsel *counter)
514{
430 struct perf_counts_values *aggr = &counter->counts->aggr;
431 struct perf_stat_evsel *ps = counter->stats;
515 struct perf_stat_evsel *ps = counter->stats;
432 u64 *count = counter->counts->aggr.values;
516 u64 *count;
433 int ret;
434
517 int ret;
518
435 aggr->val = aggr->ena = aggr->run = 0;
436
437 if (counter->per_pkg)
438 evsel__zero_per_pkg(counter);
439
440 ret = process_counter_maps(config, counter);
441 if (ret)
442 return ret;
443
444 if (config->aggr_mode != AGGR_GLOBAL)
445 return 0;
446
519 if (counter->per_pkg)
520 evsel__zero_per_pkg(counter);
521
522 ret = process_counter_maps(config, counter);
523 if (ret)
524 return ret;
525
526 if (config->aggr_mode != AGGR_GLOBAL)
527 return 0;
528
447 if (!counter->snapshot)
448 evsel__compute_deltas(counter, -1, -1, aggr);
449 perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
450
529 /*
530 * GLOBAL aggregation mode only has a single aggr counts,
531 * so we can use ps->aggr[0] as the actual output.
532 */
533 count = ps->aggr[0].counts.values;
451 update_stats(&ps->res_stats, *count);
452
453 if (verbose > 0) {
454 fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
455 evsel__name(counter), count[0], count[1], count[2]);
456 }
457
534 update_stats(&ps->res_stats, *count);
535
536 if (verbose > 0) {
537 fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
538 evsel__name(counter), count[0], count[1], count[2]);
539 }
540
458 /*
459 * Save the full runtime - to allow normalization during printout:
460 */
461 perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);
541 return 0;
542}
462
543
544static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
545{
546 struct perf_stat_evsel *ps_a = evsel->stats;
547 struct perf_stat_evsel *ps_b = alias->stats;
548 int i;
549
550 if (ps_a->aggr == NULL && ps_b->aggr == NULL)
551 return 0;
552
553 if (ps_a->nr_aggr != ps_b->nr_aggr) {
554 pr_err("Unmatched aggregation mode between aliases\n");
555 return -1;
556 }
557
558 for (i = 0; i < ps_a->nr_aggr; i++) {
559 struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
560 struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;
561
562 /* NB: don't increase aggr.nr for aliases */
563
564 aggr_counts_a->val += aggr_counts_b->val;
565 aggr_counts_a->ena += aggr_counts_b->ena;
566 aggr_counts_a->run += aggr_counts_b->run;
567 }
568
463 return 0;
464}
569 return 0;
570}
571/* events should have the same name, scale, unit, cgroup but on different PMUs */
572static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
573{
574 if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
575 return false;
465
576
577 if (evsel_a->scale != evsel_b->scale)
578 return false;
579
580 if (evsel_a->cgrp != evsel_b->cgrp)
581 return false;
582
583 if (strcmp(evsel_a->unit, evsel_b->unit))
584 return false;
585
586 if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
587 return false;
588
589 return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
590}
591
592static void evsel__merge_aliases(struct evsel *evsel)
593{
594 struct evlist *evlist = evsel->evlist;
595 struct evsel *alias;
596
597 alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
598 list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
599 /* Merge the same events on different PMUs. */
600 if (evsel__is_alias(evsel, alias)) {
601 evsel__merge_aggr_counters(evsel, alias);
602 alias->merged_stat = true;
603 }
604 }
605}
606
607static bool evsel__should_merge_hybrid(const struct evsel *evsel,
608 const struct perf_stat_config *config)
609{
610 return config->hybrid_merge && evsel__is_hybrid(evsel);
611}
612
613static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
614{
615 /* this evsel is already merged */
616 if (evsel->merged_stat)
617 return;
618
619 if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
620 evsel__merge_aliases(evsel);
621}
622
623/* merge the same uncore and hybrid events if requested */
624void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
625{
626 struct evsel *evsel;
627
628 if (config->no_merge)
629 return;
630
631 evlist__for_each_entry(evlist, evsel)
632 evsel__merge_stats(evsel, config);
633}
634
635static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
636{
637 struct perf_stat_evsel *ps = evsel->stats;
638 struct perf_counts_values counts = { 0, };
639 struct aggr_cpu_id id;
640 struct perf_cpu cpu;
641 int idx;
642
643 /* collect per-core counts */
644 perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
645 struct perf_stat_aggr *aggr = &ps->aggr[idx];
646
647 id = aggr_cpu_id__core(cpu, NULL);
648 if (!aggr_cpu_id__equal(core_id, &id))
649 continue;
650
651 counts.val += aggr->counts.val;
652 counts.ena += aggr->counts.ena;
653 counts.run += aggr->counts.run;
654 }
655
656 /* update aggregated per-core counts for each CPU */
657 perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
658 struct perf_stat_aggr *aggr = &ps->aggr[idx];
659
660 id = aggr_cpu_id__core(cpu, NULL);
661 if (!aggr_cpu_id__equal(core_id, &id))
662 continue;
663
664 aggr->counts.val = counts.val;
665 aggr->counts.ena = counts.ena;
666 aggr->counts.run = counts.run;
667
668 aggr->used = true;
669 }
670}
671
672/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
673static void evsel__process_percore(struct evsel *evsel)
674{
675 struct perf_stat_evsel *ps = evsel->stats;
676 struct aggr_cpu_id core_id;
677 struct perf_cpu cpu;
678 int idx;
679
680 if (!evsel->percore)
681 return;
682
683 perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
684 struct perf_stat_aggr *aggr = &ps->aggr[idx];
685
686 if (aggr->used)
687 continue;
688
689 core_id = aggr_cpu_id__core(cpu, NULL);
690 evsel__update_percore_stats(evsel, &core_id);
691 }
692}
693
694/* process cpu stats on per-core events */
695void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
696{
697 struct evsel *evsel;
698
699 if (config->aggr_mode != AGGR_NONE)
700 return;
701
702 evlist__for_each_entry(evlist, evsel)
703 evsel__process_percore(evsel);
704}
705
706static void evsel__update_shadow_stats(struct evsel *evsel)
707{
708 struct perf_stat_evsel *ps = evsel->stats;
709 int i;
710
711 if (ps->aggr == NULL)
712 return;
713
714 for (i = 0; i < ps->nr_aggr; i++) {
715 struct perf_counts_values *aggr_counts = &ps->aggr[i].counts;
716
717 perf_stat__update_shadow_stats(evsel, aggr_counts->val, i, &rt_stat);
718 }
719}
720
721void perf_stat_process_shadow_stats(struct perf_stat_config *config __maybe_unused,
722 struct evlist *evlist)
723{
724 struct evsel *evsel;
725
726 evlist__for_each_entry(evlist, evsel)
727 evsel__update_shadow_stats(evsel);
728}
729
466int perf_event__process_stat_event(struct perf_session *session,
467 union perf_event *event)
468{
469 struct perf_counts_values count, *ptr;
470 struct perf_record_stat *st = &event->stat;
471 struct evsel *counter;
472 int cpu_map_idx;
473

--- 125 unchanged lines hidden ---
730int perf_event__process_stat_event(struct perf_session *session,
731 union perf_event *event)
732{
733 struct perf_counts_values count, *ptr;
734 struct perf_record_stat *st = &event->stat;
735 struct evsel *counter;
736 int cpu_map_idx;
737

--- 125 unchanged lines hidden ---