xref: /linux/tools/perf/util/mem-events.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "cpumap.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "debug.h"
#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

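/*
 * Default mem events. The "name" field is a printf-style template:
 * it is expanded with the PMU name (and, for loads, the ldlat
 * threshold) by perf_pmu__mem_events_name() below.
 */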
struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
	E(NULL,			NULL,				NULL,		false,	0),
};
#undef E

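/*
 * Scratch buffers for the expanded event strings returned by
 * perf_pmu__mem_events_name(). They are overwritten on every call,
 * so callers must copy the result if it needs to persist.
 */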
static char mem_loads_name[100];
static char mem_stores_name[100];

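/* Return the i-th mem event slot of @pmu, or NULL if out of range or no PMU. */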
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

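/* Iterate over all PMUs, returning only those that declare mem_events. */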
static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * perf mem doesn't currently support per-PMU configuration.
	 * The same configuration is applied to all PMUs that support
	 * mem_events, so return the first such PMU.
	 *
	 * Note: The only case with multiple mem_events-capable PMUs is
	 * Intel hybrid, where the PMUs share the same mem_events, so
	 * configuring only the first PMU is sufficient there as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs, starting from the given pmu
 * @pmu: Start pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

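/*
 * Expand the event template for mem event @i on @pmu into the static
 * mem_loads_name/mem_stores_name buffer, e.g. "cpu/mem-loads,ldlat=30/P".
 * Returns NULL for unknown indexes or templates that cannot be expanded.
 */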
static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	e = &pmu->mem_events[i];
	if (!e || !e->name)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else
				return NULL;
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}

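/*
 * Check whether @leader is the auxiliary mem-loads event, i.e. its
 * attr.config matches the aux_event of the PMU's load event. Some Intel
 * PMUs require such an auxiliary event to lead the sampling load event.
 */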
bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}

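/*
 * Parse a comma-separated list of mem event tags (e.g.
 * "ldlat-loads,ldlat-stores") and mark every matching entry of
 * @pmu->mem_events for recording. Returns 0 if at least one tag
 * matched, -1 (with an error message) otherwise.
 */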
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r((char *)buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

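/*
 * An event is considered supported if its sysfs alias exists, e.g.
 * <sysfs>/devices/cpu/events/mem-loads. Events without an event_name
 * are assumed to be supported.
 */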
static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
				      struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}

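/*
 * Probe which mem events of @pmu are usable by checking sysfs.
 * Returns 0 if at least one event is supported, -ENOENT otherwise.
 */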
int perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will remain false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

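/*
 * Print each mem event tag of @pmu to stderr, with the expanded event
 * name when verbose and ": available" when the event is supported.
 */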
void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}

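/*
 * Append "-e <event>" pairs for every recordable mem event of every mem
 * PMU to @rec_argv, e.g. "-e cpu/mem-loads,ldlat=30/P -e cpu/mem-stores/P",
 * and warn if those events only cover a subset of the online CPUs.
 * @argv_nr is updated to the new number of arguments.
 */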
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;
	struct perf_cpu_map *cpu_map = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->record)
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
					perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;

			cpu_map = perf_cpu_map__merge(cpu_map, pmu->cpus);
		}
	}

	if (cpu_map) {
		if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
			char buf[200];

			cpu_map__snprint(cpu_map, buf, sizeof(buf));
			pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
		}
		perf_cpu_map__put(cpu_map);
	}

	*argv_nr = i;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

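/*
 * Decode the mem_dtlb bits of a sample's data_src into a human-readable
 * string such as "L1 or L2 hit". Returns the number of characters written.
 */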
int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be prepended to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * needs to be set together with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

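/* Decode the mem_op bits (load, store, prefetch, ...) of a sample's data_src. */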
static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info__const_data_src(mem_info)->mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

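/*
 * Decode the memory hierarchy level of a sample. Prefer the newer
 * mem_lvl_num encoding (with "Remote" and hop prefixes) and fall back
 * to the legacy mem_lvl bitmap.
 */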
int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = *mem_info__const_data_src(mem_info);

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

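/* Decode the snoop (mem_snoop) and extended snoop (mem_snoopx) bits. */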
int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

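/* Report whether the sampled access was part of a locked transaction. */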
int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

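/* Decode the mem_blk bits: whether the access was blocked on data or on an address conflict. */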
int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

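/*
 * Render all decoded data_src fields into a single string of the form
 * "|OP ...|LVL ...|SNP ...|TLB ...|LCK ...|BLK ...", as used by perf script.
 */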
int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

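/*
 * Account one sample's data_src in @stats for perf c2c: classify the
 * access (load/store, cache level, local/remote, HITM, peer, ...) and
 * bump the matching counters. Returns 0 on success, -1 when the sample
 * cannot be attributed (missing data address, unresolved map, or an
 * unparsable data_src).
 */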
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = mem_info__data_src(mi);
	u64 daddr  = mem_info__daddr(mi)->addr;
	u64 op     = data_src->mem_op;
	u64 lvl    = data_src->mem_lvl;
	u64 snoop  = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock   = data_src->mem_lock;
	u64 blk    = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * In case of Power, the remote field can also be used to denote
	 * cache accesses from another core of the same node. Hence, set
	 * mrem only when HOPS is zero along with a set remote field.
	 */
	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}

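/* Accumulate the counters of @add into @stats, field by field. */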
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries	+= add->nr_entries;

	stats->locks		+= add->locks;
	stats->store		+= add->store;
	stats->st_uncache	+= add->st_uncache;
	stats->st_noadrs	+= add->st_noadrs;
	stats->st_l1hit		+= add->st_l1hit;
	stats->st_l1miss	+= add->st_l1miss;
	stats->st_na		+= add->st_na;
	stats->load		+= add->load;
	stats->ld_excl		+= add->ld_excl;
	stats->ld_shared	+= add->ld_shared;
	stats->ld_uncache	+= add->ld_uncache;
	stats->ld_io		+= add->ld_io;
	stats->ld_miss		+= add->ld_miss;
	stats->ld_noadrs	+= add->ld_noadrs;
	stats->ld_fbhit		+= add->ld_fbhit;
	stats->ld_l1hit		+= add->ld_l1hit;
	stats->ld_l2hit		+= add->ld_l2hit;
	stats->ld_llchit	+= add->ld_llchit;
	stats->lcl_hitm		+= add->lcl_hitm;
	stats->rmt_hitm		+= add->rmt_hitm;
	stats->tot_hitm		+= add->tot_hitm;
	stats->lcl_peer		+= add->lcl_peer;
	stats->rmt_peer		+= add->rmt_peer;
	stats->tot_peer		+= add->tot_peer;
	stats->rmt_hit		+= add->rmt_hit;
	stats->lcl_dram		+= add->lcl_dram;
	stats->rmt_dram		+= add->rmt_dram;
	stats->blk_data		+= add->blk_data;
	stats->blk_addr		+= add->blk_addr;
	stats->nomap		+= add->nomap;
	stats->noparse		+= add->noparse;
}
763