/* tools/perf/util/mem-events.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508) */
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "debug.h"
#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
	E(NULL,			NULL,				NULL,		false,	0),
};
#undef E
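
/*
 * Example: with the default perf_mem_events__loads_ldlat of 30 and a PMU
 * named "cpu", the name templates above expand to
 * "cpu/mem-loads,ldlat=30/P" for loads and "cpu/mem-stores/P" for stores
 * (see perf_pmu__mem_events_name() below). Architectures may override
 * this table with their own templates.
 */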

static char mem_loads_name[100];
static char mem_stores_name[100];

struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * The current perf mem doesn't support per-PMU configuration.
	 * The exact same configuration is applied to all the
	 * PMUs that support mem_events.
	 * Return the first such PMU.
	 *
	 * Note: the only case with multiple mem_events-capable PMUs
	 * is Intel hybrid, where the exact same mem_events are shared
	 * among the PMUs, so configuring only the first PMU is good
	 * enough there as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs, starting from the given pmu
 * @pmu: Start pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu || !pmu->mem_events)
		return NULL;

	e = &pmu->mem_events[i];

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else
				return NULL;
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}
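
/*
 * Example: the aux_event branch above consumes the PMU name twice plus the
 * latency, yielding an event group such as
 * "{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}:P" (the exact template is
 * defined by the arch code, not in this file).
 */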

bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}
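
/*
 * Note: where an auxiliary load event is required, the sampling load
 * event is presumably opened in a group led by the auxiliary event;
 * this helper recognizes such a leader by its attr.config.
 */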

int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = strdup(str);
	if (!buf)
		return -ENOMEM;

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}
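
/*
 * Example: parsing "ldlat-loads,ldlat-stores" marks both events for
 * recording. Since strstr() checks that the token occurs within the tag,
 * a shorter unique token such as "loads" matches as well.
 */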

static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
					   struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}
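
/*
 * Example: with sysfs mounted at /sys and a PMU named "cpu", the load
 * event is considered supported when "/sys/devices/cpu/events/mem-loads"
 * exists.
 */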

int perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will stay false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}
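
/*
 * Example output (non-verbose), assuming both events are supported:
 *
 *   ldlat-loads  : available
 *   ldlat-stores : available
 *
 * With verbose > 0 the expanded event name is printed in a second column.
 */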

int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->record)
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
					perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;
		}
	}

	*argv_nr = i;
	return 0;
}
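
/*
 * Example: if both events were requested on a PMU named "cpu", this
 * appends "-e cpu/mem-loads,ldlat=30/P -e cpu/mem-stores/P" (with the
 * default ldlat) to the record command line.
 */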

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}
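
/*
 * Example: a sample with PERF_MEM_TLB_L1, PERF_MEM_TLB_L2 and
 * PERF_MEM_TLB_HIT set formats as "L1 or L2 hit".
 */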

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, "Remote" is prepended to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * needs to be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info__const_data_src(mem_info)->mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = *mem_info__const_data_src(mem_info);

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, "%s", mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}
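
/*
 * Example: a load with mem_remote set, mem_hops = 2 and mem_lvl_num = 3
 * (which has no mem_lvlnum[] string) plus PERF_MEM_LVL_HIT formats as
 * "Remote node, same socket L3 hit".
 */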

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}
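
/*
 * Example: PERF_MEM_SNOOP_HITM alone formats as "HitM"; combined with
 * PERF_MEM_SNOOPX_PEER it formats as "HitM or Peer".
 */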

int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}
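
/*
 * Example: an L1-hit load with no snoop, TLB, lock or block information
 * prints roughly "|OP LOAD|LVL L1 hit|SNP N/A|TLB N/A|LCK N/A|BLK  N/A"
 * (the double space before the last "N/A" comes from the leading space
 * that perf_mem__blk_scnprintf() emits).
 */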

int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = mem_info__data_src(mi);
	u64 daddr  = mem_info__daddr(mi)->addr;
	u64 op     = data_src->mem_op;
	u64 lvl    = data_src->mem_lvl;
	u64 snoop  = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock   = data_src->mem_lock;
	u64 blk    = data_src->mem_blk;
	/*
	 * Skylake might report an unknown remote level via this
	 * bit; consider it when evaluating remote HITMs.
	 *
	 * On POWER, the remote field can also denote cache accesses
	 * from another core of the same node. Hence, set mrem only
	 * when mem_hops is zero and the remote field is set.
	 */
	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1)) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3)) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1)) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}
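
/*
 * Example: a load with PERF_MEM_LVL_REM_CCE1 set and a HITM snoop bumps
 * load, rmt_hitm and tot_hitm; the same access with SNOOPX_PEER instead
 * bumps rmt_hit, rmt_peer and tot_peer.
 */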

void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries	+= add->nr_entries;

	stats->locks		+= add->locks;
	stats->store		+= add->store;
	stats->st_uncache	+= add->st_uncache;
	stats->st_noadrs	+= add->st_noadrs;
	stats->st_l1hit		+= add->st_l1hit;
	stats->st_l1miss	+= add->st_l1miss;
	stats->st_na		+= add->st_na;
	stats->load		+= add->load;
	stats->ld_excl		+= add->ld_excl;
	stats->ld_shared	+= add->ld_shared;
	stats->ld_uncache	+= add->ld_uncache;
	stats->ld_io		+= add->ld_io;
	stats->ld_miss		+= add->ld_miss;
	stats->ld_noadrs	+= add->ld_noadrs;
	stats->ld_fbhit		+= add->ld_fbhit;
	stats->ld_l1hit		+= add->ld_l1hit;
	stats->ld_l2hit		+= add->ld_l2hit;
	stats->ld_llchit	+= add->ld_llchit;
	stats->lcl_hitm		+= add->lcl_hitm;
	stats->rmt_hitm		+= add->rmt_hitm;
	stats->tot_hitm		+= add->tot_hitm;
	stats->lcl_peer		+= add->lcl_peer;
	stats->rmt_peer		+= add->rmt_peer;
	stats->tot_peer		+= add->tot_peer;
	stats->rmt_hit		+= add->rmt_hit;
	stats->lcl_dram		+= add->lcl_dram;
	stats->rmt_dram		+= add->rmt_dram;
	stats->blk_data		+= add->blk_data;
	stats->blk_addr		+= add->blk_addr;
	stats->nomap		+= add->nomap;
	stats->noparse		+= add->noparse;
}