// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "cpumap.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "debug.h"
#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "mem-loads", true, 0),
	E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0),
	E(NULL, NULL, NULL, false, 0),
};
#undef E

bool perf_mem_record[PERF_MEM_EVENTS__MAX] = { 0 };

static char mem_loads_name[100];
static char mem_stores_name[100];

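/**
 * perf_pmu__mem_events_ptr - Return the ith mem event entry of a PMU
 * @pmu: The PMU to look up. May be NULL.
 * @i: Index into the PMU's mem_events table (PERF_MEM_EVENTS__*).
 *
 * Returns NULL if @pmu is NULL or @i is out of range.
 */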
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

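/* Iterate the PMU list, returning only PMUs that declare mem_events. */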
static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * perf mem doesn't currently support per-PMU configuration.
	 * The exact same configuration is applied to every PMU that
	 * supports mem_events, so return the first such PMU.
	 *
	 * Note: the only case with multiple mem_events-capable PMUs is
	 * Intel hybrid, where the PMUs share the same mem_events.
	 * Configuring only the first PMU is sufficient there as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs, starting from the given pmu
 * @pmu: Starting pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

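/*
 * Format the event string for the ith mem event of @pmu, expanding the
 * PMU name and, for load events, the requested load latency threshold.
 * Returns a pointer to a static buffer, or NULL if the event is invalid.
 */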
static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	e = &pmu->mem_events[i];
	if (!e || !e->name)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else
				return NULL;
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}

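/*
 * Check whether @leader is the PMU's auxiliary load event (e.g. Intel's
 * mem-loads-aux), i.e. its config matches the aux_event of the load entry.
 */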
bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}

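/**
 * perf_pmu__mem_events_parse - Parse a comma-separated list of mem event tags
 * @pmu: The PMU whose mem_events table is matched against.
 * @str: User string, e.g. "ldlat-loads" or "ldlat-loads,ldlat-stores".
 *
 * Mark every matching entry in perf_mem_record[] for recording.
 * Returns 0 if at least one tag matched, -ENOMEM on allocation failure,
 * -1 if nothing matched.
 */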
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				perf_mem_record[j] = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

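/*
 * Probe support by checking that <sysfs>/devices/<pmu>/events/<event_name>
 * exists. Events without an event_name are assumed supported.
 */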
static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
					   struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}

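/*
 * Probe each mem event of @pmu against sysfs and mark the supported ones.
 * Returns 0 if at least one event is supported, -ENOENT otherwise.
 */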
static int __perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will remain false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

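/*
 * Initialize the mem events of every PMU that declares a mem_events table.
 * Returns -ENOENT if any such PMU has no supported mem event.
 */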
int perf_pmu__mem_events_init(void)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		if (__perf_pmu__mem_events_init(pmu))
			return -ENOENT;
	}

	return 0;
}

void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}

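/**
 * perf_mem_events__record_args - Append "-e <event>" pairs for the selected mem events
 * @rec_argv: Argument vector to append to.
 * @argv_nr: In/out count of entries already in @rec_argv.
 *
 * Walk every mem-events PMU, build the event strings for the events marked
 * in perf_mem_record[], and warn if the combined CPU map does not cover all
 * online CPUs. Returns 0 on success, -1 on error.
 */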
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;
	struct perf_cpu_map *cpu_map = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!perf_mem_record[j])
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;

			cpu_map = perf_cpu_map__merge(cpu_map, pmu->cpus);
		}
	}

	if (cpu_map) {
		if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
			char buf[200];

			cpu_map__snprint(cpu_map, buf, sizeof(buf));
			pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
		}
		perf_cpu_map__put(cpu_map);
	}

	*argv_nr = i;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

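/*
 * Decode the data-source dTLB bits into a human-readable string such as
 * "L1 or L2 hit". Returns the number of characters written.
 */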
int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' is prepended to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * must be set together with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info__const_data_src(mem_info)->mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

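/*
 * Decode the memory hierarchy level of the access, preferring the newer
 * mem_lvl_num/mem_hops encoding and falling back to the legacy mem_lvl
 * bitmask, producing strings such as "L2 hit" or "Remote RAM miss".
 */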
int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = *mem_info__const_data_src(mem_info);

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

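/*
 * Decode one sample's data source into the c2c statistics, classifying
 * loads and stores by hierarchy level, HITM, peer and remote status.
 * Returns 0 on success, -1 if the sample cannot be attributed.
 */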
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = mem_info__data_src(mi);
	u64 daddr = mem_info__daddr(mi)->addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report an unknown remote level via this
	 * bit; consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also denote cache accesses
	 * from another core of the same node. Hence, set mrem only
	 * when HOPS is zero and the remote field is set.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1))  stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3)) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1))  stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}

void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks += add->locks;
	stats->store += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs += add->st_noadrs;
	stats->st_l1hit += add->st_l1hit;
	stats->st_l1miss += add->st_l1miss;
	stats->st_na += add->st_na;
	stats->load += add->load;
	stats->ld_excl += add->ld_excl;
	stats->ld_shared += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io += add->ld_io;
	stats->ld_miss += add->ld_miss;
	stats->ld_noadrs += add->ld_noadrs;
	stats->ld_fbhit += add->ld_fbhit;
	stats->ld_l1hit += add->ld_l1hit;
	stats->ld_l2hit += add->ld_l2hit;
	stats->ld_llchit += add->ld_llchit;
	stats->lcl_hitm += add->lcl_hitm;
	stats->rmt_hitm += add->rmt_hitm;
	stats->tot_hitm += add->tot_hitm;
	stats->lcl_peer += add->lcl_peer;
	stats->rmt_peer += add->rmt_peer;
	stats->tot_peer += add->tot_peer;
	stats->rmt_hit += add->rmt_hit;
	stats->lcl_dram += add->lcl_dram;
	stats->rmt_dram += add->rmt_dram;
	stats->blk_data += add->blk_data;
	stats->blk_addr += add->blk_addr;
	stats->nomap += add->nomap;
	stats->noparse += add->noparse;
}