// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmu-hybrid.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E

static char mem_loads_name[100];
static bool mem_loads_name__init;

struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
	if (i >= PERF_MEM_EVENTS__MAX)
		return NULL;

	return &perf_mem_events[i];
}

char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
	struct perf_mem_event *e = perf_mem_events__ptr(i);

	if (!e)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  e->name, perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return (char *)e->name;
}

__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
	return false;
}

int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_mem_events__ptr(j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
{
	char path[PATH_MAX];
	struct stat st;

	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
	return !stat(path, &st);
}

int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		struct perf_pmu *pmu;
		char sysfs_name[100];

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will remain false.
		 */
		if (!e->tag)
			continue;

		if (!perf_pmu__has_hybrid()) {
			scnprintf(sysfs_name, sizeof(sysfs_name),
				  e->sysfs_name, "cpu");
			e->supported = perf_mem_event__supported(mnt, sysfs_name);
		} else {
			perf_pmu__for_each_hybrid_pmu(pmu) {
				scnprintf(sysfs_name, sizeof(sysfs_name),
					  e->sysfs_name, pmu->name);
				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
			}
		}

		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}
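
/*
 * Sketch of the expected flow (illustrative; the real call sites are in
 * the perf-mem and perf-c2c builtins):
 *
 *	perf_mem_events__parse("ldlat-loads");		// mark events to record
 *	if (perf_mem_events__init())			// probe sysfs support
 *		return -1;
 *	perf_mem_events__record_args(rec_argv, &argv_nr, rec_tmp, &tmp_nr);
 */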
void perf_mem_events__list(void)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
			e->supported ? ": available\n" : "");
	}
}

static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
						    int idx)
{
	const char *mnt = sysfs__mount();
	char sysfs_name[100];
	struct perf_pmu *pmu;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
			  pmu->name);
		if (!perf_mem_event__supported(mnt, sysfs_name)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}

int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;
	struct perf_pmu *pmu;
	char *s;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (!perf_pmu__has_hybrid()) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			if (!e->supported) {
				perf_mem_events__print_unsupport_hybrid(e, j);
				return -1;
			}

			perf_pmu__for_each_hybrid_pmu(pmu) {
				rec_argv[i++] = "-e";
				s = perf_mem_events__name(j, pmu->name);
				if (s) {
					s = strdup(s);
					if (!s)
						return -1;

					rec_argv[i++] = s;
					rec_tmp[k++] = s;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};
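
/*
 * Note: mem_lvl above is indexed by bit position of data_src.mem_lvl
 * (the printing loop walks the bits one at a time), while mem_lvlnum
 * and mem_hops below are indexed directly by the field value.
 */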
static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be added to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * needs to be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;
	int printed = 0;

	if (mem_info)
		m = mem_info->data_src.mem_lvl;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	if (mem_info && mem_info->data_src.mem_remote) {
		strcat(out, "Remote ");
		l += 7;
	}

	/*
	 * In case the mem_hops field is set, we can skip printing the data
	 * source via the PERF_MEM_LVL namespace.
	 */
	if (mem_info && mem_info->data_src.mem_hops) {
		l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]);
	} else {
		for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
			if (!(m & 0x1))
				continue;
			if (printed++) {
				strcat(out, " or ");
				l += 4;
			}
			l += scnprintf(out + l, sz - l, "%s", mem_lvl[i]);
		}
	}

	if (mem_info && mem_info->data_src.mem_lvl_num) {
		int lvl = mem_info->data_src.mem_lvl_num;

		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, "%s", mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);
	}

	if (l == 0)
		l += scnprintf(out + l, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info->data_src.mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}
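
/*
 * Example (worked by hand): a load serviced by a modified line in another
 * core's cache sets PERF_MEM_SNOOP_HITM, which perf_mem__snp_scnprintf()
 * renders as "HitM"; if mem_snoopx additionally carries
 * PERF_MEM_SNOOPX_FWD, the combined output is "HitM or Fwd".
 */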
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}
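
/*
 * Usage sketch (assumption: 'mi' holds a decoded PERF_SAMPLE_DATA_SRC;
 * the buffer size is illustrative):
 *
 *	char buf[192];
 *
 *	perf_script__meminfo_scnprintf(buf, sizeof(buf), mi);
 *	// e.g. "|OP LOAD|LVL L3 hit|SNP HitM|TLB L1 or L2 hit|LCK No|BLK  N/A"
 */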
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr  = mi->daddr.addr;
	u64 op     = data_src->mem_op;
	u64 lvl    = data_src->mem_lvl;
	u64 snoop  = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock   = data_src->mem_lock;
	u64 blk    = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also denote cache accesses from
	 * another core on the same node.  Hence, set mrem only when the
	 * remote field is set and HOPS is zero.
	 */
	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}
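
/*
 * Minimal sketch of driving the decoder (assumption: 'mi' was resolved
 * from a sample's data_src and addresses; error handling elided):
 *
 *	struct c2c_stats stats = { 0 };
 *
 *	if (c2c_decode_stats(&stats, mi) < 0)
 *		pr_debug("sample not decodable\n");
 *	else if (stats.tot_hitm)
 *		pr_debug("load hit a modified line in another cache\n");
 */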
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks      += add->locks;
	stats->store      += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs  += add->st_noadrs;
	stats->st_l1hit   += add->st_l1hit;
	stats->st_l1miss  += add->st_l1miss;
	stats->st_na      += add->st_na;
	stats->load       += add->load;
	stats->ld_excl    += add->ld_excl;
	stats->ld_shared  += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io      += add->ld_io;
	stats->ld_miss    += add->ld_miss;
	stats->ld_noadrs  += add->ld_noadrs;
	stats->ld_fbhit   += add->ld_fbhit;
	stats->ld_l1hit   += add->ld_l1hit;
	stats->ld_l2hit   += add->ld_l2hit;
	stats->ld_llchit  += add->ld_llchit;
	stats->lcl_hitm   += add->lcl_hitm;
	stats->rmt_hitm   += add->rmt_hitm;
	stats->tot_hitm   += add->tot_hitm;
	stats->lcl_peer   += add->lcl_peer;
	stats->rmt_peer   += add->rmt_peer;
	stats->tot_peer   += add->tot_peer;
	stats->rmt_hit    += add->rmt_hit;
	stats->lcl_dram   += add->lcl_dram;
	stats->rmt_dram   += add->rmt_dram;
	stats->blk_data   += add->blk_data;
	stats->blk_addr   += add->blk_addr;
	stats->nomap      += add->nomap;
	stats->noparse    += add->noparse;
}
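
/*
 * Usage sketch: perf c2c folds per-sample counts into per-cacheline and
 * global sums (the destination struct names here are illustrative):
 *
 *	c2c_add_stats(&cacheline_stats, &sample_stats);
 *	c2c_add_stats(&global_stats, &sample_stats);
 */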