// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;

/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)

/* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
static u64 amd_pmu_global_cntr_mask __read_mostly;
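/*
 * The event select field of PERF_CTL is split across bits [7:0] and [35:32],
 * so the encoding above selects event 0xFFF. It is wired up below as
 * x86_pmu.perf_ctr_pair_en in amd_core_pmu_init() and is used when programming
 * the second counter of a Large Increment per Cycle counter pair.
 */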
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ]	= 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= 0,
		[ C(RESULT_MISS)   ]	= 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ]	= 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ]	= 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ]	= 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ]	= 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ]	= 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= 0,
		[ C(RESULT_MISS)   ]	= 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ]	= 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= 0,
		[ C(RESULT_MISS)   ]	= 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= 0,
		[ C(RESULT_MISS)   ]	= 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ]	= 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0x00c2, /* Retired Branch Instr.       */
		[ C(RESULT_MISS)   ]	= 0x00c3, /* Retired Mispredicted BI     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ]	= 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ]	= 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ]	= -1,
		[ C(RESULT_MISS)   ]	= -1,
	},
 },
};

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]	= 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]	= 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0x0080, /* Instruction cache fetches  */
		[C(RESULT_MISS)]	= 0x0081, /* Instruction cache misses   */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]	= 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]	= 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0x00c2, /* Retired Branch Instr.      */
		[C(RESULT_MISS)]	= 0x00c3, /* Retired Mispredicted BI    */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= 0,
		[C(RESULT_MISS)]	= 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= -1,
		[C(RESULT_MISS)]	= -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00a9,
};

static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00a9,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x100000120,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a)
		return amd_zen4_perfmon_event_map[hw_event];

	if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
		return amd_zen2_perfmon_event_map[hw_event];

	if (cpu_feature_enabled(X86_FEATURE_ZEN1))
		return amd_zen1_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
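/*
 * For example, with X86_FEATURE_PERFCTR_CORE the bases are
 * MSR_F15H_PERF_CTL (0xc0010200) and MSR_F15H_PERF_CTR (0xc0010201), see
 * amd_core_pmu_init() below, so index 2 yields offset 4, i.e. event select
 * MSR 0xc0010204 and counter MSR 0xc0010205. Without the extension, index 2
 * simply maps to MSR_K7_EVNTSEL0 + 2 and MSR_K7_PERFCTR0 + 2.
 */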
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
	if (!(x86_pmu.flags & PMU_FL_PAIR))
		return false;

	switch (amd_get_event_code(hwc)) {
	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
	default:	return false;
	}
}

DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
		event->hw.flags |= PERF_X86_EVENT_PAIR;

	if (has_branch_stack(event))
		return static_call(amd_pmu_branch_hw_config)(event);

	return 0;
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return forward_event_to_ibs(event);

	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		struct perf_event *tmp = event;

		if (try_cmpxchg(nb->owners + i, &tmp, NULL))
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

typedef void (amd_pmu_branch_reset_t)(void);
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);

static void amd_pmu_cpu_reset(int cpu)
{
	if (x86_pmu.lbr_nr)
		static_call(amd_pmu_branch_reset)();

	if (x86_pmu.version < 2)
		return;

	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/*
	 * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
	 * and PerfCntrGlobalStatus.PerfCntrOvfl
	 */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	cpuc->lbr_sel = kzalloc_node(sizeof(struct er_account), GFP_KERNEL,
				     cpu_to_node(cpu));
	if (!cpuc->lbr_sel)
		return -ENOMEM;

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (cpuc->amd_nb)
		return 0;

	kfree(cpuc->lbr_sel);
	cpuc->lbr_sel = NULL;

	return -ENOMEM;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
	amd_pmu_cpu_reset(cpu);

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = topology_amd_node_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	kfree(cpuhw->lbr_sel);
	cpuhw->lbr_sel = NULL;

	if (!x86_pmu.amd_nb_constraints)
		return;

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
{
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}

static inline u64 amd_pmu_get_global_status(void)
{
	u64 status;

	/* PerfCntrGlobalStatus is read-only */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

	return status;
}

static inline void amd_pmu_ack_global_status(u64 status)
{
	/*
	 * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
	 * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
	 * clears the same bit in PerfCntrGlobalStatus
	 */

	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}

static bool amd_pmu_test_overflow_topbit(int idx)
{
	u64 counter;

	rdmsrl(x86_pmu_event_addr(idx), counter);

	return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}

static bool amd_pmu_test_overflow_status(int idx)
{
	return amd_pmu_get_global_status() & BIT_ULL(idx);
}

DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
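/*
 * OVERFLOW_WAIT_COUNT below, together with the udelay(1) in the wait loop,
 * bounds the wait at roughly 50 microseconds per overflowed counter.
 */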
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		if (!static_call(amd_pmu_test_overflow)(idx))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}

static void amd_pmu_check_overflow(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_enable_event(struct perf_event *event)
{
	x86_pmu_enable_event(event);
}

static void amd_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	amd_brs_enable_all();

	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		/* only activate events which are marked as active */
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_enable_event(cpuc->events[idx]);
	}
}

static void amd_pmu_v2_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Testing cpu_hw_events.enabled should be skipped in this case unlike
	 * in x86_pmu_enable_event().
	 *
	 * Since cpu_hw_events.enabled is set only after returning from
	 * x86_pmu_start(), the PMCs must be programmed and kept ready.
	 * Counting starts only after x86_pmu_enable_all() is called.
	 */
	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

static __always_inline void amd_pmu_core_enable_all(void)
{
	amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
}

static void amd_pmu_v2_enable_all(int added)
{
	amd_pmu_lbr_enable_all();
	amd_pmu_core_enable_all();
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

static void amd_pmu_disable_all(void)
{
	amd_brs_disable_all();
	x86_pmu_disable_all();
	amd_pmu_check_overflow();
}

static __always_inline void amd_pmu_core_disable_all(void)
{
	amd_pmu_set_global_ctl(0);
}

static void amd_pmu_v2_disable_all(void)
{
	amd_pmu_core_disable_all();
	amd_pmu_lbr_disable_all();
	amd_pmu_check_overflow();
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);

static void amd_pmu_add_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_add)(event);
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);

static void amd_pmu_del_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_del)(event);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static inline int amd_pmu_adjust_nmi_window(int handled)
{
	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}

static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int handled;
	int pmu_enabled;

	/*
	 * Save the PMU state.
	 * It needs to be restored when leaving the handler.
	 */
	pmu_enabled = cpuc->enabled;
	cpuc->enabled = 0;

	amd_brs_disable_all();

	/* Drain BRS if in use (could be inactive) */
	if (cpuc->lbr_users)
		amd_brs_drain();

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	cpuc->enabled = pmu_enabled;
	if (pmu_enabled)
		amd_brs_enable_all();

	return amd_pmu_adjust_nmi_window(handled);
}

/*
 * AMD-specific callback invoked through perf_snapshot_branch_stack static
 * call, defined in include/linux/perf_event.h. See its definition for API
 * details. It's up to the caller to provide enough space in *entries* to fit
 * all LBR records, otherwise the returned result will be truncated to *cnt*
 * entries.
 */
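/*
 * A typical consumer is the BPF bpf_get_branch_snapshot() helper, which
 * reaches this callback through the perf_snapshot_branch_stack static call.
 */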
static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{
	struct cpu_hw_events *cpuc;
	unsigned long flags;

	/*
	 * The sequence of steps to freeze LBR should be completely inlined
	 * and contain no branches to minimize contamination of the LBR
	 * snapshot
	 */
	local_irq_save(flags);
	amd_pmu_core_disable_all();
	__amd_pmu_lbr_disable();

	cpuc = this_cpu_ptr(&cpu_hw_events);

	amd_pmu_lbr_read();
	cnt = min(cnt, x86_pmu.lbr_nr);
	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);

	amd_pmu_v2_enable_all(0);
	local_irq_restore(flags);

	return cnt;
}

static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static atomic64_t status_warned = ATOMIC64_INIT(0);
	u64 reserved, status, mask, new_bits, prev_bits;
	struct perf_sample_data data;
	struct hw_perf_event *hwc;
	struct perf_event *event;
	int handled = 0, idx;
	bool pmu_enabled;

	/*
	 * Save the PMU state as it needs to be restored when leaving the
	 * handler
	 */
	pmu_enabled = cpuc->enabled;
	cpuc->enabled = 0;

	/* Stop counting but do not disable LBR */
	amd_pmu_core_disable_all();

	status = amd_pmu_get_global_status();

	/* Check if any overflows are pending */
	if (!status)
		goto done;

	/* Read branch records */
	if (x86_pmu.lbr_nr) {
		amd_pmu_lbr_read();
		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
	}

	reserved = status & ~amd_pmu_global_cntr_mask;
	if (reserved)
		pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
			     reserved);

	/* Clear any reserved bits set by buggy microcode */
	status &= amd_pmu_global_cntr_mask;

	for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;
		x86_perf_event_update(event);
		mask = BIT_ULL(idx);

		if (!(status & mask))
			continue;

		/* Event overflow */
		handled++;
		status &= ~mask;
		perf_sample_data_init(&data, 0, hwc->last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (has_branch_stack(event))
			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * It should never be the case that some overflows are not handled as
	 * the corresponding PMCs are expected to be inactive according to the
	 * active_mask
	 */
	if (status > 0) {
		prev_bits = atomic64_fetch_or(status, &status_warned);
		// A new bit was set for the very first time.
		new_bits = status & ~prev_bits;
		WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits);
	}

	/* Clear overflow and freeze bits */
	amd_pmu_ack_global_status(~status);

	/*
	 * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
	 * PMI entry is not set by the local APIC when a PMC overflow occurs
	 */
	inc_irq_stat(apic_perf_irqs);

done:
	cpuc->enabled = pmu_enabled;

	/* Resume counting only if PMU is active */
	if (pmu_enabled)
		amd_pmu_core_enable_all();

	return amd_pmu_adjust_nmi_window(handled);
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
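/*
 * The "low ... high" values above are GCC case ranges: in the switch in
 * amd_get_event_constraints_f15h() below, e.g. AMD_EVENT_FP matches both the
 * FP type codes 0x000 and 0x010.
 */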
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static struct event_constraint pair_constraint;

static struct event_constraint *
amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (amd_is_pair_event_code(hwc))
		return &pair_constraint;

	return &unconstrained;
}

static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_counter_pair(hwc))
		--cpuc->n_pair;
}

/*
 * Because of the way BRS operates with inactive and active phases, and
 * the link to one counter, it is not possible to have two events using BRS
 * scheduled at the same time. There would be an issue with enforcing the
 * period of each one and, given that BRS saturates, it would not be possible
 * to guarantee correlated content for all events. Therefore, in situations
 * where multiple events want to use BRS, the kernel enforces mutual exclusion.
 * Exclusion is enforced by choosing only one counter for events using BRS.
 * The event scheduling logic will then automatically multiplex the
 * events and ensure that at most one event is actively using BRS.
 *
 * The BRS counter could be any counter, but there is no constraint on Fam19h,
 * therefore all counters are equal and thus we pick the first one: PMC0
 */
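/*
 * The constraints below encode this: a counter mask of 0x1 restricts any BRS
 * user to PMC0, so two BRS events can never be scheduled at the same time and
 * are instead rotated by the regular event multiplexing logic.
 */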
static struct event_constraint amd_fam19h_brs_cntr0_constraint =
	EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);

static struct event_constraint amd_fam19h_brs_pair_cntr0_constraint =
	__EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);

static struct event_constraint *
amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	bool has_brs = has_amd_brs(hwc);

	/*
	 * In case BRS is used with an event requiring a counter pair,
	 * the kernel allows it, but only on counters 0 and 1, to enforce
	 * multiplexing and to protect BRS in the case of multiple BRS users.
	 */
	if (amd_is_pair_event_code(hwc)) {
		return has_brs ? &amd_fam19h_brs_pair_cntr0_constraint
			       : &pair_constraint;
	}

	if (has_brs)
		return &amd_fam19h_brs_cntr0_constraint;

	return &unconstrained;
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
{
	/*
	 * Decrease period by the depth of the BRS feature to get the last N
	 * taken branches and approximate the desired period
	 */
	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
		*left -= x86_pmu.lbr_nr;
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.cntr_mask64		= GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
	.add			= amd_pmu_add_event,
	.del			= amd_pmu_del_event,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static ssize_t branches_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
}

static DEVICE_ATTR_RO(branches);

static struct attribute *amd_pmu_branches_attrs[] = {
	&dev_attr_branches.attr,
	NULL,
};

static umode_t
amd_branches_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.lbr_nr ? attr->mode : 0;
}

static struct attribute_group group_caps_amd_branches = {
	.name = "caps",
	.attrs = amd_pmu_branches_attrs,
	.is_visible = amd_branches_is_visible,
};

#ifdef CONFIG_PERF_EVENTS_AMD_BRS

EVENT_ATTR_STR(branch-brs, amd_branch_brs,
	       "event=" __stringify(AMD_FAM19H_BRS_EVENT)"\n");

static struct attribute *amd_brs_events_attrs[] = {
	EVENT_PTR(amd_branch_brs),
	NULL,
};

static umode_t
amd_brs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ?
	       attr->mode : 0;
}

static struct attribute_group group_events_amd_brs = {
	.name = "events",
	.attrs = amd_brs_events_attrs,
	.is_visible = amd_brs_is_visible,
};

#endif	/* CONFIG_PERF_EVENTS_AMD_BRS */

static const struct attribute_group *amd_attr_update[] = {
	&group_caps_amd_branches,
#ifdef CONFIG_PERF_EVENTS_AMD_BRS
	&group_events_amd_brs,
#endif
	NULL,
};

static int __init amd_core_pmu_init(void)
{
	union cpuid_0x80000022_ebx ebx;
	u64 even_ctr_mask = 0ULL;
	int i;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	/* Avoid calculating the value each time in the NMI handler */
	perf_nmi_window = msecs_to_jiffies(100);

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.cntr_mask64	= GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

	/* Check for Performance Monitoring v2 support */
	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
		ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);

		/* Update PMU version for later usage */
		x86_pmu.version = 2;

		/* Find the number of available Core PMCs */
		x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

		amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

		/* Update PMC handling functions */
		x86_pmu.enable_all = amd_pmu_v2_enable_all;
		x86_pmu.disable_all = amd_pmu_v2_disable_all;
		x86_pmu.enable = amd_pmu_v2_enable_event;
		x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
		static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
	}

	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	if (boot_cpu_data.x86 == 0x15) {
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	}
	if (boot_cpu_data.x86 >= 0x17) {
		pr_cont("Fam17h+ ");
		/*
		 * Family 17h and compatibles have constraints for Large
		 * Increment per Cycle events: they may only be assigned an
		 * even-numbered counter that has a consecutive adjacent
		 * odd-numbered counter following it.
		 */
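		/*
		 * For instance, with six core counters the loop below builds
		 * even_ctr_mask = 0x15 (counters 0, 2 and 4), and the
		 * resulting pair_constraint has a weight of 3.
		 */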
		for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
			even_ctr_mask |= BIT_ULL(i);

		pair_constraint = (struct event_constraint)
				  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
						     x86_pmu_max_num_counters(NULL) / 2, 0,
						     PERF_X86_EVENT_PAIR);

		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
		x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
		x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
		x86_pmu.flags |= PMU_FL_PAIR;
	}

	/* LBR and BRS are mutually exclusive features */
	if (!amd_pmu_lbr_init()) {
		/* LBR requires flushing on context switch */
		x86_pmu.sched_task = amd_pmu_lbr_sched_task;
		static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
		static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
		static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
		static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);

		/* Only support branch_stack snapshot on perfmon v2 */
		if (x86_pmu.handle_irq == amd_pmu_v2_handle_irq)
			static_call_update(perf_snapshot_branch_stack,
					   amd_pmu_v2_snapshot_branch_stack);
	} else if (!amd_brs_init()) {
		/*
		 * BRS requires special event constraints and flushing on ctxsw.
		 */
		x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
		x86_pmu.sched_task = amd_pmu_brs_sched_task;
		x86_pmu.limit_period = amd_pmu_limit_period;

		static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
		static_call_update(amd_pmu_branch_reset, amd_brs_reset);
		static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
		static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);

		/*
		 * put_event_constraints callback same as Fam17h, set above
		 */

		/* branch sampling must be stopped when entering low power */
		amd_brs_lopwr_init();
	}

	x86_pmu.attr_update = amd_attr_update;

	pr_cont("core perfctr, ");
	return 0;
}

__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}

static inline void amd_pmu_reload_virt(void)
{
	if (x86_pmu.version >= 2) {
		/*
		 * Clear global enable bits, reprogram the PERF_CTL
		 * registers with updated perf_ctr_virt_mask and then
		 * set global enable bits once again
		 */
		amd_pmu_v2_disable_all();
		amd_pmu_enable_all(0);
		amd_pmu_v2_enable_all(0);
		return;
	}

	amd_pmu_disable_all();
	amd_pmu_enable_all(0);
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);