/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC			32
#define INTEL_PMC_MAX_FIXED			16
#define INTEL_PMC_IDX_FIXED			32

#define X86_PMC_IDX_MAX				64

#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
#define MSR_ARCH_PERFMON_PERFCTR1		0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
#define MSR_ARCH_PERFMON_EVENTSEL1		0x187

#define ARCH_PERFMON_EVENTSEL_EVENT		0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK		0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR		(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS		(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE		(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT		(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY		(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV		(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK		0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR		(1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ		(1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2		(0xFFULL << 40)

#define INTEL_FIXED_BITS_MASK			0xFULL
#define INTEL_FIXED_BITS_STRIDE			4
#define INTEL_FIXED_0_KERNEL			(1ULL << 0)
#define INTEL_FIXED_0_USER			(1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD			(1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI		(1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR		(1ULL << 2)

#define HSW_IN_TX				(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED			(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE			(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE			(1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits)		\
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

#define AMD64_EVENTSEL_INT_CORE_ENABLE		(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY		(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT			48
#define AMD64_L3_SLICE_MASK			\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK			\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT			56
#define AMD64_L3_THREAD_MASK			\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK		\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES			BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES			BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT			42
#define AMD64_L3_COREID_MASK			\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK			\
	(ARCH_PERFMON_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK |		\
	 ARCH_PERFMON_EVENTSEL_EDGE  |		\
	 ARCH_PERFMON_EVENTSEL_INV   |		\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK |		\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK)
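/*
 * Illustrative sketch only, not part of the kernel's interface: how the
 * eventsel fields above compose into one raw config value. The helper
 * name and the ring/enable choices are made up for demonstration;
 * in-tree users build configs through the perf core instead.
 */
static inline u64 x86_pmu_sketch_raw_eventsel(u8 event, u8 umask)
{
	return ((u64)event & ARCH_PERFMON_EVENTSEL_EVENT) |	  /* event code */
	       (((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
	       ARCH_PERFMON_EVENTSEL_USR |	/* count in ring 3 */
	       ARCH_PERFMON_EVENTSEL_OS |	/* count in ring 0 */
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}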
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT |			\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK |		\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC		BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC	GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC |	\
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS			4
#define AMD64_NUM_COUNTERS_CORE			6
#define AMD64_NUM_COUNTERS_NB			4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	\
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
#define ARCH_PERFMON_EVENTS_COUNT		7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW	BIT_ULL(63)

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
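/*
 * Decoding sketch (hypothetical helper, for illustration only): given the
 * raw EAX/EDX register values of CPUID leaf 0xA, pull the counter counts
 * out through the unions above. Fixed counters are only architecturally
 * enumerated from version 2 on.
 */
static inline void x86_pmu_sketch_decode_leaf_0xa(u32 eax_val, u32 edx_val,
						  int *num_gp, int *num_fixed)
{
	union cpuid10_eax eax = { .full = eax_val };
	union cpuid10_edx edx = { .full = edx_val };

	*num_gp    = eax.split.num_counters;
	*num_fixed = eax.split.version_id > 1 ? edx.split.num_counters_fixed : 0;
}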
/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_EXT_UMASK2			0x1
#define ARCH_PERFMON_EXT_EQ			0x2
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT	0x1
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
		unsigned int	reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int	lbr_counters:4;
	} split;
	unsigned int		full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int	lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int	num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int	num_umc_pmc:6;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE	(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS	(1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3		0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS		(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS		(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
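/*
 * Companion sketch (hypothetical helper, for illustration only): invert
 * the pseudo-encoding described above, mapping event=0x00/umask=idx+1
 * back to the fixed counter index, e.g. 0x0300 (CPU_CLK_Unhalted.Ref) -> 2.
 */
static inline int x86_pmu_sketch_pseudo_to_fixed_idx(u64 code)
{
	if (!use_fixed_pseudo_encoding(code))
		return -1;

	return (int)((code & ARCH_PERFMON_EVENTSEL_UMASK) >> 8) - 1;
}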
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING	(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC	(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS	(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT	(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT	(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END	INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN		((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
					 INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS			0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING	0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC	0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND	0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND	0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS	0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT	0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT	0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND	0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX		INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM		8

#define INTEL_TD_CFG_METRIC_CLEAR_BIT	0
#define INTEL_TD_CFG_METRIC_CLEAR	BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
	(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)
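/*
 * Reading sketch (illustrative only, made-up helper name): PERF_METRICS
 * carries one 8-bit fraction per metric event, so given a raw snapshot of
 * that MSR the value behind a fake metric counter index can be extracted
 * as below; the Ice Lake driver code does the equivalent internally.
 */
static inline u64 x86_pmu_sketch_metric_byte(u64 metrics, int idx)
{
	if (!is_metric_idx(idx))
		return 0;

	return (metrics >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
}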
#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		48

/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encode the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES	0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
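/*
 * Feature-test sketch (illustrative only, made-up helper name): the caps
 * word returned by get_ibs_caps() below is tested bit-wise, e.g. to check
 * whether IBS op sampling can count dispatched ops instead of cycles.
 */
static inline bool x86_pmu_sketch_ibs_op_cnt_supported(u32 ibs_caps)
{
	return (ibs_caps & IBS_CAPS_AVAIL) && (ibs_caps & IBS_CAPS_OPCNT);
}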
/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs)		perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs)	perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{	\
	(regs)->ip = (__ip);				\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;			\
	(regs)->flags = 0;				\
}
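/*
 * Usage sketch (illustrative only, made-up helper name): a function can
 * snapshot its own caller state for perf with the macro above; the
 * generic perf_fetch_caller_regs() in <linux/perf_event.h> works the
 * same way. _THIS_IP_ is assumed to be visible via common headers.
 */
static inline void x86_pmu_sketch_fetch_self_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, _THIS_IP_);
}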
struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
	bool		has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */