/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 16
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)

#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
#define INTEL_FIXED_0_KERNEL (1ULL << 0)
#define INTEL_FIXED_0_USER (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR (1ULL << 2)

#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits) \
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

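/*
 * Illustrative sketch (not part of this header's API): the per-counter
 * control fields of MSR_ARCH_PERFMON_FIXED_CTR_CTRL (defined further down)
 * are placed with intel_fixed_bits_by_idx().  Enabling fixed counter 1 for
 * both kernel and user mode with a PMI on overflow could look like:
 *
 *	u64 val, ctrl, mask;
 *
 *	ctrl = intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
 *					   INTEL_FIXED_0_USER |
 *					   INTEL_FIXED_0_ENABLE_PMI);
 *	mask = intel_fixed_bits_by_idx(1, INTEL_FIXED_BITS_MASK);
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, val);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, (val & ~mask) | ctrl);
 */
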
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT 42
#define AMD64_L3_COREID_MASK \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK | \
	 ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS \
	(ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK | \
	 ARCH_PERFMON_EVENTSEL_ANY | \
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
	 HSW_IN_TX | \
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
	(X86_RAW_EVENT_MASK | \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
	(AMD64_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK)

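/*
 * Illustrative sketch (not part of this header's API): a general-purpose
 * event select value is composed from the fields above.  Counting the
 * architectural LLC-references event (event 0x2e, umask 0x4f, used here
 * purely as an example) in both user and kernel mode could look like:
 *
 *	u64 config = 0x2e | (0x4fULL << 8) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 *
 * Raw perf event configs are limited to the bits in X86_RAW_EVENT_MASK
 * (or AMD64_RAW_EVENT_MASK, which additionally allows event bits [11:8]
 * placed at bits [35:32]).
 */
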
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \
	(AMD64_EVENTSEL_EVENT | \
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB \
	(ARCH_PERFMON_EVENTSEL_UMASK | \
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB \
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB | \
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC \
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC | \
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7

#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
#define PEBS_DATACFG_CNTR BIT_ULL(4)
#define PEBS_DATACFG_CNTR_SHIFT 32
#define PEBS_DATACFG_CNTR_MASK GENMASK_ULL(15, 0)
#define PEBS_DATACFG_FIX_SHIFT 48
#define PEBS_DATACFG_FIX_MASK GENMASK_ULL(7, 0)
#define PEBS_DATACFG_METRICS BIT_ULL(5)

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
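
/*
 * Illustrative sketch (not part of this header's API): an adaptive PEBS
 * record layout is requested by OR-ing the PEBS_DATACFG_* group bits, with
 * the number of LBR entries (minus one, as programmed by the host driver)
 * placed at PEBS_DATACFG_LBR_SHIFT.  Requesting memory info, GPRs and 16
 * LBR entries could look like:
 *
 *	u64 pebs_data_cfg = PEBS_DATACFG_MEMINFO |
 *			    PEBS_DATACFG_GP |
 *			    PEBS_DATACFG_LBRS |
 *			    ((16ULL - 1) << PEBS_DATACFG_LBR_SHIFT);
 */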

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
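
/*
 * Illustrative sketch (not part of this header's API): the unions above
 * overlay the output of CPUID leaf 0xA.  Enumerating the general-purpose
 * counters with the kernel's cpuid() helper could look like:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	if (eax.split.version_id)
 *		pr_info("%d GP counters, %d bits wide\n",
 *			eax.split.num_counters, eax.split.bit_width);
 */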

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF 0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1

union cpuid35_eax {
	struct {
		unsigned int leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int events_subleaf:1;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int umask2:1;
		/* EQ-bit Supported */
		unsigned int eq:1;
		unsigned int reserved:30;
	} split;
	unsigned int full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int lbr_depth_mask:8;
		unsigned int reserved:22;
		/* Deep C-state Reset */
		unsigned int lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int lbr_lip:1;
	} split;
	unsigned int full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int lbr_call_stack:1;
	} split;
	unsigned int full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int lbr_br_type:1;
		unsigned int reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int lbr_counters:4;
	} split;
	unsigned int full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int num_umc_pmc:6;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
	int bit_width_fixed;
	unsigned int events_mask;
	int events_mask_len;
	unsigned int pebs_ept:1;
};
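
/*
 * Illustrative sketch (not part of this header's API): in-kernel users such
 * as KVM query the host PMU through perf_get_x86_pmu_capability(), declared
 * further down in this header:
 *
 *	struct x86_pmu_capability cap;
 *
 *	perf_get_x86_pmu_capability(&cap);
 *	if (cap.version >= 2)
 *		pr_info("%d GP / %d fixed counters\n",
 *			cap.num_counters_gp, cap.num_counters_fixed);
 */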

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
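
/*
 * Illustrative sketch (not part of this header's API): a config of 0x0300
 * (event 0x00, umask 0x03, i.e. CPU_CLK_Unhalted.Ref on fixed counter 2)
 * uses the fixed-counter pseudo-encoding, while a regular encoding such as
 * 0x412e does not:
 *
 *	use_fixed_pseudo_encoding(0x0300);	// true
 *	use_fixed_pseudo_encoding(0x412e);	// false, event-code is 0x2e
 */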

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS (INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT (INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT (INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND (INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
				INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS 0x8400 /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT 0x8500 /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT 0x8600 /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND 0x8700 /* Memory bound metric */

#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM 8

#define INTEL_TD_CFG_METRIC_CLEAR_BIT 0
#define INTEL_TD_CFG_METRIC_CLEAR BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \
	(~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)
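
/*
 * Illustrative sketch (not part of this header's API): the helpers above
 * distinguish the pseudo fixed-mode indices used for TopDown accounting:
 *
 *	is_metric_idx(INTEL_PMC_IDX_TD_RETIRING);	// true
 *	is_topdown_idx(INTEL_PMC_IDX_FIXED_SLOTS);	// true (SLOTS itself)
 *	is_metric_idx(INTEL_PMC_IDX_FIXED_SLOTS);	// false
 *
 * INTEL_PMC_OTHER_TOPDOWN_BITS(bit) yields the remaining TopDown bits, e.g.
 * every metric/SLOTS bit except the one passed in.
 */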

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT 62
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48

#define GLOBAL_CTRL_EN_PERF_METRICS 48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate the LBR stack frozen state
 * for architectural perfmon v4; we also unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT 0x1b00
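
/*
 * Illustrative note (not part of this header's API): bit 58 corresponds to
 * fixed counter 58 - INTEL_PMC_IDX_FIXED = 26, hence the pseudo umask of
 * 26 + 1 = 0x1b per the fixed-counter pseudo-encoding rule above:
 *
 *	u64 config = INTEL_FIXED_VLBR_EVENT;	// event=0x00, umask=0x1b
 *	use_fixed_pseudo_encoding(config);	// true
 */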

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 metrics;
	u32 reserved;
};

#define INTEL_CNTR_METRICS 0x3

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES 0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES 0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
#define IBS_CAPS_ZEN4 (1U<<11)
#define IBS_CAPS_OPLDLAT (1U<<12)
#define IBS_CAPS_OPDTLBPGSIZE (1U<<19)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
			 | IBS_CAPS_FETCHSAM \
			 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_LDLAT_EN (1ULL<<63)
#define IBS_OP_LDLAT_THRSH (0xFULL<<59)
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
#define IBS_RIP_INVALID (1ULL<<38)
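
/*
 * Illustrative sketch (not part of this header's API): an IBS op sampling
 * period is programmed into the MaxCnt field of the IBS op control MSR
 * (MSR_AMD64_IBSOPCTL, declared in <asm/msr-index.h>); the field stores the
 * period divided by 16.  Starting op sampling with a period of roughly one
 * million micro-ops could look like:
 *
 *	u64 op_ctl = ((1000000 >> 4) & IBS_OP_MAX_CNT) |
 *		     IBS_OP_CNT_CTL |	// count dispatched ops, not cycles
 *		     IBS_OP_ENABLE;
 *
 *	wrmsrl(MSR_AMD64_IBSOPCTL, op_ctl);
 */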

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs regs;
	u64 *xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->ip = (__ip); \
	(regs)->sp = (unsigned long)__builtin_frame_address(0); \
	(regs)->cs = __KERNEL_CS; \
	(regs)->flags = 0; \
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int nr;
	unsigned int from;
	unsigned int to;
	unsigned int info;
	bool has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */