/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023, Tencent, Inc.
 */
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H

#include <stdbool.h>
#include <stdint.h>

#include <linux/bits.h>

#define KVM_PMU_EVENT_FILTER_MAX_EVENTS	300

/*
 * Encode an eventsel+umask pair into event-select MSR format. Note, this is
 * technically AMD's format, as Intel's format only supports 8 bits for the
 * event selector, i.e. doesn't use bits 35:32 for the selector. But, OR-ing
 * in '0' is a nop and won't clobber the CMASK.
 */
#define RAW_EVENT(eventsel, umask) ((((eventsel) & 0xf00UL) << 24)	| \
				    ((eventsel) & 0xff)			| \
				    (((umask) & 0xff) << 8))
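
/*
 * E.g. RAW_EVENT(0x2e, 0x4f) encodes to 0x4f2e (eventsel in bits 7:0,
 * umask in bits 15:8), while a hypothetical 12-bit AMD selector 0x1c7
 * would put its upper nibble ('1') in bits 35:32.
 */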

/*
 * These are technically Intel's definitions, but except for CMASK (see above),
 * AMD's layout is compatible with Intel's.
 */
#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)

/* RDPMC control flags, Intel only. */
#define INTEL_RDPMC_METRICS	BIT_ULL(29)
#define INTEL_RDPMC_FIXED	BIT_ULL(30)
#define INTEL_RDPMC_FAST	BIT_ULL(31)
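
/*
 * Example usage (a sketch, assuming the rdpmc() helper from processor.h):
 * read fixed counter @idx by setting the "fixed counter" type flag in the
 * RDPMC index.
 *
 *	uint64_t val = rdpmc(INTEL_RDPMC_FIXED | idx);
 */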

/* Fixed PMC controls, Intel only. */
#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx)	BIT_ULL((32 + (_idx)))

#define FIXED_PMC_KERNEL	BIT_ULL(0)
#define FIXED_PMC_USER		BIT_ULL(1)
#define FIXED_PMC_ANYTHREAD	BIT_ULL(2)
#define FIXED_PMC_ENABLE_PMI	BIT_ULL(3)
#define FIXED_PMC_NR_BITS	4
#define FIXED_PMC_CTRL(_idx, _val)	((_val) << ((_idx) * FIXED_PMC_NR_BITS))
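
/*
 * Example usage (a sketch, assuming wrmsr() and the MSR_CORE_PERF_* defines
 * from msr-index.h): enable fixed counter 0 for both kernel and user mode,
 * then flip its enable bit in global control.
 *
 *	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL,
 *	      FIXED_PMC_CTRL(0, FIXED_PMC_KERNEL | FIXED_PMC_USER));
 *	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(0));
 */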

#define PMU_CAP_FW_WRITES	BIT_ULL(13)
#define PMU_CAP_LBR_FMT		0x3f
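
/*
 * Sketch: PMU_CAP_FW_WRITES in MSR_IA32_PERF_CAPABILITIES advertises the
 * full-width counter aliases (MSR_IA32_PMC0, etc.), so gate full-width
 * writes on the capability bit being set.
 *
 *	if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
 *		wrmsr(MSR_IA32_PMC0, value);
 */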

#define INTEL_ARCH_CPU_CYCLES			RAW_EVENT(0x3c, 0x00)
#define INTEL_ARCH_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define INTEL_ARCH_REFERENCE_CYCLES		RAW_EVENT(0x3c, 0x01)
#define INTEL_ARCH_LLC_REFERENCES		RAW_EVENT(0x2e, 0x4f)
#define INTEL_ARCH_LLC_MISSES			RAW_EVENT(0x2e, 0x41)
#define INTEL_ARCH_BRANCHES_RETIRED		RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED	RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS		RAW_EVENT(0xa4, 0x01)
#define INTEL_ARCH_TOPDOWN_BE_BOUND		RAW_EVENT(0xa4, 0x02)
#define INTEL_ARCH_TOPDOWN_BAD_SPEC		RAW_EVENT(0x73, 0x00)
#define INTEL_ARCH_TOPDOWN_FE_BOUND		RAW_EVENT(0x9c, 0x01)
#define INTEL_ARCH_TOPDOWN_RETIRING		RAW_EVENT(0xc2, 0x02)
#define INTEL_ARCH_LBR_INSERTS			RAW_EVENT(0xe4, 0x01)

#define AMD_ZEN_CORE_CYCLES			RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define AMD_ZEN_BRANCHES_RETIRED		RAW_EVENT(0xc2, 0x00)
#define AMD_ZEN_BRANCHES_MISPREDICTED		RAW_EVENT(0xc3, 0x00)

/*
 * Note! The order and thus the index of the architectural events matters as
 * support for each event is enumerated via CPUID using the index of the event.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
	INTEL_ARCH_LLC_REFERENCES_INDEX,
	INTEL_ARCH_LLC_MISSES_INDEX,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
	INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
	INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
	INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
	INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
	INTEL_ARCH_LBR_INSERTS_INDEX,
	NR_INTEL_ARCH_EVENTS,
};
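
/*
 * E.g. per Intel's SDM, CPUID.0xA.EBX bit INTEL_ARCH_LLC_MISSES_INDEX set
 * to '1' means LLC Misses is *not* available (EBX is a vector of "event
 * unavailable" flags, valid up to the length given in CPUID.0xA.EAX[31:24]).
 */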

enum amd_pmu_zen_events {
	AMD_ZEN_CORE_CYCLES_INDEX,
	AMD_ZEN_INSTRUCTIONS_INDEX,
	AMD_ZEN_BRANCHES_INDEX,
	AMD_ZEN_BRANCH_MISSES_INDEX,
	NR_AMD_ZEN_EVENTS,
};

extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
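
/*
 * The arrays are indexed by the enums above, e.g.
 * intel_pmu_arch_events[INTEL_ARCH_LLC_MISSES_INDEX] is expected to hold
 * the RAW_EVENT() encoding INTEL_ARCH_LLC_MISSES.
 */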

enum pmu_errata {
	INSTRUCTIONS_RETIRED_OVERCOUNT,
	BRANCHES_RETIRED_OVERCOUNT,
};
extern uint64_t pmu_errata_mask;

void kvm_init_pmu_errata(void);

static inline bool this_pmu_has_errata(enum pmu_errata errata)
{
	return pmu_errata_mask & BIT_ULL(errata);
}
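
/*
 * Example usage (a sketch): relax exact-count assertions when the PMU is
 * known to overcount an event, e.g. retired instructions.
 *
 *	if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
 *		GUEST_ASSERT(count >= expected);
 *	else
 *		GUEST_ASSERT(count == expected);
 */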

#endif /* SELFTEST_KVM_PMU_H */