/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the control bits for a fixed counter out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) \
	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
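
/*
 * Illustrative sketch only, not used by KVM: the helper below is hypothetical
 * and merely shows how fixed_ctrl_field() isolates one fixed counter's slice
 * of IA32_FIXED_CTR_CTRL, here to test whether fixed counter 1 is enabled for
 * ring 0 (INTEL_FIXED_0_KERNEL).
 */
static inline bool example_fixed1_counts_kernel(u64 fixed_ctr_ctrl)
{
	return fixed_ctrl_field(fixed_ctr_ctrl, 1) & INTEL_FIXED_0_KERNEL;
}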

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}
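
/*
 * Illustrative sketch, not KVM's actual MSR emulation: a hypothetical helper
 * showing that IA32_PERF_GLOBAL_CTRL state is only meaningful when the vPMU
 * is v2+ (per kvm_pmu_has_perf_global_ctrl() above); otherwise report 0.
 */
static inline u64 example_read_global_ctrl(struct kvm_pmu *pmu)
{
	return kvm_pmu_has_perf_global_ctrl(pmu) ? pmu->global_ctrl : 0;
}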

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
 * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
 * support fixed counters), this mirrors how fixed counters are mapped to
 * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
 * index 32 reduces the amount of boilerplate needed to iterate over PMCs
 * *and* simplifies common enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM; it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}
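
/*
 * Illustrative sketch, not part of KVM proper: a hypothetical helper that
 * counts a vCPU's fixed counters by walking the 64-bit index space described
 * above with kvm_for_each_pmc(), i.e. fixed counters live at indices >=
 * KVM_FIXED_PMC_BASE_IDX while GP counters occupy indices 31:0.  Assumes the
 * all_valid_pmc_idx bitmap in struct kvm_pmu.
 */
static inline int example_nr_fixed_pmcs(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc;
	int i, nr = 0;

	kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx)
		if (pmc->idx >= KVM_FIXED_PMC_BASE_IDX)
			nr++;

	return nr;
}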

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_rsvd & data);
}

/*
 * Returns the general purpose PMC mapped to the specified MSR.  Note that
 * this can be used for both PERFCTRn and EVNTSELn; the base parameter is
 * what tells them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
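
/*
 * Illustrative sketch only: a hypothetical wrapper showing that the same
 * lookup serves both the counter and event-select MSR ranges, distinguished
 * purely by the base passed in.  Assumes the architectural Intel bases
 * MSR_IA32_PERFCTR0 and MSR_P6_EVNTSEL0 from <asm/msr-index.h>.
 */
static inline bool example_is_gp_counter_or_eventsel(struct kvm_pmu *pmu,
						     u32 msr)
{
	return get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
	       get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
}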

/* Returns the fixed PMC mapped to the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
			(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);

void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc);

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	kvm_pmu_recalc_pmc_emulation(pmc_to_pmu(pmc), pmc);

	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}

/*
 * Check if a PMC is enabled by testing its corresponding bit in global_ctrl.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
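
/*
 * Illustrative sketch, not the exact check KVM performs when programming
 * perf events: a hypothetical helper showing that a counter only ticks when
 * it is enabled both locally (event selector or fixed ctrl) and globally
 * (PERF_GLOBAL_CTRL, when it exists).
 */
static inline bool example_pmc_would_count(struct kvm_pmc *pmc)
{
	return pmc_is_locally_enabled(pmc) && pmc_is_globally_enabled(pmc);
}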

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */