/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the control bits for a fixed counter out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) \
	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	bool (*is_mediated_pmu_supported)(struct x86_pmu_capability *host_pmu);
	void (*mediated_load)(struct kvm_vcpu *vcpu);
	void (*mediated_put)(struct kvm_vcpu *vcpu);
	void (*write_global_ctrl)(u64 global_ctrl);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;

	const u32 PERF_GLOBAL_CTRL;
	const u32 GP_EVENTSEL_BASE;
	const u32 GP_COUNTER_BASE;
	const u32 FIXED_COUNTER_BASE;
	const u32 MSR_STRIDE;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

void kvm_handle_guest_mediated_pmi(void);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

static inline bool kvm_vcpu_has_mediated_pmu(struct kvm_vcpu *vcpu)
{
	return enable_mediated_pmu && vcpu_to_pmu(vcpu)->version;
}
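/*
 * Illustrative sketch, NOT part of KVM: a hypothetical MSR read handler gated
 * on kvm_pmu_has_perf_global_ctrl(), showing that global control state is
 * only surfaced for guests with a v2+ PMU.  The function name is made up for
 * this example.
 */
static inline int example_read_global_ctrl(struct kvm_vcpu *vcpu, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* PERF_GLOBAL_CTRL doesn't exist on v1 PMUs; reject the access. */
	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return 1;

	*data = pmu->global_ctrl;
	return 0;
}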
/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
 * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
 * support fixed counters), this mirrors how fixed counters are mapped to
 * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
 * index 32 reduces the amount of boilerplate needed to iterate over PMCs
 * *and* simplifies common enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	if (kvm_vcpu_has_mediated_pmu(pmc->vcpu))
		return pmc->counter & pmc_bitmask(pmc);

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	return !(pmu->global_ctrl_rsvd & data);
}
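/*
 * Illustrative sketch, NOT part of KVM: iterate over the PMCs flagged in a
 * 64-bit bitmap using the internal index convention described above, i.e.
 * GP counter N lives at bit N and fixed counter N at bit 32 + N.  The helper
 * name is made up for this example.
 */
static inline int example_count_flagged_gp_pmcs(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc;
	int i, nr = 0;

	/* kvm_for_each_pmc() skips bits that don't map to a valid PMC. */
	kvm_for_each_pmc(pmu, pmc, i, pmu->reprogram_pmi)
		if (pmc_is_gp(pmc))
			nr++;

	return nr;
}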
/* returns the general purpose PMC for the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn, which is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns the fixed PMC for the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
		       (INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

void kvm_init_pmu_capability(struct kvm_pmu_ops *pmu_ops);

void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc);

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	kvm_pmu_recalc_pmc_emulation(pmc_to_pmu(pmc), pmc);

	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
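/*
 * Illustrative sketch, NOT part of KVM: a typical reprogram_counters() caller
 * XORs the old and new global enable values so that only counters whose
 * enable bit actually flipped are marked for reprogramming, e.g. on an
 * emulated write to PERF_GLOBAL_CTRL.  The function name is made up for this
 * example.
 */
static inline void example_emulate_global_ctrl_write(struct kvm_pmu *pmu,
						     u64 data)
{
	u64 diff = pmu->global_ctrl ^ data;	/* bits that changed state */

	pmu->global_ctrl = data;
	reprogram_counters(pmu, diff);
}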
/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline bool kvm_pmu_is_fastpath_emulation_allowed(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	return !kvm_vcpu_has_mediated_pmu(vcpu) ||
	       !bitmap_intersects(pmu->pmc_counting_instructions,
				  (unsigned long *)&pmu->global_ctrl,
				  X86_PMC_IDX_MAX);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
void kvm_mediated_pmu_load(struct kvm_vcpu *vcpu);
void kvm_mediated_pmu_put(struct kvm_vcpu *vcpu);

bool is_vmware_backdoor_pmc(u32 pmc_idx);
bool kvm_need_perf_global_ctrl_intercept(struct kvm_vcpu *vcpu);
bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
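/*
 * Usage note (illustrative, not from the source): a vPMC only counts when it
 * is enabled both locally (eventsel/fixed_ctr_ctrl) and globally
 * (global_ctrl), so a hypothetical caller that needs a "live" check would
 * combine the two predicates:
 *
 *	if (pmc_is_locally_enabled(pmc) && pmc_is_globally_enabled(pmc))
 *		... the counter is actively counting ...
 */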