/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
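
/*
 * Illustrative sketch, not part of the upstream header: how an emulated WRMSR
 * to IA32_PERF_GLOBAL_CTRL might use kvm_valid_perf_global_ctrl() above to
 * reject reserved bits before accepting the new value.  The helper name is
 * hypothetical; the real check lives in KVM's MSR emulation paths, which also
 * reprogram the counters whose enable bits changed (see reprogram_counters()
 * later in this header).
 */
static inline int pmu_sketch_wrmsr_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	/* Any bit set in global_ctrl_mask is reserved => signal #GP. */
	if (!kvm_valid_perf_global_ctrl(pmu, data))
		return 1;

	pmu->global_ctrl = data;
	return 0;
}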

/*
 * Returns the general purpose PMC associated with the specified MSR.  Note
 * that the helper can be used for both PERFCTRn and EVNTSELn; that is why it
 * accepts @base as a parameter, to tell the two ranges apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns the fixed PMC associated with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required number of GP counters isn't
		 * present, i.e. if there are a non-zero number of counters,
		 * but fewer than what is architecturally required.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
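
/*
 * Illustrative sketch, not part of the upstream header: resolving an MSR index
 * to a GP counter by probing both the counter and event-select ranges with
 * get_gp_pmc(), defined above.  This mirrors how a vendor msr_idx_to_pmc()
 * hook distinguishes PERFCTRn from EVNTSELn via the @base parameter.  The
 * helper name is hypothetical; MSR_K7_PERFCTR0/MSR_K7_EVNTSEL0 are the real
 * AMD defines from <asm/msr-index.h>, assumed visible here the same way
 * MSR_CORE_PERF_FIXED_CTR0 is for the rest of this header.
 */
static inline struct kvm_pmc *pmu_sketch_msr_to_gp_pmc(struct kvm_pmu *pmu,
							u32 msr)
{
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);		/* PERFCTRn */
	if (!pmc)
		pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);	/* EVNTSELn */

	return pmc;
}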

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */