/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

/* returns the general purpose PMC with the specified MSR. Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];

	return NULL;
}

/* returns the fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];

	return NULL;
}
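/*
 * Illustrative sketch (editor's note, not upstream code): a vendor
 * get_msr/set_msr implementation would typically resolve an MSR to a PMC
 * with the helpers above, passing a different base to distinguish the
 * counter MSR range from the event-select MSR range. Assuming Intel-style
 * architectural MSR layout, the lookup could look like:
 *
 *	struct kvm_pmc *pmc;
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);		// PERFCTRn
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	// EVNTSELn
 *	if (!pmc)
 *		pmc = get_fixed_pmc(pmu, msr);	// MSR_CORE_PERF_FIXED_CTRn
 */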
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */