/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
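
/*
 * Illustrative example (not from the original source): for idx == 1,
 * fixed_ctrl_field() extracts bits 7:4 of IA32_FIXED_CTR_CTRL, i.e. the
 * OS/user enable, AnyThread and PMI control bits for fixed counter 1:
 *
 *	u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, 1);
 */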

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned int event_type;
};

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
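
/*
 * Sketch (assumptions about the vendor side, which lives outside this
 * header): each vendor module provides a kvm_pmu_ops instance that ends
 * up being registered through kvm_pmu_ops_update(), roughly like:
 *
 *	struct kvm_pmu_ops intel_pmu_ops = {
 *		.is_valid_msr	= intel_is_valid_msr,
 *		.get_msr	= intel_pmu_get_msr,
 *		.set_msr	= intel_pmu_set_msr,
 *		.refresh	= intel_pmu_refresh,
 *	};
 *
 * The intel_* callback names are illustrative.
 */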

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
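
/*
 * Illustrative example (layout assumed from the architectural PMU): with
 * four GP and three fixed counters, global_ctrl_mask would have every bit
 * other than 3:0 (GP enables) and 34:32 (fixed enables) set, so a WRMSR
 * to IA32_PERF_GLOBAL_CTRL that touches a reserved bit fails the check:
 *
 *	if (!kvm_valid_perf_global_ctrl(pmu, data))
 *		return 1;
 */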

/*
 * Returns the general-purpose PMC matching the specified MSR.  The function
 * works for both PERFCTRn and EVNTSELn, which is why it takes @base so the
 * two MSR ranges can be told apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
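
/*
 * Usage sketch (assumed caller, not part of this header): vendor code can
 * probe both MSR ranges by passing the matching @base, e.g. on Intel:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
 */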

/* Returns the fixed PMC matching the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}
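
/*
 * Illustrative example: with nr_arch_fixed_counters == 3, MSRs
 * 0x309-0x30b (MSR_CORE_PERF_FIXED_CTR0..2) map to fixed_counters[0..2];
 * any other MSR returns NULL.
 */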

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
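
/*
 * Worked example (illustrative): for a 48-bit counter, pmc_bitmask() is
 * 0xffffffffffff.  A counter value of 0xfffffffffff0 yields a sample
 * period of 0x10 (the counter overflows after 16 more increments), while
 * a counter value of 0 maps to the full period of 2^48.
 */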

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

/*
 * The counter is considered in use if the guest has enabled it via the
 * relevant control bits, regardless of whether the backing perf event has
 * been (re)programmed yet; 0x3 covers the OS and user enable bits of the
 * fixed counter's control field.
 */
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(void)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * For Intel, only support a guest architectural PMU on a
		 * host that itself has an architectural PMU.
		 */
		if ((is_intel && !kvm_pmu_cap.version) ||
		    !kvm_pmu_cap.num_counters_gp)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
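
/*
 * Usage sketch (assumed caller): a vendor set_msr() handler typically
 * updates the PMC's software state and defers the perf event update to
 * the next KVM_REQ_PMU processing, e.g.:
 *
 *	if (data != pmc->eventsel) {
 *		pmc->eventsel = data;
 *		kvm_pmu_request_counter_reprogram(pmc);
 *	}
 */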

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned int pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */