// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	/* CPU_CLK_UNHALTED.REF_TSC pseudo-encoding is event 0x00, umask 0x03 */
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

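/*
 * RDPMC index decoding: ECX bit 30 set selects the fixed-function
 * counters, clear selects the general-purpose counters; the low bits
 * give the counter index within the selected array.  Both helpers
 * below strip bits 31:30 before range-checking the index against the
 * per-type counter count exposed to the guest.
 */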
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

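/*
 * "Full-width writes": with PDCM and PERF_CAPABILITIES.PMU_CAP_FW_WRITES
 * set, the guest may write all counter bits through the MSR_IA32_PMC0
 * alias range instead of the legacy MSR_IA32_PERFCTR0 range, whose
 * writes are sign-extended from bit 31.  get_fw_gp_pmc() resolves the
 * alias only when the guest is allowed to use it.
 */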
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return false;

	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		ret = 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	case MSR_IA32_PERF_CAPABILITIES:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
			return 1;
		msr_info->data = vcpu->arch.perf_capabilities;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

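/*
 * Counter writes below fold the new value into pmc->counter as a delta
 * against the live perf event, so the running count is preserved;
 * legacy (non full-width) guest writes are first sign-extended from
 * 32 bits, while full-width writes must fit within the counter width
 * advertised to the guest.
 */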
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		if (!msr_info->host_initiated)
			return 1;
		if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) ?
			(data & ~vmx_get_perf_capabilities()) : data)
			return 1;
		vcpu->arch.perf_capabilities = data;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	vcpu->arch.perf_capabilities = 0;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);
	if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	/*
	 * X86_FEATURE_* values are bit numbers in the cpufeatures bitmap,
	 * not CPUID register masks, so query the guest's CPUID.7 EBX bits
	 * via cpuid_entry_has() rather than masking entry->ebx directly.
	 */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (cpuid_entry_has(entry, X86_FEATURE_HLE) ||
	     cpuid_entry_has(entry, X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

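/*
 * init() runs once per vCPU to stamp each PMC with its type, owning
 * vCPU and global index; the counter counts and widths visible to the
 * guest are sized later by refresh() from guest CPUID leaf 0xA, so a
 * vCPU without that leaf simply ends up with zero usable counters.
 */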
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};