Lines matching "pmu" in arch/x86/kvm/vmx/pmu_intel.c (KVM's Intel vPMU emulation). The snippets below are grouped by function; "..." marks code elided from the listing.

From the file header and includes:

    /* KVM PMU support for Intel CPUs */
    ...
    #include "pmu.h"

reprogram_fixed_counters() applies a newly written MSR_CORE_PERF_FIXED_CTR_CTRL value:

    static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
    {
        u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
        ...
        pmu->fixed_ctr_ctrl = data;
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
            ...
            pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
            ...
            __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
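
MSR_CORE_PERF_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter (bit 0 counts ring 0, bit 1 counts ring 3, bit 3 enables the PMI; bit 2 is AnyThread where supported), which is why the loop above walks nr_arch_fixed_counters and re-marks each fixed PMC as in use. A minimal userspace sketch of that layout; fixed_ctrl_field() is illustrative, not a KVM helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: extract fixed counter i's 4-bit control field from a
     * MSR_CORE_PERF_FIXED_CTR_CTRL-style value. */
    static uint64_t fixed_ctrl_field(uint64_t ctrl, int i)
    {
        return (ctrl >> (i * 4)) & 0xf;
    }

    int main(void)
    {
        uint64_t ctrl = 0x0b0;  /* counter 1: ring0 | ring3 | PMI enable */

        for (int i = 0; i < 3; i++)
            printf("fixed counter %d: ctrl=%#llx enabled=%d\n", i,
                   (unsigned long long)fixed_ctrl_field(ctrl, i),
                   fixed_ctrl_field(ctrl, i) != 0);
        return 0;
    }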

intel_rdpmc_ecx_to_pmc() maps a guest RDPMC index to a kvm_pmc:

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    /*
     * Yell and reject attempts to read PMCs for a non-architectural PMU,
     * ...
     */
    if (WARN_ON_ONCE(!pmu->version))
        ...
    /* ... supported by KVM.  Note, KVM only emulates fixed PMCs for PMU v2+, ... */
    ...
        counters = pmu->fixed_counters;
        num_counters = pmu->nr_arch_fixed_counters;
        bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
    ...
        counters = pmu->gp_counters;
        num_counters = pmu->nr_arch_gp_counters;
        bitmask = pmu->counter_bitmask[KVM_PMC_GP];
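
The class split above mirrors the RDPMC encoding: bit 30 of the guest's ECX selects the fixed-counter class, the remaining bits index into that class, and the read value is masked by the class's counter_bitmask. A hedged sketch of the decode (rdpmc_decode() is illustrative; the fixed-class bit is per the SDM):

    #include <stdbool.h>
    #include <stdint.h>

    #define RDPMC_FIXED_CLASS   (1u << 30)  /* ECX bit 30: fixed-counter class */

    /* Illustrative RDPMC index decode: returns true if the index is in range. */
    static bool rdpmc_decode(uint32_t ecx, unsigned int nr_gp, unsigned int nr_fixed,
                             bool *is_fixed, unsigned int *idx)
    {
        *is_fixed = ecx & RDPMC_FIXED_CLASS;
        *idx = ecx & ~RDPMC_FIXED_CLASS;
        return *idx < (*is_fixed ? nr_fixed : nr_gp);
    }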

get_fw_gp_pmc() resolves full-width counter MSRs, which are only valid when full-width writes are enabled:

    static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
    {
        if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
            return NULL;

        return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
    }

In intel_is_valid_msr():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
        return kvm_pmu_has_perf_global_ctrl(pmu);
    ...
        ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
              get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
              get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
              ...

In intel_msr_idx_to_pmc():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    pmc = get_fixed_pmc(pmu, msr);
    pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
    pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
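
This fixed-then-GP probing works because each class of PMU register lives in its own architectural MSR range. For orientation, the bases behind the helpers above (values per the Intel SDM):

    /* MSR bases behind the helpers above; addresses per the Intel SDM. */
    #define MSR_P6_EVNTSEL0             0x186   /* GP event select i = 0x186 + i */
    #define MSR_IA32_PERFCTR0           0x0c1   /* GP counter i (legacy; writes sign-extend bit 31) */
    #define MSR_IA32_PMC0               0x4c1   /* GP counter i, full-width alias */
    #define MSR_CORE_PERF_FIXED_CTR0    0x309   /* fixed counter i = 0x309 + i */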

In intel_pmu_create_guest_lbr_event():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
        __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
    ...
    pmu->event_count++;
    __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);

In intel_pmu_get_msr():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    msr_info->data = pmu->fixed_ctr_ctrl;   /* MSR_CORE_PERF_FIXED_CTR_CTRL */
    ...
    msr_info->data = pmu->pebs_enable;      /* MSR_IA32_PEBS_ENABLE */
    ...
    msr_info->data = pmu->ds_area;          /* MSR_IA32_DS_AREA */
    ...
    msr_info->data = pmu->pebs_data_cfg;    /* MSR_PEBS_DATA_CFG */
    ...
    if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
        (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
        ...
        msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP];
        ...
    } else if ((pmc = get_fixed_pmc(pmu, msr))) {
        ...
        msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
        ...
    } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
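
In the read paths above, the raw counter value is masked down to the advertised width before being returned, because counter_bitmask[] holds (1 << bit_width) - 1 for each class. A worked example assuming a 48-bit GP counter:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t bitmask = (1ull << 48) - 1;    /* counter_bitmask[KVM_PMC_GP] for bit_width = 48 */
        uint64_t raw = 0xffff800000001234ull;   /* internal value; bits above the width are noise */

        assert((raw & bitmask) == 0x800000001234ull);
        return 0;
    }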

In intel_pmu_set_msr():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    /* MSR_CORE_PERF_FIXED_CTR_CTRL */
    if (data & pmu->fixed_ctr_ctrl_rsvd)
        ...
    if (pmu->fixed_ctr_ctrl != data)
        reprogram_fixed_counters(pmu, data);
    ...
    /* MSR_IA32_PEBS_ENABLE */
    if (data & pmu->pebs_enable_rsvd)
        ...
    if (pmu->pebs_enable != data) {
        diff = pmu->pebs_enable ^ data;
        pmu->pebs_enable = data;
        reprogram_counters(pmu, diff);
    }
    ...
    /* MSR_IA32_DS_AREA */
    pmu->ds_area = data;
    ...
    /* MSR_PEBS_DATA_CFG */
    if (data & pmu->pebs_data_cfg_rsvd)
        ...
    pmu->pebs_data_cfg = data;
    ...
    if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
        (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
        if (...
            (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
            ...
    } else if ((pmc = get_fixed_pmc(pmu, msr))) {
        ...
    } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
        reserved_bits = pmu->reserved_bits;
        if (...
            (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
            ...
    }
    ...
    /* Not a known PMU MSR. */
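
Every WRMSR leg above follows the same shape: reject the write if it touches any bit in the MSR's *_rsvd mask, then reprogram only what changed by diffing old against new (diff = pmu->pebs_enable ^ data). A condensed, illustrative sketch of that guard; write_guarded() is not a KVM function:

    #include <stdint.h>

    /* Illustrative WRMSR-style guard, not a KVM function: fail on reserved
     * bits, and report which bits changed so only the affected counters are
     * reprogrammed. */
    static int write_guarded(uint64_t *reg, uint64_t data, uint64_t rsvd_mask,
                             uint64_t *diff)
    {
        if (data & rsvd_mask)
            return 1;   /* the real emulation injects #GP */
        *diff = *reg ^ data;
        *reg = data;
        return 0;
    }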

intel_pmu_enable_fixed_counter_bits() un-reserves the given control bits for every supported fixed counter:

    static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
    {
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
            pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
    }
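
intel_fixed_bits_by_idx(i, bits) shifts a per-counter bit pattern into counter i's 4-bit slice of FIXED_CTR_CTRL, so clearing the result from fixed_ctr_ctrl_rsvd marks those control bits as writable for that counter. The equivalent arithmetic, with the 4-bit stride written out:

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as the kernel's intel_fixed_bits_by_idx(): each fixed
     * counter owns a 4-bit field in MSR_CORE_PERF_FIXED_CTR_CTRL. */
    static uint64_t fixed_bits_by_idx(unsigned int idx, uint64_t bits)
    {
        return bits << (idx * 4);
    }

    int main(void)
    {
        /* Un-reserving kernel-mode counting (bit 0) for counter 2 clears bit 8. */
        assert(fixed_bits_by_idx(2, 0x1) == 0x100);
        return 0;
    }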

intel_pmu_refresh() recomputes the vPMU configuration from guest CPUID leaf 0xA:

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    /*
     * ... and PMU refresh is disallowed after the vCPU has run, i.e. this code
     * ...
     */
    ...
    pmu->version = eax.split.version_id;
    if (!pmu->version)
        return;

    pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, ...);
    ...
    pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
    ...
    pmu->available_event_types = ~entry->ebx & ...;

    if (pmu->version == 1) {
        pmu->nr_arch_fixed_counters = 0;
    } else {
        pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, ...);
        ...
        pmu->counter_bitmask[KVM_PMC_FIXED] = ...;
        ...
        intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL | ...);
    }

    counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
                     (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
    pmu->global_ctrl_rsvd = counter_rsvd;
    ...
    pmu->global_status_rsvd = pmu->global_ctrl_rsvd
        ...
    pmu->global_status_rsvd &= ...;
    ...
    /* TSX: expose the IN_TX/IN_TX_CP eventsel modifiers. */
    pmu->reserved_bits ^= HSW_IN_TX;
    pmu->raw_event_mask |= (HSW_IN_TX | HSW_IN_TX_CHECKPOINTED);

    bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
    bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
    ...
    bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
    ...
    /* adaptive PEBS (ICL_*) */
    pmu->pebs_enable_rsvd = counter_rsvd;
    pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
    pmu->pebs_data_cfg_rsvd = ~0xff00000full;
    intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
    ...
    /* legacy PEBS: only GP counters can be PEBS-enabled */
    pmu->pebs_enable_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
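
Everything in the refresh path derives from guest CPUID leaf 0xA: EAX carries the PMU version, GP counter count, and GP counter width; EDX carries the fixed counter count and width; the global-control reserved mask is then the complement of GP bits [N-1:0] plus fixed bits starting at bit 32 (KVM_FIXED_PMC_BASE_IDX). A standalone sketch of the same math, with hypothetical example CPUID values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t eax = 0x08300805;  /* hypothetical CPUID.0xA: v5, 8 GP counters, 48 bits */
        uint32_t edx = 0x00000603;  /* hypothetical: 3 fixed counters, 48 bits */

        unsigned int version   = eax & 0xff;         /* EAX[7:0]   */
        unsigned int nr_gp     = (eax >> 8) & 0xff;  /* EAX[15:8]  */
        unsigned int gp_width  = (eax >> 16) & 0xff; /* EAX[23:16] */
        unsigned int nr_fixed  = edx & 0x1f;         /* EDX[4:0]   */
        unsigned int fix_width = (edx >> 5) & 0xff;  /* EDX[12:5]  */

        /* Bits outside the implemented counters are reserved in
         * IA32_PERF_GLOBAL_CTRL; fixed counters start at bit 32. */
        uint64_t counter_rsvd = ~(((1ull << nr_gp) - 1) |
                                  (((1ull << nr_fixed) - 1) << 32));

        printf("v%u: %u GP x %u bits, %u fixed x %u bits, rsvd=%#llx\n",
               version, nr_gp, gp_width, nr_fixed, fix_width,
               (unsigned long long)counter_rsvd);
        return 0;
    }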

In intel_pmu_init():

    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
        pmu->gp_counters[i].type = KVM_PMC_GP;
        pmu->gp_counters[i].vcpu = vcpu;
        pmu->gp_counters[i].idx = i;
        pmu->gp_counters[i].current_config = 0;
    ...
        pmu->fixed_counters[i].type = KVM_PMC_FIXED;
        pmu->fixed_counters[i].vcpu = vcpu;
        pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
        pmu->fixed_counters[i].current_config = 0;
        pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
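
Note the index convention being established: GP counter i keeps idx == i, while fixed counter i gets idx == i + KVM_FIXED_PMC_BASE_IDX (32), matching each counter's enable bit in IA32_PERF_GLOBAL_CTRL. For example:

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_FIXED_PMC_BASE_IDX  32  /* fixed counters start at global bit 32 */

    int main(void)
    {
        /* IA32_PERF_GLOBAL_CTRL enable bit for fixed counter 1: */
        uint64_t bit = 1ull << (1 + KVM_FIXED_PMC_BASE_IDX);

        printf("%#llx\n", (unsigned long long)bit);  /* 0x200000000 */
        return 0;
    }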

Elsewhere in the LBR emulation:

    /* Emulate LBR_On_PMI behavior for 1 < pmu.version < 4. */

vmx_passthrough_lbr_msrs() decides whether the LBR MSRs remain passed through to the guest:

    /*
     * ... pmu resources (e.g. LBR) that were assigned to the guest. This is
     * ... confirm that the pmu features enabled to the guest are not reclaimed
     * ...
     */
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    ...
    if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
        ...
    __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);

intel_pmu_cross_mapped_check() scans the globally enabled PMCs for guest/host counter mismatches:

    void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
    {
        ...
        kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
            ...
            pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
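
The loop above records, for each globally enabled guest PMC, whether perf backed it with a different hardware counter than the guest-visible index; the host_cross_mapped_mask built here lets KVM relate host counter indexes back to guest counters. A minimal model of that bookkeeping, with a hypothetical hw_idx[] array standing in for perf's placement:

    #include <stdint.h>

    /* Minimal model: hw_idx[i] is the host counter backing guest counter i
     * (hypothetical; KVM queries perf for this), or -1 if the event is not
     * currently scheduled on a hardware counter. */
    static uint64_t cross_mapped_mask(const int *hw_idx, unsigned int nr)
    {
        uint64_t mask = 0;

        for (unsigned int i = 0; i < nr; i++)
            if (hw_idx[i] >= 0 && (unsigned int)hw_idx[i] != i)
                mask |= 1ull << hw_idx[i];
        return mask;
    }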