Lines matching refs:pmu (arch/x86/kvm/pmu.c)

101 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in __kvm_perf_overflow() local
116 (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
119 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
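These first hits are the overflow path: __kvm_perf_overflow() latches the overflowing counter's index into pmu->global_status, which is what the guest later observes as MSR_CORE_PERF_GLOBAL_STATUS. Below is a minimal user-space sketch of that bookkeeping; the toy_* names and struct are simplified stand-ins, not the kernel's types.

#include <stdint.h>
#include <stdio.h>

struct toy_pmu {
        uint64_t global_status;            /* one bit per counter */
};

/* Equivalent of the __set_bit(pmc->idx, ...) call at line 119. */
static void toy_overflow(struct toy_pmu *pmu, unsigned int idx)
{
        pmu->global_status |= 1ull << idx;
}

int main(void)
{
        struct toy_pmu pmu = { 0 };

        toy_overflow(&pmu, 3);             /* pretend GP counter 3 overflowed */
        printf("global_status = %#llx\n",
               (unsigned long long)pmu.global_status);
        return 0;
}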
180 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_reprogram_counter() local
192 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
452 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in reprogram_counter() local
470 fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, in reprogram_counter()
489 (eventsel & pmu->raw_event_mask), in reprogram_counter()
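reprogram_counter() pulls the 4-bit control field for a fixed counter out of pmu->fixed_ctr_ctrl (each fixed counter owns four bits of that MSR image) and masks the raw event selector with pmu->raw_event_mask. A stand-alone sketch of the field extraction; toy_fixed_ctrl_field() mirrors the kernel's fixed_ctrl_field() helper but is a simplified copy, not the real macro.

#include <stdint.h>
#include <stdio.h>

/* Each fixed counter's 4-bit field holds its ring-level enables and
 * PMI bit inside the FIXED_CTR_CTRL MSR image. */
static uint8_t toy_fixed_ctrl_field(uint64_t ctrl, unsigned int idx)
{
        return (ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
        uint64_t fixed_ctr_ctrl = 0x0b0;   /* hypothetical: counter 1 = 0xb */

        printf("fixed counter 1 ctrl = %#x\n",
               toy_fixed_ctrl_field(fixed_ctr_ctrl, 1));
        return 0;
}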
498 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_handle_event() local
502 bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX); in kvm_pmu_handle_event()
510 atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi); in kvm_pmu_handle_event()
512 kvm_for_each_pmc(pmu, pmc, bit, bitmap) { in kvm_pmu_handle_event()
520 set_bit(pmc->idx, pmu->reprogram_pmi); in kvm_pmu_handle_event()
528 if (unlikely(pmu->need_cleanup)) in kvm_pmu_handle_event()
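kvm_pmu_handle_event() snapshots the pending-reprogram bitmap, atomically clears only the bits it copied (so a bit set by a concurrent overflow after the copy is not lost), reprograms each marked PMC, and re-queues any PMC whose perf event could not be created (line 520). A user-space sketch of that snapshot-and-clear pattern using C11 atomics; toy_reprogram() is a hypothetical stand-in for the real reprogramming.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false to model a PMC whose perf event can't be created yet;
 * the real code re-queues it exactly like line 520 above. */
static bool toy_reprogram(unsigned int idx) { return idx != 2; }

int main(void)
{
        _Atomic uint64_t pending = (1u << 1) | (1u << 2);
        uint64_t snapshot, requeue = 0;

        /* Snapshot, then clear only the bits we saw (~ atomic64_andnot()
         * at line 510), so concurrently-set bits survive. */
        snapshot = atomic_load(&pending);
        atomic_fetch_and(&pending, ~snapshot);

        for (unsigned int i = 0; i < 64; i++) {
                if (!(snapshot & (1ull << i)))
                        continue;
                if (!toy_reprogram(i))
                        requeue |= 1ull << i;   /* retry on next event */
        }
        atomic_fetch_or(&pending, requeue);

        printf("still pending = %#llx\n",
               (unsigned long long)atomic_load(&pending));
        return 0;
}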
584 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_rdpmc() local
588 if (!pmu->version) in kvm_pmu_rdpmc()
631 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_mark_pmc_in_use() local
635 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
640 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_get_msr() local
646 msr_info->data = pmu->global_status; in kvm_pmu_get_msr()
650 msr_info->data = pmu->global_ctrl; in kvm_pmu_get_msr()
665 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_set_msr() local
684 if (data & pmu->global_status_rsvd) in kvm_pmu_set_msr()
687 pmu->global_status = data; in kvm_pmu_set_msr()
690 data &= ~pmu->global_ctrl_rsvd; in kvm_pmu_set_msr()
693 if (!kvm_valid_perf_global_ctrl(pmu, data)) in kvm_pmu_set_msr()
696 if (pmu->global_ctrl != data) { in kvm_pmu_set_msr()
697 diff = pmu->global_ctrl ^ data; in kvm_pmu_set_msr()
698 pmu->global_ctrl = data; in kvm_pmu_set_msr()
699 reprogram_counters(pmu, diff); in kvm_pmu_set_msr()
707 if (data & pmu->global_status_rsvd) in kvm_pmu_set_msr()
712 pmu->global_status &= ~data; in kvm_pmu_set_msr()
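The PERF_GLOBAL_CTRL write path above drops reserved bits (line 690), validates the value, and then reprograms only the counters whose enable bit actually changed, by XOR-ing the old and new values (lines 696-699). A sketch of that diff-and-reprogram idea; toy_reprogram_counters() is a hypothetical stand-in, and the kvm_valid_perf_global_ctrl() check is elided.

#include <stdint.h>
#include <stdio.h>

/* Only counters whose enable bit flipped are touched. */
static void toy_reprogram_counters(uint64_t diff)
{
        for (unsigned int i = 0; i < 64; i++)
                if (diff & (1ull << i))
                        printf("reprogram counter %u\n", i);
}

static void toy_set_global_ctrl(uint64_t *global_ctrl, uint64_t data,
                                uint64_t rsvd_mask)
{
        data &= ~rsvd_mask;                      /* drop reserved bits */
        if (*global_ctrl != data) {
                uint64_t diff = *global_ctrl ^ data;

                *global_ctrl = data;
                toy_reprogram_counters(diff);
        }
}

int main(void)
{
        uint64_t ctrl = 0x3;                      /* counters 0 and 1 enabled */

        toy_set_global_ctrl(&ctrl, 0x5, ~0xfull); /* enable 2, disable 1 */
        return 0;
}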
724 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_reset() local
728 pmu->need_cleanup = false; in kvm_pmu_reset()
730 bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX); in kvm_pmu_reset()
732 kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) { in kvm_pmu_reset()
741 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; in kvm_pmu_reset()
753 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_refresh() local
764 pmu->version = 0; in kvm_pmu_refresh()
765 pmu->nr_arch_gp_counters = 0; in kvm_pmu_refresh()
766 pmu->nr_arch_fixed_counters = 0; in kvm_pmu_refresh()
767 pmu->counter_bitmask[KVM_PMC_GP] = 0; in kvm_pmu_refresh()
768 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; in kvm_pmu_refresh()
769 pmu->reserved_bits = 0xffffffff00200000ull; in kvm_pmu_refresh()
770 pmu->raw_event_mask = X86_RAW_EVENT_MASK; in kvm_pmu_refresh()
771 pmu->global_ctrl_rsvd = ~0ull; in kvm_pmu_refresh()
772 pmu->global_status_rsvd = ~0ull; in kvm_pmu_refresh()
773 pmu->fixed_ctr_ctrl_rsvd = ~0ull; in kvm_pmu_refresh()
774 pmu->pebs_enable_rsvd = ~0ull; in kvm_pmu_refresh()
775 pmu->pebs_data_cfg_rsvd = ~0ull; in kvm_pmu_refresh()
776 bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX); in kvm_pmu_refresh()
790 if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters) in kvm_pmu_refresh()
791 pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0); in kvm_pmu_refresh()
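kvm_pmu_refresh() first writes "PMU absent" defaults (version 0, zero counters, every reserved mask all-ones) and, when the vPMU has PERF_GLOBAL_CTRL, seeds pmu->global_ctrl with every GP counter enabled, mirroring the architectural power-on value. The GENMASK_ULL(n - 1, 0) at line 791 builds that all-counters mask; here is a stand-alone sketch, where toy_genmask_ull() is a simplified re-implementation rather than the kernel macro.

#include <stdint.h>
#include <stdio.h>

/* Bits l..h set, like GENMASK_ULL(h, l). */
static uint64_t toy_genmask_ull(unsigned int h, unsigned int l)
{
        return ((~0ull) >> (63 - h)) & (~0ull << l);
}

int main(void)
{
        unsigned int nr_gp = 8;            /* hypothetical GP counter count */

        /* Matches line 791: all GP counter enable bits set, as if the
         * guest had never written PERF_GLOBAL_CTRL. */
        printf("default global_ctrl = %#llx\n",
               (unsigned long long)toy_genmask_ull(nr_gp - 1, 0));
        return 0;
}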
796 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_init() local
798 memset(pmu, 0, sizeof(*pmu)); in kvm_pmu_init()
806 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_cleanup() local
811 pmu->need_cleanup = false; in kvm_pmu_cleanup()
813 bitmap_andnot(bitmask, pmu->all_valid_pmc_idx, in kvm_pmu_cleanup()
814 pmu->pmc_in_use, X86_PMC_IDX_MAX); in kvm_pmu_cleanup()
816 kvm_for_each_pmc(pmu, pmc, i, bitmask) { in kvm_pmu_cleanup()
823 bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX); in kvm_pmu_cleanup()
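kvm_pmu_cleanup() computes "valid but no longer used" as all_valid_pmc_idx AND-NOT pmc_in_use, releases the backing perf events for those idle counters, then zeroes the in-use bitmap for the next tracking interval. With X86_PMC_IDX_MAX at 64 (see the BUILD_BUG_ON at line 871), the same computation fits in one mask operation, as in this sketch with hypothetical example masks.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t all_valid = 0x0f;         /* counters 0-3 exist */
        uint64_t in_use    = 0x05;         /* guest touched 0 and 2 recently */

        /* Like bitmap_andnot() at lines 813-814: valid-but-idle counters
         * get their backing perf events released. */
        uint64_t idle = all_valid & ~in_use;

        printf("release counters mask = %#llx\n", (unsigned long long)idle);
        return 0;
}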
867 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_trigger_event() local
871 BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX); in kvm_pmu_trigger_event()
873 if (!kvm_pmu_has_perf_global_ctrl(pmu)) in kvm_pmu_trigger_event()
874 bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX); in kvm_pmu_trigger_event()
875 else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx, in kvm_pmu_trigger_event()
876 (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX)) in kvm_pmu_trigger_event()
879 kvm_for_each_pmc(pmu, pmc, i, bitmap) { in kvm_pmu_trigger_event()
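kvm_pmu_trigger_event() builds its candidate set one of two ways: without PERF_GLOBAL_CTRL every valid PMC is a candidate, otherwise the valid set is intersected with the globally enabled bits, and an empty intersection means no counter can observe the event, so the function bails early. A sketch of that selection (lines 873-876), with hypothetical example masks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        bool has_global_ctrl = true;       /* hypothetical vPMU config */
        uint64_t all_valid   = 0xff;
        uint64_t global_ctrl = 0x03;
        uint64_t candidates;

        if (!has_global_ctrl)
                candidates = all_valid;
        else
                candidates = all_valid & global_ctrl;

        if (!candidates)
                return 0;                  /* nothing enabled, bail early */

        printf("counters to check = %#llx\n", (unsigned long long)candidates);
        return 0;
}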