Lines matching "architecturally defined"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
26 /* This is enough to filter the vast majority of currently defined events. */
46 /* Instruction-Accurate PDIR (PDIR++) */
58 * - Each perf counter is defined as "struct kvm_pmc";
59 * - There are two types of perf counters: general purpose (gp) and fixed.
63 * - pmu.c understands the difference between gp counters and fixed counters.
64 * However, AMD doesn't support fixed counters;
65 * - There are three types of index to access perf counters (PMC):
68 * MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
79 * * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
81 * * AMD: [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
82 * and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
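
The comment block above describes three different index spaces for the same counters. The following standalone sketch (not kernel code) shows how an internal idx could be classified into the gp and fixed ranges it describes, assuming gp counters occupy [0 .. nr_gp-1] and fixed counters start at a base of 32, matching the pmc->idx == 32 check used for the first fixed counter later in this listing.

#include <stdbool.h>
#include <stdio.h>

#define FIXED_PMC_BASE_IDX 32	/* assumption: mirrors KVM_FIXED_PMC_BASE_IDX */

static bool idx_is_fixed(unsigned int idx, unsigned int nr_fixed)
{
	return idx >= FIXED_PMC_BASE_IDX &&
	       idx < FIXED_PMC_BASE_IDX + nr_fixed;
}

int main(void)
{
	unsigned int nr_gp = 8, nr_fixed = 3;

	for (unsigned int idx = 0; idx < 40; idx++) {
		if (idx < nr_gp)
			printf("idx %2u: gp counter %u\n", idx, idx);
		else if (idx_is_fixed(idx, nr_fixed))
			printf("idx %2u: fixed counter %u\n", idx,
			       idx - FIXED_PMC_BASE_IDX);
	}
	return 0;
}
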
89 *(((struct kvm_pmu_ops *)0)->func));
91 #include <asm/kvm-x86-pmu-ops.h>
102 #include <asm/kvm-x86-pmu-ops.h> in kvm_pmu_ops_update()
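The repeated #include <asm/kvm-x86-pmu-ops.h> lines rely on an X-macro: the header expands one macro invocation per vendor callback, and each includer defines that macro differently (declare, then update). Below is a standalone sketch of the general X-macro idea only; the real macros (KVM_X86_PMU_OP, static_call_update, etc.) differ, and the op names here are made up.

#include <stdio.h>

struct pmu_ops {
	int (*refresh)(void);
	int (*reset)(void);
};

/* One line per op, like the per-op lines in kvm-x86-pmu-ops.h. */
#define FOR_EACH_PMU_OP(OP) \
	OP(refresh)	    \
	OP(reset)

static int intel_refresh(void) { return 1; }
static int intel_reset(void)   { return 2; }

static const struct pmu_ops intel_ops = {
	.refresh = intel_refresh,
	.reset	 = intel_reset,
};

/* "Update" step: copy each callback out of the vendor ops table, roughly
 * the way kvm_pmu_ops_update() patches one static call per op.
 */
static struct pmu_ops active_ops;

#define UPDATE_OP(func) active_ops.func = intel_ops.func;

int main(void)
{
	FOR_EACH_PMU_OP(UPDATE_OP)
	printf("refresh -> %d, reset -> %d\n",
	       active_ops.refresh(), active_ops.reset());
	return 0;
}
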
109 int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS; in kvm_init_pmu_capability()
115 * for hybrid PMUs until KVM gains a way to let userspace opt-in. in kvm_init_pmu_capability()
127 * architecturally required GP counters aren't present, i.e. if in kvm_init_pmu_capability()
128 * there is a non-zero number of counters, but fewer than what in kvm_init_pmu_capability()
129 * is architecturally required. in kvm_init_pmu_capability()
146 pmu_ops->MAX_NR_GP_COUNTERS); in kvm_init_pmu_capability()
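
The kvm_init_pmu_capability() fragments above disable the vPMU when fewer than the architecturally required number of GP counters is present, and otherwise clamp the count to KVM's maximum. A minimal standalone sketch of that validate-then-clamp step, with placeholder min/max values rather than the real vendor numbers:

#include <stdio.h>

struct pmu_cap {
	int version;
	int num_counters_gp;
};

static void init_pmu_capability(struct pmu_cap *cap, int min_gp, int max_gp)
{
	/* A non-zero but sub-minimum counter count means a broken setup:
	 * turn the vPMU off entirely rather than expose a partial PMU.
	 */
	if (cap->num_counters_gp && cap->num_counters_gp < min_gp) {
		cap->version = 0;
		cap->num_counters_gp = 0;
		return;
	}

	if (cap->num_counters_gp > max_gp)
		cap->num_counters_gp = max_gp;
}

int main(void)
{
	struct pmu_cap cap = { .version = 2, .num_counters_gp = 1 };

	init_pmu_capability(&cap, /*min_gp=*/2, /*max_gp=*/8);
	printf("version=%d gp=%d\n", cap.version, cap.num_counters_gp);
	return 0;
}
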
161 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { in __kvm_perf_overflow()
173 (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
176 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
179 if (pmc->intr && !skip_pmi) in __kvm_perf_overflow()
180 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in __kvm_perf_overflow()
187 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow()
194 if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) in kvm_perf_overflow()
199 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
211 if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) || in pmc_get_pebs_precise_level()
212 (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu))) in pmc_get_pebs_precise_level()
216 * The non-zero precision level of a guest event makes the ordinary in pmc_get_pebs_precise_level()
226 u64 sample_period = (-counter_value) & pmc_bitmask(pmc); in get_sample_period()
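
The line from get_sample_period() programs perf with the distance to overflow: the two's-complement negation of the current counter value, masked to the counter width. A worked example, assuming a 48-bit counter mask (the real width is whatever pmc_bitmask() reports); a zero result (counter already at 0) would need special handling, which this sketch omits.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmask = (1ULL << 48) - 1;	/* 48-bit counter */
	uint64_t counter = 0xFFFFFFFFFFF0ULL;	/* 16 counts from overflow */
	uint64_t sample_period = (-counter) & bitmask;

	/* Prints 16: perf will fire after the remaining 16 increments. */
	printf("sample_period = %" PRIu64 "\n", sample_period);
	return 0;
}
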
249 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
251 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
272 event = perf_event_create_kernel_counter(&attr, -1, current, in pmc_reprogram_counter()
275 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
276 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
280 pmc->perf_event = event; in pmc_reprogram_counter()
281 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
282 pmc->is_paused = false; in pmc_reprogram_counter()
283 pmc->intr = intr || pebs; in pmc_reprogram_counter()
289 u64 counter = pmc->counter; in pmc_pause_counter()
293 if (pmc->perf_event && !pmc->is_paused) in pmc_pause_counter()
294 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
304 counter += pmc->emulated_counter; in pmc_pause_counter()
305 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
307 pmc->emulated_counter = 0; in pmc_pause_counter()
308 pmc->is_paused = true; in pmc_pause_counter()
310 return pmc->counter < prev_counter; in pmc_pause_counter()
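
The pmc_pause_counter() fragments fold the hardware count and any emulated increments into pmc->counter and detect overflow by checking whether the masked result wrapped below its previous value. A standalone sketch of that accumulate-and-wrap check, under the assumption that only wraps caused by emulated increments need to be caught here (perf reports hardware overflow on its own):

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

static bool pause_counter(uint64_t *counter, uint64_t hw_delta,
			  uint64_t emulated, uint64_t bitmask)
{
	uint64_t val = *counter + hw_delta;
	/* Snapshot after folding in the hardware delta, then add the
	 * emulated increments and mask to the counter width.
	 */
	uint64_t prev = val & bitmask;

	val += emulated;
	*counter = val & bitmask;
	return *counter < prev;	/* wrapped => counter overflowed */
}

int main(void)
{
	uint64_t bitmask = (1ULL << 48) - 1;
	uint64_t counter = bitmask - 4;		/* five counts from overflow */
	bool overflowed = pause_counter(&counter, 2, 7, bitmask);

	printf("counter=%" PRIu64 " overflowed=%d\n", counter, overflowed);
	return 0;
}
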
315 if (!pmc->perf_event) in pmc_resume_counter()
319 if (is_sampling_event(pmc->perf_event) && in pmc_resume_counter()
320 perf_event_period(pmc->perf_event, in pmc_resume_counter()
321 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
324 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != in pmc_resume_counter()
325 (!!pmc->perf_event->attr.precise_ip)) in pmc_resume_counter()
329 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
330 pmc->is_paused = false; in pmc_resume_counter()
337 if (pmc->perf_event) { in pmc_release_perf_event()
338 perf_event_release_kernel(pmc->perf_event); in pmc_release_perf_event()
339 pmc->perf_event = NULL; in pmc_release_perf_event()
340 pmc->current_config = 0; in pmc_release_perf_event()
341 pmc_to_pmu(pmc)->event_count--; in pmc_release_perf_event()
347 if (pmc->perf_event) { in pmc_stop_counter()
348 pmc->counter = pmc_read_counter(pmc); in pmc_stop_counter()
355 if (!pmc->perf_event || pmc->is_paused || in pmc_update_sample_period()
356 !is_sampling_event(pmc->perf_event)) in pmc_update_sample_period()
359 perf_event_period(pmc->perf_event, in pmc_update_sample_period()
360 get_sample_period(pmc, pmc->counter)); in pmc_update_sample_period()
367 * read-modify-write. Adjust the counter value so that its value is in pmc_write_counter()
373 pmc->emulated_counter = 0; in pmc_write_counter()
374 pmc->counter += val - pmc_read_counter(pmc); in pmc_write_counter()
375 pmc->counter &= pmc_bitmask(pmc); in pmc_write_counter()
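
The pmc_write_counter() fragments apply a guest write as a delta against the current readout (counter += val - pmc_read_counter()), so the running perf event's count is not double-counted and the event doesn't have to be paused. A worked example of why a subsequent read then sees exactly the written value; perf_count stands in for the live perf event count (an assumption for the sketch).

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmask = (1ULL << 48) - 1;
	uint64_t sw_counter = 100;	/* software base, i.e. pmc->counter */
	uint64_t perf_count = 40;	/* what the running perf event shows */
	uint64_t read = sw_counter + perf_count;	/* pmc_read_counter() */

	uint64_t val = 500;		/* value written by the guest */

	sw_counter += val - read;
	sw_counter &= bitmask;

	/* A read right after the write sees exactly the written value. */
	printf("read after write = %" PRIu64 "\n",
	       (sw_counter + perf_count) & bitmask);
	return 0;
}
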
385 return (a > b) - (a < b); in filter_cmp()
411 return -1; in find_filter_index()
413 return fe - events; in find_filter_index()
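
filter_cmp() returns (a > b) - (a < b), the overflow-safe three-way comparison, and find_filter_index() converts the matched element pointer back into an index by pointer subtraction. A standalone sketch of the same pattern using the user-space stdlib bsearch():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa;
	uint64_t b = *(const uint64_t *)pb;

	/* (a - b) could overflow or truncate when narrowed to int; this can't. */
	return (a > b) - (a < b);
}

static int find_index(const uint64_t *events, size_t nevents, uint64_t key)
{
	const uint64_t *fe = bsearch(&key, events, nevents,
				     sizeof(*events), cmp_u64);

	if (!fe)
		return -1;
	return fe - events;
}

int main(void)
{
	uint64_t events[] = { 0x003c, 0x00c0, 0x00c4, 0x01c2 };	/* sorted */

	printf("0x00c4 -> %d\n", find_index(events, 4, 0x00c4));
	printf("0x002e -> %d\n", find_index(events, 4, 0x002e));
	return 0;
}
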
418 u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8); in is_filter_entry_match()
422 (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) != in is_filter_entry_match()
450 for (i = index - 1; i >= 0; i--) { in filter_contains_match()
464 if (filter_contains_match(f->includes, f->nr_includes, eventsel) && in is_gp_event_allowed()
465 !filter_contains_match(f->excludes, f->nr_excludes, eventsel)) in is_gp_event_allowed()
466 return f->action == KVM_PMU_EVENT_ALLOW; in is_gp_event_allowed()
468 return f->action == KVM_PMU_EVENT_DENY; in is_gp_event_allowed()
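
The is_gp_event_allowed() fragments treat the filter as two sub-lists: the event must match some include entry and no exclude entry for an ALLOW filter to pass it, with the inverse result for DENY filters. A standalone sketch of that decision, with the match test reduced to exact equality (the real code also honors umask masks):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum filter_action { FILTER_ALLOW, FILTER_DENY };

static bool list_contains(const uint64_t *list, size_t n, uint64_t ev)
{
	for (size_t i = 0; i < n; i++)
		if (list[i] == ev)
			return true;
	return false;
}

static bool gp_event_allowed(enum filter_action action,
			     const uint64_t *includes, size_t nr_includes,
			     const uint64_t *excludes, size_t nr_excludes,
			     uint64_t eventsel)
{
	if (list_contains(includes, nr_includes, eventsel) &&
	    !list_contains(excludes, nr_excludes, eventsel))
		return action == FILTER_ALLOW;

	return action == FILTER_DENY;
}

int main(void)
{
	uint64_t inc[] = { 0x00c0 };	/* e.g. retired instructions */
	uint64_t exc[] = { 0x01c2 };

	printf("%d\n", gp_event_allowed(FILTER_ALLOW, inc, 1, exc, 1, 0x00c0));
	printf("%d\n", gp_event_allowed(FILTER_ALLOW, inc, 1, exc, 1, 0x003c));
	return 0;
}
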
474 int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX; in is_fixed_event_allowed()
476 if (filter->action == KVM_PMU_EVENT_DENY && in is_fixed_event_allowed()
477 test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap)) in is_fixed_event_allowed()
479 if (filter->action == KVM_PMU_EVENT_ALLOW && in is_fixed_event_allowed()
480 !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap)) in is_fixed_event_allowed()
489 struct kvm *kvm = pmc->vcpu->kvm; in pmc_is_event_allowed()
491 filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu); in pmc_is_event_allowed()
496 return is_gp_event_allowed(filter, pmc->eventsel); in pmc_is_event_allowed()
498 return is_fixed_event_allowed(filter, pmc->idx); in pmc_is_event_allowed()
504 u64 eventsel = pmc->eventsel; in reprogram_counter()
522 fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, in reprogram_counter()
523 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in reprogram_counter()
533 if (pmc->current_config == new_config && pmc_resume_counter(pmc)) in reprogram_counter()
538 pmc->current_config = new_config; in reprogram_counter()
541 (eventsel & pmu->raw_event_mask), in reprogram_counter()
560 * Checking the reserved bits might be wrong if they are defined in the in pmc_is_event_match()
563 return !((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB); in pmc_is_event_match()
568 bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
569 bitmap_clear(pmu->pmc_counting_branches, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
581 bitmap_set(pmu->pmc_counting_instructions, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
584 bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
595 bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX); in kvm_pmu_handle_event()
599 * other than the task that holds vcpu->mutex, take care to clear only in kvm_pmu_handle_event()
603 atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi); in kvm_pmu_handle_event()
607 * If reprogramming fails, e.g. due to contention, re-set the in kvm_pmu_handle_event()
613 set_bit(pmc->idx, pmu->reprogram_pmi); in kvm_pmu_handle_event()
621 if (unlikely(pmu->need_cleanup)) in kvm_pmu_handle_event()
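
The kvm_pmu_handle_event() fragments snapshot reprogram_pmi into a local bitmap and then clear only the snapshotted bits with an atomic AND-NOT, so bits set concurrently by the overflow handler (which can run outside vcpu->mutex) are not lost; a failed reprogram re-sets its bit for the next pass. A standalone sketch of that snapshot-then-clear pattern, using a single C11 atomic word in place of the kernel's atomic64/bitmap pair:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t reprogram_bits;

static void handle_events(void)
{
	/* Snapshot, then clear exactly the bits that were snapshotted; bits
	 * set by another context after the snapshot survive for later.
	 */
	uint64_t snapshot = atomic_load(&reprogram_bits);

	atomic_fetch_and(&reprogram_bits, ~snapshot);

	for (int idx = 0; idx < 64; idx++)
		if (snapshot & (1ULL << idx))
			printf("reprogram counter %d\n", idx);
}

int main(void)
{
	atomic_fetch_or(&reprogram_bits, (1ULL << 0) | (1ULL << 32));
	handle_events();

	/* A bit set "concurrently" is still pending afterwards. */
	atomic_fetch_or(&reprogram_bits, 1ULL << 1);
	printf("pending after pass: %#llx\n",
	       (unsigned long long)atomic_load(&reprogram_bits));
	return 0;
}
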
668 vcpu->kvm->arch.kvmclock_offset; in kvm_pmu_rdpmc_vmware()
684 if (!pmu->version) in kvm_pmu_rdpmc()
707 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); in kvm_pmu_deliver_pmi()
731 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
737 u32 msr = msr_info->index; in kvm_pmu_get_msr()
742 msr_info->data = pmu->global_status; in kvm_pmu_get_msr()
746 msr_info->data = pmu->global_ctrl; in kvm_pmu_get_msr()
751 msr_info->data = 0; in kvm_pmu_get_msr()
763 u32 msr = msr_info->index; in kvm_pmu_set_msr()
764 u64 data = msr_info->data; in kvm_pmu_set_msr()
768 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs, in kvm_pmu_set_msr()
773 if (!msr_info->host_initiated) in kvm_pmu_set_msr()
777 /* Per PPR, Read-only MSR. Writes are ignored. */ in kvm_pmu_set_msr()
778 if (!msr_info->host_initiated) in kvm_pmu_set_msr()
781 if (data & pmu->global_status_rsvd) in kvm_pmu_set_msr()
784 pmu->global_status = data; in kvm_pmu_set_msr()
787 data &= ~pmu->global_ctrl_rsvd; in kvm_pmu_set_msr()
793 if (pmu->global_ctrl != data) { in kvm_pmu_set_msr()
794 diff = pmu->global_ctrl ^ data; in kvm_pmu_set_msr()
795 pmu->global_ctrl = data; in kvm_pmu_set_msr()
804 if (data & pmu->global_status_rsvd) in kvm_pmu_set_msr()
808 if (!msr_info->host_initiated) in kvm_pmu_set_msr()
809 pmu->global_status &= ~data; in kvm_pmu_set_msr()
812 if (!msr_info->host_initiated) in kvm_pmu_set_msr()
813 pmu->global_status |= data & ~pmu->global_status_rsvd; in kvm_pmu_set_msr()
816 kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index); in kvm_pmu_set_msr()
829 pmu->need_cleanup = false; in kvm_pmu_reset()
831 bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX); in kvm_pmu_reset()
833 kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) { in kvm_pmu_reset()
835 pmc->counter = 0; in kvm_pmu_reset()
836 pmc->emulated_counter = 0; in kvm_pmu_reset()
839 pmc->eventsel = 0; in kvm_pmu_reset()
842 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; in kvm_pmu_reset()
856 if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm)) in kvm_pmu_refresh()
865 pmu->version = 0; in kvm_pmu_refresh()
866 pmu->nr_arch_gp_counters = 0; in kvm_pmu_refresh()
867 pmu->nr_arch_fixed_counters = 0; in kvm_pmu_refresh()
868 pmu->counter_bitmask[KVM_PMC_GP] = 0; in kvm_pmu_refresh()
869 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; in kvm_pmu_refresh()
870 pmu->reserved_bits = 0xffffffff00200000ull; in kvm_pmu_refresh()
871 pmu->raw_event_mask = X86_RAW_EVENT_MASK; in kvm_pmu_refresh()
872 pmu->global_ctrl_rsvd = ~0ull; in kvm_pmu_refresh()
873 pmu->global_status_rsvd = ~0ull; in kvm_pmu_refresh()
874 pmu->fixed_ctr_ctrl_rsvd = ~0ull; in kvm_pmu_refresh()
875 pmu->pebs_enable_rsvd = ~0ull; in kvm_pmu_refresh()
876 pmu->pebs_data_cfg_rsvd = ~0ull; in kvm_pmu_refresh()
877 bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX); in kvm_pmu_refresh()
879 if (!vcpu->kvm->arch.enable_pmu) in kvm_pmu_refresh()
891 if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters) in kvm_pmu_refresh()
892 pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0); in kvm_pmu_refresh()
894 bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters); in kvm_pmu_refresh()
895 bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX, in kvm_pmu_refresh()
896 pmu->nr_arch_fixed_counters); in kvm_pmu_refresh()
915 pmu->need_cleanup = false; in kvm_pmu_cleanup()
917 bitmap_andnot(bitmask, pmu->all_valid_pmc_idx, in kvm_pmu_cleanup()
918 pmu->pmc_in_use, X86_PMC_IDX_MAX); in kvm_pmu_cleanup()
921 if (pmc->perf_event && !pmc_is_locally_enabled(pmc)) in kvm_pmu_cleanup()
927 bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX); in kvm_pmu_cleanup()
937 pmc->emulated_counter++; in kvm_pmu_incr_counter()
947 config = pmc->eventsel; in cpl_is_matched()
951 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, in cpl_is_matched()
952 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in cpl_is_matched()
964 return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os : in cpl_is_matched()
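
cpl_is_matched() decides whether a counter should count an emulated instruction from the guest's current privilege level: CPL 0 uses the "count OS" enable bit, anything else the "count user" bit, taken from the eventsel for gp counters or the fixed ctrl field for fixed counters. A sketch of the gp-counter case, using the standard eventsel bit positions (OS = bit 17, USR = bit 16); fixed counters use a different encoding in the real code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_USR (1ULL << 16)
#define EVENTSEL_OS  (1ULL << 17)

static bool cpl_is_matched(uint64_t eventsel, int guest_cpl)
{
	bool select_os = eventsel & EVENTSEL_OS;
	bool select_user = eventsel & EVENTSEL_USR;

	return guest_cpl == 0 ? select_os : select_user;
}

int main(void)
{
	uint64_t eventsel = EVENTSEL_USR;	/* count user mode only */

	printf("CPL0 counted: %d\n", cpl_is_matched(eventsel, 0));
	printf("CPL3 counted: %d\n", cpl_is_matched(eventsel, 3));
	return 0;
}
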
976 BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX); in kvm_pmu_trigger_event()
984 (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX)) in kvm_pmu_trigger_event()
987 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_pmu_trigger_event()
994 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_pmu_trigger_event()
999 kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_instructions); in kvm_pmu_instruction_retired()
1005 kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_branches); in kvm_pmu_branch_retired()
1017 for (i = 0; i < filter->nevents; i++) { in is_masked_filter_valid()
1018 if (filter->events[i] & ~mask) in is_masked_filter_valid()
1029 for (i = 0, j = 0; i < filter->nevents; i++) { in convert_to_masked_filter()
1034 * compatibility, impossible filters can't be rejected :-( in convert_to_masked_filter()
1036 if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT | in convert_to_masked_filter()
1040 * Convert userspace events to a common in-kernel event so in convert_to_masked_filter()
1042 * the in-kernel events use masked events because they are in convert_to_masked_filter()
1047 filter->events[j++] = filter->events[i] | in convert_to_masked_filter()
1051 filter->nevents = j; in convert_to_masked_filter()
1058 if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS)) in prepare_filter_lists()
1061 return -EINVAL; in prepare_filter_lists()
1068 * includes and excludes sub-lists. in prepare_filter_lists()
1070 sort(&filter->events, filter->nevents, sizeof(filter->events[0]), in prepare_filter_lists()
1073 i = filter->nevents; in prepare_filter_lists()
1075 if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) { in prepare_filter_lists()
1076 for (i = 0; i < filter->nevents; i++) { in prepare_filter_lists()
1077 if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE) in prepare_filter_lists()
1082 filter->nr_includes = i; in prepare_filter_lists()
1083 filter->nr_excludes = filter->nevents - filter->nr_includes; in prepare_filter_lists()
1084 filter->includes = filter->events; in prepare_filter_lists()
1085 filter->excludes = filter->events + filter->nr_includes; in prepare_filter_lists()
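
The prepare_filter_lists() fragments sort the events so that, for masked filters, entries without the EXCLUDE flag come first, then carve the single array into includes/excludes sub-lists via the nr_includes split point. A standalone sketch of that sort-then-split step; EXCLUDE_FLAG is a stand-in for KVM_PMU_MASKED_ENTRY_EXCLUDE and the sort key is simplified to the raw value.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EXCLUDE_FLAG (1ULL << 55)	/* stand-in for the real flag bit */

static int cmp_u64(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa, b = *(const uint64_t *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	uint64_t events[] = { 0x00c4 | EXCLUDE_FLAG, 0x00c0, 0x003c };
	size_t nevents = 3, nr_includes;

	/* EXCLUDE_FLAG is a high bit, so sorting pushes excludes to the end. */
	qsort(events, nevents, sizeof(events[0]), cmp_u64);

	for (nr_includes = 0; nr_includes < nevents; nr_includes++)
		if (events[nr_includes] & EXCLUDE_FLAG)
			break;

	uint64_t *includes = events;
	uint64_t *excludes = events + nr_includes;
	size_t nr_excludes = nevents - nr_includes;

	printf("includes: %zu, excludes: %zu, first exclude: %#" PRIx64 "\n",
	       nr_includes, nr_excludes,
	       nr_excludes ? (excludes[0] & ~EXCLUDE_FLAG) : 0);
	(void)includes;
	return 0;
}
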
1101 return -EFAULT; in kvm_vm_ioctl_set_pmu_event_filter()
1105 return -EINVAL; in kvm_vm_ioctl_set_pmu_event_filter()
1108 return -EINVAL; in kvm_vm_ioctl_set_pmu_event_filter()
1111 return -E2BIG; in kvm_vm_ioctl_set_pmu_event_filter()
1116 return -ENOMEM; in kvm_vm_ioctl_set_pmu_event_filter()
1118 filter->action = tmp.action; in kvm_vm_ioctl_set_pmu_event_filter()
1119 filter->nevents = tmp.nevents; in kvm_vm_ioctl_set_pmu_event_filter()
1120 filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap; in kvm_vm_ioctl_set_pmu_event_filter()
1121 filter->flags = tmp.flags; in kvm_vm_ioctl_set_pmu_event_filter()
1123 r = -EFAULT; in kvm_vm_ioctl_set_pmu_event_filter()
1124 if (copy_from_user(filter->events, user_filter->events, in kvm_vm_ioctl_set_pmu_event_filter()
1125 sizeof(filter->events[0]) * filter->nevents)) in kvm_vm_ioctl_set_pmu_event_filter()
1132 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_pmu_event_filter()
1133 filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter, in kvm_vm_ioctl_set_pmu_event_filter()
1134 mutex_is_locked(&kvm->lock)); in kvm_vm_ioctl_set_pmu_event_filter()
1135 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_pmu_event_filter()
1136 synchronize_srcu_expedited(&kvm->srcu); in kvm_vm_ioctl_set_pmu_event_filter()
1138 BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) > in kvm_vm_ioctl_set_pmu_event_filter()
1139 sizeof(((struct kvm_pmu *)0)->__reprogram_pmi)); in kvm_vm_ioctl_set_pmu_event_filter()
1142 atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull); in kvm_vm_ioctl_set_pmu_event_filter()