Lines matching full:pmc (KVM x86 PMU code, arch/x86/kvm/pmu.c)
65 * - There are three types of index to access perf counters (PMC):
75 * 3. Global PMC Index (named pmc): pmc is an index specific to PMU
76 * code. Each pmc, stored in the kvm_pmc.idx field, is unique across
77 * all perf counters (both gp and fixed). The mapping relationship
78 * between pmc and perf counters is as follows:
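Note: the mapping lines themselves are not part of the match set. As a rough sketch of the layout they describe, with the fixed-counter base taken from the pmc->idx == 32 check and the "pmc->idx - KVM_FIXED_PMC_BASE_IDX" arithmetic that appear later in this listing:

    /*
     * Sketch of the global PMC index space (Intel-style layout):
     *
     *   pmc->idx  0 .. nr_gp_counters - 1        general purpose counters
     *   pmc->idx  KVM_FIXED_PMC_BASE_IDX (32)    fixed counter 0
     *   pmc->idx  32 + i                         fixed counter i
     *
     * which is why the fixed-counter paths below compute
     * "pmc->idx - KVM_FIXED_PMC_BASE_IDX" to recover the fixed index.
     */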
156 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) in __kvm_perf_overflow() argument
158 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in __kvm_perf_overflow()
161 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { in __kvm_perf_overflow()
176 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
179 if (pmc->intr && !skip_pmi) in __kvm_perf_overflow()
180 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in __kvm_perf_overflow()
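Note: only lines containing "pmc" are shown, which hides the branch structure of __kvm_perf_overflow(). Roughly, and not verbatim: for PEBS counters (attr.precise_ip != 0) the overflow is reported through the PEBS buffer-overflow bit in global_status rather than the per-counter bit set on source line 176, and skip_pmi can suppress the KVM_REQ_PMI request when no interrupt should be injected.

    /* Sketch of the elided branch (illustrative, not a verbatim excerpt): */
    if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
        /* PEBS counter: report overflow via the buffer-overflow bit in
         * global_status and decide whether a PMI is warranted (skip_pmi). */
    } else {
        __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
    }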
187 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
194 if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) in kvm_perf_overflow()
197 __kvm_perf_overflow(pmc, true); in kvm_perf_overflow()
199 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
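Note: kvm_perf_overflow() is the perf overflow callback registered in pmc_reprogram_counter() below and can fire in NMI context, so it only marks the counter as needing reprogramming and raises KVM_REQ_PMU; the expensive work runs later, in vcpu context, from kvm_pmu_handle_event() (also in this listing). A compressed sketch of that producer/consumer split (illustrative):

    /* producer: NMI-safe overflow callback */
    if (!test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) {
        __kvm_perf_overflow(pmc, true);
        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
    }

    /* consumer: kvm_pmu_handle_event(), run when KVM_REQ_PMU is pending,
     * reprograms every counter whose reprogram_pmi bit is set. */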
202 static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc) in pmc_get_pebs_precise_level() argument
211 if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) || in pmc_get_pebs_precise_level()
212 (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu))) in pmc_get_pebs_precise_level()
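Note: perf_event_attr.precise_ip is perf's PEBS/skid constraint, which is why this helper is keyed to the counters that can deliver precise records. A short reference (values from the perf uapi; the PDIST/PDIR mapping matches the checks above):

    /*
     *   precise_ip == 0   no PEBS, arbitrary skid
     *   precise_ip == 1   PEBS, constant skid
     *   precise_ip == 2   PEBS, zero skid requested
     *   precise_ip == 3   PEBS, zero skid required (PDIST/PDIR)
     *
     * Only GP counter 0 (PDIST-capable parts) or fixed counter 0, i.e.
     * pmc->idx == 32 (PDIR-capable parts), can honor level 3.
     */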
224 static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value) in get_sample_period() argument
226 u64 sample_period = (-counter_value) & pmc_bitmask(pmc); in get_sample_period()
229 sample_period = pmc_bitmask(pmc) + 1; in get_sample_period()
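Note: the period arithmetic inverts the guest's "count up to overflow" value into perf's "events until next sample". A worked example, assuming a 48-bit counter so pmc_bitmask(pmc) == 0xffffffffffff:

    /*
     *   guest writes counter_value = 0xfffffffffff6      (i.e. -10 & mask)
     *   sample_period = (-0xfffffffffff6) & 0xffffffffffff = 10
     *     -> the perf event overflows after 10 more events, exactly when
     *        the guest-visible counter would wrap.
     *
     *   guest writes counter_value = 0
     *   (-0) & mask == 0, so the fallback uses a full period of
     *   pmc_bitmask(pmc) + 1 = 2^48.
     */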
233 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config, in pmc_reprogram_counter() argument
237 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_reprogram_counter()
249 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
251 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
269 attr.precise_ip = pmc_get_pebs_precise_level(pmc); in pmc_reprogram_counter()
273 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
275 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
276 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
280 pmc->perf_event = event; in pmc_reprogram_counter()
281 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
282 pmc->is_paused = false; in pmc_reprogram_counter()
283 pmc->intr = intr || pebs; in pmc_reprogram_counter()
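Note: the lines not shown mostly fill in a struct perf_event_attr before the perf_event_create_kernel_counter() call on source line 273. A condensed sketch of that setup (field list abridged and reconstructed from memory, not a verbatim excerpt):

    struct perf_event_attr attr = {
        .type           = type,            /* e.g. PERF_TYPE_RAW */
        .size           = sizeof(attr),
        .config         = config,          /* event select / unit mask bits */
        .exclude_user   = exclude_user,    /* guest's OS/USR filtering */
        .exclude_kernel = exclude_kernel,
    };

    attr.sample_period = get_sample_period(pmc, pmc->counter);
    if (pebs)
        attr.precise_ip = pmc_get_pebs_precise_level(pmc);

    event = perf_event_create_kernel_counter(&attr, -1, current,
                                             kvm_perf_overflow, pmc);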
287 static bool pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
289 u64 counter = pmc->counter; in pmc_pause_counter()
293 if (pmc->perf_event && !pmc->is_paused) in pmc_pause_counter()
294 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
302 prev_counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
304 counter += pmc->emulated_counter; in pmc_pause_counter()
305 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
307 pmc->emulated_counter = 0; in pmc_pause_counter()
308 pmc->is_paused = true; in pmc_pause_counter()
310 return pmc->counter < prev_counter; in pmc_pause_counter()
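Note: the return value is an overflow check done by wraparound: folding pmc->emulated_counter into the paused hardware count can wrap past pmc_bitmask(pmc), in which case the new counter is smaller than prev_counter and the caller (reprogram_counter()) injects the overflow via __kvm_perf_overflow(pmc, false). Worked example with a 48-bit counter:

    /*
     *   paused hw count:        prev_counter = 0xfffffffffffe
     *   pending emulated count: emulated_counter = 5
     *
     *   0xfffffffffffe + 5 = 0x1000000000003, masked to 0x3
     *   0x3 < 0xfffffffffffe  ->  return true (overflow must be emulated)
     */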
313 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
315 if (!pmc->perf_event) in pmc_resume_counter()
319 if (is_sampling_event(pmc->perf_event) && in pmc_resume_counter()
320 perf_event_period(pmc->perf_event, in pmc_resume_counter()
321 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
324 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != in pmc_resume_counter()
325 (!!pmc->perf_event->attr.precise_ip)) in pmc_resume_counter()
329 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
330 pmc->is_paused = false; in pmc_resume_counter()
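Note: pmc_resume_counter() returns true only when the existing perf event can be reused as-is, i.e. its sample period was refreshed successfully and its PEBS configuration still matches the guest's pebs_enable bit for this counter; otherwise reprogram_counter() below releases the event and creates a new one. The PEBS check in tabular form:

    /*
     *   pebs_enable bit set   event->attr.precise_ip   reusable?
     *           yes                   != 0                yes
     *           yes                   == 0                no  (recreate)
     *           no                    != 0                no  (recreate)
     *           no                    == 0                yes
     */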
335 static void pmc_release_perf_event(struct kvm_pmc *pmc) in pmc_release_perf_event() argument
337 if (pmc->perf_event) { in pmc_release_perf_event()
338 perf_event_release_kernel(pmc->perf_event); in pmc_release_perf_event()
339 pmc->perf_event = NULL; in pmc_release_perf_event()
340 pmc->current_config = 0; in pmc_release_perf_event()
341 pmc_to_pmu(pmc)->event_count--; in pmc_release_perf_event()
345 static void pmc_stop_counter(struct kvm_pmc *pmc) in pmc_stop_counter() argument
347 if (pmc->perf_event) { in pmc_stop_counter()
348 pmc->counter = pmc_read_counter(pmc); in pmc_stop_counter()
349 pmc_release_perf_event(pmc); in pmc_stop_counter()
353 static void pmc_update_sample_period(struct kvm_pmc *pmc) in pmc_update_sample_period() argument
355 if (!pmc->perf_event || pmc->is_paused || in pmc_update_sample_period()
356 !is_sampling_event(pmc->perf_event)) in pmc_update_sample_period()
359 perf_event_period(pmc->perf_event, in pmc_update_sample_period()
360 get_sample_period(pmc, pmc->counter)); in pmc_update_sample_period()
363 void pmc_write_counter(struct kvm_pmc *pmc, u64 val) in pmc_write_counter() argument
373 pmc->emulated_counter = 0; in pmc_write_counter()
374 pmc->counter += val - pmc_read_counter(pmc); in pmc_write_counter()
375 pmc->counter &= pmc_bitmask(pmc); in pmc_write_counter()
376 pmc_update_sample_period(pmc); in pmc_write_counter()
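Note: pmc->counter deliberately ends up holding an offset rather than the literal written value: pmc_read_counter() (not part of this match set) adds the live perf count on top, so adjusting by "val - pmc_read_counter(pmc)" makes subsequent reads return what the guest wrote without pausing and reprogramming the perf event. A small worked example (numbers illustrative):

    /*
     *   guest writes val = 100 while:
     *     pmc->counter          = 40
     *     pmc->emulated_counter = 0     (cleared on source line 373)
     *     live perf delta       = 25    -> pmc_read_counter() == 65
     *
     *   pmc->counter += 100 - 65        -> pmc->counter == 75
     *   a later pmc_read_counter()      == 75 + 25 + newly counted events
     *                                   == 100 + events since the write
     */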
486 static bool pmc_is_event_allowed(struct kvm_pmc *pmc) in pmc_is_event_allowed() argument
489 struct kvm *kvm = pmc->vcpu->kvm; in pmc_is_event_allowed()
495 if (pmc_is_gp(pmc)) in pmc_is_event_allowed()
496 return is_gp_event_allowed(filter, pmc->eventsel); in pmc_is_event_allowed()
498 return is_fixed_event_allowed(filter, pmc->idx); in pmc_is_event_allowed()
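Note: the filter consulted here is installed by userspace with the KVM_SET_PMU_EVENT_FILTER VM ioctl. A minimal userspace sketch (error handling omitted; vm_fd is assumed to be an existing VM file descriptor, and the event encoding shown is the legacy flags == 0 format):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_pmu_event_filter *f = calloc(1, sizeof(*f) + sizeof(__u64));

    f->action = KVM_PMU_EVENT_ALLOW;   /* only listed events may be programmed */
    f->nevents = 1;
    f->events[0] = 0xc0;               /* instructions retired: event select 0xc0, umask 0 */
    f->fixed_counter_bitmap = 0x1;     /* permit fixed counter 0 */

    ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);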
501 static int reprogram_counter(struct kvm_pmc *pmc) in reprogram_counter() argument
503 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in reprogram_counter()
504 u64 eventsel = pmc->eventsel; in reprogram_counter()
509 emulate_overflow = pmc_pause_counter(pmc); in reprogram_counter()
511 if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) || in reprogram_counter()
512 !pmc_is_event_allowed(pmc)) in reprogram_counter()
516 __kvm_perf_overflow(pmc, false); in reprogram_counter()
521 if (pmc_is_fixed(pmc)) { in reprogram_counter()
523 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in reprogram_counter()
533 if (pmc->current_config == new_config && pmc_resume_counter(pmc)) in reprogram_counter()
536 pmc_release_perf_event(pmc); in reprogram_counter()
538 pmc->current_config = new_config; in reprogram_counter()
540 return pmc_reprogram_counter(pmc, PERF_TYPE_RAW, in reprogram_counter()
547 static bool pmc_is_event_match(struct kvm_pmc *pmc, u64 eventsel) in pmc_is_event_match() argument
563 return !((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB); in pmc_is_event_match()
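Note: AMD64_RAW_EVENT_MASK_NB covers only the event select (including AMD's extended select bits) and the unit mask, so the XOR-and-mask test asks "is this the same hardware event?" while ignoring enable, interrupt, CMASK and CPL bits. For example:

    /*
     *   pmc->eventsel = 0x5100c0   (EN | INT | USR, event 0xc0, umask 0x00)
     *   eventsel      = 0x0000c0   (architectural instructions-retired encoding)
     *
     *   (0x5100c0 ^ 0x0000c0) & AMD64_RAW_EVENT_MASK_NB == 0   ->  match
     *
     * CPL filtering is deliberately left to cpl_is_matched() below.
     */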
566 void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc) in kvm_pmu_recalc_pmc_emulation() argument
568 bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
569 bitmap_clear(pmu->pmc_counting_branches, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
574 * omitting a PMC from a bitmap could result in a missed event if the in kvm_pmu_recalc_pmc_emulation()
577 if (!pmc_is_locally_enabled(pmc)) in kvm_pmu_recalc_pmc_emulation()
580 if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED)) in kvm_pmu_recalc_pmc_emulation()
581 bitmap_set(pmu->pmc_counting_instructions, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
583 if (pmc_is_event_match(pmc, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED)) in kvm_pmu_recalc_pmc_emulation()
584 bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1); in kvm_pmu_recalc_pmc_emulation()
592 struct kvm_pmc *pmc; in kvm_pmu_handle_event() local
605 kvm_for_each_pmc(pmu, pmc, bit, bitmap) { in kvm_pmu_handle_event()
612 if (reprogram_counter(pmc)) in kvm_pmu_handle_event()
613 set_bit(pmc->idx, pmu->reprogram_pmi); in kvm_pmu_handle_event()
624 kvm_for_each_pmc(pmu, pmc, bit, bitmap) in kvm_pmu_handle_event()
625 kvm_pmu_recalc_pmc_emulation(pmu, pmc); in kvm_pmu_handle_event()
636 * i.e. an invalid PMC results in a #GP, not #VMEXIT. in kvm_pmu_check_rdpmc_early()
681 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
690 pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask); in kvm_pmu_rdpmc()
691 if (!pmc) in kvm_pmu_rdpmc()
699 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
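Note: this path handles the guest executing RDPMC, with the counter selector taken from guest ECX (the idx above). A minimal guest-side sketch for reference (on Intel, setting bit 30 of the selector addresses the fixed counters):

    static inline unsigned long long guest_rdpmc(unsigned int selector)
    {
        unsigned int lo, hi;

        asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (selector));
        return lo | ((unsigned long long)hi << 32);
    }

    /* guest_rdpmc(0)        -> GP counter 0
     * guest_rdpmc(1u << 30) -> fixed counter 0 (Intel encoding) */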
728 struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
730 if (pmc) in kvm_pmu_mark_pmc_in_use()
731 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
826 struct kvm_pmc *pmc; in kvm_pmu_reset() local
833 kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) { in kvm_pmu_reset()
834 pmc_stop_counter(pmc); in kvm_pmu_reset()
835 pmc->counter = 0; in kvm_pmu_reset()
836 pmc->emulated_counter = 0; in kvm_pmu_reset()
838 if (pmc_is_gp(pmc)) in kvm_pmu_reset()
839 pmc->eventsel = 0; in kvm_pmu_reset()
911 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
920 kvm_for_each_pmc(pmu, pmc, i, bitmask) { in kvm_pmu_cleanup()
921 if (pmc->perf_event && !pmc_is_locally_enabled(pmc)) in kvm_pmu_cleanup()
922 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
935 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) in kvm_pmu_incr_counter() argument
937 pmc->emulated_counter++; in kvm_pmu_incr_counter()
938 kvm_pmu_request_counter_reprogram(pmc); in kvm_pmu_incr_counter()
941 static inline bool cpl_is_matched(struct kvm_pmc *pmc) in cpl_is_matched() argument
946 if (pmc_is_gp(pmc)) { in cpl_is_matched()
947 config = pmc->eventsel; in cpl_is_matched()
951 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, in cpl_is_matched()
952 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in cpl_is_matched()
964 return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os : in cpl_is_matched()
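Note: the two branches exist because GP and fixed counters express CPL filtering differently; the fixed_ctrl_field() lookup pulls this fixed counter's 4-bit control field out of IA32_FIXED_CTR_CTRL. Layout of that field (per the Intel SDM):

    /*
     *   bit 0  OS        count at CPL 0
     *   bit 1  USR       count at CPL > 0
     *   bit 2  AnyThread
     *   bit 3  PMI       raise an interrupt on overflow
     *
     * GP counters keep the equivalent OS/USR bits directly in their event
     * select (pmc->eventsel), hence the pmc_is_gp() split above.
     */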
973 struct kvm_pmc *pmc; in kvm_pmu_trigger_event() local
988 kvm_for_each_pmc(pmu, pmc, i, bitmap) { in kvm_pmu_trigger_event()
989 if (!pmc_is_event_allowed(pmc) || !cpl_is_matched(pmc)) in kvm_pmu_trigger_event()
992 kvm_pmu_incr_counter(pmc); in kvm_pmu_trigger_event()