Lines Matching full:vcpu

33 static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)  in kvm_vcpu_idx_to_pmc()  argument
35 return &vcpu->arch.pmu.pmc[cnt_idx]; in kvm_vcpu_idx_to_pmc()
84 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_is_64bit() local
87 kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)); in kvm_pmc_is_64bit()
92 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_has_64bit_overflow() local
93 u64 val = kvm_vcpu_read_pmcr(vcpu); in kvm_pmc_has_64bit_overflow()
95 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmc_has_64bit_overflow()
96 return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP; in kvm_pmc_has_64bit_overflow()
125 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_get_pmc_value() local
129 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pmc_value()
147 * @vcpu: The vcpu pointer
150 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) in kvm_pmu_get_counter_value() argument
152 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_get_counter_value()
155 return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx)); in kvm_pmu_get_counter_value()
160 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_set_pmc_value() local
167 if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX && in kvm_pmu_set_pmc_value()
175 val = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32); in kvm_pmu_set_pmc_value()
179 __vcpu_sys_reg(vcpu, reg) = val; in kvm_pmu_set_pmc_value()
187 * @vcpu: The vcpu pointer
191 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) in kvm_pmu_set_counter_value() argument
193 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_set_counter_value()
196 kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false); in kvm_pmu_set_counter_value()
220 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_stop_counter() local
230 __vcpu_sys_reg(vcpu, reg) = val; in kvm_pmu_stop_counter()
237 * @vcpu: The vcpu pointer
240 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_init() argument
243 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
251 * @vcpu: The vcpu pointer
254 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_reset() argument
256 unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu); in kvm_pmu_vcpu_reset()
260 kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i)); in kvm_pmu_vcpu_reset()
265 * @vcpu: The vcpu pointer
268 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_pmu_vcpu_destroy() argument
273 kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i)); in kvm_pmu_vcpu_destroy()
274 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
277 static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu) in kvm_pmu_hyp_counter_mask() argument
281 if (!vcpu_has_nv(vcpu)) in kvm_pmu_hyp_counter_mask()
284 hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2)); in kvm_pmu_hyp_counter_mask()
285 n = vcpu->kvm->arch.pmcr_n; in kvm_pmu_hyp_counter_mask()
305 bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx) in kvm_pmu_counter_is_hyp() argument
307 return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx); in kvm_pmu_counter_is_hyp()
310 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu) in kvm_pmu_accessible_counter_mask() argument
312 u64 mask = kvm_pmu_implemented_counter_mask(vcpu); in kvm_pmu_accessible_counter_mask()
314 if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu)) in kvm_pmu_accessible_counter_mask()
317 return mask & ~kvm_pmu_hyp_counter_mask(vcpu); in kvm_pmu_accessible_counter_mask()
320 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu) in kvm_pmu_implemented_counter_mask() argument
322 u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu)); in kvm_pmu_implemented_counter_mask()
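The mask helpers matched above (kvm_pmu_hyp_counter_mask, kvm_pmu_counter_is_hyp, kvm_pmu_accessible_counter_mask, kvm_pmu_implemented_counter_mask) split the implemented counters between the guest and a range reserved for EL2 via MDCR_EL2.HPMN. Below is a minimal standalone sketch of that split, assuming the usual convention that counters [HPMN, N) belong to EL2; the function names and simplified bitmask math are illustrative stand-ins, not the kernel's helpers, and the cycle counter and HPMN corner cases are ignored.

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t counter_bits(unsigned int n)	/* bits [0, n) */
{
	return n >= 64 ? ~0ULL : (1ULL << n) - 1;
}

/* Counters [hpmn, n) are treated as reserved for EL2. */
static uint64_t hyp_counter_mask(unsigned int hpmn, unsigned int n)
{
	if (hpmn >= n)			/* nothing reserved for EL2 */
		return 0;
	return counter_bits(n) & ~counter_bits(hpmn);
}

static uint64_t accessible_counter_mask(unsigned int hpmn, unsigned int n,
					int guest_is_el2)
{
	uint64_t mask = counter_bits(n);	/* implemented counters */

	/* EL2 sees everything; EL1/EL0 lose the EL2-reserved range. */
	return guest_is_el2 ? mask : mask & ~hyp_counter_mask(hpmn, n);
}

int main(void)
{
	/* N = 6 counters, HPMN = 4: counters 4-5 belong to EL2. */
	printf("hyp:  %#llx\n", (unsigned long long)hyp_counter_mask(4, 6));
	printf("el1:  %#llx\n",
	       (unsigned long long)accessible_counter_mask(4, 6, 0));
	return 0;
}
```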
332 * @vcpu: The vcpu pointer
337 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_enable_counter_mask() argument
340 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_enable_counter_mask()
343 if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val) in kvm_pmu_enable_counter_mask()
352 pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_enable_counter_mask()
366 * @vcpu: The vcpu pointer
371 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_disable_counter_mask() argument
375 if (!kvm_vcpu_has_pmu(vcpu) || !val) in kvm_pmu_disable_counter_mask()
384 pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_disable_counter_mask()
396 static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) in kvm_pmu_overflow_status() argument
398 u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in kvm_pmu_overflow_status()
400 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in kvm_pmu_overflow_status()
406 if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) in kvm_pmu_overflow_status()
407 reg &= kvm_pmu_hyp_counter_mask(vcpu); in kvm_pmu_overflow_status()
413 if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME)) in kvm_pmu_overflow_status()
414 reg &= ~kvm_pmu_hyp_counter_mask(vcpu); in kvm_pmu_overflow_status()
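The matched lines of kvm_pmu_overflow_status() show which overflow bits can actually assert the PMU interrupt: only overflows also enabled in PMINTENSET_EL1, further gated by PMCR_EL0.E for guest counters and MDCR_EL2.HPME for the EL2-reserved counters. A small standalone model of that filtering follows; the trapped register reads are replaced by plain parameters.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the filtering visible above: start from OVSSET & INTENSET,
 * then drop guest counters when PMCR_EL0.E is clear and EL2 counters
 * when MDCR_EL2.HPME is clear. */
static uint64_t overflow_status(uint64_t ovsset, uint64_t intenset,
				bool pmcr_e, bool hpme, uint64_t hyp_mask)
{
	uint64_t reg = ovsset & intenset;

	if (!pmcr_e)		/* guest counters globally disabled */
		reg &= hyp_mask;
	if (!hpme)		/* EL2 counters globally disabled */
		reg &= ~hyp_mask;

	return reg;
}

int main(void)
{
	/* Counter 0 (guest) and counter 5 (hyp) overflowed and are enabled,
	 * but HPME is clear: only counter 0 can raise the interrupt. */
	printf("%#llx\n", (unsigned long long)
	       overflow_status(0x21, 0x21, true, false, 0x30));
	return 0;
}
```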
419 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) in kvm_pmu_update_state() argument
421 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
424 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_update_state()
427 overflow = kvm_pmu_overflow_status(vcpu); in kvm_pmu_update_state()
433 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
434 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, in kvm_pmu_update_state()
440 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) in kvm_pmu_should_notify_user() argument
442 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
443 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
446 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
455 void kvm_pmu_update_run(struct kvm_vcpu *vcpu) in kvm_pmu_update_run() argument
457 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
461 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
467 * @vcpu: The vcpu pointer
472 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_flush_hwstate() argument
474 kvm_pmu_update_state(vcpu); in kvm_pmu_flush_hwstate()
479 * @vcpu: The vcpu pointer
484 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) in kvm_pmu_sync_hwstate() argument
486 kvm_pmu_update_state(vcpu); in kvm_pmu_sync_hwstate()
490 * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
496 struct kvm_vcpu *vcpu; in kvm_pmu_perf_overflow_notify_vcpu() local
498 vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work); in kvm_pmu_perf_overflow_notify_vcpu()
499 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow_notify_vcpu()
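The lines above show why the overflow path needs a second callback: when the perf overflow interrupt runs as an NMI the vcpu cannot be kicked directly, so an irq_work is queued and kvm_pmu_perf_overflow_notify_vcpu() later recovers the enclosing vcpu with container_of() and kicks it from a safe context. The following is a standalone model of that recovery pattern; the struct definitions are simplified stand-ins, not the kernel's.

```c
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work { int pending; };			/* stand-in */
struct kvm_pmu { struct irq_work overflow_work; };	/* stand-in */
struct kvm_vcpu { int id; struct { struct kvm_pmu pmu; } arch; };

static void notify_vcpu(struct irq_work *work)
{
	/* Walk back from the embedded irq_work to the owning vcpu. */
	struct kvm_vcpu *vcpu =
		container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);

	printf("kick vcpu %d from process context\n", vcpu->id);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };

	/* In the kernel this callback runs later, once execution has left
	 * NMI context and kicking the vcpu is safe. */
	notify_vcpu(&vcpu.arch.pmu.overflow_work);
	return 0;
}
```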
507 static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, in kvm_pmu_counter_increment() argument
512 if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) in kvm_pmu_counter_increment()
516 mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_counter_increment()
519 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_counter_increment()
523 type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i)); in kvm_pmu_counter_increment()
524 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_counter_increment()
529 reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1; in kvm_pmu_counter_increment()
532 __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg; in kvm_pmu_counter_increment()
539 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); in kvm_pmu_counter_increment()
542 kvm_pmu_counter_increment(vcpu, BIT(i + 1), in kvm_pmu_counter_increment()
561 * When the perf event overflows, set the overflow status and inform the vcpu.
569 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow() local
585 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx); in kvm_pmu_perf_overflow()
588 kvm_pmu_counter_increment(vcpu, BIT(idx + 1), in kvm_pmu_perf_overflow()
591 if (kvm_pmu_overflow_status(vcpu)) { in kvm_pmu_perf_overflow()
592 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); in kvm_pmu_perf_overflow()
595 kvm_vcpu_kick(vcpu); in kvm_pmu_perf_overflow()
597 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
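The overflow path matched above sets the counter's bit in PMOVSSET_EL0, feeds an increment into the next counter (BIT(idx + 1)) to emulate chained event pairs, and then either kicks the vcpu or defers the kick through the irq_work. Below is a simplified standalone model of that cascade; in the real code the upper counter is only bumped if it is programmed with the CHAIN event and the pair is not already counting in 64-bit mode, which this toy version ignores.

```c
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 4

static uint64_t counters[NR_COUNTERS];
static uint64_t overflow_set;

static void counter_increment(unsigned int idx)
{
	if (idx >= NR_COUNTERS)
		return;

	/* Model a 32-bit counter: add one, wrap, note the overflow. */
	counters[idx] = (counters[idx] + 1) & 0xffffffffULL;
	if (counters[idx] != 0)
		return;

	overflow_set |= 1ULL << idx;

	/* Low half of a chained pair wrapped: bump the high half, the way
	 * the code above feeds BIT(idx + 1) back into the increment path. */
	if ((idx & 1) == 0)
		counter_increment(idx + 1);
}

int main(void)
{
	counters[0] = 0xffffffffULL;	/* about to wrap */
	counter_increment(0);
	printf("ctr0=%llu ctr1=%llu ovs=%#llx\n",
	       (unsigned long long)counters[0],
	       (unsigned long long)counters[1],
	       (unsigned long long)overflow_set);
	return 0;
}
```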
605 * @vcpu: The vcpu pointer
608 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_software_increment() argument
610 kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR); in kvm_pmu_software_increment()
615 * @vcpu: The vcpu pointer
618 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) in kvm_pmu_handle_pmcr() argument
622 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_handle_pmcr()
626 if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)) in kvm_pmu_handle_pmcr()
630 __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P); in kvm_pmu_handle_pmcr()
633 kvm_pmu_enable_counter_mask(vcpu, in kvm_pmu_handle_pmcr()
634 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0)); in kvm_pmu_handle_pmcr()
636 kvm_pmu_disable_counter_mask(vcpu, in kvm_pmu_handle_pmcr()
637 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0)); in kvm_pmu_handle_pmcr()
641 kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0); in kvm_pmu_handle_pmcr()
644 unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu); in kvm_pmu_handle_pmcr()
647 kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true); in kvm_pmu_handle_pmcr()
649 kvm_vcpu_pmu_restore_guest(vcpu); in kvm_pmu_handle_pmcr()
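The kvm_pmu_handle_pmcr() lines above sketch how a guest write to PMCR_EL0 is emulated: the value is stored with the write-only C and P bits cleared, counters are enabled or disabled according to PMCR.E, PMCR.C zeroes the cycle counter, and PMCR.P zeroes the accessible event counters. The following standalone model covers only the store-and-reset part (the enable/disable step needs the perf-event machinery and is left out); bit positions follow the Arm PMU spec (E = bit 0, P = bit 1, C = bit 2), and everything else is a simplification.

```c
#include <stdint.h>
#include <stdio.h>

#define PMCR_E (1ULL << 0)
#define PMCR_P (1ULL << 1)
#define PMCR_C (1ULL << 2)
#define CYCLE_IDX 31

struct toy_pmu {
	uint64_t pmcr;
	uint64_t counters[32];
};

static void handle_pmcr_write(struct toy_pmu *pmu, uint64_t val,
			      uint64_t accessible_mask)
{
	/* C and P read as zero: never store them back. */
	pmu->pmcr = val & ~(PMCR_C | PMCR_P);

	if (val & PMCR_C)			/* cycle counter reset */
		pmu->counters[CYCLE_IDX] = 0;

	if (val & PMCR_P) {			/* event counter reset */
		for (int i = 0; i < 31; i++)
			if (accessible_mask & (1ULL << i))
				pmu->counters[i] = 0;
	}
}

int main(void)
{
	struct toy_pmu pmu = { .counters = { [0] = 123, [31] = 456 } };

	handle_pmcr_write(&pmu, PMCR_E | PMCR_C | PMCR_P, 0x7fffffff);
	printf("pmcr=%#llx ctr0=%llu cyc=%llu\n",
	       (unsigned long long)pmu.pmcr,
	       (unsigned long long)pmu.counters[0],
	       (unsigned long long)pmu.counters[31]);
	return 0;
}
```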
654 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_counter_is_enabled() local
655 unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2); in kvm_pmu_counter_is_enabled()
657 if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx))) in kvm_pmu_counter_is_enabled()
660 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmu_counter_is_enabled()
663 return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E; in kvm_pmu_counter_is_enabled()
686 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_counts_at_el2() local
687 u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2); in kvm_pmc_counts_at_el2()
689 if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD)) in kvm_pmc_counts_at_el2()
701 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_create_perf_event() local
702 struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu; in kvm_pmu_create_perf_event()
713 eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
727 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
728 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
745 if (unlikely(is_hyp_ctxt(vcpu))) in kvm_pmu_create_perf_event()
774 * @vcpu: The vcpu pointer
782 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, in kvm_pmu_set_counter_event_type() argument
785 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); in kvm_pmu_set_counter_event_type()
788 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_set_counter_event_type()
792 __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm); in kvm_pmu_set_counter_event_type()
841 * PMU instance for the core during vcpu init. A dependent use in kvm_pmu_probe_armpmu()
864 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) in kvm_pmu_get_pmceid() argument
866 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
870 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_get_pmceid()
893 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_pmu_get_pmceid()
909 void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_pmu() argument
911 u64 mask = kvm_pmu_implemented_counter_mask(vcpu); in kvm_vcpu_reload_pmu()
913 kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); in kvm_vcpu_reload_pmu()
915 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask; in kvm_vcpu_reload_pmu()
916 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask; in kvm_vcpu_reload_pmu()
917 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask; in kvm_vcpu_reload_pmu()
920 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_enable() argument
922 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_enable()
925 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
933 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
934 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
941 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
943 } else if (kvm_arm_pmu_irq_initialized(vcpu)) { in kvm_arm_pmu_v3_enable()
948 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); in kvm_arm_pmu_v3_enable()
953 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) in kvm_arm_pmu_v3_init() argument
955 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
963 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
966 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_init()
969 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
970 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
975 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
978 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
983 * For one VM the interrupt type must be the same for each vcpu.
985 * while as an SPI it must be a separate number per vcpu.
990 struct kvm_vcpu *vcpu; in pmu_irq_is_valid() local
992 kvm_for_each_vcpu(i, vcpu, kvm) { in pmu_irq_is_valid()
993 if (!kvm_arm_pmu_irq_initialized(vcpu)) in pmu_irq_is_valid()
997 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
1000 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
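The comment and the pmu_irq_is_valid() lines above express the consistency rule for the PMU interrupt: a PPI must use the same interrupt number on every vcpu, while an SPI must be a distinct number per vcpu. Here is a standalone sketch of that check. The GIC INTID ranges (SGI 0-15, PPI 16-31, SPI 32+) follow the GIC architecture; the vcpu array is a simplification, and the real check also skips vcpus whose PMU irq has not been configured yet.

```c
#include <stdbool.h>
#include <stdio.h>

static bool irq_is_ppi(int irq)
{
	return irq >= 16 && irq < 32;
}

static bool pmu_irq_is_valid(const int *vcpu_irq, int nr_vcpus, int irq)
{
	for (int i = 0; i < nr_vcpus; i++) {
		if (irq_is_ppi(irq)) {
			if (vcpu_irq[i] != irq)	/* PPI: must match everywhere */
				return false;
		} else {
			if (vcpu_irq[i] == irq)	/* SPI: must not be shared */
				return false;
		}
	}
	return true;
}

int main(void)
{
	int ppi_irqs[2] = { 23, 23 };
	int spi_irqs[2] = { 40, 41 };

	printf("%d\n", pmu_irq_is_valid(ppi_irqs, 2, 23));	/* 1: same PPI */
	printf("%d\n", pmu_irq_is_valid(spi_irqs, 2, 40));	/* 0: SPI reused */
	return 0;
}
```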
1054 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) in kvm_arm_pmu_v3_set_pmu() argument
1056 struct kvm *kvm = vcpu->kvm; in kvm_arm_pmu_v3_set_pmu()
1084 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_set_attr() argument
1086 struct kvm *kvm = vcpu->kvm; in kvm_arm_pmu_v3_set_attr()
1090 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_set_attr()
1093 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
1114 if (kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_set_attr()
1118 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
1178 return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id); in kvm_arm_pmu_v3_set_attr()
1181 return kvm_arm_pmu_v3_init(vcpu); in kvm_arm_pmu_v3_set_attr()
1187 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_get_attr() argument
1194 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1197 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_get_attr()
1200 if (!kvm_arm_pmu_irq_initialized(vcpu)) in kvm_arm_pmu_v3_get_attr()
1203 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1211 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_pmu_v3_has_attr() argument
1218 if (kvm_vcpu_has_pmu(vcpu)) in kvm_arm_pmu_v3_has_attr()
1237 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1238 * @vcpu: The vcpu pointer
1240 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) in kvm_vcpu_read_pmcr() argument
1242 u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); in kvm_vcpu_read_pmcr()
1244 return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N); in kvm_vcpu_read_pmcr()
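kvm_vcpu_read_pmcr() above returns the stored PMCR_EL0 with its N field overridden by the VM-wide counter count (kvm->arch.pmcr_n). A tiny standalone model of that field replacement follows; the N field sits at bits [15:11] per the Arm PMU spec, and the helper name is a local stand-in for u64_replace_bits().

```c
#include <stdint.h>
#include <stdio.h>

#define PMCR_N_SHIFT	11
#define PMCR_N_MASK	(0x1fULL << PMCR_N_SHIFT)	/* bits [15:11] */

/* Clear the old N field and splice in the VM-wide counter count. */
static uint64_t pmcr_replace_n(uint64_t pmcr, uint64_t nr_counters)
{
	return (pmcr & ~PMCR_N_MASK) |
	       ((nr_counters << PMCR_N_SHIFT) & PMCR_N_MASK);
}

int main(void)
{
	uint64_t pmcr = 0x41;	/* some stored PMCR value with a stale N */

	printf("%#llx\n", (unsigned long long)pmcr_replace_n(pmcr, 6));
	return 0;
}
```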
1247 void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) in kvm_pmu_nested_transition() argument
1253 if (!kvm_vcpu_has_pmu(vcpu)) in kvm_pmu_nested_transition()
1256 mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in kvm_pmu_nested_transition()
1258 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_nested_transition()
1273 kvm_vcpu_pmu_restore_guest(vcpu); in kvm_pmu_nested_transition()