Lines Matching +full:ptimer +full:handle (arch/arm64/kvm/arch_timer.c)

1 // SPDX-License-Identifier: GPL-2.0-only
69 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_ctl()
88 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_cval()
107 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_set_ctl()
129 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_set_cval()
151 if (!ctxt->offset.vm_offset) { in timer_set_offset()
156 WRITE_ONCE(*ctxt->offset.vm_offset, offset); in timer_set_offset()
161 return timecounter->cc->read(timecounter->cc); in kvm_phys_timer_read()
168 map->direct_vtimer = vcpu_hvtimer(vcpu); in get_timer_map()
169 map->direct_ptimer = vcpu_hptimer(vcpu); in get_timer_map()
170 map->emul_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
171 map->emul_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
173 map->direct_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
174 map->direct_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
175 map->emul_vtimer = vcpu_hvtimer(vcpu); in get_timer_map()
176 map->emul_ptimer = vcpu_hptimer(vcpu); in get_timer_map()
179 map->direct_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
180 map->direct_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
181 map->emul_vtimer = NULL; in get_timer_map()
182 map->emul_ptimer = NULL; in get_timer_map()
184 map->direct_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
185 map->direct_ptimer = NULL; in get_timer_map()
186 map->emul_vtimer = NULL; in get_timer_map()
187 map->emul_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
190 trace_kvm_get_timer_map(vcpu->vcpu_id, map); in get_timer_map()
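
A sketch of the selection logic these get_timer_map() hits trace out, i.e. which timer contexts are "direct" (backed by the hardware timers) and which are "emulated" (driven by an hrtimer). This is a reconstruction from the matched assignments; the branch predicates (vcpu_has_nv(), is_hyp_ctxt(), has_vhe()) are assumptions about the surrounding, unmatched lines:

struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};

static void get_timer_map_sketch(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (vcpu_has_nv(vcpu)) {
		if (is_hyp_ctxt(vcpu)) {
			/* Guest hypervisor: its EL2 timers get the hardware. */
			map->direct_vtimer = vcpu_hvtimer(vcpu);
			map->direct_ptimer = vcpu_hptimer(vcpu);
			map->emul_vtimer = vcpu_vtimer(vcpu);
			map->emul_ptimer = vcpu_ptimer(vcpu);
		} else {
			/* Nested guest: EL1 timers direct, EL2 timers emulated. */
			map->direct_vtimer = vcpu_vtimer(vcpu);
			map->direct_ptimer = vcpu_ptimer(vcpu);
			map->emul_vtimer = vcpu_hvtimer(vcpu);
			map->emul_ptimer = vcpu_hptimer(vcpu);
		}
	} else if (has_vhe()) {
		/* VHE: both EL1 timers can be handled in hardware. */
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_vtimer = NULL;
		map->emul_ptimer = NULL;
	} else {
		/* No direct ptimer available: emulate it. */
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_vtimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}
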
234 if (userspace_irqchip(vcpu->kvm) && in kvm_arch_timer_handler()
244 u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); in kvm_counter_compute_delta()
249 ns = cyclecounter_cyc2ns(timecounter->cc, in kvm_counter_compute_delta()
250 val - now, in kvm_counter_compute_delta()
251 timecounter->mask, in kvm_counter_compute_delta()
252 &timer_ctx->ns_frac); in kvm_counter_compute_delta()
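
kvm_counter_compute_delta() turns the distance between the offset-adjusted counter and a compare value into nanoseconds. A minimal sketch built from the matched lines, assuming the file-scope timecounter and the per-context ns_frac remainder they reference:

static u64 compute_delta_sketch(struct arch_timer_context *timer_ctx, u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		/* Scale counter cycles to ns, carrying the fraction across calls. */
		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timer_ctx->ns_frac);
		return ns;
	}

	return 0;
}
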
266 WARN_ON(timer_ctx && timer_ctx->loaded); in kvm_timer_irq_can_fire()
298 struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i]; in kvm_timer_earliest_exp()
300 WARN(ctx->loaded, "timer %d loaded\n", i); in kvm_timer_earliest_exp()
346 vcpu = ctx->vcpu; in kvm_hrtimer_expire()
375 if (timer_ctx->loaded) { in kvm_timer_should_fire()
402 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); in kvm_timer_should_fire()
418 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_timer_update_run() local
419 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_timer_update_run()
422 regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER | in kvm_timer_update_run()
425 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER; in kvm_timer_update_run()
426 if (kvm_timer_should_fire(ptimer)) in kvm_timer_update_run()
427 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER; in kvm_timer_update_run()
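
With a userspace irqchip, kvm_timer_update_run() publishes the timer output lines in the shared kvm_run sync regs so the VMM can observe them on exit. Sketch of the clear-then-set pattern visible above:

static void update_run_sketch(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Drop both timer bits, then set the ones currently asserted. */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
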
439 if (is_hyp_ctxt(ctx->vcpu) && in kvm_timer_update_status()
440 (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) { in kvm_timer_update_status()
452 timer_ctx->irq.level = new_level; in kvm_timer_update_irq()
453 trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx), in kvm_timer_update_irq()
454 timer_ctx->irq.level); in kvm_timer_update_irq()
456 if (userspace_irqchip(vcpu->kvm)) in kvm_timer_update_irq()
459 kvm_vgic_inject_irq(vcpu->kvm, vcpu, in kvm_timer_update_irq()
461 timer_ctx->irq.level, in kvm_timer_update_irq()
472 if (should_fire != ctx->irq.level) in timer_emulate()
473 kvm_timer_update_irq(ctx->vcpu, should_fire, ctx); in timer_emulate()
485 soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx)); in timer_emulate()
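
timer_emulate() first syncs the interrupt line with the timer state, then arms the per-context hrtimer only if the timer is enabled but not yet due. A condensed sketch (tracing and status updates omitted):

static void timer_emulate_sketch(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	if (should_fire != ctx->irq.level)
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);

	/* Already firing, or unable to fire at all: no soft timer needed. */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
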
501 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); in timer_save_state()
505 if (!timer->enabled) in timer_save_state()
510 if (!ctx->loaded) in timer_save_state()
522 cval -= timer_get_offset(ctx); in timer_save_state()
535 * physical counter of non-VHE case. in timer_save_state()
552 cval -= timer_get_offset(ctx); in timer_save_state()
568 ctx->loaded = false; in timer_save_state()
576 * interrupt to handle.
600 soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); in kvm_timer_blocking()
607 soft_timer_cancel(&timer->bg_timer); in kvm_timer_unblocking()
612 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); in timer_restore_state()
616 if (!timer->enabled) in timer_restore_state()
621 if (ctx->loaded) in timer_restore_state()
657 ctx->loaded = true; in timer_restore_state()
665 r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active); in set_timer_irq_phys_active()
671 struct kvm_vcpu *vcpu = ctx->vcpu; in kvm_timer_vcpu_load_gic()
680 kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx); in kvm_timer_vcpu_load_gic()
682 if (irqchip_in_kernel(vcpu->kvm)) in kvm_timer_vcpu_load_gic()
685 phys_active |= ctx->irq.level; in kvm_timer_vcpu_load_gic()
709 * being de-asserted, we unmask the interrupt again so that we exit in kvm_timer_vcpu_load_nogic()
712 if (vtimer->irq.level) in kvm_timer_vcpu_load_nogic()
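
The kvm_timer_vcpu_load_nogic() hit reflects the userspace-irqchip workaround: the host vtimer PPI is masked while the virtual line is high so the guest can make progress, and unmasked once the line drops so the next expiry exits to the host. Sketch, assuming the file-scope host_vtimer_irq and its saved trigger flags:

static void load_nogic_sketch(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/* Bring the output level up to date before restoring HW state. */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
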
732 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_timer_vcpu_load_nested_switch()
745 hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer)); in kvm_timer_vcpu_load_nested_switch()
747 kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer)); in kvm_timer_vcpu_load_nested_switch()
748 kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer)); in kvm_timer_vcpu_load_nested_switch()
751 map->direct_vtimer->host_timer_irq, in kvm_timer_vcpu_load_nested_switch()
752 timer_irq(map->direct_vtimer), in kvm_timer_vcpu_load_nested_switch()
756 map->direct_ptimer->host_timer_irq, in kvm_timer_vcpu_load_nested_switch()
757 timer_irq(map->direct_ptimer), in kvm_timer_vcpu_load_nested_switch()
793 * None of the trapping is required when running in non-HYP context, in timer_set_traps()
807 * - Either we have CNTPOFF (yay!) or the offset is 0: in timer_set_traps()
810 * - or neither of these condition apply: in timer_set_traps()
814 if (!has_cntpoff() && timer_get_offset(map->direct_ptimer)) in timer_set_traps()
821 if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer)) in timer_set_traps()
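
The timer_set_traps() comment above lays out the two ways of handling a physical offset; the matched conditions are the corresponding decision points. Reduced to predicates (the actual CNTHCTL_EL2 set/clear masks are not in the matches and are omitted):

/* No CNTPOFF and a non-zero ptimer offset: trap physical counter/timer
 * accesses and apply the offset in the trap handler. */
bool trap_ptimer = !has_cntpoff() &&
		   timer_get_offset(map->direct_ptimer);

/* Erratum path: a broken CNTVOFF forces trapping of the virtual counter
 * whenever a non-zero vtimer offset is in use. */
bool trap_vtimer = has_broken_cntvoff() &&
		   timer_get_offset(map->direct_vtimer);
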
870 if (unlikely(!timer->enabled)) in kvm_timer_vcpu_load()
902 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_timer_should_notify_user() local
903 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_timer_should_notify_user()
906 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_timer_should_notify_user()
909 vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER; in kvm_timer_should_notify_user()
910 plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER; in kvm_timer_should_notify_user()
913 kvm_timer_should_fire(ptimer) != plevel; in kvm_timer_should_notify_user()
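
kvm_timer_should_notify_user() compares the freshly computed timer levels with what was last published to userspace; a notification is only needed when they differ. Sketch:

static bool should_notify_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	/* An in-kernel irqchip drives the lines itself. */
	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vcpu_vtimer(vcpu)) != vlevel ||
	       kvm_timer_should_fire(vcpu_ptimer(vcpu)) != plevel;
}
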
921 if (unlikely(!timer->enabled)) in kvm_timer_vcpu_put()
936 * In any case, we re-schedule the hrtimer for the physical timer when in kvm_timer_vcpu_put()
940 soft_timer_cancel(&map.emul_vtimer->hrtimer); in kvm_timer_vcpu_put()
942 soft_timer_cancel(&map.emul_ptimer->hrtimer); in kvm_timer_vcpu_put()
968 * A non-VHE guest hypervisor doesn't have any direct access in kvm_timer_sync_nested()
979 soft_timer_cancel(&map.emul_vtimer->hrtimer); in kvm_timer_sync_nested()
980 soft_timer_cancel(&map.emul_ptimer->hrtimer); in kvm_timer_sync_nested()
987 * With a userspace irqchip we have to check if the guest de-asserted the
1008 if (unlikely(!timer->enabled)) in kvm_timer_sync_user()
1011 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_timer_sync_user()
1037 struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset; in kvm_timer_vcpu_reset()
1039 offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2); in kvm_timer_vcpu_reset()
1040 offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset; in kvm_timer_vcpu_reset()
1043 if (timer->enabled) { in kvm_timer_vcpu_reset()
1048 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_timer_vcpu_reset()
1056 soft_timer_cancel(&map.emul_vtimer->hrtimer); in kvm_timer_vcpu_reset()
1058 soft_timer_cancel(&map.emul_ptimer->hrtimer); in kvm_timer_vcpu_reset()
1064 struct kvm *kvm = vcpu->kvm; in timer_context_init()
1066 ctxt->vcpu = vcpu; in timer_context_init()
1069 ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset; in timer_context_init()
1071 ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset; in timer_context_init()
1073 hrtimer_setup(&ctxt->hrtimer, kvm_hrtimer_expire, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in timer_context_init()
1078 ctxt->host_timer_irq = host_ptimer_irq; in timer_context_init()
1082 ctxt->host_timer_irq = host_vtimer_irq; in timer_context_init()
1095 if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) { in kvm_timer_vcpu_init()
1100 hrtimer_setup(&timer->bg_timer, kvm_bg_timer_expire, CLOCK_MONOTONIC, in kvm_timer_vcpu_init()
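
Both hrtimer_setup() hits use HRTIMER_MODE_ABS_HARD, so the expiry callbacks run in hard-IRQ context. A sketch of the per-context wiring with a simplified expiry handler; the forward-on-early-expiry step is an assumption about how kvm_hrtimer_expire() copes with host clock adjustments:

static enum hrtimer_restart hrtimer_expire_sketch(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx =
		container_of(hrt, struct arch_timer_context, hrtimer);
	u64 ns;

	/* If the guest-visible deadline hasn't actually passed yet,
	 * push the hrtimer out by the remaining delta and retry. */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(ctx->vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

/* Registration, as in timer_context_init() above: */
hrtimer_setup(&ctxt->hrtimer, hrtimer_expire_sketch, CLOCK_MONOTONIC,
	      HRTIMER_MODE_ABS_HARD);
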
1107 kvm->arch.timer_data.ppi[i] = default_ppi[i]; in kvm_timer_init_vm()
1135 &vcpu->kvm->arch.flags)) { in kvm_arm_timer_set_reg()
1137 timer_set_offset(timer, kvm_phys_timer_read() - value); in kvm_arm_timer_set_reg()
1150 &vcpu->kvm->arch.flags)) { in kvm_arm_timer_set_reg()
1152 timer_set_offset(timer, kvm_phys_timer_read() - value); in kvm_arm_timer_set_reg()
1161 return -1; in kvm_arm_timer_set_reg()
1205 return (u64)-1; in kvm_arm_timer_get_reg()
1216 val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer); in kvm_arm_timer_read()
1229 val = kvm_phys_timer_read() - timer_get_offset(timer); in kvm_arm_timer_read()
1233 val = *timer->offset.vcpu_offset; in kvm_arm_timer_read()
1275 timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val); in kvm_arm_timer_write()
1287 *timer->offset.vcpu_offset = val; in kvm_arm_timer_write()
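
The kvm_arm_timer_read()/kvm_arm_timer_write() hits all reduce to the architectural TVAL/CVAL relationship against the offset-adjusted counter: TVAL = CVAL - (CNT - offset), and a TVAL write sign-extends the 32-bit value onto the current counter. Sketch:

/* TVAL read: signed ticks remaining until the compare value. */
static u64 tval_read_sketch(struct arch_timer_context *timer)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer);

	return timer_get_cval(timer) - now;
}

/* TVAL write: CVAL = now + sign-extended TVAL. */
static void tval_write_sketch(struct arch_timer_context *timer, u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer);

	timer_set_cval(timer, now + (s32)val);
}
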
1306 soft_timer_cancel(&timer->hrtimer); in kvm_arm_timer_write_sysreg()
1350 d = d->parent_data; in timer_irq_ack()
1351 if (d->chip->irq_ack) in timer_irq_ack()
1352 d->chip->irq_ack(d); in timer_irq_ack()
1399 if (info->virtual_irq <= 0) { in kvm_irq_init()
1401 info->virtual_irq); in kvm_irq_init()
1402 return -ENODEV; in kvm_irq_init()
1405 host_vtimer_irq = info->virtual_irq; in kvm_irq_init()
1412 fwnode = irq_domain_alloc_named_fwnode("kvm-timer"); in kvm_irq_init()
1414 return -ENOMEM; in kvm_irq_init()
1416 /* Assume both vtimer and ptimer in the same parent */ in kvm_irq_init()
1418 domain = irq_domain_create_hierarchy(data->domain, 0, in kvm_irq_init()
1423 return -ENOMEM; in kvm_irq_init()
1431 if (info->physical_irq > 0) { in kvm_irq_init()
1432 host_ptimer_irq = info->physical_irq; in kvm_irq_init()
1480 timecounter = &info->timecounter; in kvm_timer_hyp_init()
1482 if (!timecounter->cc) { in kvm_timer_hyp_init()
1484 return -ENODEV; in kvm_timer_hyp_init()
1516 if (info->physical_irq > 0) { in kvm_timer_hyp_init()
1518 "kvm guest ptimer", kvm_get_running_vcpus()); in kvm_timer_hyp_init()
1520 kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n", in kvm_timer_hyp_init()
1537 info->physical_irq); in kvm_timer_hyp_init()
1538 err = -ENODEV; in kvm_timer_hyp_init()
1546 if (info->physical_irq > 0) in kvm_timer_hyp_init()
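
The kvm_timer_hyp_init() hits show the host ptimer PPI being claimed only when the irqchip actually provides one. The request pattern, reconstructed from the matched lines (error unwinding elided):

if (info->physical_irq > 0) {
	err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
				 "kvm guest ptimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
			host_ptimer_irq, err);
		return err;
	}
}
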
1557 soft_timer_cancel(&timer->bg_timer); in kvm_timer_vcpu_terminate()
1565 mutex_lock(&vcpu->kvm->arch.config_lock); in timer_irqs_are_valid()
1586 set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags); in timer_irqs_are_valid()
1588 mutex_unlock(&vcpu->kvm->arch.config_lock); in timer_irqs_are_valid()
1620 if (timer->enabled) in kvm_timer_enable()
1624 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_timer_enable()
1633 return -EINVAL; in kvm_timer_enable()
1639 map.direct_vtimer->host_timer_irq, in kvm_timer_enable()
1647 map.direct_ptimer->host_timer_irq, in kvm_timer_enable()
1656 timer->enabled = 1; in kvm_timer_enable()
1669 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_timer_set_attr()
1672 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_timer_set_attr()
1673 return -EINVAL; in kvm_arm_timer_set_attr()
1676 return -EFAULT; in kvm_arm_timer_set_attr()
1679 return -EINVAL; in kvm_arm_timer_set_attr()
1681 mutex_lock(&vcpu->kvm->arch.config_lock); in kvm_arm_timer_set_attr()
1684 &vcpu->kvm->arch.flags)) { in kvm_arm_timer_set_attr()
1685 ret = -EBUSY; in kvm_arm_timer_set_attr()
1689 switch (attr->attr) { in kvm_arm_timer_set_attr()
1703 ret = -ENXIO; in kvm_arm_timer_set_attr()
1712 vcpu->kvm->arch.timer_data.ppi[idx] = irq; in kvm_arm_timer_set_attr()
1715 mutex_unlock(&vcpu->kvm->arch.config_lock); in kvm_arm_timer_set_attr()
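
kvm_arm_timer_set_attr() copies the PPI number from the userspace pointer in attr->addr and stores it in the per-VM ppi[] table, refusing with -EBUSY once the PPIs have been made immutable. A sketch of the userspace counterpart via the vcpu device-attribute API (vcpu_fd and the PPI number are placeholders):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_ptimer_irq(int vcpu_fd, int ppi)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_TIMER_CTRL,
		.attr  = KVM_ARM_VCPU_TIMER_IRQ_PTIMER,
		.addr  = (__u64)(unsigned long)&ppi,
	};

	/* Fails with -EBUSY once the vCPUs have started running. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
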
1721 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_timer_get_attr()
1725 switch (attr->attr) { in kvm_arm_timer_get_attr()
1739 return -ENXIO; in kvm_arm_timer_get_attr()
1748 switch (attr->attr) { in kvm_arm_timer_has_attr()
1756 return -ENXIO; in kvm_arm_timer_has_attr()
1764 if (offset->reserved) in kvm_vm_ioctl_set_counter_offset()
1765 return -EINVAL; in kvm_vm_ioctl_set_counter_offset()
1767 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_counter_offset()
1770 set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags); in kvm_vm_ioctl_set_counter_offset()
1778 kvm->arch.timer_data.voffset = offset->counter_offset; in kvm_vm_ioctl_set_counter_offset()
1779 kvm->arch.timer_data.poffset = offset->counter_offset; in kvm_vm_ioctl_set_counter_offset()
1783 ret = -EBUSY; in kvm_vm_ioctl_set_counter_offset()
1786 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_counter_offset()
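
kvm_vm_ioctl_set_counter_offset() applies one offset to both the virtual and physical counter views VM-wide, returning -EBUSY when the vcpus cannot all be locked. Sketch of the VM-ioctl caller side (vm_fd and the offset value are placeholders):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_counter_offset(int vm_fd, __u64 off)
{
	struct kvm_arm_counter_offset arg = {
		.counter_offset = off,
		.reserved = 0,	/* must be zero, per the check above */
	};

	/* Best done early, before any vCPU has run. */
	return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &arg);
}
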