Lines matching refs: kvm

Cross-reference hits from KVM's x86 interrupt-routing code. Each hit reads <line> <source> in <function>(), and a trailing 'argument' or 'local' tag marks whether the matched kvm reference is a function parameter or a local variable.
82 if (pic_in_kernel(v->kvm)) in kvm_cpu_has_extint()
83 return v->kvm->arch.vpic->output; in kvm_cpu_has_extint()
86 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_has_extint()
140 return v->kvm->arch.xen.upcall_vector; in kvm_cpu_get_extint()
144 if (pic_in_kernel(v->kvm)) in kvm_cpu_get_extint()
145 return kvm_pic_read_irq(v->kvm); /* PIC */ in kvm_cpu_get_extint()
148 WARN_ON_ONCE(!irqchip_split(v->kvm)); in kvm_cpu_get_extint()
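The first cluster of hits is the ExtINT query/fetch pair. Read together, the matched lines give the priority order inside kvm_cpu_get_extint(): a pending Xen upcall wins, then an in-kernel PIC, and anything left over must be the split-irqchip case. A minimal reconstruction around the matched lines; the kvm_xen_has_interrupt() guard is inferred from context, not shown in the hits:

    /* Para-virtualized Xen upcalls take priority over the emulated PIC. */
    if (kvm_xen_has_interrupt(v))
            return v->kvm->arch.xen.upcall_vector;

    /* Fully in-kernel irqchip: pull the vector out of the emulated PIC. */
    if (pic_in_kernel(v->kvm))
            return kvm_pic_read_irq(v->kvm); /* PIC */

    /* No in-kernel PIC, so the irqchip had better be the split model. */
    WARN_ON_ONCE(!irqchip_split(v->kvm));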
186 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) in kvm_arch_irqfd_allowed() argument
190 return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm); in kvm_arch_irqfd_allowed()
193 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument
195 return irqchip_in_kernel(kvm); in kvm_arch_irqchip_in_kernel()
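kvm_arch_irqfd_allowed() is short enough to reconstruct almost entirely from its one hit; the KVM_IRQFD_FLAG_RESAMPLE test is an assumption based on the resample local:

    bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
    {
            bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;

            /*
             * Resampling irqfds emulate level-triggered lines and need the
             * full in-kernel irqchip; plain edge-style irqfds work with any
             * in-kernel variant, including the split (LAPIC-only) model.
             */
            return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm);
    }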
198 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, in kvm_irq_delivery_to_apic() argument
206 if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) in kvm_irq_delivery_to_apic()
217 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_irq_delivery_to_apic()
246 lowest = kvm_get_vcpu(kvm, idx); in kvm_irq_delivery_to_apic()
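kvm_irq_delivery_to_apic() shows the usual two-tier pattern: try the lock-free fast path first, and fall back to iterating every vCPU only when the fast path cannot resolve the destination, e.g. for lowest-priority arbitration. A sketch of the shape implied by the hits, with the match bookkeeping elided:

    if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
            return r;       /* fast path resolved delivery on its own */

    /* Slow path: test every vCPU against the destination fields. */
    kvm_for_each_vcpu(i, vcpu, kvm) {
            /* ... match dest_id/dest_mode, track lowest-priority vCPU ... */
    }

    /* Lowest-priority delivery picks one of the matching vCPUs. */
    lowest = kvm_get_vcpu(kvm, idx);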
255 static void kvm_msi_to_lapic_irq(struct kvm *kvm, in kvm_msi_to_lapic_irq() argument
263 trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ? in kvm_msi_to_lapic_irq()
266 irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format); in kvm_msi_to_lapic_irq()
276 static inline bool kvm_msi_route_invalid(struct kvm *kvm, in kvm_msi_route_invalid() argument
279 return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff); in kvm_msi_route_invalid()
283 struct kvm *kvm, int irq_source_id, int level, bool line_status) in kvm_set_msi() argument
287 if (kvm_msi_route_invalid(kvm, e)) in kvm_set_msi()
293 kvm_msi_to_lapic_irq(kvm, e, &irq); in kvm_set_msi()
295 return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL); in kvm_set_msi()
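kvm_set_msi() is the ->set callback for MSI routing entries, and the three hits are essentially the whole function. Reconstruction; the !level early-out is an assumption (MSIs are edge-triggered, so only the assert edge delivers anything):

    int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                    struct kvm *kvm, int irq_source_id, int level,
                    bool line_status)
    {
            struct kvm_lapic_irq irq;

            if (kvm_msi_route_invalid(kvm, e))
                    return -EINVAL;

            if (!level)     /* edge-triggered: nothing to do on deassert */
                    return -1;

            kvm_msi_to_lapic_irq(kvm, e, &irq);

            return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
    }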
299 struct kvm *kvm, int irq_source_id, int level, in kvm_arch_set_irq_inatomic() argument
308 return kvm_hv_synic_set_irq(e, kvm, irq_source_id, level, in kvm_arch_set_irq_inatomic()
313 if (kvm_msi_route_invalid(kvm, e)) in kvm_arch_set_irq_inatomic()
316 kvm_msi_to_lapic_irq(kvm, e, &irq); in kvm_arch_set_irq_inatomic()
318 if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) in kvm_arch_set_irq_inatomic()
327 return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm); in kvm_arch_set_irq_inatomic()
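kvm_arch_set_irq_inatomic() is the variant that may be called without sleeping, e.g. straight from an irqfd wakeup. The hits imply a switch over the routing entry type; the skeleton below is a reconstruction, and the -EWOULDBLOCK fallback, which tells the caller to retry from a schedulable context, is an assumption:

    int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                                  struct kvm *kvm, int irq_source_id,
                                  int level, bool line_status)
    {
            struct kvm_lapic_irq irq;
            int r;

            switch (e->type) {
            case KVM_IRQ_ROUTING_HV_SINT:
                    return kvm_hv_synic_set_irq(e, kvm, irq_source_id, level,
                                                line_status);
            case KVM_IRQ_ROUTING_MSI:
                    if (kvm_msi_route_invalid(kvm, e))
                            return -EINVAL;
                    kvm_msi_to_lapic_irq(kvm, e, &irq);
                    if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
                            return r;
                    break;
            case KVM_IRQ_ROUTING_XEN_EVTCHN:
                    return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
            default:
                    break;
            }
            return -EWOULDBLOCK;    /* assumed: punt to the non-atomic path */
    }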
336 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, in kvm_vm_ioctl_irq_line() argument
339 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
342 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
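The KVM_IRQ_LINE ioctl handler is fully determined by its two hits plus the obvious error path; only the -ENXIO value is assumed:

    int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                              bool line_status)
    {
            if (!irqchip_in_kernel(kvm))
                    return -ENXIO;  /* assumed errno */

            irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                            irq_event->irq, irq_event->level,
                                            line_status);
            return 0;
    }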
348 bool kvm_arch_can_set_irq_routing(struct kvm *kvm) in kvm_arch_can_set_irq_routing() argument
350 return irqchip_in_kernel(kvm); in kvm_arch_can_set_irq_routing()
353 int kvm_set_routing_entry(struct kvm *kvm, in kvm_set_routing_entry() argument
364 if (irqchip_split(kvm)) in kvm_set_routing_entry()
393 if (kvm_msi_route_invalid(kvm, e)) in kvm_set_routing_entry()
405 return kvm_xen_setup_evtchn(kvm, e, ue); in kvm_set_routing_entry()
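kvm_set_routing_entry() translates a userspace kvm_irq_routing_entry (ue) into the kernel-side entry (e). The hits mark three of its cases; a sketch of the dispatch, with the per-case field setup elided:

    switch (ue->type) {
    case KVM_IRQ_ROUTING_IRQCHIP:
            /* Split irqchip: GSI routing lives in userspace, reject it. */
            if (irqchip_split(kvm))
                    return -EINVAL;
            /* ... bind e->set to the PIC/IOAPIC set callback ... */
            break;
    case KVM_IRQ_ROUTING_MSI:
            /* ... copy the MSI address/data into e ... */
            if (kvm_msi_route_invalid(kvm, e))
                    return -EINVAL;
            break;
    case KVM_IRQ_ROUTING_XEN_EVTCHN:
            return kvm_xen_setup_evtchn(kvm, e, ue);
    default:
            return -EINVAL;
    }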
414 bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, in kvm_intr_is_single_vcpu() argument
421 if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu)) in kvm_intr_is_single_vcpu()
424 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_intr_is_single_vcpu()
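kvm_intr_is_single_vcpu() repeats the two-tier shape seen in kvm_irq_delivery_to_apic() above: try the fast path, then scan all vCPUs counting destination matches. A sketch, with the match counting elided:

    if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
            return true;

    kvm_for_each_vcpu(i, vcpu, kvm) {
            /* ... count vCPUs matching the destination; bail on a second ... */
    }

The posted-interrupt path further down relies on this answer to decide whether an interrupt may bypass the host.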
472 struct kvm *kvm = vcpu->kvm; in kvm_scan_ioapic_routes() local
478 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_scan_ioapic_routes()
479 table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in kvm_scan_ioapic_routes()
481 kvm->arch.nr_reserved_ioapic_pins); in kvm_scan_ioapic_routes()
489 kvm_msi_to_lapic_irq(vcpu->kvm, entry, &irq); in kvm_scan_ioapic_routes()
498 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_scan_ioapic_routes()
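kvm_scan_ioapic_routes() shows KVM's standard read side for the routing table: the table is protected by irq_srcu, so readers bracket the walk with srcu_read_lock()/srcu_read_unlock() and fetch the pointer through srcu_dereference(). Reconstruction of the bracketing; the min_t() completion of the truncated hit and the loop body are assumptions:

    idx = srcu_read_lock(&kvm->irq_srcu);
    table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
    nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
                           kvm->arch.nr_reserved_ioapic_pins);
    for (i = 0; i < nr_ioapic_pins; i++) {
            /* ... for each routing entry bound to pin i ... */
            kvm_msi_to_lapic_irq(vcpu->kvm, entry, &irq);
            /* ... record the vector for EOI exiting ... */
    }
    srcu_read_unlock(&kvm->irq_srcu, idx);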
501 void kvm_arch_irq_routing_update(struct kvm *kvm) in kvm_arch_irq_routing_update() argument
504 kvm_hv_irq_routing_update(kvm); in kvm_arch_irq_routing_update()
507 if (irqchip_split(kvm)) in kvm_arch_irq_routing_update()
508 kvm_make_scan_ioapic_request(kvm); in kvm_arch_irq_routing_update()
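The routing-update hook is tiny and its three hits are the whole body: Hyper-V SynIC routes are always recomputed, and with a split irqchip every vCPU must rescan the I/O APIC routes it shadows:

    void kvm_arch_irq_routing_update(struct kvm *kvm)
    {
            kvm_hv_irq_routing_update(kvm);

            if (irqchip_split(kvm))
                    kvm_make_scan_ioapic_request(kvm);
    }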
515 struct kvm *kvm = irqfd->kvm; in kvm_pi_update_irte() local
520 if (WARN_ON_ONCE(!irqchip_in_kernel(kvm) || !kvm_arch_has_irq_bypass())) in kvm_pi_update_irte()
524 kvm_msi_to_lapic_irq(kvm, entry, &irq); in kvm_pi_update_irte()
538 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || in kvm_pi_update_irte()
546 r = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi, in kvm_pi_update_irte()
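kvm_pi_update_irte() decides whether a device interrupt can be posted directly to a vCPU. Posting requires the translated MSI to target exactly one vCPU with a postable delivery mode; anything else falls back to host-remapped delivery. A sketch of that decision; the kvm_irq_is_postable() check and the NULL-vcpu fallback are assumptions:

    kvm_msi_to_lapic_irq(kvm, entry, &irq);

    /*
     * Posted interrupts bypass the host entirely, which is only possible
     * for a single-vCPU, fixed-mode destination.
     */
    if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
        !kvm_irq_is_postable(&irq))
            vcpu = NULL;    /* assumed: fall back to remapped delivery */

The result is then handed to the vendor hook via kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi, ...); the trailing arguments are truncated in the hit and are left elided here.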
565 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_add_producer() local
568 spin_lock_irq(&kvm->irqfds.lock); in kvm_arch_irq_bypass_add_producer()
571 if (!kvm->arch.nr_possible_bypass_irqs++) in kvm_arch_irq_bypass_add_producer()
572 kvm_x86_call(pi_start_bypass)(kvm); in kvm_arch_irq_bypass_add_producer()
577 kvm->arch.nr_possible_bypass_irqs--; in kvm_arch_irq_bypass_add_producer()
579 spin_unlock_irq(&kvm->irqfds.lock); in kvm_arch_irq_bypass_add_producer()
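The irq-bypass producer registration keeps a VM-wide count of possible bypass IRQs under irqfds.lock: the 0-to-1 transition kicks the vendor pi_start_bypass() hook, and a failed IRTE update rolls the count back. Reconstruction from the hits; the MSI-type check and the kvm_pi_update_irte() call between increment and rollback are assumptions:

    spin_lock_irq(&kvm->irqfds.lock);
    irqfd->producer = prod;

    /* First possible bypass IRQ on this VM: let the vendor code set up. */
    if (!kvm->arch.nr_possible_bypass_irqs++)
            kvm_x86_call(pi_start_bypass)(kvm);

    if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
            ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
            if (ret)
                    kvm->arch.nr_possible_bypass_irqs--;
    }
    spin_unlock_irq(&kvm->irqfds.lock);

The del_producer hits that follow show the mirror-image teardown: the same lock is taken and nr_possible_bypass_irqs is decremented on the way out.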
589 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_del_producer() local
600 spin_lock_irq(&kvm->irqfds.lock); in kvm_arch_irq_bypass_del_producer()
610 kvm->arch.nr_possible_bypass_irqs--; in kvm_arch_irq_bypass_del_producer()
612 spin_unlock_irq(&kvm->irqfds.lock); in kvm_arch_irq_bypass_del_producer()
658 int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm) in kvm_setup_default_ioapic_and_pic_routing() argument
660 return kvm_set_irq_routing(kvm, default_routing, in kvm_setup_default_ioapic_and_pic_routing()
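Default routing just installs a static table wiring the legacy GSIs to PIC and I/O APIC pins; the ARRAY_SIZE() completion of the truncated hit is an assumption:

    int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm)
    {
            return kvm_set_irq_routing(kvm, default_routing,
                                       ARRAY_SIZE(default_routing), 0);
    }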
664 int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_get_irqchip() argument
666 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip()
680 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
689 int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_set_irqchip() argument
691 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip()
709 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
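Both irqchip ioctls dispatch on chip->chip_id and copy state between the user buffer and the in-kernel PIC pair or the I/O APIC. Reconstruction of the get side; the PIC_MASTER/PIC_SLAVE memcpy cases are assumptions inferred from the vpic hit:

    int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
    {
            struct kvm_pic *pic = kvm->arch.vpic;
            int r = 0;

            switch (chip->chip_id) {
            case KVM_IRQCHIP_PIC_MASTER:
                    memcpy(&chip->chip.pic, &pic->pics[0],
                           sizeof(struct kvm_pic_state));
                    break;
            case KVM_IRQCHIP_PIC_SLAVE:
                    memcpy(&chip->chip.pic, &pic->pics[1],
                           sizeof(struct kvm_pic_state));
                    break;
            case KVM_IRQCHIP_IOAPIC:
                    kvm_get_ioapic(kvm, &chip->chip.ioapic);
                    break;
            default:
                    r = -EINVAL;
                    break;
            }
            return r;
    }

The set side, per the kvm_set_ioapic() hit, mirrors this with the copies running in the other direction; from memory (not the hits) it also holds the PIC lock during the copies and kicks the PIC to reevaluate its output afterwards.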