Lines Matching +full:wait +full:- +full:pin
1 // SPDX-License-Identifier: GPL-2.0-only
3 * kvm eventfd support - use eventfd objects to signal various KVM events
17 #include <linux/wait.h>
46 struct kvm *kvm = irqfd->kvm; in irqfd_inject()
48 if (!irqfd->resampler) { in irqfd_inject()
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
55 irqfd->gsi, 1, false); in irqfd_inject()
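irqfd_inject() is the deferred-injection path: an ordinary irqfd pulses the GSI (assert, then immediately de-assert), while a resampler irqfd only asserts and leaves the de-assert to the EOI ack notifier below. Userspace wires an eventfd to a GSI with the KVM_IRQFD ioctl; a minimal hedged sketch (not part of this file), assuming vm_fd came from KVM_CREATE_VM and a routing entry exists for gsi:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int bind_irqfd(int vm_fd, unsigned int gsi)
{
	struct kvm_irqfd req;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.fd = efd;	/* consumed by irqfd_wakeup() below */
	req.gsi = gsi;	/* guest interrupt line to pulse */

	if (ioctl(vm_fd, KVM_IRQFD, &req) < 0)
		return -1;

	return efd;	/* signaling efd now injects the interrupt */
}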
62 list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link, in irqfd_resampler_notify()
63 srcu_read_lock_held(&resampler->kvm->irq_srcu)) in irqfd_resampler_notify()
64 eventfd_signal(irqfd->resamplefd); in irqfd_resampler_notify()
68 * Since resampler irqfds share an IRQ source ID, we de-assert once
69 * then notify all of the resampler irqfds using this GSI. We can't
70 * do multiple de-asserts or we risk racing with incoming re-asserts.
81 kvm = resampler->kvm; in irqfd_resampler_ack()
84 resampler->notifier.gsi, 0, false); in irqfd_resampler_ack()
86 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack()
88 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_resampler_ack()
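irqfd_resampler_ack() runs on guest EOI: KVM de-asserts the shared line once and then signals every resamplefd bound to that GSI, so the device backend can re-check its level and re-assert if needed. A hedged sketch of the userspace side of this handshake, with trigger_efd and resample_efd as hypothetical eventfds:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int bind_resampled_irqfd(int vm_fd, unsigned int gsi,
				int trigger_efd, int resample_efd)
{
	struct kvm_irqfd req;

	memset(&req, 0, sizeof(req));
	req.fd = trigger_efd;			/* signaled to assert the GSI */
	req.gsi = gsi;
	req.flags = KVM_IRQFD_FLAG_RESAMPLE;	/* level-triggered semantics */
	req.resamplefd = resample_efd;		/* KVM signals this on guest EOI */

	/* backend loop: poll resample_efd; if the device still asserts its
	 * line, signal trigger_efd again. */
	return ioctl(vm_fd, KVM_IRQFD, &req);
}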
94 struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler; in irqfd_resampler_shutdown()
95 struct kvm *kvm = resampler->kvm; in irqfd_resampler_shutdown()
97 mutex_lock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
99 list_del_rcu(&irqfd->resampler_link); in irqfd_resampler_shutdown()
101 if (list_empty(&resampler->list)) { in irqfd_resampler_shutdown()
102 list_del_rcu(&resampler->link); in irqfd_resampler_shutdown()
103 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); in irqfd_resampler_shutdown()
105 * synchronize_srcu_expedited(&kvm->irq_srcu) already called in irqfd_resampler_shutdown()
106 * in kvm_unregister_irq_ack_notifier(). in irqfd_resampler_shutdown()
109 resampler->notifier.gsi, 0, false); in irqfd_resampler_shutdown()
112 synchronize_srcu_expedited(&kvm->irq_srcu); in irqfd_resampler_shutdown()
115 mutex_unlock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
119 * Race-free decouple logic (ordering is critical)
126 struct kvm *kvm = irqfd->kvm; in irqfd_shutdown()
130 synchronize_srcu_expedited(&kvm->irq_srcu); in irqfd_shutdown()
133 * Synchronize with the wait-queue and unhook ourselves to prevent in irqfd_shutdown()
134 * further events. in irqfd_shutdown()
136 eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); in irqfd_shutdown()
142 flush_work(&irqfd->inject); in irqfd_shutdown()
144 if (irqfd->resampler) { in irqfd_shutdown()
146 eventfd_ctx_put(irqfd->resamplefd); in irqfd_shutdown()
153 irq_bypass_unregister_consumer(&irqfd->consumer); in irqfd_shutdown()
155 eventfd_ctx_put(irqfd->eventfd); in irqfd_shutdown()
160 /* assumes kvm->irqfds.lock is held */
164 return list_empty(&irqfd->list) ? false : true; in irqfd_is_active()
170 * assumes kvm->irqfds.lock is held
177 list_del_init(&irqfd->list); in irqfd_deactivate()
179 queue_work(irqfd_cleanup_wq, &irqfd->shutdown); in irqfd_deactivate()
188 return -EWOULDBLOCK; in kvm_arch_set_irq_inatomic()
192 * Called with wqh->lock held and interrupts disabled
195 irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) in irqfd_wakeup() argument
198 container_of(wait, struct kvm_kernel_irqfd, wait); in irqfd_wakeup()
201 struct kvm *kvm = irqfd->kvm; in irqfd_wakeup()
213 eventfd_ctx_do_read(irqfd->eventfd, &cnt); in irqfd_wakeup()
215 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_wakeup()
217 seq = read_seqcount_begin(&irqfd->irq_entry_sc); in irqfd_wakeup()
218 irq = irqfd->irq_entry; in irqfd_wakeup()
219 } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq)); in irqfd_wakeup()
223 false) == -EWOULDBLOCK) in irqfd_wakeup()
224 schedule_work(&irqfd->inject); in irqfd_wakeup()
225 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_wakeup()
238 spin_lock_irqsave(&kvm->irqfds.lock, iflags); in irqfd_wakeup()
244 * the wait-queue. If it is already deactivated, we can in irqfd_wakeup()
245 * simply return knowing the other side will cleanup for us. in irqfd_wakeup()
246 * We cannot race against the irqfd going away since the in irqfd_wakeup()
247 * other side is required to acquire wqh->lock, which we hold in irqfd_wakeup()
252 spin_unlock_irqrestore(&kvm->irqfds.lock, iflags); in irqfd_wakeup()
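irqfd_wakeup() runs in the signaler's context with wqh->lock held: it consumes the eventfd count, snapshots the routing entry under the seqcount, and attempts an atomic injection, deferring to the inject work item only when kvm_arch_set_irq_inatomic() returns -EWOULDBLOCK. From userspace, any 8-byte write of a nonzero count to the eventfd is enough to reach this path; a minimal hedged sketch (efd is the hypothetical fd returned by bind_irqfd() above):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void kick_irqfd(int efd)
{
	uint64_t one = 1;

	if (write(efd, &one, sizeof(one)) != sizeof(one))
		perror("eventfd write");
}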
264 lockdep_assert_held(&kvm->irqfds.lock); in irqfd_update()
266 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); in irqfd_update()
268 write_seqcount_begin(&irqfd->irq_entry_sc); in irqfd_update()
272 irqfd->irq_entry = *e; in irqfd_update()
274 irqfd->irq_entry.type = 0; in irqfd_update()
276 write_seqcount_end(&irqfd->irq_entry_sc); in irqfd_update()
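irqfd_update() is the write side of the seqcount protecting irqfd->irq_entry: writers hold kvm->irqfds.lock and bump the sequence around the multi-word update, while irqfd_wakeup() reads locklessly and retries if it observed a torn snapshot. A generic hedged sketch of the pairing (not verbatim kernel code; sc stands in for irq_entry_sc):

/* writer, serialized by the spinlock backing the seqcount */
write_seqcount_begin(&sc);
cached_entry = new_entry;		/* multi-word copy, may tear */
write_seqcount_end(&sc);

/* lockless reader */
do {
	seq = read_seqcount_begin(&sc);
	snapshot = cached_entry;
} while (read_seqcount_retry(&sc, seq));	/* retry if a writer raced */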
290 struct kvm_kernel_irqfd *irqfd = p->irqfd; in kvm_irqfd_register()
291 struct kvm *kvm = p->kvm; in kvm_irqfd_register()
297 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_register()
308 * wake-up handler, so that KVM *and only KVM* is notified whenever the in kvm_irqfd_register()
309 * underlying eventfd is signaled. in kvm_irqfd_register()
311 init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); in kvm_irqfd_register()
318 * Adding to the wait queue will fail if there is already a priority in kvm_irqfd_register()
320 * VM). Note, kvm_irqfd_deassign() waits for all in-flight shutdown in kvm_irqfd_register()
324 spin_release(&kvm->irqfds.lock.dep_map, _RET_IP_); in kvm_irqfd_register()
325 p->ret = add_wait_queue_priority_exclusive(wqh, &irqfd->wait); in kvm_irqfd_register()
326 spin_acquire(&kvm->irqfds.lock.dep_map, 0, 0, _RET_IP_); in kvm_irqfd_register()
327 if (p->ret) in kvm_irqfd_register()
330 list_add_tail(&irqfd->list, &kvm->irqfds.items); in kvm_irqfd_register()
333 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_register()
366 return -EAGAIN; in kvm_irqfd_assign()
369 return -EINVAL; in kvm_irqfd_assign()
373 return -ENOMEM; in kvm_irqfd_assign()
375 irqfd->kvm = kvm; in kvm_irqfd_assign()
376 irqfd->gsi = args->gsi; in kvm_irqfd_assign()
377 INIT_LIST_HEAD(&irqfd->list); in kvm_irqfd_assign()
378 INIT_WORK(&irqfd->inject, irqfd_inject); in kvm_irqfd_assign()
379 INIT_WORK(&irqfd->shutdown, irqfd_shutdown); in kvm_irqfd_assign()
380 seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock); in kvm_irqfd_assign()
382 CLASS(fd, f)(args->fd); in kvm_irqfd_assign()
384 ret = -EBADF; in kvm_irqfd_assign()
394 irqfd->eventfd = eventfd; in kvm_irqfd_assign()
396 if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) { in kvm_irqfd_assign()
399 resamplefd = eventfd_ctx_fdget(args->resamplefd); in kvm_irqfd_assign()
405 irqfd->resamplefd = resamplefd; in kvm_irqfd_assign()
406 INIT_LIST_HEAD(&irqfd->resampler_link); in kvm_irqfd_assign()
408 mutex_lock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
411 &kvm->irqfds.resampler_list, link) { in kvm_irqfd_assign()
412 if (resampler->notifier.gsi == irqfd->gsi) { in kvm_irqfd_assign()
413 irqfd->resampler = resampler; in kvm_irqfd_assign()
418 if (!irqfd->resampler) { in kvm_irqfd_assign()
422 ret = -ENOMEM; in kvm_irqfd_assign()
423 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
427 resampler->kvm = kvm; in kvm_irqfd_assign()
428 INIT_LIST_HEAD(&resampler->list); in kvm_irqfd_assign()
429 resampler->notifier.gsi = irqfd->gsi; in kvm_irqfd_assign()
430 resampler->notifier.irq_acked = irqfd_resampler_ack; in kvm_irqfd_assign()
431 INIT_LIST_HEAD(&resampler->link); in kvm_irqfd_assign()
433 list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list); in kvm_irqfd_assign()
435 &resampler->notifier); in kvm_irqfd_assign()
436 irqfd->resampler = resampler; in kvm_irqfd_assign()
439 list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); in kvm_irqfd_assign()
440 synchronize_srcu_expedited(&kvm->irq_srcu); in kvm_irqfd_assign()
442 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
455 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irqfd_assign()
474 schedule_work(&irqfd->inject); in kvm_irqfd_assign()
478 irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer; in kvm_irqfd_assign()
479 irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer; in kvm_irqfd_assign()
480 irqfd->consumer.stop = kvm_arch_irq_bypass_stop; in kvm_irqfd_assign()
481 irqfd->consumer.start = kvm_arch_irq_bypass_start; in kvm_irqfd_assign()
482 ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd); in kvm_irqfd_assign()
485 irqfd->eventfd, ret); in kvm_irqfd_assign()
489 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
493 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
495 if (irqfd->resampler) in kvm_irqfd_assign()
509 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_has_notifier() argument
514 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irq_has_notifier()
515 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_irq_has_notifier()
516 if (gsi != -1) in kvm_irq_has_notifier()
517 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, in kvm_irq_has_notifier()
518 link, srcu_read_lock_held(&kvm->irq_srcu)) in kvm_irq_has_notifier()
519 if (kian->gsi == gsi) { in kvm_irq_has_notifier()
520 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
524 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
534 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, in kvm_notify_acked_gsi()
535 link, srcu_read_lock_held(&kvm->irq_srcu)) in kvm_notify_acked_gsi()
536 if (kian->gsi == gsi) in kvm_notify_acked_gsi()
537 kian->irq_acked(kian); in kvm_notify_acked_gsi()
540 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_notify_acked_irq() argument
544 trace_kvm_ack_irq(irqchip, pin); in kvm_notify_acked_irq()
546 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_acked_irq()
547 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_acked_irq()
548 if (gsi != -1) in kvm_notify_acked_irq()
550 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_acked_irq()
556 mutex_lock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
557 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); in kvm_register_irq_ack_notifier()
558 mutex_unlock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
565 mutex_lock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
566 hlist_del_init_rcu(&kian->link); in kvm_unregister_irq_ack_notifier()
567 mutex_unlock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
568 synchronize_srcu_expedited(&kvm->irq_srcu); in kvm_unregister_irq_ack_notifier()
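The register/unregister pair above shows the update side of the irq_srcu scheme used throughout this file: writers serialize on kvm->irq_lock, unlink with RCU list primitives, then wait out all srcu_read_lock() readers with an expedited grace period before the entry may be reused. A generic hedged sketch of why the grace period matters (the kfree() is hypothetical; KVM's notifiers are embedded in larger objects):

mutex_lock(&kvm->irq_lock);			/* serialize writers */
hlist_del_init_rcu(&kian->link);		/* readers may still hold a pointer */
mutex_unlock(&kvm->irq_lock);
synchronize_srcu_expedited(&kvm->irq_srcu);	/* drain all SRCU readers */
kfree(kian);					/* only now is reuse/free safe */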
581 eventfd = eventfd_ctx_fdget(args->fd); in kvm_irqfd_deassign()
585 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
587 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { in kvm_irqfd_deassign()
588 if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) { in kvm_irqfd_deassign()
595 write_seqcount_begin(&irqfd->irq_entry_sc); in kvm_irqfd_deassign()
596 irqfd->irq_entry.type = 0; in kvm_irqfd_deassign()
597 write_seqcount_end(&irqfd->irq_entry_sc); in kvm_irqfd_deassign()
602 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
618 if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE)) in kvm_irqfd()
619 return -EINVAL; in kvm_irqfd()
621 if (args->flags & KVM_IRQFD_FLAG_DEASSIGN) in kvm_irqfd()
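kvm_irqfd() dispatches on the flags; deassignment reuses struct kvm_irqfd and must match both the eventfd and the GSI of the original assignment before kvm_irqfd_deassign() clears the cached routing entry and deactivates the irqfd. A hedged userspace sketch (efd, gsi, vm_fd hypothetical):

struct kvm_irqfd req = {
	.fd    = efd,	/* must match the fd given at assign time */
	.gsi   = gsi,	/* must match the gsi given at assign time */
	.flags = KVM_IRQFD_FLAG_DEASSIGN,
};

ioctl(vm_fd, KVM_IRQFD, &req);	/* queues irqfd_shutdown() via irqfd_deactivate() */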
636 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
638 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) in kvm_irqfd_release()
641 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
653 * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
659 spin_lock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
661 list_for_each_entry(irqfd, &kvm->irqfds.items, list) { in kvm_irq_routing_update()
664 struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry; in kvm_irq_routing_update()
670 if (irqfd->producer) in kvm_irq_routing_update()
671 kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); in kvm_irq_routing_update()
675 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
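kvm_irq_routing_update() runs when userspace replaces the routing table: each irqfd re-caches its entry under the seqcount, and irqfds with an IRQ-bypass producer (e.g. VFIO posted interrupts) additionally get their hardware routing refreshed. The usual trigger is the KVM_SET_GSI_ROUTING ioctl; a hedged sketch installing a single MSI route (address/data values hypothetical):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_msi_route(int vm_fd, unsigned int gsi,
			 uint32_t addr_lo, uint32_t addr_hi, uint32_t data)
{
	struct kvm_irq_routing *table;
	int ret;

	table = calloc(1, sizeof(*table) + sizeof(table->entries[0]));
	if (!table)
		return -1;

	table->nr = 1;
	table->entries[0].gsi = gsi;
	table->entries[0].type = KVM_IRQ_ROUTING_MSI;
	table->entries[0].u.msi.address_lo = addr_lo;
	table->entries[0].u.msi.address_hi = addr_hi;
	table->entries[0].u.msi.data = data;

	ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
	free(table);
	return ret;
}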
680 unsigned int pin) in kvm_notify_irqfd_resampler() argument
685 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_irqfd_resampler()
686 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_irqfd_resampler()
687 if (gsi != -1) { in kvm_notify_irqfd_resampler()
689 &kvm->irqfds.resampler_list, link, in kvm_notify_irqfd_resampler()
690 srcu_read_lock_held(&kvm->irq_srcu)) { in kvm_notify_irqfd_resampler()
691 if (resampler->notifier.gsi == gsi) { in kvm_notify_irqfd_resampler()
693 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_irqfd_resampler()
698 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_irqfd_resampler()
704 * create a host-wide workqueue for issuing deferred shutdown requests
710 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0); in kvm_irqfd_init()
712 return -ENOMEM; in kvm_irqfd_init()
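kvm_irqfd_init() creates the host-wide cleanup workqueue that irqfd_deactivate() targets, so irqfd_shutdown() runs in process context where it may sleep. A generic hedged sketch of the pattern (all names hypothetical, not this file's code):

static struct workqueue_struct *cleanup_wq;

static void obj_shutdown(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, flush other work */
}

/* once, at init: */
cleanup_wq = alloc_workqueue("my-cleanup", 0, 0);

/* per object, possibly from a context that must not sleep: */
INIT_WORK(&obj->shutdown, obj_shutdown);
queue_work(cleanup_wq, &obj->shutdown);

/* teardown can later wait on a specific item: */
flush_work(&obj->shutdown);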
724 * --------------------------------------------------------------------
725 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
726 *
727 * userspace can register a PIO/MMIO address with an eventfd for receiving
728 * notification when the memory has been touched.
729 * --------------------------------------------------------------------
752 eventfd_ctx_put(p->eventfd); in ioeventfd_release()
753 list_del(&p->list); in ioeventfd_release()
762 if (addr != p->addr) in ioeventfd_in_range()
766 if (!p->length) in ioeventfd_in_range()
770 if (len != p->length) in ioeventfd_in_range()
771 /* address-range must be precise for a hit */ in ioeventfd_in_range()
774 if (p->wildcard) in ioeventfd_in_range()
799 return _val == p->datamatch; in ioeventfd_in_range()
810 return -EOPNOTSUPP; in ioeventfd_write()
812 eventfd_signal(p->eventfd); in ioeventfd_write()
833 /* assumes kvm->slots_lock held */
839 list_for_each_entry(_p, &kvm->ioeventfds, list) in ioeventfd_check_collision()
840 if (_p->bus_idx == p->bus_idx && in ioeventfd_check_collision()
841 _p->addr == p->addr && in ioeventfd_check_collision()
842 (!_p->length || !p->length || in ioeventfd_check_collision()
843 (_p->length == p->length && in ioeventfd_check_collision()
844 (_p->wildcard || p->wildcard || in ioeventfd_check_collision()
845 _p->datamatch == p->datamatch)))) in ioeventfd_check_collision()
869 eventfd = eventfd_ctx_fdget(args->fd); in kvm_assign_ioeventfd_idx()
875 ret = -ENOMEM; in kvm_assign_ioeventfd_idx()
879 INIT_LIST_HEAD(&p->list); in kvm_assign_ioeventfd_idx()
880 p->addr = args->addr; in kvm_assign_ioeventfd_idx()
881 p->bus_idx = bus_idx; in kvm_assign_ioeventfd_idx()
882 p->length = args->len; in kvm_assign_ioeventfd_idx()
883 p->eventfd = eventfd; in kvm_assign_ioeventfd_idx()
886 if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH) in kvm_assign_ioeventfd_idx()
887 p->datamatch = args->datamatch; in kvm_assign_ioeventfd_idx()
889 p->wildcard = true; in kvm_assign_ioeventfd_idx()
891 mutex_lock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
895 ret = -EEXIST; in kvm_assign_ioeventfd_idx()
899 kvm_iodevice_init(&p->dev, &ioeventfd_ops); in kvm_assign_ioeventfd_idx()
901 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, in kvm_assign_ioeventfd_idx()
902 &p->dev); in kvm_assign_ioeventfd_idx()
906 kvm_get_bus(kvm, bus_idx)->ioeventfd_count++; in kvm_assign_ioeventfd_idx()
907 list_add_tail(&p->list, &kvm->ioeventfds); in kvm_assign_ioeventfd_idx()
909 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
914 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
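kvm_assign_ioeventfd_idx() registers the eventfd as a device on the chosen bus after checking for collisions under slots_lock. The userspace side is the KVM_IOEVENTFD ioctl; a hedged sketch registering a 4-byte MMIO doorbell that fires only when the guest writes the value 1 (doorbell_gpa, efd, vm_fd hypothetical):

struct kvm_ioeventfd req = {
	.addr      = doorbell_gpa,	/* guest-physical MMIO address */
	.len       = 4,			/* must match the guest's write width */
	.fd        = efd,
	.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
	.datamatch = 1,			/* only 4-byte writes of 1 signal efd */
};

ioctl(vm_fd, KVM_IOEVENTFD, &req);
/* omit DATAMATCH for a wildcard: any 4-byte write to addr signals efd */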
930 int ret = -ENOENT; in kvm_deassign_ioeventfd_idx()
933 eventfd = eventfd_ctx_fdget(args->fd); in kvm_deassign_ioeventfd_idx()
937 wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); in kvm_deassign_ioeventfd_idx()
939 mutex_lock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
941 list_for_each_entry(p, &kvm->ioeventfds, list) { in kvm_deassign_ioeventfd_idx()
942 if (p->bus_idx != bus_idx || in kvm_deassign_ioeventfd_idx()
943 p->eventfd != eventfd || in kvm_deassign_ioeventfd_idx()
944 p->addr != args->addr || in kvm_deassign_ioeventfd_idx()
945 p->length != args->len || in kvm_deassign_ioeventfd_idx()
946 p->wildcard != wildcard) in kvm_deassign_ioeventfd_idx()
949 if (!p->wildcard && p->datamatch != args->datamatch) in kvm_deassign_ioeventfd_idx()
952 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); in kvm_deassign_ioeventfd_idx()
955 bus->ioeventfd_count--; in kvm_deassign_ioeventfd_idx()
960 mutex_unlock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
969 enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags); in kvm_deassign_ioeventfd()
972 if (!args->len && bus_idx == KVM_MMIO_BUS) in kvm_deassign_ioeventfd()
984 bus_idx = ioeventfd_bus_from_flags(args->flags); in kvm_assign_ioeventfd()
985 /* must be natural-word sized, or 0 to ignore length */ in kvm_assign_ioeventfd()
986 switch (args->len) { in kvm_assign_ioeventfd()
994 return -EINVAL; in kvm_assign_ioeventfd()
998 if (args->addr + args->len < args->addr) in kvm_assign_ioeventfd()
999 return -EINVAL; in kvm_assign_ioeventfd()
1002 if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) in kvm_assign_ioeventfd()
1003 return -EINVAL; in kvm_assign_ioeventfd()
1006 if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)) in kvm_assign_ioeventfd()
1007 return -EINVAL; in kvm_assign_ioeventfd()
1016 if (!args->len && bus_idx == KVM_MMIO_BUS) { in kvm_assign_ioeventfd()
1033 if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN) in kvm_ioeventfd()
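Deassignment must replay the original key: bus, address, length, eventfd, and the datamatch/wildcard choice all have to match for kvm_deassign_ioeventfd_idx() to find the entry. A hedged sketch, reusing req from the assign sketch above:

req.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;	/* same addr/len/fd/datamatch */
ioctl(vm_fd, KVM_IOEVENTFD, &req);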
1043 spin_lock_init(&kvm->irqfds.lock); in kvm_eventfd_init()
1044 INIT_LIST_HEAD(&kvm->irqfds.items); in kvm_eventfd_init()
1045 INIT_LIST_HEAD(&kvm->irqfds.resampler_list); in kvm_eventfd_init()
1046 mutex_init(&kvm->irqfds.resampler_lock); in kvm_eventfd_init()
1048 INIT_LIST_HEAD(&kvm->ioeventfds); in kvm_eventfd_init()