Lines Matching full:pit

54 static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)  in pit_set_gate()  argument
56 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; in pit_set_gate()
77 static int pit_get_gate(struct kvm_pit *pit, int channel) in pit_get_gate() argument
79 return pit->pit_state.channels[channel].gate; in pit_get_gate()
82 static s64 __kpit_elapsed(struct kvm_pit *pit) in __kpit_elapsed() argument
86 struct kvm_kpit_state *ps = &pit->pit_state; in __kpit_elapsed()
106 static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c, in kpit_elapsed() argument
110 return __kpit_elapsed(pit); in kpit_elapsed()
115 static int pit_get_count(struct kvm_pit *pit, int channel) in pit_get_count() argument
117 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; in pit_get_count()
121 t = kpit_elapsed(pit, c, channel); in pit_get_count()
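
The counting helpers above derive the "current" counter from wall-clock time rather than from a real countdown. A rough, userspace-only sketch of that arithmetic, assuming the canonical 1.193182 MHz PIT input clock; the per-mode cases follow my reading of pit_get_count(), and the names and simplifications are mine, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define PIT_FREQ_HZ   1193182ULL      /* canonical 8254 input clock */
    #define NSEC_PER_SEC  1000000000ULL

    /* Illustrative model of the readback: elapsed ns -> PIT ticks -> counter. */
    static unsigned model_pit_count(uint32_t reload, int mode, uint64_t elapsed_ns)
    {
        uint64_t ticks = elapsed_ns * PIT_FREQ_HZ / NSEC_PER_SEC;

        if (reload == 0)
            reload = 0x10000;             /* a programmed count of 0 means 65536 */

        switch (mode) {
        case 0: case 1: case 4: case 5:   /* one-shot style modes count straight down */
            return (uint16_t)(reload - ticks);
        case 3:                           /* square-wave mode decrements twice per tick */
            return (uint16_t)(reload - ticks * 2);
        default:                          /* periodic modes wrap at the reload value */
            return (uint16_t)(reload - ticks % reload);
        }
    }

    int main(void)
    {
        /* 10 ms into a mode-2 (rate generator) run with reload 59659 (~20 Hz). */
        printf("count now: %u\n", model_pit_count(59659, 2, 10 * 1000 * 1000ULL));
        return 0;
    }
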
142 static int pit_get_out(struct kvm_pit *pit, int channel) in pit_get_out() argument
144 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; in pit_get_out()
148 t = kpit_elapsed(pit, c, channel); in pit_get_out()
174 static void pit_latch_count(struct kvm_pit *pit, int channel) in pit_latch_count() argument
176 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; in pit_latch_count()
179 c->latched_count = pit_get_count(pit, channel); in pit_latch_count()
184 static void pit_latch_status(struct kvm_pit *pit, int channel) in pit_latch_status() argument
186 struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; in pit_latch_status()
190 c->status = ((pit_get_out(pit, channel) << 7) | in pit_latch_status()
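
The latched status byte assembled above follows the 8254 read-back format. A tiny illustrative packing (helper name hypothetical, field layout per the 8254 datasheet):

    #include <stdint.h>
    #include <stdio.h>

    /* OUT in bit 7, read/write access mode in bits 5-4, counting mode in
     * bits 3-1, BCD in bit 0; NULL COUNT (bit 6) is left unimplemented here. */
    static uint8_t pit_status_byte(int out, int rw_mode, int mode, int bcd)
    {
        return (uint8_t)((out << 7) | (rw_mode << 4) | (mode << 1) | bcd);
    }

    int main(void)
    {
        /* OUT high, lobyte/hibyte access, mode 2, binary counting -> 0xb4. */
        printf("0x%02x\n", (unsigned)pit_status_byte(1, 3, 2, 0));
        return 0;
    }
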
207 struct kvm_pit *pit = pit_state_to_pit(ps); in kvm_pit_ack_irq() local
215 kthread_queue_work(pit->worker, &pit->expired); in kvm_pit_ack_irq()
220 struct kvm_pit *pit = vcpu->kvm->arch.vpit; in __kvm_migrate_pit_timer() local
223 /* Somewhat arbitrarily make vcpu0 the owner of the PIT. */ in __kvm_migrate_pit_timer()
224 if (vcpu->vcpu_id || !pit) in __kvm_migrate_pit_timer()
227 timer = &pit->pit_state.timer; in __kvm_migrate_pit_timer()
228 mutex_lock(&pit->pit_state.lock); in __kvm_migrate_pit_timer()
231 mutex_unlock(&pit->pit_state.lock); in __kvm_migrate_pit_timer()
234 static void destroy_pit_timer(struct kvm_pit *pit) in destroy_pit_timer() argument
236 hrtimer_cancel(&pit->pit_state.timer); in destroy_pit_timer()
237 kthread_flush_work(&pit->expired); in destroy_pit_timer()
242 struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); in pit_do_work() local
243 struct kvm *kvm = pit->kvm; in pit_do_work()
246 struct kvm_kpit_state *ps = &pit->pit_state; in pit_do_work()
256 * The route is: PIT -> LVT0 in NMI mode. in pit_do_work()
259 * the MP specification. We propagate a PIT interrupt to all in pit_do_work()
285 static inline void kvm_pit_reset_reinject(struct kvm_pit *pit) in kvm_pit_reset_reinject() argument
287 atomic_set(&pit->pit_state.pending, 0); in kvm_pit_reset_reinject()
288 atomic_set(&pit->pit_state.irq_ack, 1); in kvm_pit_reset_reinject()
291 static void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject) in kvm_pit_set_reinject() argument
293 struct kvm_kpit_state *ps = &pit->pit_state; in kvm_pit_set_reinject()
294 struct kvm *kvm = pit->kvm; in kvm_pit_set_reinject()
301 * This causes in-kernel PIT re-inject mode to fail in kvm_pit_set_reinject()
305 * So, deactivate APICv when PIT is in reinject mode. in kvm_pit_set_reinject()
310 kvm_pit_reset_reinject(pit); in kvm_pit_set_reinject()
312 kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); in kvm_pit_set_reinject()
316 kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); in kvm_pit_set_reinject()
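
Together, pending, irq_ack, the ack notifier and the worker implement tick re-injection: at most one un-acked tick is in flight, later expirations accumulate in pending, and the ack path replays them. A deliberately simplified standalone model of that gate (reinject mode only; names and details are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pending;         /* expired ticks not yet delivered */
    static atomic_int irq_ack = 1;     /* 1 = guest acked the last injected tick */

    static void deliver_tick(void)
    {
        /* pit_do_work()-style gate: refuse to inject until the last tick is acked. */
        if (atomic_exchange(&irq_ack, 0) == 0)
            return;
        atomic_fetch_sub(&pending, 1);
        printf("inject IRQ0 (pending now %d)\n", atomic_load(&pending));
    }

    static void timer_expired(void)    /* hrtimer callback stand-in */
    {
        atomic_fetch_add(&pending, 1);
        deliver_tick();                /* stand-in for queueing pit->expired */
    }

    static void guest_acked_irq0(void) /* irq ack notifier stand-in */
    {
        atomic_store(&irq_ack, 1);
        if (atomic_load(&pending) > 0)
            deliver_tick();            /* replay a tick the guest missed */
    }

    int main(void)
    {
        timer_expired();      /* delivered immediately */
        timer_expired();      /* not yet acked: stays pending */
        timer_expired();      /* also pending */
        guest_acked_irq0();   /* replays one missed tick */
        guest_acked_irq0();   /* replays the next */
        return 0;
    }
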
322 static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period) in create_pit_timer() argument
324 struct kvm_kpit_state *ps = &pit->pit_state; in create_pit_timer()
325 struct kvm *kvm = pit->kvm; in create_pit_timer()
334 pr_debug("create pit timer, interval is %llu nsec\n", interval); in create_pit_timer()
338 kthread_flush_work(&pit->expired); in create_pit_timer()
342 kvm_pit_reset_reinject(pit); in create_pit_timer()
365 static void pit_load_count(struct kvm_pit *pit, int channel, u32 val) in pit_load_count() argument
367 struct kvm_kpit_state *ps = &pit->pit_state; in pit_load_count()
392 create_pit_timer(pit, val, 0); in pit_load_count()
396 create_pit_timer(pit, val, 1); in pit_load_count()
399 destroy_pit_timer(pit); in pit_load_count()
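
Only channel 0 is backed by a host hrtimer; the count programmed into it decides whether that timer is one-shot or periodic, and the interval is the count scaled by the 1.193182 MHz input clock (a count of 0 meaning the maximum, 0x10000). A hedged userspace sketch of that mapping; the exact mode split is my assumption about pit_load_count(), not taken from the listing:

    #include <stdint.h>
    #include <stdio.h>

    #define PIT_FREQ_HZ   1193182ULL
    #define NSEC_PER_SEC  1000000000ULL

    enum timer_kind { TIMER_NONE, TIMER_ONESHOT, TIMER_PERIODIC };

    /* Assumed channel-0 decision: one-shot modes vs. rate/square-wave modes. */
    static enum timer_kind channel0_timer_kind(int mode)
    {
        switch (mode) {
        case 0: case 1: case 4:  return TIMER_ONESHOT;    /* create_pit_timer(.., 0) */
        case 2: case 3:          return TIMER_PERIODIC;   /* create_pit_timer(.., 1) */
        default:                 return TIMER_NONE;       /* destroy_pit_timer() */
        }
    }

    static uint64_t count_to_ns(uint32_t count)
    {
        if (count == 0)
            count = 0x10000;
        return count * NSEC_PER_SEC / PIT_FREQ_HZ;
    }

    int main(void)
    {
        /* The classic 100 Hz PC timer: mode 2, reload 11932 -> ~10 ms period. */
        printf("kind=%d period=%llu ns\n", channel0_timer_kind(2),
               (unsigned long long)count_to_ns(11932));
        return 0;
    }
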
403 static void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val, in kvm_pit_load_count() argument
408 WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock)); in kvm_pit_load_count()
413 saved_mode = pit->pit_state.channels[0].mode; in kvm_pit_load_count()
414 pit->pit_state.channels[0].mode = 0xff; /* disable timer */ in kvm_pit_load_count()
415 pit_load_count(pit, channel, val); in kvm_pit_load_count()
416 pit->pit_state.channels[0].mode = saved_mode; in kvm_pit_load_count()
418 pit_load_count(pit, channel, val); in kvm_pit_load_count()
442 struct kvm_pit *pit = dev_to_pit(this); in pit_ioport_write() local
443 struct kvm_kpit_state *pit_state = &pit->pit_state; in pit_ioport_write()
466 pit_latch_count(pit, channel); in pit_ioport_write()
468 pit_latch_status(pit, channel); in pit_ioport_write()
476 pit_latch_count(pit, channel); in pit_ioport_write()
493 pit_load_count(pit, addr, val); in pit_ioport_write()
496 pit_load_count(pit, addr, val << 8); in pit_ioport_write()
503 pit_load_count(pit, addr, s->write_latch | (val << 8)); in pit_ioport_write()
517 struct kvm_pit *pit = dev_to_pit(this); in pit_ioport_read() local
518 struct kvm_kpit_state *pit_state = &pit->pit_state; in pit_ioport_read()
555 count = pit_get_count(pit, addr); in pit_ioport_read()
559 count = pit_get_count(pit, addr); in pit_ioport_read()
563 count = pit_get_count(pit, addr); in pit_ioport_read()
568 count = pit_get_count(pit, addr); in pit_ioport_read()
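
The write and read handlers above decode the classic 8254 programming model: a control word on port 0x43 selects channel, access width and mode, the reload value then arrives on the channel's data port low byte first, and a latch command freezes the count for a consistent two-byte read-back. A sketch of that sequence as a guest (or a bare-metal test run as root) might issue it, assuming x86 port I/O via <sys/io.h>:

    #include <stdio.h>
    #include <sys/io.h>

    #define PIT_CH0     0x40
    #define PIT_CONTROL 0x43

    int main(void)
    {
        unsigned short reload = 11932;        /* ~100 Hz at 1.193182 MHz */

        if (ioperm(PIT_CH0, 4, 1)) {          /* ports 0x40-0x43 */
            perror("ioperm");
            return 1;
        }

        /* Control word: channel 0, lobyte/hibyte access, mode 2, binary. */
        outb((0 << 6) | (3 << 4) | (2 << 1) | 0, PIT_CONTROL);
        outb(reload & 0xff, PIT_CH0);         /* low byte first ... */
        outb(reload >> 8,   PIT_CH0);         /* ... then high byte */

        /* Counter-latch command for channel 0, then read the frozen count. */
        outb(0 << 6, PIT_CONTROL);
        unsigned lo = inb(PIT_CH0);
        unsigned hi = inb(PIT_CH0);
        printf("current count: %u\n", (hi << 8) | lo);
        return 0;
    }
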
587 struct kvm_pit *pit = speaker_to_pit(this); in speaker_ioport_write() local
588 struct kvm_kpit_state *pit_state = &pit->pit_state; in speaker_ioport_write()
598 pit_set_gate(pit, 2, val & 1); in speaker_ioport_write()
607 struct kvm_pit *pit = speaker_to_pit(this); in speaker_ioport_read() local
608 struct kvm_kpit_state *pit_state = &pit->pit_state; in speaker_ioport_read()
619 pit_get_gate(pit, 2) | (pit_get_out(pit, 2) << 5) | in speaker_ioport_read()
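
The speaker device models the PC "port B" at 0x61: bit 0 is the channel-2 gate, bit 1 the speaker data enable, bit 4 a free-running refresh-clock toggle, and bit 5 the live channel-2 output, which is what the read path assembles above. A tiny illustrative packing helper (not the kernel's code; bits 1 and 4 are standard PC behaviour assumed here, only bits 0 and 5 are visible in the lines above):

    #include <stdint.h>
    #include <stdio.h>

    /* Port 0x61 read value: gate2 | speaker_on<<1 | refresh<<4 | out2<<5. */
    static uint8_t port61_byte(int gate2, int speaker_on, int refresh, int out2)
    {
        return (uint8_t)(gate2 | (speaker_on << 1) | (refresh << 4) | (out2 << 5));
    }

    int main(void)
    {
        /* Gate open, speaker muted, refresh low, OUT2 currently high -> 0x21. */
        printf("0x%02x\n", (unsigned)port61_byte(1, 0, 0, 1));
        return 0;
    }
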
628 static void kvm_pit_reset(struct kvm_pit *pit) in kvm_pit_reset() argument
633 pit->pit_state.flags = 0; in kvm_pit_reset()
635 c = &pit->pit_state.channels[i]; in kvm_pit_reset()
638 pit_load_count(pit, i, 0); in kvm_pit_reset()
641 kvm_pit_reset_reinject(pit); in kvm_pit_reset()
646 struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); in pit_mask_notifier() local
649 kvm_pit_reset_reinject(pit); in pit_mask_notifier()
667 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit() local
669 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
670 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
672 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
673 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
693 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2() local
695 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
696 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
700 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
701 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
702 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
704 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
706 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
712 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject() local
714 /* pit->pit_state.lock was overloaded to prevent userspace from getting in kvm_vm_ioctl_reinject()
718 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
719 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
720 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
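
Userspace reaches these paths through VM ioctls: KVM_CREATE_PIT2 instantiates the in-kernel PIT (the in-kernel irqchip must exist first), KVM_GET_PIT2/KVM_SET_PIT2 transfer struct kvm_pit_state2 through the set_pit2 handler above, and KVM_REINJECT_CONTROL flips the reinject mode handled by kvm_vm_ioctl_reinject(). A minimal VMM-side sketch (error handling omitted; assumes an x86 host exposing /dev/kvm):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

        /* The in-kernel PIT sits behind the in-kernel irqchip. */
        ioctl(vm, KVM_CREATE_IRQCHIP, 0);

        struct kvm_pit_config pit_cfg = { .flags = 0 };
        ioctl(vm, KVM_CREATE_PIT2, &pit_cfg);

        /* Disable tick re-injection (see kvm_vm_ioctl_reinject() above). */
        struct kvm_reinject_control rc = { .pit_reinject = 0 };
        ioctl(vm, KVM_REINJECT_CONTROL, &rc);

        /* Round-trip the channel state, e.g. for save/restore. */
        struct kvm_pit_state2 ps;
        ioctl(vm, KVM_GET_PIT2, &ps);
        ioctl(vm, KVM_SET_PIT2, &ps);
        return 0;
    }
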
737 struct kvm_pit *pit; in kvm_create_pit() local
743 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL_ACCOUNT); in kvm_create_pit()
744 if (!pit) in kvm_create_pit()
747 mutex_init(&pit->pit_state.lock); in kvm_create_pit()
753 pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr); in kvm_create_pit()
754 if (IS_ERR(pit->worker)) in kvm_create_pit()
757 kthread_init_work(&pit->expired, pit_do_work); in kvm_create_pit()
759 pit->kvm = kvm; in kvm_create_pit()
761 pit_state = &pit->pit_state; in kvm_create_pit()
766 pit->mask_notifier.func = pit_mask_notifier; in kvm_create_pit()
768 kvm_pit_reset(pit); in kvm_create_pit()
770 kvm_pit_set_reinject(pit, true); in kvm_create_pit()
773 kvm_iodevice_init(&pit->dev, &pit_dev_ops); in kvm_create_pit()
775 KVM_PIT_MEM_LENGTH, &pit->dev); in kvm_create_pit()
780 kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops); in kvm_create_pit()
783 &pit->speaker_dev); in kvm_create_pit()
789 return pit; in kvm_create_pit()
792 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); in kvm_create_pit()
795 kvm_pit_set_reinject(pit, false); in kvm_create_pit()
796 kthread_destroy_worker(pit->worker); in kvm_create_pit()
798 kfree(pit); in kvm_create_pit()
804 struct kvm_pit *pit = kvm->arch.vpit; in kvm_free_pit() local
806 if (pit) { in kvm_free_pit()
808 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev); in kvm_free_pit()
809 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev); in kvm_free_pit()
811 kvm_pit_set_reinject(pit, false); in kvm_free_pit()
812 hrtimer_cancel(&pit->pit_state.timer); in kvm_free_pit()
813 kthread_destroy_worker(pit->worker); in kvm_free_pit()
814 kfree(pit); in kvm_free_pit()