Lines Matching +full:tsc +full:- +full:irq

1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM Microsoft Hyper-V emulation
17 * Ben-Ami Yassour <benami@il.ibm.com>
42 #include "irq.h"
48 * As per Hyper-V TLFS, extended hypercalls start from 0x8001
53 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
56 * 0x8002 - Bit 0
57 * 0x8003 - Bit 1
59 * 0x8041 - Bit 63
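The mapping above is a plain offset from the first extended hypercall. A minimal sketch of how a capability bit index can be derived from a hypercall code, assuming only the numbering in the comment (the helper name is illustrative, not one used in this file):

/*
 * Illustrative only: 0x8001 (HvExtCallQueryCapabilities) has no bit,
 * 0x8002 maps to bit 0, ..., 0x8041 maps to bit 63.
 */
static inline int ext_hypercall_cap_bit(u16 code)
{
	if (code < 0x8002 || code > 0x8041)
		return -1;
	return code - 0x8002;
}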
70 return atomic64_read(&synic->sint[sint]); in synic_read_sint()
76 return -1; in synic_get_sint_vector()
85 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_connected()
98 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_auto_eoi()
111 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in synic_update_vector()
118 __set_bit(vector, synic->vec_bitmap); in synic_update_vector()
120 __clear_bit(vector, synic->vec_bitmap); in synic_update_vector()
122 auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256); in synic_update_vector()
125 __set_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
127 __clear_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
129 auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256); in synic_update_vector()
137 down_write(&vcpu->kvm->arch.apicv_update_lock); in synic_update_vector()
140 hv->synic_auto_eoi_used++; in synic_update_vector()
142 hv->synic_auto_eoi_used--; in synic_update_vector()
148 __kvm_set_or_clear_apicv_inhibit(vcpu->kvm, in synic_update_vector()
150 !!hv->synic_auto_eoi_used); in synic_update_vector()
152 up_write(&vcpu->kvm->arch.apicv_update_lock); in synic_update_vector()
165 * Valid vectors are 16-255, however, nested Hyper-V attempts to write in synic_set_sint()
167 * allow zero-initing the register from host as well. in synic_set_sint()
174 * bitmap of vectors with auto-eoi behavior. The bitmaps are in synic_set_sint()
179 atomic64_set(&synic->sint[sint], data); in synic_set_sint()
216 return (synic->active) ? synic : NULL; in synic_get()
221 struct kvm *kvm = vcpu->kvm; in kvm_hv_notify_acked_sint()
227 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); in kvm_hv_notify_acked_sint()
229 /* Try to deliver pending Hyper-V SynIC timers messages */ in kvm_hv_notify_acked_sint()
230 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
231 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
232 if (stimer->msg_pending && stimer->config.enable && in kvm_hv_notify_acked_sint()
233 !stimer->config.direct_mode && in kvm_hv_notify_acked_sint()
234 stimer->config.sintx == sint) in kvm_hv_notify_acked_sint()
238 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_hv_notify_acked_sint()
239 gsi = atomic_read(&synic->sint_to_gsi[sint]); in kvm_hv_notify_acked_sint()
240 if (gsi != -1) in kvm_hv_notify_acked_sint()
242 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_hv_notify_acked_sint()
250 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
251 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
252 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
253 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
254 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
265 if (!synic->active && (!host || data)) in synic_set_msr()
268 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); in synic_set_msr()
273 synic->control = data; in synic_set_msr()
282 synic->version = data; in synic_set_msr()
286 !synic->dont_zero_synic_pages) in synic_set_msr()
287 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
292 synic->evt_page = data; in synic_set_msr()
298 !synic->dont_zero_synic_pages) in synic_set_msr()
299 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
304 synic->msg_page = data; in synic_set_msr()
311 if (!synic->active) in synic_set_msr()
314 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in synic_set_msr()
319 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); in synic_set_msr()
332 return hv_vcpu->cpuid_cache.syndbg_cap_eax & in kvm_hv_is_syndbg_enabled()
338 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_syndbg_complete_userspace()
340 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) in kvm_hv_syndbg_complete_userspace()
341 hv->hv_syndbg.control.status = in kvm_hv_syndbg_complete_userspace()
342 vcpu->run->hyperv.u.syndbg.status; in kvm_hv_syndbg_complete_userspace()
351 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
352 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
353 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
354 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
355 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
356 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
357 vcpu->arch.complete_userspace_io = in syndbg_exit()
370 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, in syndbg_set_msr()
371 to_hv_vcpu(vcpu)->vp_index, msr, data); in syndbg_set_msr()
374 syndbg->control.control = data; in syndbg_set_msr()
379 syndbg->control.status = data; in syndbg_set_msr()
382 syndbg->control.send_page = data; in syndbg_set_msr()
385 syndbg->control.recv_page = data; in syndbg_set_msr()
388 syndbg->control.pending_page = data; in syndbg_set_msr()
393 syndbg->options = data; in syndbg_set_msr()
411 *pdata = syndbg->control.control; in syndbg_get_msr()
414 *pdata = syndbg->control.status; in syndbg_get_msr()
417 *pdata = syndbg->control.send_page; in syndbg_get_msr()
420 *pdata = syndbg->control.recv_page; in syndbg_get_msr()
423 *pdata = syndbg->control.pending_page; in syndbg_get_msr()
426 *pdata = syndbg->options; in syndbg_get_msr()
432 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); in syndbg_get_msr()
442 if (!synic->active && !host) in synic_get_msr()
448 *pdata = synic->control; in synic_get_msr()
451 *pdata = synic->version; in synic_get_msr()
454 *pdata = synic->evt_page; in synic_get_msr()
457 *pdata = synic->msg_page; in synic_get_msr()
463 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); in synic_get_msr()
475 struct kvm_lapic_irq irq; in synic_set_irq() local
478 if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm)) in synic_set_irq()
479 return -EINVAL; in synic_set_irq()
481 if (sint >= ARRAY_SIZE(synic->sint)) in synic_set_irq()
482 return -EINVAL; in synic_set_irq()
486 return -ENOENT; in synic_set_irq()
488 memset(&irq, 0, sizeof(irq)); in synic_set_irq()
489 irq.shorthand = APIC_DEST_SELF; in synic_set_irq()
490 irq.dest_mode = APIC_DEST_PHYSICAL; in synic_set_irq()
491 irq.delivery_mode = APIC_DM_FIXED; in synic_set_irq()
492 irq.vector = vector; in synic_set_irq()
493 irq.level = 1; in synic_set_irq()
495 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); in synic_set_irq()
496 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); in synic_set_irq()
506 return -1; in kvm_hv_synic_set_irq()
508 synic = synic_get(kvm, e->hv_sint.vcpu); in kvm_hv_synic_set_irq()
510 return -EINVAL; in kvm_hv_synic_set_irq()
512 return synic_set_irq(synic, e->hv_sint.sint); in kvm_hv_synic_set_irq()
520 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); in kvm_hv_synic_send_eoi()
522 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in kvm_hv_synic_send_eoi()
533 return -EINVAL; in kvm_hv_set_sint_gsi()
535 if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) in kvm_hv_set_sint_gsi()
536 return -EINVAL; in kvm_hv_set_sint_gsi()
538 atomic_set(&synic->sint_to_gsi[sint], gsi); in kvm_hv_set_sint_gsi()
548 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_hv_irq_routing_update()
549 lockdep_is_held(&kvm->irq_lock)); in kvm_hv_irq_routing_update()
551 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { in kvm_hv_irq_routing_update()
552 hlist_for_each_entry(e, &irq_rt->map[gsi], link) { in kvm_hv_irq_routing_update()
553 if (e->type == KVM_IRQ_ROUTING_HV_SINT) in kvm_hv_irq_routing_update()
554 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, in kvm_hv_irq_routing_update()
555 e->hv_sint.sint, gsi); in kvm_hv_irq_routing_update()
565 synic->version = HV_SYNIC_VERSION_1; in synic_init()
566 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_init()
567 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); in synic_init()
568 atomic_set(&synic->sint_to_gsi[i], -1); in synic_init()
576 u64 tsc; in get_time_ref_counter() local
579 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up, in get_time_ref_counter()
582 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) in get_time_ref_counter()
586 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in get_time_ref_counter()
587 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) in get_time_ref_counter()
588 + hv->tsc_ref.tsc_offset; in get_time_ref_counter()
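Read together, the fragment above has two paths: before the reference TSC page is published, the partition reference counter is derived from kvmclock; afterwards it is computed directly from the guest TSC with the page's scale and offset. A hedged recap in 100ns units (the kvmclock fallback shown here, dividing nanoseconds by 100, is a sketch of the elided branch):

/* Sketch of get_time_ref_counter()'s two paths, result in 100ns units. */
if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
	return div_u64(get_kvmclock_ns(kvm), 100);	/* kvmclock fallback */

tsc = kvm_read_l1_tsc(vcpu, rdtsc());
return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) + hv->tsc_ref.tsc_offset;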
596 set_bit(stimer->index, in stimer_mark_pending()
597 to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_mark_pending()
607 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_cleanup()
608 stimer->index); in stimer_cleanup()
610 hrtimer_cancel(&stimer->timer); in stimer_cleanup()
611 clear_bit(stimer->index, in stimer_cleanup()
612 to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_cleanup()
613 stimer->msg_pending = false; in stimer_cleanup()
614 stimer->exp_time = 0; in stimer_cleanup()
622 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_timer_callback()
623 stimer->index); in stimer_timer_callback()
631 * a) stimer->count is not equal to 0
632 * b) stimer->config has HV_STIMER_ENABLE flag
639 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm); in stimer_start()
642 if (stimer->config.periodic) { in stimer_start()
643 if (stimer->exp_time) { in stimer_start()
644 if (time_now >= stimer->exp_time) { in stimer_start()
647 div64_u64_rem(time_now - stimer->exp_time, in stimer_start()
648 stimer->count, &remainder); in stimer_start()
649 stimer->exp_time = in stimer_start()
650 time_now + (stimer->count - remainder); in stimer_start()
653 stimer->exp_time = time_now + stimer->count; in stimer_start()
656 hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
657 stimer->index, in stimer_start()
658 time_now, stimer->exp_time); in stimer_start()
660 hrtimer_start(&stimer->timer, in stimer_start()
662 100 * (stimer->exp_time - time_now)), in stimer_start()
666 stimer->exp_time = stimer->count; in stimer_start()
667 if (time_now >= stimer->count) { in stimer_start()
669 * Expire timer according to Hypervisor Top-Level Functional in stimer_start()
678 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
679 stimer->index, in stimer_start()
680 time_now, stimer->count); in stimer_start()
682 hrtimer_start(&stimer->timer, in stimer_start()
683 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), in stimer_start()
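Both branches above operate in the partition reference time domain (100ns ticks), which is why the hrtimer deadline is computed as 100 * (target - now) nanoseconds. A minimal sketch of the periodic catch-up step in isolation (names are illustrative; 'count' is the period in 100ns units):

/* Advance a periodic deadline past 'time_now' while keeping its phase. */
static u64 next_periodic_exp(u64 time_now, u64 exp_time, u64 count)
{
	u64 remainder;

	if (time_now < exp_time)
		return exp_time;	/* not expired yet, keep the deadline */

	div64_u64_rem(time_now - exp_time, count, &remainder);
	return time_now + (count - remainder);
}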
692 old_config = {.as_uint64 = stimer->config.as_uint64}; in stimer_set_config()
697 if (!synic->active && (!host || config)) in stimer_set_config()
700 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && in stimer_set_config()
701 !(hv_vcpu->cpuid_cache.features_edx & in stimer_set_config()
705 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_config()
706 stimer->index, config, host); in stimer_set_config()
712 stimer->config.as_uint64 = new_config.as_uint64; in stimer_set_config()
714 if (stimer->config.enable) in stimer_set_config()
726 if (!synic->active && (!host || count)) in stimer_set_count()
729 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_count()
730 stimer->index, count, host); in stimer_set_count()
733 stimer->count = count; in stimer_set_count()
735 if (stimer->count == 0) in stimer_set_count()
736 stimer->config.enable = 0; in stimer_set_count()
737 else if (stimer->config.auto_enable) in stimer_set_count()
738 stimer->config.enable = 1; in stimer_set_count()
741 if (stimer->config.enable) in stimer_set_count()
749 *pconfig = stimer->config.as_uint64; in stimer_get_config()
755 *pcount = stimer->count; in stimer_get_count()
768 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) in synic_deliver_msg()
769 return -ENOENT; in synic_deliver_msg()
771 msg_page_gfn = synic->msg_page >> PAGE_SHIFT; in synic_deliver_msg()
774 * Strictly following the spec-mandated ordering would assume setting in synic_deliver_msg()
799 return -EAGAIN; in synic_deliver_msg()
803 sizeof(src_msg->header) + in synic_deliver_msg()
804 src_msg->header.payload_size); in synic_deliver_msg()
812 return -EFAULT; in synic_deliver_msg()
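The elided checks around this copy implement the SynIC message-slot protocol: a new message may only be written when the per-SINT slot in the message page is free (message_type == HVMSG_NONE); otherwise the slot's pending flag is set and -EAGAIN asks the caller to retry after the guest signals EOM. A hedged sketch using a direct pointer instead of the guest-memory accessors used above:

/* Sketch only: the real code reads/writes the slot via kvm_vcpu_*_guest_page(). */
static int try_deliver_msg(struct hv_message *slot, const struct hv_message *src)
{
	if (slot->header.message_type != HVMSG_NONE) {
		/* Slot busy: flag it so the guest's EOM triggers a retry. */
		slot->header.message_flags.msg_pending = 1;
		return -EAGAIN;
	}

	memcpy(slot, src, sizeof(src->header) + src->header.payload_size);
	return 0;
}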
819 struct hv_message *msg = &stimer->msg; in stimer_send_msg()
821 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_send_msg()
827 bool no_retry = stimer->config.periodic; in stimer_send_msg()
829 payload->expiration_time = stimer->exp_time; in stimer_send_msg()
830 payload->delivery_time = get_time_ref_counter(vcpu->kvm); in stimer_send_msg()
832 stimer->config.sintx, msg, in stimer_send_msg()
839 struct kvm_lapic_irq irq = { in stimer_notify_direct() local
841 .vector = stimer->config.apic_vector in stimer_notify_direct()
845 return !kvm_apic_set_irq(vcpu, &irq, NULL); in stimer_notify_direct()
851 int r, direct = stimer->config.direct_mode; in stimer_expiration()
853 stimer->msg_pending = true; in stimer_expiration()
858 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_expiration()
859 stimer->index, direct, r); in stimer_expiration()
861 stimer->msg_pending = false; in stimer_expiration()
862 if (!(stimer->config.periodic)) in stimer_expiration()
863 stimer->config.enable = 0; in stimer_expiration()
877 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
878 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
879 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
880 if (stimer->config.enable) { in kvm_hv_process_stimers()
881 exp_time = stimer->exp_time; in kvm_hv_process_stimers()
885 get_time_ref_counter(vcpu->kvm); in kvm_hv_process_stimers()
890 if ((stimer->config.enable) && in kvm_hv_process_stimers()
891 stimer->count) { in kvm_hv_process_stimers()
892 if (!stimer->msg_pending) in kvm_hv_process_stimers()
908 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
909 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
912 vcpu->arch.hyperv = NULL; in kvm_hv_vcpu_uninit()
922 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
924 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in kvm_hv_assist_page_enabled()
933 return -EFAULT; in kvm_hv_get_assist_page()
935 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, in kvm_hv_get_assist_page()
936 &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page)); in kvm_hv_get_assist_page()
942 struct hv_message *msg = &stimer->msg; in stimer_prepare_msg()
944 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_prepare_msg()
946 memset(&msg->header, 0, sizeof(msg->header)); in stimer_prepare_msg()
947 msg->header.message_type = HVMSG_TIMER_EXPIRED; in stimer_prepare_msg()
948 msg->header.payload_size = sizeof(*payload); in stimer_prepare_msg()
950 payload->timer_index = stimer->index; in stimer_prepare_msg()
951 payload->expiration_time = 0; in stimer_prepare_msg()
952 payload->delivery_time = 0; in stimer_prepare_msg()
958 stimer->index = timer_index; in stimer_init()
959 hrtimer_setup(&stimer->timer, stimer_timer_callback, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in stimer_init()
973 return -ENOMEM; in kvm_hv_vcpu_init()
975 vcpu->arch.hyperv = hv_vcpu; in kvm_hv_vcpu_init()
976 hv_vcpu->vcpu = vcpu; in kvm_hv_vcpu_init()
978 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
980 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
981 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
982 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
984 hv_vcpu->vp_index = vcpu->vcpu_idx; in kvm_hv_vcpu_init()
987 INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries); in kvm_hv_vcpu_init()
988 spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock); in kvm_hv_vcpu_init()
1005 synic->active = true; in kvm_hv_activate_synic()
1006 synic->dont_zero_synic_pages = dont_zero_synic_pages; in kvm_hv_activate_synic()
1007 synic->control = HV_SYNIC_CONTROL_ENABLE; in kvm_hv_activate_synic()
1039 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_get_crash_data()
1042 return -EINVAL; in kvm_hv_msr_get_crash_data()
1044 *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; in kvm_hv_msr_get_crash_data()
1052 *pdata = hv->hv_crash_ctl; in kvm_hv_msr_get_crash_ctl()
1060 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; in kvm_hv_msr_set_crash_ctl()
1068 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_set_crash_data()
1071 return -EINVAL; in kvm_hv_msr_set_crash_data()
1073 hv->hv_crash_param[array_index_nospec(index, size)] = data; in kvm_hv_msr_set_crash_data()
1078 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1082 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1085 * Hyper-V formula:
1088 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1090 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1091 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1095 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1096 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1098 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1099 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1102 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1104 * - tsc_timestamp * scale / 2^64
1107 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1108 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
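A quick numeric check of the derivation, assuming a 1 GHz guest TSC, i.e. kvmclock parameters tsc_shift = 0 and tsc_to_system_mul = 2^32 (one nanosecond per tick):

	scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100
	      = 2^32 * 2^32 / 100
	      = 2^64 / 100

	ticks * scale / 2^64 = ticks / 100

so 100 TSC ticks (100 ns at 1 GHz) advance the reference counter by exactly one unit, matching the 100ns granularity Hyper-V defines for reference time.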
1117 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) in compute_tsc_page_parameters()
1123 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) in compute_tsc_page_parameters()
1124 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) in compute_tsc_page_parameters()
1126 max_mul = 100ull << (32 - hv_clock->tsc_shift); in compute_tsc_page_parameters()
1127 if (hv_clock->tsc_to_system_mul >= max_mul) in compute_tsc_page_parameters()
1134 tsc_ref->tsc_scale = in compute_tsc_page_parameters()
1135 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), in compute_tsc_page_parameters()
1136 hv_clock->tsc_to_system_mul, in compute_tsc_page_parameters()
1139 tsc_ref->tsc_offset = hv_clock->system_time; in compute_tsc_page_parameters()
1140 do_div(tsc_ref->tsc_offset, 100); in compute_tsc_page_parameters()
1141 tsc_ref->tsc_offset -= in compute_tsc_page_parameters()
1142 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); in compute_tsc_page_parameters()
1147 * Don't touch TSC page values if the guest has opted for TSC emulation after
1148 * migration. KVM doesn't fully support reenlightenment notifications and TSC
1149 * access emulation and Hyper-V is known to expect the values in TSC page to
1150 * stay constant before TSC access emulation is disabled from guest side
1151 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
1152 * frequency and guest visible TSC value across migration (and prevent it when
1153 * TSC scaling is unsupported).
1157 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && in tsc_page_update_unsafe()
1158 hv->hv_tsc_emulation_control; in tsc_page_update_unsafe()
1168 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1171 guard(mutex)(&hv->hv_lock); in kvm_hv_setup_tsc_page()
1173 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || in kvm_hv_setup_tsc_page()
1174 hv->hv_tsc_page_status == HV_TSC_PAGE_SET || in kvm_hv_setup_tsc_page()
1175 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) in kvm_hv_setup_tsc_page()
1178 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1181 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
1183 * Because the TSC parameters only vary when there is a in kvm_hv_setup_tsc_page()
1191 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1194 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1202 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_setup_tsc_page()
1204 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1207 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) in kvm_hv_setup_tsc_page()
1212 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1216 * Now switch to the TSC page mechanism by writing the sequence. in kvm_hv_setup_tsc_page()
1222 /* Write the struct entirely before the non-zero sequence. */ in kvm_hv_setup_tsc_page()
1225 hv->tsc_ref.tsc_sequence = tsc_seq; in kvm_hv_setup_tsc_page()
1227 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1230 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1234 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; in kvm_hv_setup_tsc_page()
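The sequence handling above (publish 0, write the body, wmb(), then publish the new non-zero sequence) is the writer half of the TSC page's seqcount-style protocol. A hedged sketch of the matching guest-side reader, assuming the standard ms_hyperv_tsc_page layout (tsc_sequence, tsc_scale, tsc_offset); this is an illustration, not the helper Linux guests actually use:

static u64 read_tsc_page(const volatile struct ms_hyperv_tsc_page *tsc_pg)
{
	u32 seq;
	u64 tsc, scale, offset;

	do {
		seq = READ_ONCE(tsc_pg->tsc_sequence);
		if (!seq)
			return U64_MAX;	/* page invalid: fall back to the MSR */
		smp_rmb();

		scale  = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		tsc    = rdtsc_ordered();

		smp_rmb();
	} while (READ_ONCE(tsc_pg->tsc_sequence) != seq);

	return mul_u64_u64_shr(tsc, scale, 64) + offset;
}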
1241 mutex_lock(&hv->hv_lock); in kvm_hv_request_tsc_page_update()
1243 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET && in kvm_hv_request_tsc_page_update()
1245 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; in kvm_hv_request_tsc_page_update()
1247 mutex_unlock(&hv->hv_lock); in kvm_hv_request_tsc_page_update()
1252 if (!hv_vcpu->enforce_cpuid) in hv_check_msr_access()
1258 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1261 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1264 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1267 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1270 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1273 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1281 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1291 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1297 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1301 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1306 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1309 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1313 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1317 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1330 * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in !XSAVES && XSAVEC
1338 struct kvm *kvm = vcpu->kvm; in __kvm_hv_xsaves_xsavec_maybe_warn()
1342 if (hv->xsaves_xsavec_checked) in __kvm_hv_xsaves_xsavec_maybe_warn()
1345 if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) != in __kvm_hv_xsaves_xsavec_maybe_warn()
1349 hv->xsaves_xsavec_checked = true; in __kvm_hv_xsaves_xsavec_maybe_warn()
1352 if (atomic_read(&kvm->online_vcpus) < 2) in __kvm_hv_xsaves_xsavec_maybe_warn()
1365 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_xsaves_xsavec_maybe_warn()
1367 if (!vcpu->arch.hyperv_enabled || in kvm_hv_xsaves_xsavec_maybe_warn()
1368 hv->xsaves_xsavec_checked) in kvm_hv_xsaves_xsavec_maybe_warn()
1371 mutex_lock(&hv->hv_lock); in kvm_hv_xsaves_xsavec_maybe_warn()
1373 mutex_unlock(&hv->hv_lock); in kvm_hv_xsaves_xsavec_maybe_warn()
1379 struct kvm *kvm = vcpu->kvm; in kvm_hv_set_msr_pw()
1387 hv->hv_guest_os_id = data; in kvm_hv_set_msr_pw()
1389 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1390 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_set_msr_pw()
1398 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1401 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1406 * If Xen and Hyper-V hypercalls are both enabled, disambiguate in kvm_hv_set_msr_pw()
1408 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just in kvm_hv_set_msr_pw()
1409 * going to be clobbered on 64-bit. in kvm_hv_set_msr_pw()
1430 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1434 hv->hv_tsc_page = data; in kvm_hv_set_msr_pw()
1435 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { in kvm_hv_set_msr_pw()
1437 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; in kvm_hv_set_msr_pw()
1439 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; in kvm_hv_set_msr_pw()
1442 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET; in kvm_hv_set_msr_pw()
1447 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_set_msr_pw()
1455 hv->hv_crash_param[0], in kvm_hv_set_msr_pw()
1456 hv->hv_crash_param[1], in kvm_hv_set_msr_pw()
1457 hv->hv_crash_param[2], in kvm_hv_set_msr_pw()
1458 hv->hv_crash_param[3], in kvm_hv_set_msr_pw()
1459 hv->hv_crash_param[4]); in kvm_hv_set_msr_pw()
1467 vcpu_debug(vcpu, "hyper-v reset requested\n"); in kvm_hv_set_msr_pw()
1472 hv->hv_reenlightenment_control = data; in kvm_hv_set_msr_pw()
1475 hv->hv_tsc_emulation_control = data; in kvm_hv_set_msr_pw()
1481 hv->hv_tsc_emulation_status = data; in kvm_hv_set_msr_pw()
1484 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr_pw()
1494 if (!host && hv->hv_invtsc_control && !data) in kvm_hv_set_msr_pw()
1497 hv->hv_invtsc_control = data; in kvm_hv_set_msr_pw()
1528 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr()
1534 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1543 if (hv_vcpu->vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1544 atomic_inc(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1545 else if (new_vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1546 atomic_dec(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1548 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1556 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1573 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1590 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1603 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_set_msr()
1612 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_set_msr()
1619 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr()
1635 struct kvm *kvm = vcpu->kvm; in kvm_hv_get_msr_pw()
1643 data = hv->hv_guest_os_id; in kvm_hv_get_msr_pw()
1646 data = hv->hv_hypercall; in kvm_hv_get_msr_pw()
1652 data = hv->hv_tsc_page; in kvm_hv_get_msr_pw()
1656 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_get_msr_pw()
1664 data = hv->hv_reenlightenment_control; in kvm_hv_get_msr_pw()
1667 data = hv->hv_tsc_emulation_control; in kvm_hv_get_msr_pw()
1670 data = hv->hv_tsc_emulation_status; in kvm_hv_get_msr_pw()
1673 data = hv->hv_invtsc_control; in kvm_hv_get_msr_pw()
1698 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1707 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1710 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
1723 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_get_msr()
1732 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_get_msr()
1738 data = (u64)vcpu->arch.virtual_tsc_khz * 1000; in kvm_hv_get_msr()
1742 vcpu->kvm->arch.apic_bus_cycle_ns); in kvm_hv_get_msr()
1754 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr_common()
1756 if (!host && !vcpu->arch.hyperv_enabled) in kvm_hv_set_msr_common()
1765 mutex_lock(&hv->hv_lock); in kvm_hv_set_msr_common()
1767 mutex_unlock(&hv->hv_lock); in kvm_hv_set_msr_common()
1775 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_get_msr_common()
1777 if (!host && !vcpu->arch.hyperv_enabled) in kvm_hv_get_msr_common()
1786 mutex_lock(&hv->hv_lock); in kvm_hv_get_msr_common()
1788 mutex_unlock(&hv->hv_lock); in kvm_hv_get_msr_common()
1798 bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes); in sparse_set_to_vcpu_mask()
1851 sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0)); in hv_is_vp_in_sparse_set()
1889 * still needs to validate the guest input (though the non-XMM path in kvm_hv_get_hc_data()
1895 if (hc->fast) { in kvm_hv_get_hc_data()
1900 if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves) in kvm_hv_get_hc_data()
1904 j = i + hc->consumed_xmm_halves; in kvm_hv_get_hc_data()
1906 data[i] = sse128_hi(hc->xmm[j / 2]); in kvm_hv_get_hc_data()
1908 data[i] = sse128_lo(hc->xmm[j / 2]); in kvm_hv_get_hc_data()
1913 return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data, in kvm_hv_get_hc_data()
1920 if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS) in kvm_get_sparse_vp_set()
1921 return -EINVAL; in kvm_get_sparse_vp_set()
1924 return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS, in kvm_get_sparse_vp_set()
1930 return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries); in kvm_hv_get_tlb_flush_entries()
1943 spin_lock(&tlb_flush_fifo->write_lock); in hv_tlb_flush_enqueue()
1950 if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) { in hv_tlb_flush_enqueue()
1951 WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count); in hv_tlb_flush_enqueue()
1959 kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1); in hv_tlb_flush_enqueue()
1962 spin_unlock(&tlb_flush_fifo->write_lock); in hv_tlb_flush_enqueue()
1974 return -EINVAL; in kvm_hv_vcpu_flush_tlb()
1978 count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE); in kvm_hv_vcpu_flush_tlb()
1995 ++vcpu->stat.tlb_flush; in kvm_hv_vcpu_flush_tlb()
2000 kfifo_reset_out(&tlb_flush_fifo->entries); in kvm_hv_vcpu_flush_tlb()
2003 return -ENOSPC; in kvm_hv_vcpu_flush_tlb()
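The enqueue side (hv_tlb_flush_enqueue() above) degrades to a single flush-all entry when the requested GVA list does not fit in the fixed-size fifo; the drain side shown here returns -ENOSPC so the caller performs a full TLB flush in that case. A hedged sketch of the drain loop; the struct name and the 'entry_is_flush_all'/'flush_one_gva' helpers are illustrative stand-ins for the real entry decoding:

/* Sketch: drain the per-vCPU fifo, falling back to a full flush if needed. */
static int drain_tlb_flush_fifo(struct kvm_vcpu *vcpu,
				struct kvm_vcpu_hv_tlb_flush_fifo *fifo)
{
	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
	int i, count;

	count = kfifo_out(&fifo->entries, entries, ARRAY_SIZE(entries));
	for (i = 0; i < count; i++) {
		if (entry_is_flush_all(entries[i]))
			goto flush_all;
		flush_one_gva(vcpu, entries[i]);
	}
	++vcpu->stat.tlb_flush;
	return 0;

flush_all:
	kfifo_reset_out(&fifo->entries);
	return -ENOSPC;		/* caller falls back to a full TLB flush */
}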
2009 unsigned long *vcpu_mask = hv_vcpu->vcpu_mask; in kvm_hv_flush_tlb()
2010 u64 *sparse_banks = hv_vcpu->sparse_banks; in kvm_hv_flush_tlb()
2011 struct kvm *kvm = vcpu->kvm; in kvm_hv_flush_tlb()
2021 u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1]; in kvm_hv_flush_tlb()
2029 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS in kvm_hv_flush_tlb()
2042 if (!hc->fast && is_guest_mode(vcpu)) { in kvm_hv_flush_tlb()
2043 hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL); in kvm_hv_flush_tlb()
2044 if (unlikely(hc->ingpa == INVALID_GPA)) in kvm_hv_flush_tlb()
2048 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || in kvm_hv_flush_tlb()
2049 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) { in kvm_hv_flush_tlb()
2050 if (hc->fast) { in kvm_hv_flush_tlb()
2051 flush.address_space = hc->ingpa; in kvm_hv_flush_tlb()
2052 flush.flags = hc->outgpa; in kvm_hv_flush_tlb()
2053 flush.processor_mask = sse128_lo(hc->xmm[0]); in kvm_hv_flush_tlb()
2054 hc->consumed_xmm_halves = 1; in kvm_hv_flush_tlb()
2056 if (unlikely(kvm_read_guest(kvm, hc->ingpa, in kvm_hv_flush_tlb()
2059 hc->data_offset = sizeof(flush); in kvm_hv_flush_tlb()
2079 if (hc->fast) { in kvm_hv_flush_tlb()
2080 flush_ex.address_space = hc->ingpa; in kvm_hv_flush_tlb()
2081 flush_ex.flags = hc->outgpa; in kvm_hv_flush_tlb()
2083 &hc->xmm[0], sizeof(hc->xmm[0])); in kvm_hv_flush_tlb()
2084 hc->consumed_xmm_halves = 2; in kvm_hv_flush_tlb()
2086 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, in kvm_hv_flush_tlb()
2089 hc->data_offset = sizeof(flush_ex); in kvm_hv_flush_tlb()
2101 if (hc->var_cnt != hweight64(valid_bank_mask)) in kvm_hv_flush_tlb()
2105 if (!hc->var_cnt) in kvm_hv_flush_tlb()
2113 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU in kvm_hv_flush_tlb()
2114 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs' in kvm_hv_flush_tlb()
2119 if (hc->fast) in kvm_hv_flush_tlb()
2120 hc->consumed_xmm_halves += hc->var_cnt; in kvm_hv_flush_tlb()
2122 hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]); in kvm_hv_flush_tlb()
2125 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || in kvm_hv_flush_tlb()
2126 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || in kvm_hv_flush_tlb()
2127 hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) { in kvm_hv_flush_tlb()
2136 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't in kvm_hv_flush_tlb()
2143 tlb_flush_entries, hc->rep_cnt); in kvm_hv_flush_tlb()
2156 tlb_flush_entries, hc->rep_cnt); in kvm_hv_flush_tlb()
2178 if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id) in kvm_hv_flush_tlb()
2182 !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask, in kvm_hv_flush_tlb()
2189 tlb_flush_entries, hc->rep_cnt); in kvm_hv_flush_tlb()
2198 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); in kvm_hv_flush_tlb()
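The value returned above packs the completion status together with the number of completed repetitions, which is how rep hypercalls report progress back to the guest. A minimal sketch of the packing (HV_HYPERCALL_REP_COMP_OFFSET is 32 per the TLFS; the helper name is illustrative):

/* Low 16 bits: status code; bits starting at 32: reps completed. */
static u64 hv_hypercall_result(u16 status, u16 reps_completed)
{
	return (u64)status |
	       ((u64)reps_completed << HV_HYPERCALL_REP_COMP_OFFSET);
}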
2204 struct kvm_lapic_irq irq = { in kvm_hv_send_ipi_to_many() local
2218 kvm_apic_set_irq(vcpu, &irq, NULL); in kvm_hv_send_ipi_to_many()
2225 u64 *sparse_banks = hv_vcpu->sparse_banks; in kvm_hv_send_ipi()
2226 struct kvm *kvm = vcpu->kvm; in kvm_hv_send_ipi()
2236 if (hc->code == HVCALL_SEND_IPI) { in kvm_hv_send_ipi()
2237 if (!hc->fast) { in kvm_hv_send_ipi()
2238 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, in kvm_hv_send_ipi()
2245 if (unlikely(hc->ingpa >> 32 != 0)) in kvm_hv_send_ipi()
2247 sparse_banks[0] = hc->outgpa; in kvm_hv_send_ipi()
2248 vector = (u32)hc->ingpa; in kvm_hv_send_ipi()
2255 if (!hc->fast) { in kvm_hv_send_ipi()
2256 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, in kvm_hv_send_ipi()
2260 send_ipi_ex.vector = (u32)hc->ingpa; in kvm_hv_send_ipi()
2261 send_ipi_ex.vp_set.format = hc->outgpa; in kvm_hv_send_ipi()
2262 send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]); in kvm_hv_send_ipi()
2273 if (hc->var_cnt != hweight64(valid_bank_mask)) in kvm_hv_send_ipi()
2279 if (!hc->var_cnt) in kvm_hv_send_ipi()
2282 if (!hc->fast) in kvm_hv_send_ipi()
2283 hc->data_offset = offsetof(struct hv_send_ipi_ex, in kvm_hv_send_ipi()
2286 hc->consumed_xmm_halves = 1; in kvm_hv_send_ipi()
2310 vcpu->arch.hyperv_enabled = hyperv_enabled; in kvm_hv_set_cpuid()
2314 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is in kvm_hv_set_cpuid()
2317 WARN_ON_ONCE(vcpu->arch.hyperv_enabled); in kvm_hv_set_cpuid()
2321 memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache)); in kvm_hv_set_cpuid()
2323 if (!vcpu->arch.hyperv_enabled) in kvm_hv_set_cpuid()
2328 hv_vcpu->cpuid_cache.features_eax = entry->eax; in kvm_hv_set_cpuid()
2329 hv_vcpu->cpuid_cache.features_ebx = entry->ebx; in kvm_hv_set_cpuid()
2330 hv_vcpu->cpuid_cache.features_edx = entry->edx; in kvm_hv_set_cpuid()
2335 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; in kvm_hv_set_cpuid()
2336 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; in kvm_hv_set_cpuid()
2341 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; in kvm_hv_set_cpuid()
2345 hv_vcpu->cpuid_cache.nested_eax = entry->eax; in kvm_hv_set_cpuid()
2346 hv_vcpu->cpuid_cache.nested_ebx = entry->ebx; in kvm_hv_set_cpuid()
2366 hv_vcpu->enforce_cpuid = enforce; in kvm_hv_set_enforce_cpuid()
2391 kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa, in kvm_hv_hypercall_complete()
2397 ++vcpu->stat.hypercalls; in kvm_hv_hypercall_complete()
2402 kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu); in kvm_hv_hypercall_complete()
2409 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); in kvm_hv_hypercall_complete_userspace()
2414 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hvcall_signal_event()
2417 if (unlikely(!hc->fast)) { in kvm_hvcall_signal_event()
2419 gpa_t gpa = hc->ingpa; in kvm_hvcall_signal_event()
2421 if ((gpa & (__alignof__(hc->ingpa) - 1)) || in kvm_hvcall_signal_event()
2422 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE) in kvm_hvcall_signal_event()
2426 &hc->ingpa, sizeof(hc->ingpa)); in kvm_hvcall_signal_event()
2432 * Per spec, bits 32-47 contain the extra "flag number". However, we in kvm_hvcall_signal_event()
2436 if (hc->ingpa & 0xffff00000000ULL) in kvm_hvcall_signal_event()
2438 /* remaining bits are reserved-zero */ in kvm_hvcall_signal_event()
2439 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK) in kvm_hvcall_signal_event()
2442 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ in kvm_hvcall_signal_event()
2444 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa); in kvm_hvcall_signal_event()
2455 switch (hc->code) { in is_xmm_fast_hypercall()
2473 _kvm_read_sse_reg(reg, &hc->xmm[reg]); in kvm_hv_hypercall_read_xmm()
2479 if (!hv_vcpu->enforce_cpuid) in hv_check_hypercall_access()
2484 return hv_vcpu->cpuid_cache.enlightenments_ebx && in hv_check_hypercall_access()
2485 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; in hv_check_hypercall_access()
2487 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; in hv_check_hypercall_access()
2489 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; in hv_check_hypercall_access()
2497 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || in hv_check_hypercall_access()
2498 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; in hv_check_hypercall_access()
2501 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2507 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2510 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2515 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2518 return hv_vcpu->cpuid_cache.features_ebx & in hv_check_hypercall_access()
2535 * per HYPER-V spec in kvm_hv_hypercall()
2579 if (unlikely(hv_vcpu->enforce_cpuid && in kvm_hv_hypercall()
2580 !(hv_vcpu->cpuid_cache.features_edx & in kvm_hv_hypercall()
2608 if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) { in kvm_hv_hypercall()
2667 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { in kvm_hv_hypercall()
2688 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
2689 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
2690 vcpu->run->hyperv.u.hcall.input = hc.param; in kvm_hv_hypercall()
2691 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa; in kvm_hv_hypercall()
2692 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa; in kvm_hv_hypercall()
2693 vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace; in kvm_hv_hypercall()
2701 mutex_init(&hv->hv_lock); in kvm_hv_init_vm()
2702 idr_init(&hv->conn_to_evt); in kvm_hv_init_vm()
2711 idr_for_each_entry(&hv->conn_to_evt, eventfd, i) in kvm_hv_destroy_vm()
2713 idr_destroy(&hv->conn_to_evt); in kvm_hv_destroy_vm()
2726 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2727 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, in kvm_hv_eventfd_assign()
2729 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2734 if (ret == -ENOSPC) in kvm_hv_eventfd_assign()
2735 ret = -EEXIST; in kvm_hv_eventfd_assign()
2745 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
2746 eventfd = idr_remove(&hv->conn_to_evt, conn_id); in kvm_hv_eventfd_deassign()
2747 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
2750 return -ENOENT; in kvm_hv_eventfd_deassign()
2752 synchronize_srcu(&kvm->srcu); in kvm_hv_eventfd_deassign()
2759 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || in kvm_vm_ioctl_hv_eventfd()
2760 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) in kvm_vm_ioctl_hv_eventfd()
2761 return -EINVAL; in kvm_vm_ioctl_hv_eventfd()
2763 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) in kvm_vm_ioctl_hv_eventfd()
2764 return kvm_hv_eventfd_deassign(kvm, args->conn_id); in kvm_vm_ioctl_hv_eventfd()
2765 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); in kvm_vm_ioctl_hv_eventfd()
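For context on where the conn_to_evt IDR entries come from: userspace pairs a connection ID with an eventfd through the KVM_HYPERV_EVENTFD VM ioctl, and kvm_hvcall_signal_event() then signals that eventfd on HvSignalEvent. A hedged userspace sketch (error handling trimmed; conn_id 5 is an arbitrary example):

	/* Requires <sys/eventfd.h>, <sys/ioctl.h>, <linux/kvm.h>. */
	struct kvm_hyperv_eventfd hvevfd = {
		.conn_id = 5,
		.fd      = eventfd(0, EFD_CLOEXEC),
		.flags   = 0,	/* pass KVM_HYPERV_EVENTFD_DEASSIGN to remove */
	};

	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);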
2786 if (kvm_x86_ops.nested_ops->get_evmcs_version) in kvm_get_hv_cpuid()
2787 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); in kvm_get_hv_cpuid()
2789 if (cpuid->nent < nent) in kvm_get_hv_cpuid()
2790 return -E2BIG; in kvm_get_hv_cpuid()
2792 if (cpuid->nent > nent) in kvm_get_hv_cpuid()
2793 cpuid->nent = nent; in kvm_get_hv_cpuid()
2799 switch (ent->function) { in kvm_get_hv_cpuid()
2803 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; in kvm_get_hv_cpuid()
2804 ent->ebx = signature[0]; in kvm_get_hv_cpuid()
2805 ent->ecx = signature[1]; in kvm_get_hv_cpuid()
2806 ent->edx = signature[2]; in kvm_get_hv_cpuid()
2810 ent->eax = HYPERV_CPUID_SIGNATURE_EAX; in kvm_get_hv_cpuid()
2815 * We implement some Hyper-V 2016 functions so let's use in kvm_get_hv_cpuid()
2818 ent->eax = 0x00003839; in kvm_get_hv_cpuid()
2819 ent->ebx = 0x000A0000; in kvm_get_hv_cpuid()
2823 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; in kvm_get_hv_cpuid()
2824 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; in kvm_get_hv_cpuid()
2825 ent->eax |= HV_MSR_SYNIC_AVAILABLE; in kvm_get_hv_cpuid()
2826 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; in kvm_get_hv_cpuid()
2827 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; in kvm_get_hv_cpuid()
2828 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; in kvm_get_hv_cpuid()
2829 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; in kvm_get_hv_cpuid()
2830 ent->eax |= HV_MSR_RESET_AVAILABLE; in kvm_get_hv_cpuid()
2831 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; in kvm_get_hv_cpuid()
2832 ent->eax |= HV_ACCESS_FREQUENCY_MSRS; in kvm_get_hv_cpuid()
2833 ent->eax |= HV_ACCESS_REENLIGHTENMENT; in kvm_get_hv_cpuid()
2834 ent->eax |= HV_ACCESS_TSC_INVARIANT; in kvm_get_hv_cpuid()
2836 ent->ebx |= HV_POST_MESSAGES; in kvm_get_hv_cpuid()
2837 ent->ebx |= HV_SIGNAL_EVENTS; in kvm_get_hv_cpuid()
2838 ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS; in kvm_get_hv_cpuid()
2840 ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; in kvm_get_hv_cpuid()
2841 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; in kvm_get_hv_cpuid()
2842 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; in kvm_get_hv_cpuid()
2844 ent->ebx |= HV_DEBUGGING; in kvm_get_hv_cpuid()
2845 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; in kvm_get_hv_cpuid()
2846 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; in kvm_get_hv_cpuid()
2847 ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH; in kvm_get_hv_cpuid()
2850 * Direct Synthetic timers only make sense with in-kernel in kvm_get_hv_cpuid()
2854 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; in kvm_get_hv_cpuid()
2859 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; in kvm_get_hv_cpuid()
2860 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; in kvm_get_hv_cpuid()
2861 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; in kvm_get_hv_cpuid()
2863 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; in kvm_get_hv_cpuid()
2864 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; in kvm_get_hv_cpuid()
2866 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; in kvm_get_hv_cpuid()
2868 ent->eax |= HV_X64_NO_NONARCH_CORESHARING; in kvm_get_hv_cpuid()
2870 ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED; in kvm_get_hv_cpuid()
2875 ent->ebx = 0x00000FFF; in kvm_get_hv_cpuid()
2881 ent->eax = KVM_MAX_VCPUS; in kvm_get_hv_cpuid()
2886 ent->ebx = 64; in kvm_get_hv_cpuid()
2891 ent->eax = evmcs_ver; in kvm_get_hv_cpuid()
2892 ent->eax |= HV_X64_NESTED_DIRECT_FLUSH; in kvm_get_hv_cpuid()
2893 ent->eax |= HV_X64_NESTED_MSR_BITMAP; in kvm_get_hv_cpuid()
2894 ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL; in kvm_get_hv_cpuid()
2900 ent->eax = 0; in kvm_get_hv_cpuid()
2901 ent->ebx = signature[0]; in kvm_get_hv_cpuid()
2902 ent->ecx = signature[1]; in kvm_get_hv_cpuid()
2903 ent->edx = signature[2]; in kvm_get_hv_cpuid()
2908 ent->eax = signature[0]; in kvm_get_hv_cpuid()
2912 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_get_hv_cpuid()
2922 return -EFAULT; in kvm_get_hv_cpuid()
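The leaf values assembled above are what userspace retrieves with the KVM_GET_SUPPORTED_HV_CPUID ioctl before programming the vCPU's Hyper-V CPUID. A hedged usage sketch (fixed-size buffer, error handling trimmed):

	struct {
		struct kvm_cpuid2 hdr;
		struct kvm_cpuid_entry2 entries[64];
	} hv_cpuid = { .hdr.nent = 64 };

	ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &hv_cpuid);
	/* hv_cpuid.hdr.nent now holds the number of filled entries. */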