// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V specific functions.
 *
 * Copyright (C) 2021, Red Hat Inc.
 */
#include <stdint.h>
#include "processor.h"
#include "hyperv.h"

const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
	kvm_fd = open_kvm_dev_path_or_exit();

	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	close(kvm_fd);
	return cpuid;
}

void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
{
	static struct kvm_cpuid2 *cpuid_full;
	const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
	int i, nent = 0;

	if (!cpuid_full) {
		cpuid_sys = kvm_get_supported_cpuid();
		cpuid_hv = kvm_get_supported_hv_cpuid();

		cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
		if (!cpuid_full) {
			perror("malloc");
			abort();
		}

		/* Need to skip KVM CPUID leaves 0x400000xx */
		for (i = 0; i < cpuid_sys->nent; i++) {
			if (cpuid_sys->entries[i].function >= 0x40000000 &&
			    cpuid_sys->entries[i].function < 0x40000100)
				continue;
			cpuid_full->entries[nent] = cpuid_sys->entries[i];
			nent++;
		}

		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
		cpuid_full->nent = nent + cpuid_hv->nent;
	}

	vcpu_init_cpuid(vcpu, cpuid_full);
}

const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);

	vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	return cpuid;
}

bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
{
	if (!kvm_has_cap(KVM_CAP_SYS_HYPERV_CPUID))
		return false;

	return kvm_cpuid_has(kvm_get_supported_hv_cpuid(), feature);
}

struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
						       vm_vaddr_t *p_hv_pages_gva)
{
	vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
	struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);

	/* Setup of a region of guest memory for the VP Assist page. */
	hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
	hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
	hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);

	/* Setup of a region of guest memory for the partition assist page. */
	hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
	hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
	hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);

	/* Setup of a region of guest memory for the enlightened VMCS. */
	hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
	hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
	hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);

	*p_hv_pages_gva = hv_pages_gva;
	return hv;
}

int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
{
	uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
		HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

	wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);

	current_vp_assist = vp_assist;

	return 0;
}
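
/*
 * Illustrative, compiled-out sketch (not part of this library): how a test
 * might gate itself on a Hyper-V feature and install the combined
 * KVM+Hyper-V CPUID on a vCPU.  The feature name
 * HV_X64_ENLIGHTENED_VMCS_RECOMMENDED is assumed to be provided by
 * hyperv.h; substitute whichever feature the test actually depends on.
 */
#if 0
static void example_hv_cpuid_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * kvm_hv_cpu_has() already returns false when
	 * KVM_CAP_SYS_HYPERV_CPUID is unavailable, so a single
	 * TEST_REQUIRE() covers both the capability and the feature bit.
	 */
	TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_ENLIGHTENED_VMCS_RECOMMENDED));

	/* Expose both the KVM leaves and the Hyper-V leaves to the guest. */
	vcpu_set_hv_cpuid(vcpu);
}
#endif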
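
/*
 * Illustrative, compiled-out sketch (not part of this library): allocating
 * the Hyper-V test pages on the host and enabling the VP Assist page from
 * guest code.  guest_code() and the bare run loop are assumptions about a
 * typical selftest; a real test would also process the guest's ucalls.
 */
#if 0
static void guest_code(vm_vaddr_t hv_pages_gva)
{
	struct hyperv_test_pages *hv = (struct hyperv_test_pages *)hv_pages_gva;

	/* Point HV_X64_MSR_VP_ASSIST_PAGE at the pre-allocated page. */
	enable_vp_assist(hv->vp_assist_gpa, hv->vp_assist);

	GUEST_DONE();
}

static void example_vp_assist_test(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t hv_pages_gva;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_set_hv_cpuid(vcpu);

	/* Back the VP assist, partition assist and eVMCS pages. */
	vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
	vcpu_args_set(vcpu, 1, hv_pages_gva);

	vcpu_run(vcpu);
	kvm_vm_free(vm);
}
#endif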