vmx.c (b93af02c6722fde384ed2e921b71b61b9addb740) vmx.c (3c86c0d3dbb98865a60a0c9d5c3a229af15a8a96)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.

--- 38 unchanged lines hidden (view full) ---

47#include <asm/spec-ctrl.h>
48#include <asm/virtext.h>
49#include <asm/vmx.h>
50
51#include "capabilities.h"
52#include "cpuid.h"
53#include "evmcs.h"
54#include "hyperv.h"
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.

--- 38 unchanged lines hidden (view full) ---

47#include <asm/spec-ctrl.h>
48#include <asm/virtext.h>
49#include <asm/vmx.h>
50
51#include "capabilities.h"
52#include "cpuid.h"
53#include "evmcs.h"
54#include "hyperv.h"
55#include "kvm_onhyperv.h"
55#include "irq.h"
56#include "kvm_cache_regs.h"
57#include "lapic.h"
58#include "mmu.h"
59#include "nested.h"
60#include "pmu.h"
61#include "sgx.h"
62#include "trace.h"

--- 390 unchanged lines hidden (view full) ---

453}
454
455static unsigned long host_idt_base;
456
457#if IS_ENABLED(CONFIG_HYPERV)
458static bool __read_mostly enlightened_vmcs = true;
459module_param(enlightened_vmcs, bool, 0444);
460
56#include "irq.h"
57#include "kvm_cache_regs.h"
58#include "lapic.h"
59#include "mmu.h"
60#include "nested.h"
61#include "pmu.h"
62#include "sgx.h"
63#include "trace.h"

--- 390 unchanged lines hidden (view full) ---

454}
455
456static unsigned long host_idt_base;
457
458#if IS_ENABLED(CONFIG_HYPERV)
459static bool __read_mostly enlightened_vmcs = true;
460module_param(enlightened_vmcs, bool, 0444);
461
461static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
462 void *data)
463{
464 struct kvm_tlb_range *range = data;
465
466 return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
467 range->pages);
468}
469
470static inline int hv_remote_flush_root_ept(hpa_t root_ept,
471 struct kvm_tlb_range *range)
472{
473 if (range)
474 return hyperv_flush_guest_mapping_range(root_ept,
475 kvm_fill_hv_flush_list_func, (void *)range);
476 else
477 return hyperv_flush_guest_mapping(root_ept);
478}
479
/*
 * Flush remote TLBs via Hyper-V hypercall, optionally restricted to @range.
 *
 * kvm_vmx->hv_root_ept caches the single EPT root shared by all vCPUs, or
 * INVALID_PAGE if the vCPUs have not (yet) converged on one root.  When a
 * common root is tracked, a single hypercall suffices; otherwise every
 * unique valid per-vCPU root must be flushed.  hv_root_ept_lock serializes
 * readers/writers of both the VM-wide and per-vCPU tracked roots.
 *
 * Returns 0 on success or the first hypercall error encountered.
 */
static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
					  struct kvm_tlb_range *range)
{
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
	struct kvm_vcpu *vcpu;
	int ret = 0, i, nr_unique_valid_roots;
	hpa_t root;

	spin_lock(&kvm_vmx->hv_root_ept_lock);

	if (!VALID_PAGE(kvm_vmx->hv_root_ept)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = to_vmx(vcpu)->hv_root_ept;
			/*
			 * Skip invalid roots and roots matching the currently
			 * tracked (i.e. already flushed) root.
			 */
			if (!VALID_PAGE(root) || root == kvm_vmx->hv_root_ept)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_vmx->hv_root_ept = root;

			/* Only the first error is propagated to the caller. */
			if (!ret)
				ret = hv_remote_flush_root_ept(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_vmx->hv_root_ept = INVALID_PAGE;
	} else {
		/* All vCPUs share one root; one hypercall flushes them all. */
		ret = hv_remote_flush_root_ept(kvm_vmx->hv_root_ept, range);
	}

	spin_unlock(&kvm_vmx->hv_root_ept_lock);
	return ret;
}
536static int hv_remote_flush_tlb(struct kvm *kvm)
537{
538 return hv_remote_flush_tlb_with_range(kvm, NULL);
539}
540
541static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
542{
543 struct hv_enlightened_vmcs *evmcs;
544 struct hv_partition_assist_pg **p_hv_pa_pg =
545 &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
546 /*
547 * Synthetic VM-Exit is not enabled in current code and so All
548 * evmcs in singe VM shares same assist page.

--- 11 unchanged lines hidden (view full) ---

560 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
561 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
562
563 return 0;
564}
565
566#endif /* IS_ENABLED(CONFIG_HYPERV) */
567
462static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
463{
464 struct hv_enlightened_vmcs *evmcs;
465 struct hv_partition_assist_pg **p_hv_pa_pg =
466 &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
467 /*
468 * Synthetic VM-Exit is not enabled in current code and so All
469 * evmcs in singe VM shares same assist page.

--- 11 unchanged lines hidden (view full) ---

481 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
482 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
483
484 return 0;
485}
486
487#endif /* IS_ENABLED(CONFIG_HYPERV) */
488
568static void hv_track_root_ept(struct kvm_vcpu *vcpu, hpa_t root_ept)
569{
570#if IS_ENABLED(CONFIG_HYPERV)
571 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
572
573 if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
574 spin_lock(&kvm_vmx->hv_root_ept_lock);
575 to_vmx(vcpu)->hv_root_ept = root_ept;
576 if (root_ept != kvm_vmx->hv_root_ept)
577 kvm_vmx->hv_root_ept = INVALID_PAGE;
578 spin_unlock(&kvm_vmx->hv_root_ept_lock);
579 }
580#endif
581}
582
583/*
584 * Comment's format: document - errata name - stepping - processor name.
585 * Refer from
586 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
587 */
588static u32 vmx_preemption_cpu_tfms[] = {
589/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
5900x000206E6,

--- 2588 unchanged lines hidden (view full) ---

3179 bool update_guest_cr3 = true;
3180 unsigned long guest_cr3;
3181 u64 eptp;
3182
3183 if (enable_ept) {
3184 eptp = construct_eptp(vcpu, root_hpa, root_level);
3185 vmcs_write64(EPT_POINTER, eptp);
3186
489/*
490 * Comment's format: document - errata name - stepping - processor name.
491 * Refer from
492 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
493 */
494static u32 vmx_preemption_cpu_tfms[] = {
495/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
4960x000206E6,

--- 2588 unchanged lines hidden (view full) ---

3085 bool update_guest_cr3 = true;
3086 unsigned long guest_cr3;
3087 u64 eptp;
3088
3089 if (enable_ept) {
3090 eptp = construct_eptp(vcpu, root_hpa, root_level);
3091 vmcs_write64(EPT_POINTER, eptp);
3092
3187 hv_track_root_ept(vcpu, root_hpa);
3093 hv_track_root_tdp(vcpu, root_hpa);
3188
3189 if (!enable_unrestricted_guest && !is_paging(vcpu))
3190 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3191 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3192 guest_cr3 = vcpu->arch.cr3;
3193 else /* vmcs01.GUEST_CR3 is already up-to-date. */
3194 update_guest_cr3 = false;
3195 vmx_ept_load_pdptrs(vcpu);

--- 3765 unchanged lines hidden (view full) ---

6961
6962 /*
6963 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
6964 * or POSTED_INTR_WAKEUP_VECTOR.
6965 */
6966 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
6967 vmx->pi_desc.sn = 1;
6968
3094
3095 if (!enable_unrestricted_guest && !is_paging(vcpu))
3096 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3097 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3098 guest_cr3 = vcpu->arch.cr3;
3099 else /* vmcs01.GUEST_CR3 is already up-to-date. */
3100 update_guest_cr3 = false;
3101 vmx_ept_load_pdptrs(vcpu);

--- 3765 unchanged lines hidden (view full) ---

6867
6868 /*
6869 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
6870 * or POSTED_INTR_WAKEUP_VECTOR.
6871 */
6872 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
6873 vmx->pi_desc.sn = 1;
6874
6969#if IS_ENABLED(CONFIG_HYPERV)
6970 vmx->hv_root_ept = INVALID_PAGE;
6971#endif
6972 return 0;
6973
6974free_vmcs:
6975 free_loaded_vmcs(vmx->loaded_vmcs);
6976free_pml:
6977 vmx_destroy_pml_buffer(vmx);
6978free_vpid:
6979 free_vpid(vmx->vpid);
6980 return err;
6981}
6982
6983#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
6984#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
6985
6986static int vmx_vm_init(struct kvm *kvm)
6987{
6875 return 0;
6876
6877free_vmcs:
6878 free_loaded_vmcs(vmx->loaded_vmcs);
6879free_pml:
6880 vmx_destroy_pml_buffer(vmx);
6881free_vpid:
6882 free_vpid(vmx->vpid);
6883 return err;
6884}
6885
6886#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
6887#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
6888
6889static int vmx_vm_init(struct kvm *kvm)
6890{
6988#if IS_ENABLED(CONFIG_HYPERV)
6989 spin_lock_init(&to_kvm_vmx(kvm)->hv_root_ept_lock);
6990#endif
6991
6992 if (!ple_gap)
6993 kvm->arch.pause_in_guest = true;
6994
6995 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
6996 switch (l1tf_mitigation) {
6997 case L1TF_MITIGATION_OFF:
6998 case L1TF_MITIGATION_FLUSH_NOWARN:
6999 /* 'I explicitly don't care' is set */

--- 1097 unchanged lines hidden ---
6891 if (!ple_gap)
6892 kvm->arch.pause_in_guest = true;
6893
6894 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
6895 switch (l1tf_mitigation) {
6896 case L1TF_MITIGATION_OFF:
6897 case L1TF_MITIGATION_FLUSH_NOWARN:
6898 /* 'I explicitly don't care' is set */

--- 1097 unchanged lines hidden ---