// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>
#include <asm/msr.h>

#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "posted_intr.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "smm.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
		& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
		& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		    X86_EFLAGS_SF | X86_EFLAGS_OF))
		| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
		& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_SF | X86_EFLAGS_OF))
		| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force sync to shadow VMCS because
	 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
	 * fields and thus must be synced.
	 */
	if (nested_vmx_is_evmptr12_set(to_vmx(vcpu)))
		to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == INVALID_GPA &&
	    !nested_vmx_is_evmptr12_valid(vmx))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_HYPERV
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map);
	vmx->nested.hv_evmcs = NULL;
	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;

	if (hv_vcpu) {
		hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
		hv_vcpu->nested.vm_id = 0;
		hv_vcpu->nested.vp_id = 0;
	}
#endif
}

static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
{
#ifdef CONFIG_KVM_HYPERV
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	/*
	 * When Enlightened VMEntry is enabled on the calling CPU we treat the
	 * memory area pointed to by vmptr as Enlightened VMCS (as there's no
	 * good way to distinguish it from VMCS12) and we must not corrupt it
	 * by writing to the non-existent 'launch_state' field. The area
	 * doesn't have to be the currently active EVMCS on the calling CPU and
	 * there's nothing KVM has to do to transition it from 'active' to
	 * 'non-active' state. It is possible that the area will stay mapped as
	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
	 */
	if (!guest_cpu_cap_has_evmcs(vcpu) ||
	    !evmptr_is_valid(nested_get_evmptr(vcpu)))
		return false;

	if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
		nested_release_evmcs(vcpu);

	return true;
#else
	return false;
#endif
}

static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->vt.guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;

	/*
	 * All lazily updated registers will be reloaded from VMCS12 on both
	 * vmentry and vmexit.
	 */
	vcpu->arch.regs_dirty = 0;
}

static void nested_put_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map);
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map);
	vmx->nested.pi_desc = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	vmx->nested.vmxon_ptr = INVALID_GPA;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = INVALID_GPA;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;

	nested_put_vmcs12_pages(vcpu);

	kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}

#define EPTP_PA_MASK	GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}

static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	unsigned long roots = 0;
	uint i;
	struct kvm_mmu_root_info *cached_root;

	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			roots |= KVM_MMU_ROOT_PREVIOUS(i);
	}
	if (roots)
		kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	u32 vm_exit_reason;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;

		/*
		 * It should be impossible to trigger a nested PML Full VM-Exit
		 * for anything other than an EPT Violation from L2.  KVM *can*
		 * trigger nEPT page fault injection in response to an EPT
		 * Misconfig, e.g. if the MMIO SPTE was stale and L1's EPT
		 * tables also changed, but KVM should not treat EPT Misconfig
		 * VM-Exits as writes.
		 */
		WARN_ON_ONCE(vmx->vt.exit_reason.basic != EXIT_REASON_EPT_VIOLATION);

		/*
		 * PML Full and EPT Violation VM-Exits both use bit 12 to report
		 * "NMI unblocking due to IRET", i.e. the bit can be propagated
		 * as-is from the original EXIT_QUALIFICATION.
		 */
		exit_qualification = vmx_get_exit_qual(vcpu) & INTR_INFO_UNBLOCK_NMI;
	} else {
		if (fault->error_code & PFERR_RSVD_MASK) {
			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
			exit_qualification = 0;
		} else {
			exit_qualification = fault->exit_qualification;
			exit_qualification |= vmx_get_exit_qual(vcpu) &
					      (EPT_VIOLATION_GVA_IS_VALID |
					       EPT_VIOLATION_GVA_TRANSLATED);
			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
		}

		/*
		 * Although the caller (kvm_inject_emulated_page_fault) would
		 * have already synced the faulting address in the shadow EPT
		 * tables for the current EPTP12, we also need to sync it for
		 * any other cached EPTP02s based on the same EP4TA, since the
		 * TLB associates mappings to the EP4TA rather than the full EPTP.
		 */
		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
					   fault->address);
	}

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
	int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);

	kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	nested_ept_new_eptp(vcpu);
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
					   u32 error_code)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/*
	 * Drop bits 31:16 of the error code when performing the #PF mask+match
	 * check.  All VMCS fields involved are 32 bits, but Intel CPUs never
	 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
	 * error code.  Including the to-be-dropped bits in the check might
	 * result in an "impossible" or missed exit from L1's perspective.
	 */
	if (vector == PF_VECTOR)
		return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);

	return (vmcs12->exception_bitmap & (1u << vector));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * For x2APIC MSRs, ignore the vmcs01 bitmap.  L1 can enable x2APIC without L1
 * itself utilizing x2APIC.  All MSRs were previously set to be intercepted,
 * only the "disable intercept" case needs to be handled.
 */
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
							unsigned long *msr_bitmap_l0,
							u32 msr, int type)
{
	if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);

	if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
}

static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)					\
static inline									\
void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx,			\
					 unsigned long *msr_bitmap_l1,		\
					 unsigned long *msr_bitmap_l0, u32 msr)	\
{										\
	if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) ||		\
	    vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr))			\
		vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
	else									\
		vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
}
BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
BUILD_NVMX_MSR_INTERCEPT_HELPER(write)

static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
						    unsigned long *msr_bitmap_l1,
						    unsigned long *msr_bitmap_l0,
						    u32 msr, int types)
{
	if (types & MSR_TYPE_R)
		nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
						  msr_bitmap_l0, msr);
	if (types & MSR_TYPE_W)
		nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
						   msr_bitmap_l0, msr);
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
	struct kvm_host_map map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
	 *   and tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!vmx->nested.force_msr_bitmap_recalc) {
		struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);

		if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap &&
		    evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
			return true;
	}

	if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map.hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively toggle those relevant to L2.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_x2apic_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/*
	 * Always check vmcs01's bitmap to honor userspace MSR filters and any
	 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
	 */
#ifdef CONFIG_X86_64
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_PRED_CMD, MSR_TYPE_W);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_FLUSH_CMD, MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &map);

	vmx->nested.force_msr_bitmap_recalc = false;

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			      VMCS12_SIZE);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			       VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (CC(!nested_cpu_has_vid(vmcs12)) ||
	    CC(!nested_exit_intr_ack_set(vcpu)) ||
	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
	    CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	/*
	 * Exceeding the limit results in architecturally _undefined_ behavior,
	 * i.e. KVM is allowed to do literally anything in response to a bad
	 * limit.  Immediately generate a consistency check so that code that
	 * consumes the count doesn't need to worry about extreme edge cases.
	 */
	if (count > nested_vmx_max_atomic_switch_msrs(vcpu))
		return -EINVAL;

	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity.  To maintain compatibility with hardware inasmuch
 * as possible, process all valid entries before failing rather than precheck
 * for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (WARN_ON_ONCE(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (WARN_ON_ONCE(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					 offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here.  Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() by reading KVM's
			 * internal MSR state instead of reading the value from
			 * the vmcs02 VMExit MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore.  Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}

/*
 * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled.  On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_ept, bool reload_pdptrs,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_PDPTE;
		return -EINVAL;
	}

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role.  Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Handle pending Hyper-V TLB flush requests */
	kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);

	/*
	 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
	 * same VPID as the host, and so architecturally, linear and combined
	 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit.  KVM
	 * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
	 * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01.  This
	 * is required if VPID is disabled in KVM, as a TLB flush (there are no
	 * VPIDs) still occurs from L1's perspective, and KVM may need to
	 * synchronize the MMU in response to the guest TLB flush.
	 *
	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
	 * EPT is a special snowflake, as guest-physical mappings aren't
	 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
	 * VPID disabled.  As a result, KVM _never_ needs to sync nEPT
	 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
	 * those mappings.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/* L2 should never have a VPID if VPID is disabled. */
	WARN_ON(!enable_vpid);

	/*
	 * VPID is enabled and in use by vmcs12.  If vpid12 is changing, then
	 * emulate a guest TLB flush as KVM does not track vpid12 history nor
	 * is the VPID incorporated into the MMU context.  I.e. KVM must assume
	 * that the new vpid12 has never been used and thus represents a new
	 * guest ASID that cannot have entries in the TLB.
	 */
	if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
	 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
	 * KVM was unable to allocate a VPID for L2, flush the current context
	 * as the effective ASID is common to both L1 and L2.
	 */
	if (!nested_has_guest_tlb_tag(vcpu))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
				 VMX_BASIC_INOUT |
				 VMX_BASIC_TRUE_CTLS;

	const u64 reserved_bits = GENMASK_ULL(63, 56) |
				  GENMASK_ULL(47, 45) |
				  BIT_ULL(31);

	u64 vmx_basic = vmcs_config.nested.basic;

	BUILD_BUG_ON(feature_bits & reserved_bits);

	/*
	 * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has
	 * inverted polarity), the incoming value must not set feature bits or
	 * reserved bits that aren't allowed/supported by KVM.  Fields, i.e.
	 * multi-bit values, are explicitly checked below.
	 */
	if (!is_bitwise_subset(vmx_basic, data, feature_bits | reserved_bits))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
				u32 **low, u32 **high)
{
	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		*low = &msrs->pinbased_ctls_low;
		*high = &msrs->pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		*low = &msrs->procbased_ctls_low;
		*high = &msrs->procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		*low = &msrs->exit_ctls_low;
		*high = &msrs->exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		*low = &msrs->entry_ctls_low;
		*high = &msrs->entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*low = &msrs->secondary_ctls_low;
		*high = &msrs->secondary_ctls_high;
		break;
	default:
		BUG();
	}
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u32 *lowp, *highp;
	u64 supported;

	vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_bits = VMX_MISC_SAVE_EFER_LMA |
				 VMX_MISC_ACTIVITY_HLT |
				 VMX_MISC_ACTIVITY_SHUTDOWN |
				 VMX_MISC_ACTIVITY_WAIT_SIPI |
				 VMX_MISC_INTEL_PT |
				 VMX_MISC_RDMSR_IN_SMM |
				 VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
				 VMX_MISC_VMXOFF_BLOCK_SMI |
				 VMX_MISC_ZERO_LEN_INS;

	const u64 reserved_bits = BIT_ULL(31) | GENMASK_ULL(13, 9);

	u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
				       vmcs_config.nested.misc_high);

	BUILD_BUG_ON(feature_bits & reserved_bits);

	/*
	 * The incoming value must not set feature bits or reserved bits that
	 * aren't allowed/supported by KVM.  Fields, i.e. multi-bit values, are
	 * explicitly checked below.
	 */
	if (!is_bitwise_subset(vmx_misc, data, feature_bits | reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
					       vmcs_config.nested.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
{
	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		return &msrs->cr0_fixed0;
	case MSR_IA32_VMX_CR4_FIXED0:
		return &msrs->cr4_fixed0;
	default:
		BUG();
	}
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);

	/*
	 * 1 bits (which indicate bits which "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmcs_config.nested.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
have 1597 * been modified by the L1 guest. Note, "writable" in this context means 1598 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of 1599 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only" 1600 * VM-exit information fields (which are actually writable if the vCPU is 1601 * configured to support "VMWRITE to any supported field in the VMCS"). 1602 */ 1603 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 1604 { 1605 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1606 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1607 struct shadow_vmcs_field field; 1608 unsigned long val; 1609 int i; 1610 1611 if (WARN_ON(!shadow_vmcs)) 1612 return; 1613 1614 preempt_disable(); 1615 1616 vmcs_load(shadow_vmcs); 1617 1618 for (i = 0; i < max_shadow_read_write_fields; i++) { 1619 field = shadow_read_write_fields[i]; 1620 val = __vmcs_readl(field.encoding); 1621 vmcs12_write_any(vmcs12, field.encoding, field.offset, val); 1622 } 1623 1624 vmcs_clear(shadow_vmcs); 1625 vmcs_load(vmx->loaded_vmcs->vmcs); 1626 1627 preempt_enable(); 1628 } 1629 1630 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 1631 { 1632 const struct shadow_vmcs_field *fields[] = { 1633 shadow_read_write_fields, 1634 shadow_read_only_fields 1635 }; 1636 const int max_fields[] = { 1637 max_shadow_read_write_fields, 1638 max_shadow_read_only_fields 1639 }; 1640 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1641 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1642 struct shadow_vmcs_field field; 1643 unsigned long val; 1644 int i, q; 1645 1646 if (WARN_ON(!shadow_vmcs)) 1647 return; 1648 1649 vmcs_load(shadow_vmcs); 1650 1651 for (q = 0; q < ARRAY_SIZE(fields); q++) { 1652 for (i = 0; i < max_fields[q]; i++) { 1653 field = fields[q][i]; 1654 val = vmcs12_read_any(vmcs12, field.encoding, 1655 field.offset); 1656 __vmcs_writel(field.encoding, val); 1657 } 1658 } 1659 1660 vmcs_clear(shadow_vmcs); 1661 vmcs_load(vmx->loaded_vmcs->vmcs); 1662 } 1663 1664 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields) 1665 { 1666 #ifdef CONFIG_KVM_HYPERV 1667 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1668 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 1669 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); 1670 1671 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 1672 vmcs12->tpr_threshold = evmcs->tpr_threshold; 1673 vmcs12->guest_rip = evmcs->guest_rip; 1674 1675 if (unlikely(!(hv_clean_fields & 1676 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) { 1677 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page; 1678 hv_vcpu->nested.vm_id = evmcs->hv_vm_id; 1679 hv_vcpu->nested.vp_id = evmcs->hv_vp_id; 1680 } 1681 1682 if (unlikely(!(hv_clean_fields & 1683 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { 1684 vmcs12->guest_rsp = evmcs->guest_rsp; 1685 vmcs12->guest_rflags = evmcs->guest_rflags; 1686 vmcs12->guest_interruptibility_info = 1687 evmcs->guest_interruptibility_info; 1688 /* 1689 * Not present in struct vmcs12: 1690 * vmcs12->guest_ssp = evmcs->guest_ssp; 1691 */ 1692 } 1693 1694 if (unlikely(!(hv_clean_fields & 1695 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { 1696 vmcs12->cpu_based_vm_exec_control = 1697 evmcs->cpu_based_vm_exec_control; 1698 } 1699 1700 if (unlikely(!(hv_clean_fields & 1701 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) { 1702 vmcs12->exception_bitmap = evmcs->exception_bitmap; 1703 } 1704 1705 if (unlikely(!(hv_clean_fields & 1706 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { 1707 
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
		vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
		/*
		 * Not present in struct vmcs12:
		 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
		 * vmcs12->host_ssp = evmcs->host_ssp;
		 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
		 */
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
evmcs->guest_es_ar_bytes; 1790 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1791 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1792 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1793 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1794 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1795 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1796 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1797 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1798 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1799 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1800 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1801 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 1802 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; 1803 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1804 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1805 } 1806 1807 if (unlikely(!(hv_clean_fields & 1808 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1809 vmcs12->tsc_offset = evmcs->tsc_offset; 1810 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; 1811 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1812 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap; 1813 vmcs12->tsc_multiplier = evmcs->tsc_multiplier; 1814 } 1815 1816 if (unlikely(!(hv_clean_fields & 1817 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1818 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1819 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1820 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1821 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1822 vmcs12->guest_cr0 = evmcs->guest_cr0; 1823 vmcs12->guest_cr3 = evmcs->guest_cr3; 1824 vmcs12->guest_cr4 = evmcs->guest_cr4; 1825 vmcs12->guest_dr7 = evmcs->guest_dr7; 1826 } 1827 1828 if (unlikely(!(hv_clean_fields & 1829 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1830 vmcs12->host_fs_base = evmcs->host_fs_base; 1831 vmcs12->host_gs_base = evmcs->host_gs_base; 1832 vmcs12->host_tr_base = evmcs->host_tr_base; 1833 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1834 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1835 vmcs12->host_rsp = evmcs->host_rsp; 1836 } 1837 1838 if (unlikely(!(hv_clean_fields & 1839 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1840 vmcs12->ept_pointer = evmcs->ept_pointer; 1841 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1842 } 1843 1844 if (unlikely(!(hv_clean_fields & 1845 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1846 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1847 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1848 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1849 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1850 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1851 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1852 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1853 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1854 vmcs12->guest_pending_dbg_exceptions = 1855 evmcs->guest_pending_dbg_exceptions; 1856 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1857 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1858 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1859 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1860 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1861 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl; 1862 /* 1863 * Not present in struct vmcs12: 1864 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet; 1865 * 
vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl; 1866 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr; 1867 */ 1868 } 1869 1870 /* 1871 * Not used? 1872 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1873 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1874 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1875 * vmcs12->page_fault_error_code_mask = 1876 * evmcs->page_fault_error_code_mask; 1877 * vmcs12->page_fault_error_code_match = 1878 * evmcs->page_fault_error_code_match; 1879 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1880 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1881 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1882 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1883 */ 1884 1885 /* 1886 * Read only fields: 1887 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1888 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1889 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1890 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1891 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1892 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1893 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1894 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1895 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1896 * vmcs12->exit_qualification = evmcs->exit_qualification; 1897 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1898 * 1899 * Not present in struct vmcs12: 1900 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1901 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1902 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1903 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1904 */ 1905 1906 return; 1907 #else /* CONFIG_KVM_HYPERV */ 1908 KVM_BUG_ON(1, vmx->vcpu.kvm); 1909 #endif /* CONFIG_KVM_HYPERV */ 1910 } 1911 1912 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1913 { 1914 #ifdef CONFIG_KVM_HYPERV 1915 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1916 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 1917 1918 /* 1919 * Should not be changed by KVM: 1920 * 1921 * evmcs->host_es_selector = vmcs12->host_es_selector; 1922 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1923 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1924 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1925 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1926 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1927 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1928 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1929 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1930 * evmcs->host_cr0 = vmcs12->host_cr0; 1931 * evmcs->host_cr3 = vmcs12->host_cr3; 1932 * evmcs->host_cr4 = vmcs12->host_cr4; 1933 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1934 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1935 * evmcs->host_rip = vmcs12->host_rip; 1936 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1937 * evmcs->host_fs_base = vmcs12->host_fs_base; 1938 * evmcs->host_gs_base = vmcs12->host_gs_base; 1939 * evmcs->host_tr_base = vmcs12->host_tr_base; 1940 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1941 * evmcs->host_idtr_base = 
vmcs12->host_idtr_base; 1942 * evmcs->host_rsp = vmcs12->host_rsp; 1943 * sync_vmcs02_to_vmcs12() doesn't read these: 1944 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1945 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1946 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1947 * evmcs->ept_pointer = vmcs12->ept_pointer; 1948 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1949 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1950 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1951 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1952 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1953 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1954 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1955 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1956 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1957 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1958 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1959 * evmcs->page_fault_error_code_mask = 1960 * vmcs12->page_fault_error_code_mask; 1961 * evmcs->page_fault_error_code_match = 1962 * vmcs12->page_fault_error_code_match; 1963 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1964 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1965 * evmcs->tsc_offset = vmcs12->tsc_offset; 1966 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1967 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1968 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1969 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1970 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1971 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1972 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1973 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1974 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl; 1975 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl; 1976 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap; 1977 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier; 1978 * 1979 * Not present in struct vmcs12: 1980 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1981 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1982 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1983 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1984 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet; 1985 * evmcs->host_ssp = vmcs12->host_ssp; 1986 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr; 1987 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet; 1988 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl; 1989 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr; 1990 * evmcs->guest_ssp = vmcs12->guest_ssp; 1991 */ 1992 1993 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1994 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1995 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1996 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1997 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1998 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1999 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 2000 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 2001 2002 evmcs->guest_es_limit = vmcs12->guest_es_limit; 2003 evmcs->guest_cs_limit = 
vmcs12->guest_cs_limit; 2004 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 2005 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 2006 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 2007 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 2008 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 2009 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 2010 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 2011 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 2012 2013 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 2014 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 2015 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 2016 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 2017 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 2018 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 2019 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 2020 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 2021 2022 evmcs->guest_es_base = vmcs12->guest_es_base; 2023 evmcs->guest_cs_base = vmcs12->guest_cs_base; 2024 evmcs->guest_ss_base = vmcs12->guest_ss_base; 2025 evmcs->guest_ds_base = vmcs12->guest_ds_base; 2026 evmcs->guest_fs_base = vmcs12->guest_fs_base; 2027 evmcs->guest_gs_base = vmcs12->guest_gs_base; 2028 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 2029 evmcs->guest_tr_base = vmcs12->guest_tr_base; 2030 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 2031 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 2032 2033 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 2034 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 2035 2036 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 2037 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 2038 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 2039 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 2040 2041 evmcs->guest_pending_dbg_exceptions = 2042 vmcs12->guest_pending_dbg_exceptions; 2043 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 2044 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 2045 2046 evmcs->guest_activity_state = vmcs12->guest_activity_state; 2047 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 2048 2049 evmcs->guest_cr0 = vmcs12->guest_cr0; 2050 evmcs->guest_cr3 = vmcs12->guest_cr3; 2051 evmcs->guest_cr4 = vmcs12->guest_cr4; 2052 evmcs->guest_dr7 = vmcs12->guest_dr7; 2053 2054 evmcs->guest_physical_address = vmcs12->guest_physical_address; 2055 2056 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 2057 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 2058 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 2059 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 2060 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 2061 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 2062 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 2063 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 2064 2065 evmcs->exit_qualification = vmcs12->exit_qualification; 2066 2067 evmcs->guest_linear_address = vmcs12->guest_linear_address; 2068 evmcs->guest_rsp = vmcs12->guest_rsp; 2069 evmcs->guest_rflags = vmcs12->guest_rflags; 2070 2071 evmcs->guest_interruptibility_info = 2072 vmcs12->guest_interruptibility_info; 2073 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 2074 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 2075 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 2076 evmcs->vm_entry_exception_error_code = 2077 vmcs12->vm_entry_exception_error_code; 2078 
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
2079
2080 	evmcs->guest_rip = vmcs12->guest_rip;
2081
2082 	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
2083
2084 	return;
2085 #else /* CONFIG_KVM_HYPERV */
2086 	KVM_BUG_ON(1, vmx->vcpu.kvm);
2087 #endif /* CONFIG_KVM_HYPERV */
2088 }
2089
2090 /*
2091  * This is the equivalent of the nested hypervisor executing the vmptrld
2092  * instruction.
2093  */
2094 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
2095 	struct kvm_vcpu *vcpu, bool from_launch)
2096 {
2097 #ifdef CONFIG_KVM_HYPERV
2098 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2099 	bool evmcs_gpa_changed = false;
2100 	u64 evmcs_gpa;
2101
2102 	if (likely(!guest_cpu_cap_has_evmcs(vcpu)))
2103 		return EVMPTRLD_DISABLED;
2104
2105 	evmcs_gpa = nested_get_evmptr(vcpu);
2106 	if (!evmptr_is_valid(evmcs_gpa)) {
2107 		nested_release_evmcs(vcpu);
2108 		return EVMPTRLD_DISABLED;
2109 	}
2110
2111 	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2112 		vmx->nested.current_vmptr = INVALID_GPA;
2113
2114 		nested_release_evmcs(vcpu);
2115
2116 		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
2117 				 &vmx->nested.hv_evmcs_map))
2118 			return EVMPTRLD_ERROR;
2119
2120 		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2121
2122 		/*
2123 		 * Currently, KVM only supports eVMCS version 1
2124 		 * (== KVM_EVMCS_VERSION) and thus expects the guest to set the
2125 		 * first u32 field of the eVMCS, which specifies the eVMCS
2126 		 * VersionNumber, to this value.
2127 		 *
2128 		 * The guest learns which eVMCS versions the host supports by
2129 		 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
2130 		 * is expected to set this CPUID leaf according to the value
2131 		 * returned in vmcs_version from nested_enable_evmcs().
2132 		 *
2133 		 * However, it turns out that Microsoft Hyper-V fails to comply
2134 		 * with its own invented interface: when Hyper-V uses eVMCS, it
2135 		 * sets the first u32 field of the eVMCS to the revision_id
2136 		 * specified in MSR_IA32_VMX_BASIC instead of the eVMCS version
2137 		 * number, i.e. one of the supported versions specified in
2138 		 * CPUID.0x4000000A.EAX[0:15].
2139 		 *
2140 		 * To work around this Hyper-V bug, accept either a supported
2141 		 * eVMCS version or the VMCS12 revision_id as a valid value for
2142 		 * the first u32 field of the eVMCS.
2143 		 */
2144 		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2145 		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2146 			nested_release_evmcs(vcpu);
2147 			return EVMPTRLD_VMFAIL;
2148 		}
2149
2150 		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2151
2152 		evmcs_gpa_changed = true;
2153 		/*
2154 		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
2155 		 * reloaded from guest memory (read-only fields, fields not
2156 		 * present in struct hv_enlightened_vmcs, ...). Make sure there
2157 		 * are no leftovers.
2158 		 */
2159 		if (from_launch) {
2160 			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2161 			memset(vmcs12, 0, sizeof(*vmcs12));
2162 			vmcs12->hdr.revision_id = VMCS12_REVISION;
2163 		}
2164
2165 	}
2166
2167 	/*
2168 	 * Clean fields data can't be used on VMLAUNCH or when switching
2169 	 * between different L2 guests, as KVM keeps a single VMCS12 per L1.
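	 * For example, if L1 VMLAUNCHes a different L2 whose eVMCS marks most
	 * fields as clean, KVM's single cached vmcs12 still holds the previous
	 * L2's state; honoring the clean bits would leave stale values in
	 * vmcs12 (and thus vmcs02), so force a full copy instead.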
2170 */ 2171 if (from_launch || evmcs_gpa_changed) { 2172 vmx->nested.hv_evmcs->hv_clean_fields &= 2173 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2174 2175 vmx->nested.force_msr_bitmap_recalc = true; 2176 } 2177 2178 return EVMPTRLD_SUCCEEDED; 2179 #else 2180 return EVMPTRLD_DISABLED; 2181 #endif 2182 } 2183 2184 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2185 { 2186 struct vcpu_vmx *vmx = to_vmx(vcpu); 2187 2188 if (nested_vmx_is_evmptr12_valid(vmx)) 2189 copy_vmcs12_to_enlightened(vmx); 2190 else 2191 copy_vmcs12_to_shadow(vmx); 2192 2193 vmx->nested.need_vmcs12_to_shadow_sync = false; 2194 } 2195 2196 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2197 { 2198 struct vcpu_vmx *vmx = 2199 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2200 2201 vmx->nested.preemption_timer_expired = true; 2202 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2203 kvm_vcpu_kick(&vmx->vcpu); 2204 2205 return HRTIMER_NORESTART; 2206 } 2207 2208 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2209 { 2210 struct vcpu_vmx *vmx = to_vmx(vcpu); 2211 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2212 2213 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2214 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2215 2216 if (!vmx->nested.has_preemption_timer_deadline) { 2217 vmx->nested.preemption_timer_deadline = 2218 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2219 vmx->nested.has_preemption_timer_deadline = true; 2220 } 2221 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2222 } 2223 2224 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2225 u64 preemption_timeout) 2226 { 2227 struct vcpu_vmx *vmx = to_vmx(vcpu); 2228 2229 /* 2230 * A timer value of zero is architecturally guaranteed to cause 2231 * a VMExit prior to executing any instructions in the guest. 2232 */ 2233 if (preemption_timeout == 0) { 2234 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2235 return; 2236 } 2237 2238 if (vcpu->arch.virtual_tsc_khz == 0) 2239 return; 2240 2241 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2242 preemption_timeout *= 1000000; 2243 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2244 hrtimer_start(&vmx->nested.preemption_timer, 2245 ktime_add_ns(ktime_get(), preemption_timeout), 2246 HRTIMER_MODE_ABS_PINNED); 2247 } 2248 2249 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2250 { 2251 if (vmx->nested.nested_run_pending && 2252 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2253 return vmcs12->guest_ia32_efer; 2254 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2255 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2256 else 2257 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2258 } 2259 2260 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2261 { 2262 struct kvm *kvm = vmx->vcpu.kvm; 2263 2264 /* 2265 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2266 * according to L0's settings (vmcs12 is irrelevant here). Host 2267 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2268 * will be set as needed prior to VMLAUNCH/VMRESUME. 2269 */ 2270 if (vmx->nested.vmcs02_initialized) 2271 return; 2272 vmx->nested.vmcs02_initialized = true; 2273 2274 /* 2275 * We don't care what the EPTP value is we just need to guarantee 2276 * it's valid so we don't get a false positive when doing early 2277 * consistency checks. 
2278 */ 2279 if (enable_ept && nested_early_check) 2280 vmcs_write64(EPT_POINTER, 2281 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2282 2283 if (vmx->ve_info) 2284 vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); 2285 2286 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2287 if (cpu_has_vmx_vmfunc()) 2288 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2289 2290 if (cpu_has_vmx_posted_intr()) 2291 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2292 2293 if (cpu_has_vmx_msr_bitmap()) 2294 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2295 2296 /* 2297 * PML is emulated for L2, but never enabled in hardware as the MMU 2298 * handles A/D emulation. Disabling PML for L2 also avoids having to 2299 * deal with filtering out L2 GPAs from the buffer. 2300 */ 2301 if (enable_pml) { 2302 vmcs_write64(PML_ADDRESS, 0); 2303 vmcs_write16(GUEST_PML_INDEX, -1); 2304 } 2305 2306 if (cpu_has_vmx_encls_vmexit()) 2307 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA); 2308 2309 if (kvm_notify_vmexit_enabled(kvm)) 2310 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); 2311 2312 /* 2313 * Set the MSR load/store lists to match L0's settings. Only the 2314 * addresses are constant (for vmcs02), the counts can change based 2315 * on L2's behavior, e.g. switching to/from long mode. 2316 */ 2317 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2318 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2319 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2320 2321 vmx_set_constant_host_state(vmx); 2322 } 2323 2324 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2325 struct vmcs12 *vmcs12) 2326 { 2327 prepare_vmcs02_constant_state(vmx); 2328 2329 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); 2330 2331 /* 2332 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the 2333 * same VPID as the host. Emulate this behavior by using vpid01 for L2 2334 * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter 2335 * and VM-Exit are architecturally required to flush VPID=0, but *only* 2336 * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the 2337 * required flushes), but doing so would cause KVM to over-flush. E.g. 2338 * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled, 2339 * and then runs L2 X again, then KVM can and should retain TLB entries 2340 * for VPID12=1. 2341 */ 2342 if (enable_vpid) { 2343 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2344 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2345 else 2346 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2347 } 2348 } 2349 2350 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, 2351 struct vmcs12 *vmcs12) 2352 { 2353 u32 exec_control; 2354 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2355 2356 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) 2357 prepare_vmcs02_early_rare(vmx, vmcs12); 2358 2359 /* 2360 * PIN CONTROLS 2361 */ 2362 exec_control = __pin_controls_get(vmcs01); 2363 exec_control |= (vmcs12->pin_based_vm_exec_control & 2364 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2365 2366 /* Posted interrupts setting is only taken from vmcs12. 
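	 * I.e. even if vmcs01 enables posted interrupts for L1, the bit is
	 * cleared below unless vmcs12 enables posted interrupts for L2.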
*/ 2367 vmx->nested.pi_pending = false; 2368 if (nested_cpu_has_posted_intr(vmcs12)) { 2369 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2370 } else { 2371 vmx->nested.posted_intr_nv = -1; 2372 exec_control &= ~PIN_BASED_POSTED_INTR; 2373 } 2374 pin_controls_set(vmx, exec_control); 2375 2376 /* 2377 * EXEC CONTROLS 2378 */ 2379 exec_control = __exec_controls_get(vmcs01); /* L0's desires */ 2380 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2381 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2382 exec_control &= ~CPU_BASED_TPR_SHADOW; 2383 exec_control |= vmcs12->cpu_based_vm_exec_control; 2384 2385 vmx->nested.l1_tpr_threshold = -1; 2386 if (exec_control & CPU_BASED_TPR_SHADOW) 2387 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2388 #ifdef CONFIG_X86_64 2389 else 2390 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2391 CPU_BASED_CR8_STORE_EXITING; 2392 #endif 2393 2394 /* 2395 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2396 * for I/O port accesses. 2397 */ 2398 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2399 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2400 2401 /* 2402 * This bit will be computed in nested_get_vmcs12_pages, because 2403 * we do not have access to L1's MSR bitmap yet. For now, keep 2404 * the same bit as before, hoping to avoid multiple VMWRITEs that 2405 * only set/clear this bit. 2406 */ 2407 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2408 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2409 2410 exec_controls_set(vmx, exec_control); 2411 2412 /* 2413 * SECONDARY EXEC CONTROLS 2414 */ 2415 if (cpu_has_secondary_exec_ctrls()) { 2416 exec_control = __secondary_exec_controls_get(vmcs01); 2417 2418 /* Take the following fields only from vmcs12 */ 2419 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2420 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2421 SECONDARY_EXEC_ENABLE_INVPCID | 2422 SECONDARY_EXEC_ENABLE_RDTSCP | 2423 SECONDARY_EXEC_ENABLE_XSAVES | 2424 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2425 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2426 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2427 SECONDARY_EXEC_ENABLE_VMFUNC | 2428 SECONDARY_EXEC_DESC); 2429 2430 if (nested_cpu_has(vmcs12, 2431 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2432 exec_control |= vmcs12->secondary_vm_exec_control; 2433 2434 /* PML is emulated and never enabled in hardware for L2. */ 2435 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2436 2437 /* VMCS shadowing for L2 is emulated for now */ 2438 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2439 2440 /* 2441 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2442 * will not have to rewrite the controls just for this bit. 2443 */ 2444 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2445 exec_control |= SECONDARY_EXEC_DESC; 2446 2447 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2448 vmcs_write16(GUEST_INTR_STATUS, 2449 vmcs12->guest_intr_status); 2450 2451 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2452 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2453 2454 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2455 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); 2456 2457 secondary_exec_controls_set(vmx, exec_control); 2458 } 2459 2460 /* 2461 * ENTRY CONTROLS 2462 * 2463 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2464 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2465 * on the related bits (if supported by the CPU) in the hope that 2466 * we can avoid VMWrites during vmx_set_efer(). 
2467 * 2468 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is 2469 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to 2470 * do the same for L2. 2471 */ 2472 exec_control = __vm_entry_controls_get(vmcs01); 2473 exec_control |= (vmcs12->vm_entry_controls & 2474 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); 2475 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); 2476 if (cpu_has_load_ia32_efer()) { 2477 if (guest_efer & EFER_LMA) 2478 exec_control |= VM_ENTRY_IA32E_MODE; 2479 if (guest_efer != kvm_host.efer) 2480 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2481 } 2482 vm_entry_controls_set(vmx, exec_control); 2483 2484 /* 2485 * EXIT CONTROLS 2486 * 2487 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2488 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2489 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 2490 */ 2491 exec_control = __vm_exit_controls_get(vmcs01); 2492 if (cpu_has_load_ia32_efer() && guest_efer != kvm_host.efer) 2493 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2494 else 2495 exec_control &= ~VM_EXIT_LOAD_IA32_EFER; 2496 vm_exit_controls_set(vmx, exec_control); 2497 2498 /* 2499 * Interrupt/Exception Fields 2500 */ 2501 if (vmx->nested.nested_run_pending) { 2502 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2503 vmcs12->vm_entry_intr_info_field); 2504 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2505 vmcs12->vm_entry_exception_error_code); 2506 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2507 vmcs12->vm_entry_instruction_len); 2508 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2509 vmcs12->guest_interruptibility_info); 2510 vmx->loaded_vmcs->nmi_known_unmasked = 2511 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2512 } else { 2513 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2514 } 2515 } 2516 2517 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2518 { 2519 struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx); 2520 2521 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2522 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2523 2524 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2525 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2526 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2527 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2528 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2529 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2530 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2531 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2532 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2533 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2534 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2535 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2536 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2537 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2538 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2539 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2540 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2541 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2542 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2543 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2544 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2545 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2546 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2547 
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2548 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2549 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2550 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2551 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2552 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2553 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2554 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2555 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2556 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2557 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2558 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2559 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2560 2561 vmx_segment_cache_clear(vmx); 2562 } 2563 2564 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2565 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2566 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2567 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2568 vmcs12->guest_pending_dbg_exceptions); 2569 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2570 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2571 2572 /* 2573 * L1 may access the L2's PDPTR, so save them to construct 2574 * vmcs12 2575 */ 2576 if (enable_ept) { 2577 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2578 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2579 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2580 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2581 } 2582 2583 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2584 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2585 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2586 } 2587 2588 if (nested_cpu_has_xsaves(vmcs12)) 2589 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2590 2591 /* 2592 * Whether page-faults are trapped is determined by a combination of 2593 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2594 * doesn't care about page faults then we should set all of these to 2595 * L1's desires. However, if L0 does care about (some) page faults, it 2596 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2597 * simply ask to exit on each and every L2 page fault. This is done by 2598 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2599 * Note that below we don't need special code to set EB.PF beyond the 2600 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2601 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2602 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2603 */ 2604 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2605 /* 2606 * TODO: if both L0 and L1 need the same MASK and MATCH, 2607 * go ahead and use it? 2608 */ 2609 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2610 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2611 } else { 2612 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2613 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2614 } 2615 2616 if (cpu_has_vmx_apicv()) { 2617 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2618 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2619 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2620 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2621 } 2622 2623 /* 2624 * Make sure the msr_autostore list is up to date before we set the 2625 * count in the vmcs02. 
2626 	 */
2627 	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2628
2629 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2630 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2631 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2632
2633 	set_cr4_guest_host_mask(vmx);
2634 }
2635
2636 /*
2637  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2638  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2639  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2640  * guest in a way that will be appropriate both to L1's requests and to our
2641  * needs. In addition to modifying the active vmcs (which is vmcs02), this
2642  * function also has necessary side effects, like setting various
2643  * vcpu->arch fields.
2644  * Returns 0 on success, -EINVAL on failure. On failure, the invalid-state
2645  * exit qualification code is assigned to *entry_failure_code.
2646  */
2647 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2648 			  bool from_vmentry,
2649 			  enum vm_entry_failure_code *entry_failure_code)
2650 {
2651 	struct vcpu_vmx *vmx = to_vmx(vcpu);
2652 	struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
2653 	bool load_guest_pdptrs_vmcs12 = false;
2654
2655 	if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) {
2656 		prepare_vmcs02_rare(vmx, vmcs12);
2657 		vmx->nested.dirty_vmcs12 = false;
2658
2659 		load_guest_pdptrs_vmcs12 = !nested_vmx_is_evmptr12_valid(vmx) ||
2660 			!(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2661 	}
2662
2663 	if (vmx->nested.nested_run_pending &&
2664 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2665 		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2666 		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2667 	} else {
2668 		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2669 		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
2670 	}
2671 	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2672 	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2673 		vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2674 	vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2675
2676 	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2677 	 * bitwise-or of what L1 wants to trap for L2, and what we want to
2678 	 * trap. Note that CR0.TS also needs updating - we do this later.
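	 * As an illustration: a bit set in vmcs12->cr0_guest_host_mask means
	 * L1 wants to own (intercept) that CR0 bit for L2, so it is removed
	 * from cr0_guest_owned_bits and vmcs02's CR0_GUEST_HOST_MASK, the
	 * complement of cr0_guest_owned_bits, becomes the union of the bits
	 * L0 and L1 each want to intercept.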
	 */
2680 	vmx_update_exception_bitmap(vcpu);
2681 	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2682 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2683
2684 	if (vmx->nested.nested_run_pending &&
2685 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2686 		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2687 		vcpu->arch.pat = vmcs12->guest_ia32_pat;
2688 	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2689 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2690 	}
2691
2692 	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2693 			vcpu->arch.l1_tsc_offset,
2694 			vmx_get_l2_tsc_offset(vcpu),
2695 			vmx_get_l2_tsc_multiplier(vcpu));
2696
2697 	vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2698 			vcpu->arch.l1_tsc_scaling_ratio,
2699 			vmx_get_l2_tsc_multiplier(vcpu));
2700
2701 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2702 	if (kvm_caps.has_tsc_control)
2703 		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2704
2705 	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2706
2707 	if (nested_cpu_has_ept(vmcs12))
2708 		nested_ept_init_mmu_context(vcpu);
2709
2710 	/*
2711 	 * Override the CR0/CR4 read shadows after setting the effective guest
2712 	 * CR0/CR4. The common helpers also set the shadows, but they don't
2713 	 * account for vmcs12's cr0/4_guest_host_mask.
2714 	 */
2715 	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2716 	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2717
2718 	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2719 	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2720
2721 	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2722 	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2723 	vmx_set_efer(vcpu, vcpu->arch.efer);
2724
2725 	/*
2726 	 * If guest state is invalid and unrestricted guest is disabled, it
2727 	 * means L1 attempted VMEntry to L2 with invalid state.
2728 	 * Fail the VMEntry.
2729 	 *
2730 	 * However, when force loading the guest state (SMM exit or
2731 	 * loading nested state after migration), it is possible to
2732 	 * have invalid guest state now, which will be fixed later by
2733 	 * restoring the L2 register state.
2734 	 */
2735 	if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2736 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
2737 		return -EINVAL;
2738 	}
2739
2740 	/* Load the guest's CR3, using either nested EPT or KVM's shadow page tables. */
2741 	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2742 				from_vmentry, entry_failure_code))
2743 		return -EINVAL;
2744
2745 	/*
2746 	 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2747 	 * on nested VM-Exit, which can occur without actually running L2 and
2748 	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2749 	 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2750 	 * transition to HLT instead of running L2.
2751 	 */
2752 	if (enable_ept)
2753 		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2754
2755 	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set.
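	 * The vmcs12 PDPTE fields are only consumed when L2 runs with EPT and
	 * PAE paging; without EPT the PDPTEs are instead loaded from the
	 * memory referenced by CR3.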
*/ 2756 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2757 is_pae_paging(vcpu)) { 2758 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2759 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2760 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2761 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2762 } 2763 2764 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2765 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) && 2766 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2767 vmcs12->guest_ia32_perf_global_ctrl))) { 2768 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2769 return -EINVAL; 2770 } 2771 2772 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2773 kvm_rip_write(vcpu, vmcs12->guest_rip); 2774 2775 /* 2776 * It was observed that genuine Hyper-V running in L1 doesn't reset 2777 * 'hv_clean_fields' by itself, it only sets the corresponding dirty 2778 * bits when it changes a field in eVMCS. Mark all fields as clean 2779 * here. 2780 */ 2781 if (nested_vmx_is_evmptr12_valid(vmx)) 2782 evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2783 2784 return 0; 2785 } 2786 2787 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2788 { 2789 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2790 nested_cpu_has_virtual_nmis(vmcs12))) 2791 return -EINVAL; 2792 2793 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2794 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2795 return -EINVAL; 2796 2797 return 0; 2798 } 2799 2800 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2801 { 2802 struct vcpu_vmx *vmx = to_vmx(vcpu); 2803 2804 /* Check for memory type validity */ 2805 switch (new_eptp & VMX_EPTP_MT_MASK) { 2806 case VMX_EPTP_MT_UC: 2807 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2808 return false; 2809 break; 2810 case VMX_EPTP_MT_WB: 2811 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2812 return false; 2813 break; 2814 default: 2815 return false; 2816 } 2817 2818 /* Page-walk levels validity. 
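	 * (Illustration: EPTP bits 5:3 encode the page-walk length minus one,
	 * so a value of 3 selects a 4-level walk and 4 selects a 5-level
	 * walk; the chosen depth must be advertised in ept_caps.)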
*/ 2819 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2820 case VMX_EPTP_PWL_5: 2821 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2822 return false; 2823 break; 2824 case VMX_EPTP_PWL_4: 2825 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2826 return false; 2827 break; 2828 default: 2829 return false; 2830 } 2831 2832 /* Reserved bits should not be set */ 2833 if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2834 return false; 2835 2836 /* AD, if set, should be supported */ 2837 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2838 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2839 return false; 2840 } 2841 2842 return true; 2843 } 2844 2845 /* 2846 * Checks related to VM-Execution Control Fields 2847 */ 2848 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2849 struct vmcs12 *vmcs12) 2850 { 2851 struct vcpu_vmx *vmx = to_vmx(vcpu); 2852 2853 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2854 vmx->nested.msrs.pinbased_ctls_low, 2855 vmx->nested.msrs.pinbased_ctls_high)) || 2856 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2857 vmx->nested.msrs.procbased_ctls_low, 2858 vmx->nested.msrs.procbased_ctls_high))) 2859 return -EINVAL; 2860 2861 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2862 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2863 vmx->nested.msrs.secondary_ctls_low, 2864 vmx->nested.msrs.secondary_ctls_high))) 2865 return -EINVAL; 2866 2867 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2868 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2869 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2870 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2871 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2872 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2873 nested_vmx_check_nmi_controls(vmcs12) || 2874 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2875 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2876 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2877 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2878 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2879 return -EINVAL; 2880 2881 if (!nested_cpu_has_preemption_timer(vmcs12) && 2882 nested_cpu_has_save_preemption_timer(vmcs12)) 2883 return -EINVAL; 2884 2885 if (nested_cpu_has_ept(vmcs12) && 2886 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2887 return -EINVAL; 2888 2889 if (nested_cpu_has_vmfunc(vmcs12)) { 2890 if (CC(vmcs12->vm_function_control & 2891 ~vmx->nested.msrs.vmfunc_controls)) 2892 return -EINVAL; 2893 2894 if (nested_cpu_has_eptp_switching(vmcs12)) { 2895 if (CC(!nested_cpu_has_ept(vmcs12)) || 2896 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2897 return -EINVAL; 2898 } 2899 } 2900 2901 return 0; 2902 } 2903 2904 /* 2905 * Checks related to VM-Exit Control Fields 2906 */ 2907 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2908 struct vmcs12 *vmcs12) 2909 { 2910 struct vcpu_vmx *vmx = to_vmx(vcpu); 2911 2912 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2913 vmx->nested.msrs.exit_ctls_low, 2914 vmx->nested.msrs.exit_ctls_high)) || 2915 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2916 return -EINVAL; 2917 2918 return 0; 2919 } 2920 2921 /* 2922 * Checks related to VM-Entry Control Fields 2923 */ 2924 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2925 struct vmcs12 *vmcs12) 2926 { 2927 struct 
vcpu_vmx *vmx = to_vmx(vcpu); 2928 2929 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2930 vmx->nested.msrs.entry_ctls_low, 2931 vmx->nested.msrs.entry_ctls_high))) 2932 return -EINVAL; 2933 2934 /* 2935 * From the Intel SDM, volume 3: 2936 * Fields relevant to VM-entry event injection must be set properly. 2937 * These fields are the VM-entry interruption-information field, the 2938 * VM-entry exception error code, and the VM-entry instruction length. 2939 */ 2940 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2941 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2942 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2943 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2944 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2945 bool should_have_error_code; 2946 bool urg = nested_cpu_has2(vmcs12, 2947 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2948 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2949 2950 /* VM-entry interruption-info field: interruption type */ 2951 if (CC(intr_type == INTR_TYPE_RESERVED) || 2952 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2953 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2954 return -EINVAL; 2955 2956 /* VM-entry interruption-info field: vector */ 2957 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2958 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2959 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2960 return -EINVAL; 2961 2962 /* VM-entry interruption-info field: deliver error code */ 2963 should_have_error_code = 2964 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2965 x86_exception_has_error_code(vector); 2966 if (CC(has_error_code != should_have_error_code)) 2967 return -EINVAL; 2968 2969 /* VM-entry exception error code */ 2970 if (CC(has_error_code && 2971 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2972 return -EINVAL; 2973 2974 /* VM-entry interruption-info field: reserved bits */ 2975 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2976 return -EINVAL; 2977 2978 /* VM-entry instruction length */ 2979 switch (intr_type) { 2980 case INTR_TYPE_SOFT_EXCEPTION: 2981 case INTR_TYPE_SOFT_INTR: 2982 case INTR_TYPE_PRIV_SW_EXCEPTION: 2983 if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH) || 2984 CC(vmcs12->vm_entry_instruction_len == 0 && 2985 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2986 return -EINVAL; 2987 } 2988 } 2989 2990 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2991 return -EINVAL; 2992 2993 return 0; 2994 } 2995 2996 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2997 struct vmcs12 *vmcs12) 2998 { 2999 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 3000 nested_check_vm_exit_controls(vcpu, vmcs12) || 3001 nested_check_vm_entry_controls(vcpu, vmcs12)) 3002 return -EINVAL; 3003 3004 #ifdef CONFIG_KVM_HYPERV 3005 if (guest_cpu_cap_has_evmcs(vcpu)) 3006 return nested_evmcs_check_controls(vmcs12); 3007 #endif 3008 3009 return 0; 3010 } 3011 3012 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, 3013 struct vmcs12 *vmcs12) 3014 { 3015 #ifdef CONFIG_X86_64 3016 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 3017 !!(vcpu->arch.efer & EFER_LMA))) 3018 return -EINVAL; 3019 #endif 3020 return 0; 3021 } 3022 3023 static bool is_l1_noncanonical_address_on_vmexit(u64 la, struct vmcs12 *vmcs12) 3024 { 3025 /* 3026 * Check that the given linear address is canonical after a VM exit 3027 * from L2, based on HOST_CR4.LA57 value that will be loaded 
for L1. 3028 */ 3029 u8 l1_address_bits_on_exit = (vmcs12->host_cr4 & X86_CR4_LA57) ? 57 : 48; 3030 3031 return !__is_canonical_address(la, l1_address_bits_on_exit); 3032 } 3033 3034 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 3035 struct vmcs12 *vmcs12) 3036 { 3037 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); 3038 3039 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 3040 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 3041 CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) 3042 return -EINVAL; 3043 3044 if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 3045 CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 3046 return -EINVAL; 3047 3048 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 3049 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 3050 return -EINVAL; 3051 3052 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 3053 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3054 vmcs12->host_ia32_perf_global_ctrl))) 3055 return -EINVAL; 3056 3057 if (ia32e) { 3058 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 3059 return -EINVAL; 3060 } else { 3061 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 3062 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 3063 CC((vmcs12->host_rip) >> 32)) 3064 return -EINVAL; 3065 } 3066 3067 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3068 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3069 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3070 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3071 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3072 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3073 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 3074 CC(vmcs12->host_cs_selector == 0) || 3075 CC(vmcs12->host_tr_selector == 0) || 3076 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 3077 return -EINVAL; 3078 3079 if (CC(is_noncanonical_base_address(vmcs12->host_fs_base, vcpu)) || 3080 CC(is_noncanonical_base_address(vmcs12->host_gs_base, vcpu)) || 3081 CC(is_noncanonical_base_address(vmcs12->host_gdtr_base, vcpu)) || 3082 CC(is_noncanonical_base_address(vmcs12->host_idtr_base, vcpu)) || 3083 CC(is_noncanonical_base_address(vmcs12->host_tr_base, vcpu)) || 3084 CC(is_l1_noncanonical_address_on_vmexit(vmcs12->host_rip, vmcs12))) 3085 return -EINVAL; 3086 3087 /* 3088 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 3089 * IA32_EFER MSR must be 0 in the field for that register. In addition, 3090 * the values of the LMA and LME bits in the field must each be that of 3091 * the host address-space size VM-exit control. 
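	 * E.g. if the "host address-space size" VM-exit control is 1, the
	 * host_ia32_efer field must have both LMA and LME set; if the control
	 * is 0, both bits must be clear.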
3092 */ 3093 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 3094 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 3095 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 3096 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 3097 return -EINVAL; 3098 } 3099 3100 return 0; 3101 } 3102 3103 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 3104 struct vmcs12 *vmcs12) 3105 { 3106 struct vcpu_vmx *vmx = to_vmx(vcpu); 3107 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 3108 struct vmcs_hdr hdr; 3109 3110 if (vmcs12->vmcs_link_pointer == INVALID_GPA) 3111 return 0; 3112 3113 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 3114 return -EINVAL; 3115 3116 if (ghc->gpa != vmcs12->vmcs_link_pointer && 3117 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 3118 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) 3119 return -EINVAL; 3120 3121 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 3122 offsetof(struct vmcs12, hdr), 3123 sizeof(hdr)))) 3124 return -EINVAL; 3125 3126 if (CC(hdr.revision_id != VMCS12_REVISION) || 3127 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 3128 return -EINVAL; 3129 3130 return 0; 3131 } 3132 3133 /* 3134 * Checks related to Guest Non-register State 3135 */ 3136 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 3137 { 3138 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 3139 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 3140 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 3141 return -EINVAL; 3142 3143 return 0; 3144 } 3145 3146 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 3147 struct vmcs12 *vmcs12, 3148 enum vm_entry_failure_code *entry_failure_code) 3149 { 3150 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); 3151 3152 *entry_failure_code = ENTRY_FAIL_DEFAULT; 3153 3154 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 3155 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 3156 return -EINVAL; 3157 3158 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3159 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 3160 return -EINVAL; 3161 3162 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3163 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 3164 return -EINVAL; 3165 3166 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 3167 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 3168 return -EINVAL; 3169 } 3170 3171 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 3172 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3173 vmcs12->guest_ia32_perf_global_ctrl))) 3174 return -EINVAL; 3175 3176 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) 3177 return -EINVAL; 3178 3179 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || 3180 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) 3181 return -EINVAL; 3182 3183 /* 3184 * If the load IA32_EFER VM-entry control is 1, the following checks 3185 * are performed on the field for the IA32_EFER MSR: 3186 * - Bits reserved in the IA32_EFER MSR must be 0. 3187 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 3188 * the IA-32e mode guest VM-exit control. It must also be identical 3189 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 3190 * CR0.PG) is 1. 
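	 * E.g. a 64-bit L2 (IA-32e mode guest control = 1, CR0.PG = 1) must
	 * have guest_ia32_efer.LMA = LME = 1, whereas with CR0.PG = 0 only
	 * LMA is required to match the control.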
3191 */ 3192 if (to_vmx(vcpu)->nested.nested_run_pending && 3193 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 3194 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 3195 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 3196 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 3197 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 3198 return -EINVAL; 3199 } 3200 3201 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3202 (CC(is_noncanonical_msr_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3203 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3204 return -EINVAL; 3205 3206 if (nested_check_guest_non_reg_state(vmcs12)) 3207 return -EINVAL; 3208 3209 return 0; 3210 } 3211 3212 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3213 { 3214 struct vcpu_vmx *vmx = to_vmx(vcpu); 3215 unsigned long cr3, cr4; 3216 bool vm_fail; 3217 3218 if (!nested_early_check) 3219 return 0; 3220 3221 if (vmx->msr_autoload.host.nr) 3222 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3223 if (vmx->msr_autoload.guest.nr) 3224 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3225 3226 preempt_disable(); 3227 3228 vmx_prepare_switch_to_guest(vcpu); 3229 3230 /* 3231 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3232 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3233 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3234 * there is no need to preserve other bits or save/restore the field. 3235 */ 3236 vmcs_writel(GUEST_RFLAGS, 0); 3237 3238 cr3 = __get_current_cr3_fast(); 3239 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3240 vmcs_writel(HOST_CR3, cr3); 3241 vmx->loaded_vmcs->host_state.cr3 = cr3; 3242 } 3243 3244 cr4 = cr4_read_shadow(); 3245 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3246 vmcs_writel(HOST_CR4, cr4); 3247 vmx->loaded_vmcs->host_state.cr4 = cr4; 3248 } 3249 3250 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3251 __vmx_vcpu_run_flags(vmx)); 3252 3253 if (vmx->msr_autoload.host.nr) 3254 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3255 if (vmx->msr_autoload.guest.nr) 3256 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3257 3258 if (vm_fail) { 3259 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3260 3261 preempt_enable(); 3262 3263 trace_kvm_nested_vmenter_failed( 3264 "early hardware check VM-instruction error: ", error); 3265 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3266 return 1; 3267 } 3268 3269 /* 3270 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3271 */ 3272 if (hw_breakpoint_active()) 3273 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3274 local_irq_enable(); 3275 preempt_enable(); 3276 3277 /* 3278 * A non-failing VMEntry means we somehow entered guest mode with 3279 * an illegal RIP, and that's just the tip of the iceberg. There 3280 * is no telling what memory has been modified or what state has 3281 * been exposed to unknown code. Hitting this all but guarantees 3282 * a (very critical) hardware issue. 3283 */ 3284 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3285 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3286 3287 return 0; 3288 } 3289 3290 #ifdef CONFIG_KVM_HYPERV 3291 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3292 { 3293 struct vcpu_vmx *vmx = to_vmx(vcpu); 3294 3295 /* 3296 * hv_evmcs may end up being not mapped after migration (when 3297 * L2 was running), map it here to make sure vmcs12 changes are 3298 * properly reflected. 
3299 */ 3300 if (guest_cpu_cap_has_evmcs(vcpu) && 3301 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { 3302 enum nested_evmptrld_status evmptrld_status = 3303 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3304 3305 if (evmptrld_status == EVMPTRLD_VMFAIL || 3306 evmptrld_status == EVMPTRLD_ERROR) 3307 return false; 3308 3309 /* 3310 * Post migration VMCS12 always provides the most actual 3311 * information, copy it to eVMCS upon entry. 3312 */ 3313 vmx->nested.need_vmcs12_to_shadow_sync = true; 3314 } 3315 3316 return true; 3317 } 3318 #endif 3319 3320 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3321 { 3322 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3323 struct vcpu_vmx *vmx = to_vmx(vcpu); 3324 struct kvm_host_map *map; 3325 3326 if (!vcpu->arch.pdptrs_from_userspace && 3327 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 3328 /* 3329 * Reload the guest's PDPTRs since after a migration 3330 * the guest CR3 might be restored prior to setting the nested 3331 * state which can lead to a load of wrong PDPTRs. 3332 */ 3333 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) 3334 return false; 3335 } 3336 3337 3338 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3339 map = &vmx->nested.apic_access_page_map; 3340 3341 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) { 3342 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn)); 3343 } else { 3344 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n", 3345 __func__); 3346 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3347 vcpu->run->internal.suberror = 3348 KVM_INTERNAL_ERROR_EMULATION; 3349 vcpu->run->internal.ndata = 0; 3350 return false; 3351 } 3352 } 3353 3354 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3355 map = &vmx->nested.virtual_apic_map; 3356 3357 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3358 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3359 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3360 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3361 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3362 /* 3363 * The processor will never use the TPR shadow, simply 3364 * clear the bit from the execution control. Such a 3365 * configuration is useless, but it happens in tests. 3366 * For any other configuration, failing the vm entry is 3367 * _not_ what the processor does but it's basically the 3368 * only possibility we have. 3369 */ 3370 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3371 } else { 3372 /* 3373 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3374 * force VM-Entry to fail. 3375 */ 3376 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA); 3377 } 3378 } 3379 3380 if (nested_cpu_has_posted_intr(vmcs12)) { 3381 map = &vmx->nested.pi_desc_map; 3382 3383 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3384 vmx->nested.pi_desc = 3385 (struct pi_desc *)(((void *)map->hva) + 3386 offset_in_page(vmcs12->posted_intr_desc_addr)); 3387 vmcs_write64(POSTED_INTR_DESC_ADDR, 3388 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3389 } else { 3390 /* 3391 * Defer the KVM_INTERNAL_EXIT until KVM tries to 3392 * access the contents of the VMCS12 posted interrupt 3393 * descriptor. (Note that KVM may do this when it 3394 * should not, per the architectural specification.) 
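* (That access, and thus the deferred exit, happens in vmx_complete_nested_posted_interrupt() * when the descriptor contents are actually needed.)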
3395 */ 3396 vmx->nested.pi_desc = NULL; 3397 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); 3398 } 3399 } 3400 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3401 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3402 else 3403 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3404 3405 return true; 3406 } 3407 3408 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3409 { 3410 #ifdef CONFIG_KVM_HYPERV 3411 /* 3412 * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy 3413 * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory 3414 * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post 3415 * migration. 3416 */ 3417 if (!nested_get_evmcs_page(vcpu)) { 3418 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3419 __func__); 3420 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3421 vcpu->run->internal.suberror = 3422 KVM_INTERNAL_ERROR_EMULATION; 3423 vcpu->run->internal.ndata = 0; 3424 3425 return false; 3426 } 3427 #endif 3428 3429 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3430 return false; 3431 3432 return true; 3433 } 3434 3435 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3436 { 3437 struct vmcs12 *vmcs12; 3438 struct vcpu_vmx *vmx = to_vmx(vcpu); 3439 gpa_t dst; 3440 3441 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3442 return 0; 3443 3444 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3445 return 1; 3446 3447 /* 3448 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3449 * set is already checked as part of A/D emulation. 3450 */ 3451 vmcs12 = get_vmcs12(vcpu); 3452 if (!nested_cpu_has_pml(vmcs12)) 3453 return 0; 3454 3455 if (vmcs12->guest_pml_index >= PML_LOG_NR_ENTRIES) { 3456 vmx->nested.pml_full = true; 3457 return 1; 3458 } 3459 3460 gpa &= ~0xFFFull; 3461 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3462 3463 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3464 offset_in_page(dst), sizeof(gpa))) 3465 return 0; 3466 3467 vmcs12->guest_pml_index--; 3468 3469 return 0; 3470 } 3471 3472 /* 3473 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3474 * for running VMX instructions (except VMXON, whose prerequisites are 3475 * slightly different). It also specifies what exception to inject otherwise. 3476 * Note that many of these exceptions have priority over VM exits, so they 3477 * don't have to be checked again here. 3478 */ 3479 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3480 { 3481 if (!to_vmx(vcpu)->nested.vmxon) { 3482 kvm_queue_exception(vcpu, UD_VECTOR); 3483 return 0; 3484 } 3485 3486 if (vmx_get_cpl(vcpu)) { 3487 kvm_inject_gp(vcpu, 0); 3488 return 0; 3489 } 3490 3491 return 1; 3492 } 3493 3494 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3495 struct vmcs12 *vmcs12); 3496 3497 /* 3498 * If from_vmentry is false, this is being called from state restore (either RSM 3499 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 
3500 * 3501 * Returns: 3502 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3503 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3504 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3505 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3506 */ 3507 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3508 bool from_vmentry) 3509 { 3510 struct vcpu_vmx *vmx = to_vmx(vcpu); 3511 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3512 enum vm_entry_failure_code entry_failure_code; 3513 union vmx_exit_reason exit_reason = { 3514 .basic = EXIT_REASON_INVALID_STATE, 3515 .failed_vmentry = 1, 3516 }; 3517 u32 failed_index; 3518 3519 trace_kvm_nested_vmenter(kvm_rip_read(vcpu), 3520 vmx->nested.current_vmptr, 3521 vmcs12->guest_rip, 3522 vmcs12->guest_intr_status, 3523 vmcs12->vm_entry_intr_info_field, 3524 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT, 3525 vmcs12->ept_pointer, 3526 vmcs12->guest_cr3, 3527 KVM_ISA_VMX); 3528 3529 kvm_service_local_tlb_flush_requests(vcpu); 3530 3531 if (!vmx->nested.nested_run_pending || 3532 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3533 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3534 if (kvm_mpx_supported() && 3535 (!vmx->nested.nested_run_pending || 3536 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 3537 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3538 3539 /* 3540 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3541 * nested early checks are disabled. In the event of a "late" VM-Fail, 3542 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3543 * software model to the pre-VMEntry host state. When EPT is disabled, 3544 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3545 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3546 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3547 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3548 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3549 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3550 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3551 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3552 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3553 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3554 */ 3555 if (!enable_ept && !nested_early_check) 3556 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3557 3558 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3559 3560 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); 3561 3562 if (from_vmentry) { 3563 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3564 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3565 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3566 } 3567 3568 if (nested_vmx_check_vmentry_hw(vcpu)) { 3569 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3570 return NVMX_VMENTRY_VMFAIL; 3571 } 3572 3573 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3574 &entry_failure_code)) { 3575 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3576 vmcs12->exit_qualification = entry_failure_code; 3577 goto vmentry_fail_vmexit; 3578 } 3579 } 3580 3581 enter_guest_mode(vcpu); 3582 3583 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) { 3584 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3585 vmcs12->exit_qualification = entry_failure_code; 3586 goto vmentry_fail_vmexit_guest_mode; 3587 } 3588 3589 if (from_vmentry) { 3590 failed_index = nested_vmx_load_msr(vcpu, 3591 vmcs12->vm_entry_msr_load_addr, 3592 vmcs12->vm_entry_msr_load_count); 3593 if (failed_index) { 3594 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3595 vmcs12->exit_qualification = failed_index; 3596 goto vmentry_fail_vmexit_guest_mode; 3597 } 3598 } else { 3599 /* 3600 * The MMU is not initialized to point at the right entities yet and 3601 * "get pages" would need to read data from the guest (i.e. we will 3602 * need to perform gpa to hpa translation). Request a call 3603 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3604 * have already been set at vmentry time and should not be reset. 3605 */ 3606 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3607 } 3608 3609 /* 3610 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI 3611 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can 3612 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit 3613 * unconditionally. Take care to pull data from vmcs01 as appropriate, 3614 * e.g. when checking for interrupt windows, as vmcs02 is now loaded. 3615 */ 3616 if ((__exec_controls_get(&vmx->vmcs01) & (CPU_BASED_INTR_WINDOW_EXITING | 3617 CPU_BASED_NMI_WINDOW_EXITING)) || 3618 kvm_apic_has_pending_init_or_sipi(vcpu) || 3619 kvm_apic_has_interrupt(vcpu)) 3620 kvm_make_request(KVM_REQ_EVENT, vcpu); 3621 3622 /* 3623 * Do not start the preemption timer hrtimer until after we know 3624 * we are successful, so that only nested_vmx_vmexit needs to cancel 3625 * the timer. 3626 */ 3627 vmx->nested.preemption_timer_expired = false; 3628 if (nested_cpu_has_preemption_timer(vmcs12)) { 3629 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3630 vmx_start_preemption_timer(vcpu, timer_value); 3631 } 3632 3633 /* 3634 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3635 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3636 * returned as far as L1 is concerned. It will only return (and set 3637 * the success flag) when L2 exits (see nested_vmx_vmexit()). 3638 */ 3639 return NVMX_VMENTRY_SUCCESS; 3640 3641 /* 3642 * A failed consistency check that leads to a VMExit during L1's 3643 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3644 * 26.7 "VM-entry failures during or after loading guest state". 
3645 */ 3646 vmentry_fail_vmexit_guest_mode: 3647 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3648 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3649 leave_guest_mode(vcpu); 3650 3651 vmentry_fail_vmexit: 3652 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3653 3654 if (!from_vmentry) 3655 return NVMX_VMENTRY_VMEXIT; 3656 3657 load_vmcs12_host_state(vcpu, vmcs12); 3658 vmcs12->vm_exit_reason = exit_reason.full; 3659 if (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)) 3660 vmx->nested.need_vmcs12_to_shadow_sync = true; 3661 return NVMX_VMENTRY_VMEXIT; 3662 } 3663 3664 /* 3665 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3666 * for running an L2 nested guest. 3667 */ 3668 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3669 { 3670 struct vmcs12 *vmcs12; 3671 enum nvmx_vmentry_status status; 3672 struct vcpu_vmx *vmx = to_vmx(vcpu); 3673 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3674 enum nested_evmptrld_status evmptrld_status; 3675 3676 if (!nested_vmx_check_permission(vcpu)) 3677 return 1; 3678 3679 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3680 if (evmptrld_status == EVMPTRLD_ERROR) { 3681 kvm_queue_exception(vcpu, UD_VECTOR); 3682 return 1; 3683 } 3684 3685 kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED); 3686 3687 if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) 3688 return nested_vmx_failInvalid(vcpu); 3689 3690 if (CC(!nested_vmx_is_evmptr12_valid(vmx) && 3691 vmx->nested.current_vmptr == INVALID_GPA)) 3692 return nested_vmx_failInvalid(vcpu); 3693 3694 vmcs12 = get_vmcs12(vcpu); 3695 3696 /* 3697 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3698 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3699 * rather than RFLAGS.ZF, and no error number is stored to the 3700 * VM-instruction error field. 3701 */ 3702 if (CC(vmcs12->hdr.shadow_vmcs)) 3703 return nested_vmx_failInvalid(vcpu); 3704 3705 if (nested_vmx_is_evmptr12_valid(vmx)) { 3706 struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx); 3707 3708 copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields); 3709 /* Enlightened VMCS doesn't have launch state */ 3710 vmcs12->launch_state = !launch; 3711 } else if (enable_shadow_vmcs) { 3712 copy_shadow_to_vmcs12(vmx); 3713 } 3714 3715 /* 3716 * The nested entry process starts with enforcing various prerequisites 3717 * on vmcs12 as required by the Intel SDM, and act appropriately when 3718 * they fail: As the SDM explains, some conditions should cause the 3719 * instruction to fail, while others will cause the instruction to seem 3720 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3721 * To speed up the normal (success) code path, we should avoid checking 3722 * for misconfigurations which will anyway be caught by the processor 3723 * when using the merged vmcs02. 3724 */ 3725 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3726 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3727 3728 if (CC(vmcs12->launch_state == launch)) 3729 return nested_vmx_fail(vcpu, 3730 launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3731 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3732 3733 if (nested_vmx_check_controls(vcpu, vmcs12)) 3734 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3735 3736 if (nested_vmx_check_address_space_size(vcpu, vmcs12)) 3737 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3738 3739 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3740 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3741 3742 /* 3743 * We're finally done with prerequisite checking, and can start with 3744 * the nested entry. 3745 */ 3746 vmx->nested.nested_run_pending = 1; 3747 vmx->nested.has_preemption_timer_deadline = false; 3748 status = nested_vmx_enter_non_root_mode(vcpu, true); 3749 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3750 goto vmentry_failed; 3751 3752 /* Hide L1D cache contents from the nested guest. */ 3753 vmx->vcpu.arch.l1tf_flush_l1d = true; 3754 3755 /* 3756 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3757 * also be used as part of restoring nVMX state for 3758 * snapshot restore (migration). 3759 * 3760 * In this flow, it is assumed that vmcs12 cache was 3761 * transferred as part of captured nVMX state and should 3762 * therefore not be read from guest memory (which may not 3763 * exist on destination host yet). 3764 */ 3765 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3766 3767 switch (vmcs12->guest_activity_state) { 3768 case GUEST_ACTIVITY_HLT: 3769 /* 3770 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3771 * awakened by event injection or by an NMI-window VM-exit or 3772 * by an interrupt-window VM-exit, halt the vcpu. 3773 */ 3774 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3775 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3776 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3777 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3778 vmx->nested.nested_run_pending = 0; 3779 return kvm_emulate_halt_noskip(vcpu); 3780 } 3781 break; 3782 case GUEST_ACTIVITY_WAIT_SIPI: 3783 vmx->nested.nested_run_pending = 0; 3784 kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED); 3785 break; 3786 default: 3787 break; 3788 } 3789 3790 return 1; 3791 3792 vmentry_failed: 3793 vmx->nested.nested_run_pending = 0; 3794 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3795 return 0; 3796 if (status == NVMX_VMENTRY_VMEXIT) 3797 return 1; 3798 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3799 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3800 } 3801 3802 /* 3803 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3804 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3805 * This function returns the new value we should put in vmcs12.guest_cr0. 3806 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3807 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3808 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3809 * didn't trap the bit, because if L1 did, so would L0). 3810 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3811 * been modified by L2, and L1 knows it. So just leave the old value of 3812 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3813 * isn't relevant, because if L0 traps this bit it can set it to anything. 3814 * 3. Bits that L1 didn't trap, but L0 did. 
L1 believes the guest could have 3815 * changed these bits, and therefore they need to be updated, but L0 3816 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3817 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3818 */ 3819 static inline unsigned long 3820 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3821 { 3822 return 3823 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3824 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3825 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3826 vcpu->arch.cr0_guest_owned_bits)); 3827 } 3828 3829 static inline unsigned long 3830 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3831 { 3832 return 3833 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3834 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3835 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3836 vcpu->arch.cr4_guest_owned_bits)); 3837 } 3838 3839 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3840 struct vmcs12 *vmcs12, 3841 u32 vm_exit_reason, u32 exit_intr_info) 3842 { 3843 u32 idt_vectoring; 3844 unsigned int nr; 3845 3846 /* 3847 * Per the SDM, VM-Exits due to double and triple faults are never 3848 * considered to occur during event delivery, even if the double/triple 3849 * fault is the result of an escalating vectoring issue. 3850 * 3851 * Note, the SDM qualifies the double fault behavior with "The original 3852 * event results in a double-fault exception". It's unclear why the 3853 * qualification exists since exits due to double fault can occur only 3854 * while vectoring a different exception (injected events are never 3855 * subject to interception), i.e. there's _always_ an original event. 3856 * 3857 * The SDM also uses NMI as a confusing example for the "original event 3858 * causes the VM exit directly" clause. NMI isn't special in any way, 3859 * the same rule applies to all events that cause an exit directly. 3860 * NMI is an odd choice for the example because NMIs can only occur on 3861 * instruction boundaries, i.e. they _can't_ occur during vectoring. 
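* Note, only events that were actually injected into L2 (exception.injected, * nmi_injected or interrupt.injected) are recorded here; exceptions that are * merely pending are either morphed into VM-Exits to L1 or dropped by the * VM-Exit path (see __nested_vmx_vmexit()).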
3862 */ 3863 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT || 3864 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI && 3865 is_double_fault(exit_intr_info))) { 3866 vmcs12->idt_vectoring_info_field = 0; 3867 } else if (vcpu->arch.exception.injected) { 3868 nr = vcpu->arch.exception.vector; 3869 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3870 3871 if (kvm_exception_is_soft(nr)) { 3872 vmcs12->vm_exit_instruction_len = 3873 vcpu->arch.event_exit_inst_len; 3874 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3875 } else 3876 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3877 3878 if (vcpu->arch.exception.has_error_code) { 3879 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3880 vmcs12->idt_vectoring_error_code = 3881 vcpu->arch.exception.error_code; 3882 } 3883 3884 vmcs12->idt_vectoring_info_field = idt_vectoring; 3885 } else if (vcpu->arch.nmi_injected) { 3886 vmcs12->idt_vectoring_info_field = 3887 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3888 } else if (vcpu->arch.interrupt.injected) { 3889 nr = vcpu->arch.interrupt.nr; 3890 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3891 3892 if (vcpu->arch.interrupt.soft) { 3893 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3894 vmcs12->vm_entry_instruction_len = 3895 vcpu->arch.event_exit_inst_len; 3896 } else 3897 idt_vectoring |= INTR_TYPE_EXT_INTR; 3898 3899 vmcs12->idt_vectoring_info_field = idt_vectoring; 3900 } else { 3901 vmcs12->idt_vectoring_info_field = 0; 3902 } 3903 } 3904 3905 3906 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3907 { 3908 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3909 gfn_t gfn; 3910 3911 /* 3912 * Don't need to mark the APIC access page dirty; it is never 3913 * written to by the CPU during APIC virtualization. 3914 */ 3915 3916 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3917 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3918 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3919 } 3920 3921 if (nested_cpu_has_posted_intr(vmcs12)) { 3922 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3923 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3924 } 3925 } 3926 3927 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3928 { 3929 struct vcpu_vmx *vmx = to_vmx(vcpu); 3930 int max_irr; 3931 void *vapic_page; 3932 u16 status; 3933 3934 if (!vmx->nested.pi_pending) 3935 return 0; 3936 3937 if (!vmx->nested.pi_desc) 3938 goto mmio_needed; 3939 3940 vmx->nested.pi_pending = false; 3941 3942 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3943 return 0; 3944 3945 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); 3946 if (max_irr > 0) { 3947 vapic_page = vmx->nested.virtual_apic_map.hva; 3948 if (!vapic_page) 3949 goto mmio_needed; 3950 3951 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3952 vapic_page, &max_irr); 3953 status = vmcs_read16(GUEST_INTR_STATUS); 3954 if ((u8)max_irr > ((u8)status & 0xff)) { 3955 status &= ~0xff; 3956 status |= (u8)max_irr; 3957 vmcs_write16(GUEST_INTR_STATUS, status); 3958 } 3959 } 3960 3961 nested_mark_vmcs12_pages_dirty(vcpu); 3962 return 0; 3963 3964 mmio_needed: 3965 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL); 3966 return -ENXIO; 3967 } 3968 3969 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu) 3970 { 3971 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; 3972 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; 3973 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3974 unsigned long exit_qual; 3975 3976 if (ex->has_payload) { 3977 exit_qual = ex->payload; 3978 } else if (ex->vector == PF_VECTOR) { 3979 
exit_qual = vcpu->arch.cr2; 3980 } else if (ex->vector == DB_VECTOR) { 3981 exit_qual = vcpu->arch.dr6; 3982 exit_qual &= ~DR6_BT; 3983 exit_qual ^= DR6_ACTIVE_LOW; 3984 } else { 3985 exit_qual = 0; 3986 } 3987 3988 /* 3989 * Unlike AMD's Paged Real Mode, which reports an error code on #PF 3990 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the 3991 * "has error code" flags on VM-Exit if the CPU is in Real Mode. 3992 */ 3993 if (ex->has_error_code && is_protmode(vcpu)) { 3994 /* 3995 * Intel CPUs do not generate error codes with bits 31:16 set, 3996 * and more importantly VMX disallows setting bits 31:16 in the 3997 * injected error code for VM-Entry. Drop the bits to mimic 3998 * hardware and avoid inducing failure on nested VM-Entry if L1 3999 * chooses to inject the exception back to L2. AMD CPUs _do_ 4000 * generate "full" 32-bit error codes, so KVM allows userspace 4001 * to inject exception error codes with bits 31:16 set. 4002 */ 4003 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code; 4004 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 4005 } 4006 4007 if (kvm_exception_is_soft(ex->vector)) 4008 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 4009 else 4010 intr_info |= INTR_TYPE_HARD_EXCEPTION; 4011 4012 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 4013 vmx_get_nmi_mask(vcpu)) 4014 intr_info |= INTR_INFO_UNBLOCK_NMI; 4015 4016 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 4017 } 4018 4019 /* 4020 * Returns true if a debug trap is (likely) pending delivery. Infer the class 4021 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6). 4022 * Using the payload is flawed because code breakpoints (fault-like) and data 4023 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e. 4024 * this will return false positives if a to-be-injected code breakpoint #DB is 4025 * pending (from KVM's perspective, but not "pending" across an instruction 4026 * boundary). ICEBP, a.k.a. INT1, is also not reflected here even though it 4027 * too is trap-like. 4028 * 4029 * KVM "works" despite these flaws as ICEBP isn't currently supported by the 4030 * emulator, Monitor Trap Flag is not marked pending on intercepted #DBs (the 4031 * #DB has already happened), and MTF isn't marked pending on code breakpoints 4032 * from the emulator (because such #DBs are fault-like and thus don't trigger 4033 * actions that fire on instruction retire). 4034 */ 4035 static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex) 4036 { 4037 if (!ex->pending || ex->vector != DB_VECTOR) 4038 return 0; 4039 4040 /* General Detect #DBs are always fault-like. */ 4041 return ex->payload & ~DR6_BD; 4042 } 4043 4044 /* 4045 * Returns true if there's a pending #DB exception that is lower priority than 4046 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by 4047 * KVM, but could theoretically be injected by userspace. Note, this code is 4048 * imperfect, see above. 4049 */ 4050 static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex) 4051 { 4052 return vmx_get_pending_dbg_trap(ex) & ~DR6_BT; 4053 } 4054 4055 /* 4056 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 4057 * recognized #DB (data or single-step) that has yet to be delivered. 
Since KVM 4058 * represents these debug traps with a payload that is said to be compatible 4059 * with the 'pending debug exceptions' field, write the payload to the VMCS 4060 * field if a VM-exit is delivered before the debug trap. 4061 */ 4062 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 4063 { 4064 unsigned long pending_dbg; 4065 4066 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception); 4067 if (pending_dbg) 4068 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg); 4069 } 4070 4071 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 4072 { 4073 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 4074 to_vmx(vcpu)->nested.preemption_timer_expired; 4075 } 4076 4077 static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection) 4078 { 4079 struct vcpu_vmx *vmx = to_vmx(vcpu); 4080 void *vapic = vmx->nested.virtual_apic_map.hva; 4081 int max_irr, vppr; 4082 4083 if (nested_vmx_preemption_timer_pending(vcpu) || 4084 vmx->nested.mtf_pending) 4085 return true; 4086 4087 /* 4088 * Virtual Interrupt Delivery doesn't require manual injection. Either 4089 * the interrupt is already in GUEST_RVI and will be recognized by CPU 4090 * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move 4091 * the interrupt from the PIR to RVI prior to entering the guest. 4092 */ 4093 if (for_injection) 4094 return false; 4095 4096 if (!nested_cpu_has_vid(get_vmcs12(vcpu)) || 4097 __vmx_interrupt_blocked(vcpu)) 4098 return false; 4099 4100 if (!vapic) 4101 return false; 4102 4103 vppr = *((u32 *)(vapic + APIC_PROCPRI)); 4104 4105 max_irr = vmx_get_rvi(); 4106 if ((max_irr & 0xf0) > (vppr & 0xf0)) 4107 return true; 4108 4109 if (vmx->nested.pi_pending && vmx->nested.pi_desc && 4110 pi_test_on(vmx->nested.pi_desc)) { 4111 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); 4112 if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0)) 4113 return true; 4114 } 4115 4116 return false; 4117 } 4118 4119 /* 4120 * Per the Intel SDM's table "Priority Among Concurrent Events", with minor 4121 * edits to fill in missing examples, e.g. #DB due to split-lock accesses, 4122 * and less minor edits to splice in the priority of VMX Non-Root specific 4123 * events, e.g. MTF and NMI/INTR-window exiting. 
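* Lower numbers indicate higher priority; the fractional entries (3.5, 4.3, * 4.6 and 5.5) are the VMX non-root specific events spliced in between the * architectural priorities.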
4124 * 4125 * 1 Hardware Reset and Machine Checks 4126 * - RESET 4127 * - Machine Check 4128 * 4129 * 2 Trap on Task Switch 4130 * - T flag in TSS is set (on task switch) 4131 * 4132 * 3 External Hardware Interventions 4133 * - FLUSH 4134 * - STOPCLK 4135 * - SMI 4136 * - INIT 4137 * 4138 * 3.5 Monitor Trap Flag (MTF) VM-exit[1] 4139 * 4140 * 4 Traps on Previous Instruction 4141 * - Breakpoints 4142 * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O 4143 * breakpoint, or #DB due to a split-lock access) 4144 * 4145 * 4.3 VMX-preemption timer expired VM-exit[2] 4146 * 4147 * 4.6 NMI-window exiting VM-exit[3] 4148 * 4149 * 5 Nonmaskable Interrupts (NMI) 4150 * 4151 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery[4] 4152 * 4153 * 6 Maskable Hardware Interrupts 4154 * 4155 * 7 Code Breakpoint Fault 4156 * 4157 * 8 Faults from Fetching Next Instruction 4158 * - Code-Segment Limit Violation 4159 * - Code Page Fault 4160 * - Control protection exception (missing ENDBRANCH at target of indirect 4161 * call or jump) 4162 * 4163 * 9 Faults from Decoding Next Instruction 4164 * - Instruction length > 15 bytes 4165 * - Invalid Opcode 4166 * - Coprocessor Not Available 4167 * 4168 *10 Faults on Executing Instruction 4169 * - Overflow 4170 * - Bound error 4171 * - Invalid TSS 4172 * - Segment Not Present 4173 * - Stack fault 4174 * - General Protection 4175 * - Data Page Fault 4176 * - Alignment Check 4177 * - x86 FPU Floating-point exception 4178 * - SIMD floating-point exception 4179 * - Virtualization exception 4180 * - Control protection exception 4181 * 4182 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs), 4183 * INIT signals, and higher priority events take priority over MTF VM exits. 4184 * MTF VM exits take priority over debug-trap exceptions and lower priority 4185 * events. 4186 * 4187 * [2] Debug-trap exceptions and higher priority events take priority over VM exits 4188 * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption 4189 * timer take priority over VM exits caused by the "NMI-window exiting" 4190 * VM-execution control and lower priority events. 4191 * 4192 * [3] Debug-trap exceptions and higher priority events take priority over VM exits 4193 * caused by "NMI-window exiting". VM exits caused by this control take 4194 * priority over non-maskable interrupts (NMIs) and lower priority events. 4195 * 4196 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to 4197 * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus, 4198 * non-maskable interrupts (NMIs) and higher priority events take priority over 4199 * delivery of a virtual interrupt; delivery of a virtual interrupt takes 4200 * priority over external interrupts and lower priority events. 4201 */ 4202 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 4203 { 4204 struct kvm_lapic *apic = vcpu->arch.apic; 4205 struct vcpu_vmx *vmx = to_vmx(vcpu); 4206 /* 4207 * Only a pending nested run blocks a pending exception. If there is a 4208 * previously injected event, the pending exception occurred while said 4209 * event was being delivered and thus needs to be handled. 4210 */ 4211 bool block_nested_exceptions = vmx->nested.nested_run_pending; 4212 /* 4213 * Events that don't require injection, i.e. that are virtualized by 4214 * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need 4215 * to regain control in order to deliver the event, and hardware will 4216 * handle event ordering, e.g.
with respect to injected exceptions. 4217 * 4218 * But, new events (not exceptions) are only recognized at instruction 4219 * boundaries. If an event needs reinjection, then KVM is handling a 4220 * VM-Exit that occurred _during_ instruction execution; new events, 4221 * irrespective of whether or not they're injected, are blocked until 4222 * the instruction completes. 4223 */ 4224 bool block_non_injected_events = kvm_event_needs_reinjection(vcpu); 4225 /* 4226 * Injected events are blocked by nested VM-Enter, as KVM is responsible 4227 * for managing priority between concurrent events, i.e. KVM needs to 4228 * wait until after VM-Enter completes to deliver injected events. 4229 */ 4230 bool block_nested_events = block_nested_exceptions || 4231 block_non_injected_events; 4232 4233 if (lapic_in_kernel(vcpu) && 4234 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 4235 if (block_nested_events) 4236 return -EBUSY; 4237 nested_vmx_update_pending_dbg(vcpu); 4238 clear_bit(KVM_APIC_INIT, &apic->pending_events); 4239 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 4240 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 4241 4242 /* MTF is discarded if the vCPU is in WFS. */ 4243 vmx->nested.mtf_pending = false; 4244 return 0; 4245 } 4246 4247 if (lapic_in_kernel(vcpu) && 4248 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 4249 if (block_nested_events) 4250 return -EBUSY; 4251 4252 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 4253 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 4254 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 4255 apic->sipi_vector & 0xFFUL); 4256 return 0; 4257 } 4258 /* Fallthrough, the SIPI is completely ignored. */ 4259 } 4260 4261 /* 4262 * Process exceptions that are higher priority than Monitor Trap Flag: 4263 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but 4264 * could theoretically come in from userspace), and ICEBP (INT1). 4265 * 4266 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except 4267 * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF 4268 * across SMI/RSM as it should; that needs to be addressed in order to 4269 * prioritize SMI over MTF and trap-like #DBs.
4270 */ 4271 if (vcpu->arch.exception_vmexit.pending && 4272 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) { 4273 if (block_nested_exceptions) 4274 return -EBUSY; 4275 4276 nested_vmx_inject_exception_vmexit(vcpu); 4277 return 0; 4278 } 4279 4280 if (vcpu->arch.exception.pending && 4281 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) { 4282 if (block_nested_exceptions) 4283 return -EBUSY; 4284 goto no_vmexit; 4285 } 4286 4287 if (vmx->nested.mtf_pending) { 4288 if (block_nested_events) 4289 return -EBUSY; 4290 nested_vmx_update_pending_dbg(vcpu); 4291 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 4292 return 0; 4293 } 4294 4295 if (vcpu->arch.exception_vmexit.pending) { 4296 if (block_nested_exceptions) 4297 return -EBUSY; 4298 4299 nested_vmx_inject_exception_vmexit(vcpu); 4300 return 0; 4301 } 4302 4303 if (vcpu->arch.exception.pending) { 4304 if (block_nested_exceptions) 4305 return -EBUSY; 4306 goto no_vmexit; 4307 } 4308 4309 if (nested_vmx_preemption_timer_pending(vcpu)) { 4310 if (block_nested_events) 4311 return -EBUSY; 4312 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 4313 return 0; 4314 } 4315 4316 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 4317 if (block_nested_events) 4318 return -EBUSY; 4319 goto no_vmexit; 4320 } 4321 4322 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 4323 if (block_nested_events) 4324 return -EBUSY; 4325 if (!nested_exit_on_nmi(vcpu)) 4326 goto no_vmexit; 4327 4328 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 4329 NMI_VECTOR | INTR_TYPE_NMI_INTR | 4330 INTR_INFO_VALID_MASK, 0); 4331 /* 4332 * The NMI-triggered VM exit counts as injection: 4333 * clear this one and block further NMIs. 4334 */ 4335 vcpu->arch.nmi_pending = 0; 4336 vmx_set_nmi_mask(vcpu, true); 4337 return 0; 4338 } 4339 4340 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 4341 int irq; 4342 4343 if (!nested_exit_on_intr(vcpu)) { 4344 if (block_nested_events) 4345 return -EBUSY; 4346 4347 goto no_vmexit; 4348 } 4349 4350 if (!nested_exit_intr_ack_set(vcpu)) { 4351 if (block_nested_events) 4352 return -EBUSY; 4353 4354 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 4355 return 0; 4356 } 4357 4358 irq = kvm_cpu_get_extint(vcpu); 4359 if (irq != -1) { 4360 if (block_nested_events) 4361 return -EBUSY; 4362 4363 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 4364 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); 4365 return 0; 4366 } 4367 4368 irq = kvm_apic_has_interrupt(vcpu); 4369 if (WARN_ON_ONCE(irq < 0)) 4370 goto no_vmexit; 4371 4372 /* 4373 * If the IRQ is L2's PI notification vector, process posted 4374 * interrupts for L2 instead of injecting VM-Exit, as the 4375 * detection/morphing architecturally occurs when the IRQ is 4376 * delivered to the CPU. Note, only interrupts that are routed 4377 * through the local APIC trigger posted interrupt processing, 4378 * and enabling posted interrupts requires ACK-on-exit. 4379 */ 4380 if (irq == vmx->nested.posted_intr_nv) { 4381 /* 4382 * Nested posted interrupts are delivered via RVI, i.e. 4383 * aren't injected by KVM, and so can be queued even if 4384 * manual event injection is disallowed. 
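* Once queued, the posted interrupt is handled via the no_vmexit path below, * i.e. by vmx_complete_nested_posted_interrupt().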
4385 */ 4386 if (block_non_injected_events) 4387 return -EBUSY; 4388 4389 vmx->nested.pi_pending = true; 4390 kvm_apic_clear_irr(vcpu, irq); 4391 goto no_vmexit; 4392 } 4393 4394 if (block_nested_events) 4395 return -EBUSY; 4396 4397 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 4398 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); 4399 4400 /* 4401 * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must 4402 * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI 4403 * if APICv is active. 4404 */ 4405 kvm_apic_ack_interrupt(vcpu, irq); 4406 return 0; 4407 } 4408 4409 no_vmexit: 4410 return vmx_complete_nested_posted_interrupt(vcpu); 4411 } 4412 4413 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 4414 { 4415 ktime_t remaining = 4416 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 4417 u64 value; 4418 4419 if (ktime_to_ns(remaining) <= 0) 4420 return 0; 4421 4422 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 4423 do_div(value, 1000000); 4424 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 4425 } 4426 4427 static bool is_vmcs12_ext_field(unsigned long field) 4428 { 4429 switch (field) { 4430 case GUEST_ES_SELECTOR: 4431 case GUEST_CS_SELECTOR: 4432 case GUEST_SS_SELECTOR: 4433 case GUEST_DS_SELECTOR: 4434 case GUEST_FS_SELECTOR: 4435 case GUEST_GS_SELECTOR: 4436 case GUEST_LDTR_SELECTOR: 4437 case GUEST_TR_SELECTOR: 4438 case GUEST_ES_LIMIT: 4439 case GUEST_CS_LIMIT: 4440 case GUEST_SS_LIMIT: 4441 case GUEST_DS_LIMIT: 4442 case GUEST_FS_LIMIT: 4443 case GUEST_GS_LIMIT: 4444 case GUEST_LDTR_LIMIT: 4445 case GUEST_TR_LIMIT: 4446 case GUEST_GDTR_LIMIT: 4447 case GUEST_IDTR_LIMIT: 4448 case GUEST_ES_AR_BYTES: 4449 case GUEST_DS_AR_BYTES: 4450 case GUEST_FS_AR_BYTES: 4451 case GUEST_GS_AR_BYTES: 4452 case GUEST_LDTR_AR_BYTES: 4453 case GUEST_TR_AR_BYTES: 4454 case GUEST_ES_BASE: 4455 case GUEST_CS_BASE: 4456 case GUEST_SS_BASE: 4457 case GUEST_DS_BASE: 4458 case GUEST_FS_BASE: 4459 case GUEST_GS_BASE: 4460 case GUEST_LDTR_BASE: 4461 case GUEST_TR_BASE: 4462 case GUEST_GDTR_BASE: 4463 case GUEST_IDTR_BASE: 4464 case GUEST_PENDING_DBG_EXCEPTIONS: 4465 case GUEST_BNDCFGS: 4466 return true; 4467 default: 4468 break; 4469 } 4470 4471 return false; 4472 } 4473 4474 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4475 struct vmcs12 *vmcs12) 4476 { 4477 struct vcpu_vmx *vmx = to_vmx(vcpu); 4478 4479 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 4480 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 4481 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 4482 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 4483 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 4484 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 4485 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 4486 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 4487 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 4488 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 4489 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 4490 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 4491 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 4492 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 4493 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 4494 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 4495 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 4496 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 4497 
vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 4498 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 4499 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 4500 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 4501 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 4502 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 4503 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 4504 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 4505 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 4506 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 4507 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 4508 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 4509 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 4510 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 4511 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 4512 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 4513 vmcs12->guest_pending_dbg_exceptions = 4514 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 4515 4516 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 4517 } 4518 4519 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4520 struct vmcs12 *vmcs12) 4521 { 4522 struct vcpu_vmx *vmx = to_vmx(vcpu); 4523 int cpu; 4524 4525 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4526 return; 4527 4528 4529 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4530 4531 cpu = get_cpu(); 4532 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4533 vmx_vcpu_load_vmcs(vcpu, cpu); 4534 4535 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4536 4537 vmx->loaded_vmcs = &vmx->vmcs01; 4538 vmx_vcpu_load_vmcs(vcpu, cpu); 4539 put_cpu(); 4540 } 4541 4542 /* 4543 * Update the guest state fields of vmcs12 to reflect changes that 4544 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4545 * VM-entry controls is also updated, since this is really a guest 4546 * state bit.) 4547 */ 4548 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4549 { 4550 struct vcpu_vmx *vmx = to_vmx(vcpu); 4551 4552 if (nested_vmx_is_evmptr12_valid(vmx)) 4553 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4554 4555 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = 4556 !nested_vmx_is_evmptr12_valid(vmx); 4557 4558 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4559 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4560 4561 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4562 vmcs12->guest_rip = kvm_rip_read(vcpu); 4563 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4564 4565 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4566 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4567 4568 vmcs12->guest_interruptibility_info = 4569 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4570 4571 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4572 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4573 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4574 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4575 else 4576 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4577 4578 if (nested_cpu_has_preemption_timer(vmcs12) && 4579 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4580 !vmx->nested.nested_run_pending) 4581 vmcs12->vmx_preemption_timer_value = 4582 vmx_get_preemption_timer_value(vcpu); 4583 4584 /* 4585 * In some cases (usually, nested EPT), L2 is allowed to change its 4586 * own CR3 without exiting. If it has changed it, we must keep it. 
4587 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4588 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 4589 * 4590 * Additionally, restore L2's PDPTR to vmcs12. 4591 */ 4592 if (enable_ept) { 4593 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4594 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4595 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4596 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4597 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4598 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4599 } 4600 } 4601 4602 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4603 4604 if (nested_cpu_has_vid(vmcs12)) 4605 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4606 4607 vmcs12->vm_entry_controls = 4608 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4609 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4610 4611 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4612 vmcs12->guest_dr7 = vcpu->arch.dr7; 4613 4614 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4615 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4616 } 4617 4618 /* 4619 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4620 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4621 * and this function updates it to reflect the changes to the guest state while 4622 * L2 was running (and perhaps made some exits which were handled directly by L0 4623 * without going back to L1), and to reflect the exit reason. 4624 * Note that we do not have to copy here all VMCS fields, just those that 4625 * could have been changed by the L2 guest or the exit - i.e., the guest-state and 4626 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4627 * which already writes to vmcs12 directly. 4628 */ 4629 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 4630 u32 vm_exit_reason, u32 exit_intr_info, 4631 unsigned long exit_qualification, u32 exit_insn_len) 4632 { 4633 /* update exit information fields: */ 4634 vmcs12->vm_exit_reason = vm_exit_reason; 4635 if (vmx_get_exit_reason(vcpu).enclave_mode) 4636 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; 4637 vmcs12->exit_qualification = exit_qualification; 4638 4639 /* 4640 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched 4641 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other 4642 * exit info fields are unmodified. 4643 */ 4644 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 4645 vmcs12->launch_state = 1; 4646 4647 /* vm_entry_intr_info_field is cleared on exit. Emulate this 4648 * instead of reading the real value. */ 4649 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 4650 4651 /* 4652 * Transfer the event that L0 or L1 may have wanted to inject into 4653 * L2 to IDT_VECTORING_INFO_FIELD. 4654 */ 4655 vmcs12_save_pending_event(vcpu, vmcs12, 4656 vm_exit_reason, exit_intr_info); 4657 4658 vmcs12->vm_exit_intr_info = exit_intr_info; 4659 vmcs12->vm_exit_instruction_len = exit_insn_len; 4660 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4661 4662 /* 4663 * According to spec, there's no need to store the guest's 4664 * MSRs if the exit is due to a VM-entry failure that occurs 4665 * during or after loading the guest state. Since this exit 4666 * does not fall in that category, we need to save the MSRs.
4667 */ 4668 if (nested_vmx_store_msr(vcpu, 4669 vmcs12->vm_exit_msr_store_addr, 4670 vmcs12->vm_exit_msr_store_count)) 4671 nested_vmx_abort(vcpu, 4672 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 4673 } 4674 } 4675 4676 /* 4677 * A part of what we need to do when the nested L2 guest exits and we want to 4678 * run its L1 parent is to reset L1's guest state to the host state specified 4679 * in vmcs12. 4680 * This function is to be called not only on normal nested exit, but also on 4681 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 4682 * Failures During or After Loading Guest State"). 4683 * This function should be called when the active VMCS is L1's (vmcs01). 4684 */ 4685 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 4686 struct vmcs12 *vmcs12) 4687 { 4688 enum vm_entry_failure_code ignored; 4689 struct kvm_segment seg; 4690 4691 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 4692 vcpu->arch.efer = vmcs12->host_ia32_efer; 4693 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4694 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 4695 else 4696 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 4697 vmx_set_efer(vcpu, vcpu->arch.efer); 4698 4699 kvm_rsp_write(vcpu, vmcs12->host_rsp); 4700 kvm_rip_write(vcpu, vmcs12->host_rip); 4701 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 4702 vmx_set_interrupt_shadow(vcpu, 0); 4703 4704 /* 4705 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 4706 * actually changed, because vmx_set_cr0 refers to efer set above. 4707 * 4708 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4709 * (KVM doesn't change it); 4710 */ 4711 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); 4712 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4713 4714 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 4715 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4716 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4717 4718 nested_ept_uninit_mmu_context(vcpu); 4719 4720 /* 4721 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4722 * couldn't have changed. 4723 */ 4724 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) 4725 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4726 4727 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4728 4729 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4730 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4731 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4732 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4733 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4734 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4735 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4736 4737 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.
*/ 4738 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4739 vmcs_write64(GUEST_BNDCFGS, 0); 4740 4741 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4742 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4743 vcpu->arch.pat = vmcs12->host_ia32_pat; 4744 } 4745 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 4746 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu))) 4747 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4748 vmcs12->host_ia32_perf_global_ctrl)); 4749 4750 /* Set L1 segment info according to Intel SDM 4751 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4752 seg = (struct kvm_segment) { 4753 .base = 0, 4754 .limit = 0xFFFFFFFF, 4755 .selector = vmcs12->host_cs_selector, 4756 .type = 11, 4757 .present = 1, 4758 .s = 1, 4759 .g = 1 4760 }; 4761 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4762 seg.l = 1; 4763 else 4764 seg.db = 1; 4765 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4766 seg = (struct kvm_segment) { 4767 .base = 0, 4768 .limit = 0xFFFFFFFF, 4769 .type = 3, 4770 .present = 1, 4771 .s = 1, 4772 .db = 1, 4773 .g = 1 4774 }; 4775 seg.selector = vmcs12->host_ds_selector; 4776 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4777 seg.selector = vmcs12->host_es_selector; 4778 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4779 seg.selector = vmcs12->host_ss_selector; 4780 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4781 seg.selector = vmcs12->host_fs_selector; 4782 seg.base = vmcs12->host_fs_base; 4783 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4784 seg.selector = vmcs12->host_gs_selector; 4785 seg.base = vmcs12->host_gs_base; 4786 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4787 seg = (struct kvm_segment) { 4788 .base = vmcs12->host_tr_base, 4789 .limit = 0x67, 4790 .selector = vmcs12->host_tr_selector, 4791 .type = 11, 4792 .present = 1 4793 }; 4794 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4795 4796 memset(&seg, 0, sizeof(seg)); 4797 seg.unusable = 1; 4798 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR); 4799 4800 kvm_set_dr(vcpu, 7, 0x400); 4801 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4802 4803 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4804 vmcs12->vm_exit_msr_load_count)) 4805 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4806 4807 to_vt(vcpu)->emulation_required = vmx_emulation_required(vcpu); 4808 } 4809 4810 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4811 { 4812 struct vmx_uret_msr *efer_msr; 4813 unsigned int i; 4814 4815 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4816 return vmcs_read64(GUEST_IA32_EFER); 4817 4818 if (cpu_has_load_ia32_efer()) 4819 return kvm_host.efer; 4820 4821 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4822 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4823 return vmx->msr_autoload.guest.val[i].value; 4824 } 4825 4826 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4827 if (efer_msr) 4828 return efer_msr->data; 4829 4830 return kvm_host.efer; 4831 } 4832 4833 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4834 { 4835 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4836 struct vcpu_vmx *vmx = to_vmx(vcpu); 4837 struct vmx_msr_entry g, h; 4838 gpa_t gpa; 4839 u32 i, j; 4840 4841 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4842 4843 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4844 /* 4845 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4846 * as vmcs01.GUEST_DR7 contains a userspace defined value 4847 * and vcpu->arch.dr7 is not squirreled away 
before the 4848 * nested VMENTER (not worth adding a variable in nested_vmx). 4849 */ 4850 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4851 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4852 else 4853 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4854 } 4855 4856 /* 4857 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4858 * handle a variety of side effects to KVM's software model. 4859 */ 4860 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4861 4862 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); 4863 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4864 4865 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4866 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4867 4868 nested_ept_uninit_mmu_context(vcpu); 4869 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4870 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4871 4872 /* 4873 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4874 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4875 * VMFail, like everything else we just need to ensure our 4876 * software model is up-to-date. 4877 */ 4878 if (enable_ept && is_pae_paging(vcpu)) 4879 ept_save_pdptrs(vcpu); 4880 4881 kvm_mmu_reset_context(vcpu); 4882 4883 /* 4884 * This nasty bit of open coding is a compromise between blindly 4885 * loading L1's MSRs using the exit load lists (incorrect emulation 4886 * of VMFail), leaving the nested VM's MSRs in the software model 4887 * (incorrect behavior) and snapshotting the modified MSRs (too 4888 * expensive since the lists are unbound by hardware). For each 4889 * MSR that was (prematurely) loaded from the nested VMEntry load 4890 * list, reload it from the exit load list if it exists and differs 4891 * from the guest value. The intent is to stuff host state as 4892 * silently as possible, not to fully process the exit load list. 4893 */ 4894 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4895 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4896 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4897 pr_debug_ratelimited( 4898 "%s read MSR index failed (%u, 0x%08llx)\n", 4899 __func__, i, gpa); 4900 goto vmabort; 4901 } 4902 4903 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4904 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4905 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4906 pr_debug_ratelimited( 4907 "%s read MSR failed (%u, 0x%08llx)\n", 4908 __func__, j, gpa); 4909 goto vmabort; 4910 } 4911 if (h.index != g.index) 4912 continue; 4913 if (h.value == g.value) 4914 break; 4915 4916 if (nested_vmx_load_msr_check(vcpu, &h)) { 4917 pr_debug_ratelimited( 4918 "%s check failed (%u, 0x%x, 0x%x)\n", 4919 __func__, j, h.index, h.reserved); 4920 goto vmabort; 4921 } 4922 4923 if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) { 4924 pr_debug_ratelimited( 4925 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4926 __func__, j, h.index, h.value); 4927 goto vmabort; 4928 } 4929 } 4930 } 4931 4932 return; 4933 4934 vmabort: 4935 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4936 } 4937 4938 /* 4939 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4940 * and modify vmcs12 to make it see what it would expect to see there if 4941 * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) 4942 */ 4943 void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4944 u32 exit_intr_info, unsigned long exit_qualification, 4945 u32 exit_insn_len) 4946 { 4947 struct vcpu_vmx *vmx = to_vmx(vcpu); 4948 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4949 4950 /* Pending MTF traps are discarded on VM-Exit. */ 4951 vmx->nested.mtf_pending = false; 4952 4953 /* trying to cancel vmlaunch/vmresume is a bug */ 4954 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4955 4956 #ifdef CONFIG_KVM_HYPERV 4957 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 4958 /* 4959 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map 4960 * Enlightened VMCS after migration and we still need to 4961 * do that when something is forcing L2->L1 exit prior to 4962 * the first L2 run. 4963 */ 4964 (void)nested_get_evmcs_page(vcpu); 4965 } 4966 #endif 4967 4968 /* Service pending TLB flush requests for L2 before switching to L1. */ 4969 kvm_service_local_tlb_flush_requests(vcpu); 4970 4971 /* 4972 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4973 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4974 * up-to-date before switching to L1. 4975 */ 4976 if (enable_ept && is_pae_paging(vcpu)) 4977 vmx_ept_load_pdptrs(vcpu); 4978 4979 leave_guest_mode(vcpu); 4980 4981 if (nested_cpu_has_preemption_timer(vmcs12)) 4982 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4983 4984 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) { 4985 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; 4986 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 4987 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; 4988 } 4989 4990 if (likely(!vmx->fail)) { 4991 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4992 4993 if (vm_exit_reason != -1) 4994 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4995 exit_intr_info, exit_qualification, 4996 exit_insn_len); 4997 4998 /* 4999 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 5000 * also be used to capture vmcs12 cache as part of 5001 * capturing nVMX state for snapshot (migration). 5002 * 5003 * Otherwise, this flush will dirty guest memory at a 5004 * point it is already assumed by user-space to be 5005 * immutable. 5006 */ 5007 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 5008 } else { 5009 /* 5010 * The only expected VM-instruction error is "VM entry with 5011 * invalid control field(s)." Anything else indicates a 5012 * problem with L0. And we should never get here with a 5013 * VMFail of any type if early consistency checks are enabled. 5014 */ 5015 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 5016 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 5017 WARN_ON_ONCE(nested_early_check); 5018 } 5019 5020 /* 5021 * Drop events/exceptions that were queued for re-injection to L2 5022 * (picked up via vmx_complete_interrupts()), as well as exceptions 5023 * that were pending for L2. Note, this must NOT be hoisted above 5024 * prepare_vmcs12(), events/exceptions queued for re-injection need to 5025 * be captured in vmcs12 (see vmcs12_save_pending_event()). 
5026 */ 5027 vcpu->arch.nmi_injected = false; 5028 kvm_clear_exception_queue(vcpu); 5029 kvm_clear_interrupt_queue(vcpu); 5030 5031 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 5032 5033 kvm_nested_vmexit_handle_ibrs(vcpu); 5034 5035 /* Update any VMCS fields that might have changed while L2 ran */ 5036 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 5037 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 5038 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 5039 if (kvm_caps.has_tsc_control) 5040 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 5041 5042 if (vmx->nested.l1_tpr_threshold != -1) 5043 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 5044 5045 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 5046 vmx->nested.change_vmcs01_virtual_apic_mode = false; 5047 vmx_set_virtual_apic_mode(vcpu); 5048 } 5049 5050 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 5051 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 5052 vmx_update_cpu_dirty_logging(vcpu); 5053 } 5054 5055 nested_put_vmcs12_pages(vcpu); 5056 5057 if (vmx->nested.reload_vmcs01_apic_access_page) { 5058 vmx->nested.reload_vmcs01_apic_access_page = false; 5059 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 5060 } 5061 5062 if (vmx->nested.update_vmcs01_apicv_status) { 5063 vmx->nested.update_vmcs01_apicv_status = false; 5064 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 5065 } 5066 5067 if (vmx->nested.update_vmcs01_hwapic_isr) { 5068 vmx->nested.update_vmcs01_hwapic_isr = false; 5069 kvm_apic_update_hwapic_isr(vcpu); 5070 } 5071 5072 if ((vm_exit_reason != -1) && 5073 (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx))) 5074 vmx->nested.need_vmcs12_to_shadow_sync = true; 5075 5076 /* in case we halted in L2 */ 5077 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); 5078 5079 if (likely(!vmx->fail)) { 5080 if (vm_exit_reason != -1) 5081 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 5082 vmcs12->exit_qualification, 5083 vmcs12->idt_vectoring_info_field, 5084 vmcs12->vm_exit_intr_info, 5085 vmcs12->vm_exit_intr_error_code, 5086 KVM_ISA_VMX); 5087 5088 load_vmcs12_host_state(vcpu, vmcs12); 5089 5090 /* 5091 * Process events if an injectable IRQ or NMI is pending, even 5092 * if the event is blocked (RFLAGS.IF is cleared on VM-Exit). 5093 * If an event became pending while L2 was active, KVM needs to 5094 * either inject the event or request an IRQ/NMI window. SMIs 5095 * don't need to be processed as SMM is mutually exclusive with 5096 * non-root mode. INIT/SIPI don't need to be checked as INIT 5097 * is blocked post-VMXON, and SIPIs are ignored. 5098 */ 5099 if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending) 5100 kvm_make_request(KVM_REQ_EVENT, vcpu); 5101 return; 5102 } 5103 5104 /* 5105 * After an early L2 VM-entry failure, we're now back 5106 * in L1 which thinks it just finished a VMLAUNCH or 5107 * VMRESUME instruction, so we need to set the failure 5108 * flag and the VM-instruction error field of the VMCS 5109 * accordingly, and skip the emulated instruction. 5110 */ 5111 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 5112 5113 /* 5114 * Restore L1's host state to KVM's software model. We're here 5115 * because a consistency check was caught by hardware, which 5116 * means some amount of guest state has been propagated to KVM's 5117 * model and needs to be unwound to the host's state. 
5118 */ 5119 nested_vmx_restore_host_state(vcpu); 5120 5121 vmx->fail = 0; 5122 } 5123 5124 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) 5125 { 5126 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5127 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 5128 } 5129 5130 /* 5131 * Decode the memory-address operand of a vmx instruction, as recorded on an 5132 * exit caused by such an instruction (run by a guest hypervisor). 5133 * On success, returns 0. When the operand is invalid, returns 1 and throws 5134 * #UD, #GP, or #SS. 5135 */ 5136 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 5137 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 5138 { 5139 gva_t off; 5140 bool exn; 5141 struct kvm_segment s; 5142 5143 /* 5144 * According to Vol. 3B, "Information for VM Exits Due to Instruction 5145 * Execution", on an exit, vmx_instruction_info holds most of the 5146 * addressing components of the operand. Only the displacement part 5147 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 5148 * For how an actual address is calculated from all these components, 5149 * refer to Vol. 1, "Operand Addressing". 5150 */ 5151 int scaling = vmx_instruction_info & 3; 5152 int addr_size = (vmx_instruction_info >> 7) & 7; 5153 bool is_reg = vmx_instruction_info & (1u << 10); 5154 int seg_reg = (vmx_instruction_info >> 15) & 7; 5155 int index_reg = (vmx_instruction_info >> 18) & 0xf; 5156 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 5157 int base_reg = (vmx_instruction_info >> 23) & 0xf; 5158 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 5159 5160 if (is_reg) { 5161 kvm_queue_exception(vcpu, UD_VECTOR); 5162 return 1; 5163 } 5164 5165 /* Addr = segment_base + offset */ 5166 /* offset = base + [index * scale] + displacement */ 5167 off = exit_qualification; /* holds the displacement */ 5168 if (addr_size == 1) 5169 off = (gva_t)sign_extend64(off, 31); 5170 else if (addr_size == 0) 5171 off = (gva_t)sign_extend64(off, 15); 5172 if (base_is_valid) 5173 off += kvm_register_read(vcpu, base_reg); 5174 if (index_is_valid) 5175 off += kvm_register_read(vcpu, index_reg) << scaling; 5176 vmx_get_segment(vcpu, &s, seg_reg); 5177 5178 /* 5179 * The effective address, i.e. @off, of a memory operand is truncated 5180 * based on the address size of the instruction. Note that this is 5181 * the *effective address*, i.e. the address prior to accounting for 5182 * the segment's base. 5183 */ 5184 if (addr_size == 1) /* 32 bit */ 5185 off &= 0xffffffff; 5186 else if (addr_size == 0) /* 16 bit */ 5187 off &= 0xffff; 5188 5189 /* Checks for #GP/#SS exceptions. */ 5190 exn = false; 5191 if (is_long_mode(vcpu)) { 5192 /* 5193 * The virtual/linear address is never truncated in 64-bit 5194 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 5195 * address when using FS/GS with a non-zero base. 5196 */ 5197 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 5198 *ret = s.base + off; 5199 else 5200 *ret = off; 5201 5202 *ret = vmx_get_untagged_addr(vcpu, *ret, 0); 5203 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 5204 * non-canonical form. This is the only check on the memory 5205 * destination for long mode! 5206 */ 5207 exn = is_noncanonical_address(*ret, vcpu, 0); 5208 } else { 5209 /* 5210 * When not in long mode, the virtual/linear address is 5211 * unconditionally truncated to 32 bits regardless of the 5212 * address size. 
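*
* As a worked example, assume a hypothetical instr_info of 0x58082 for a
* memory operand in 32-bit protected mode.  Decoding it with the bit
* positions used above gives:
*
*	scaling   = 0x58082 & 3           = 2  (index scaled by 1 << 2)
*	addr_size = (0x58082 >> 7) & 7    = 1  (32-bit address size)
*	seg_reg   = (0x58082 >> 15) & 7   = 3  (DS)
*	index_reg = (0x58082 >> 18) & 0xf = 1  (ECX, index valid)
*	base_reg  = (0x58082 >> 23) & 0xf = 0  (EAX, base valid)
*
* so the effective address and final linear address are computed as:
*
*	off  = sign_extend64(exit_qualification, 31);   (displacement)
*	off += EAX + (ECX << 2);
*	off &= 0xffffffff;                              (32-bit addr size)
*	addr = (DS.base + off) & 0xffffffff;            (not in long mode)
*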
5213 */ 5214 *ret = (s.base + off) & 0xffffffff; 5215 5216 /* Protected mode: apply checks for segment validity in the 5217 * following order: 5218 * - segment type check (#GP(0) may be thrown) 5219 * - usability check (#GP(0)/#SS(0)) 5220 * - limit check (#GP(0)/#SS(0)) 5221 */ 5222 if (wr) 5223 /* #GP(0) if the destination operand is located in a 5224 * read-only data segment or any code segment. 5225 */ 5226 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 5227 else 5228 /* #GP(0) if the source operand is located in an 5229 * execute-only code segment 5230 */ 5231 exn = ((s.type & 0xa) == 8); 5232 if (exn) { 5233 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 5234 return 1; 5235 } 5236 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 5237 */ 5238 exn = (s.unusable != 0); 5239 5240 /* 5241 * Protected mode: #GP(0)/#SS(0) if the memory operand is 5242 * outside the segment limit. All CPUs that support VMX ignore 5243 * limit checks for flat segments, i.e. segments with base==0, 5244 * limit==0xffffffff and of type expand-up data or code. 5245 */ 5246 if (!(s.base == 0 && s.limit == 0xffffffff && 5247 ((s.type & 8) || !(s.type & 4)))) 5248 exn = exn || ((u64)off + len - 1 > s.limit); 5249 } 5250 if (exn) { 5251 kvm_queue_exception_e(vcpu, 5252 seg_reg == VCPU_SREG_SS ? 5253 SS_VECTOR : GP_VECTOR, 5254 0); 5255 return 1; 5256 } 5257 5258 return 0; 5259 } 5260 5261 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 5262 int *ret) 5263 { 5264 gva_t gva; 5265 struct x86_exception e; 5266 int r; 5267 5268 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5269 vmcs_read32(VMX_INSTRUCTION_INFO), false, 5270 sizeof(*vmpointer), &gva)) { 5271 *ret = 1; 5272 return -EINVAL; 5273 } 5274 5275 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 5276 if (r != X86EMUL_CONTINUE) { 5277 *ret = kvm_handle_memory_failure(vcpu, r, &e); 5278 return -EINVAL; 5279 } 5280 5281 return 0; 5282 } 5283 5284 /* 5285 * Allocate a shadow VMCS and associate it with the currently loaded 5286 * VMCS, unless such a shadow VMCS already exists. The newly allocated 5287 * VMCS is also VMCLEARed, so that it is ready for use. 5288 */ 5289 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 5290 { 5291 struct vcpu_vmx *vmx = to_vmx(vcpu); 5292 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 5293 5294 /* 5295 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it 5296 * when L1 executes VMXOFF or the vCPU is forced out of nested 5297 * operation. VMXON faults if the CPU is already post-VMXON, so it 5298 * should be impossible to already have an allocated shadow VMCS. KVM 5299 * doesn't support virtualization of VMCS shadowing, so vmcs01 should 5300 * always be the loaded VMCS. 
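*
* The shadow VMCS allocated here is hooked up by set_current_vmptr()
* (below) once L1 loads a vmcs12, after which shadowed fields can be
* VMREAD/VMWRITten by L1 without a VM-exit:
*
*	secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
*	vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->vmcs01.shadow_vmcs));
*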
*/
5302 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
5303 return loaded_vmcs->shadow_vmcs;
5304
5305 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
5306 if (loaded_vmcs->shadow_vmcs)
5307 vmcs_clear(loaded_vmcs->shadow_vmcs);
5308
5309 return loaded_vmcs->shadow_vmcs;
5310 }
5311
5312 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
5313 {
5314 struct vcpu_vmx *vmx = to_vmx(vcpu);
5315 int r;
5316
5317 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5318 if (r < 0)
5319 goto out_vmcs02;
5320
5321 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5322 if (!vmx->nested.cached_vmcs12)
5323 goto out_cached_vmcs12;
5324
5325 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5326 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5327 if (!vmx->nested.cached_shadow_vmcs12)
5328 goto out_cached_shadow_vmcs12;
5329
5330 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
5331 goto out_shadow_vmcs;
5332
5333 hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn, CLOCK_MONOTONIC,
5334 HRTIMER_MODE_ABS_PINNED);
5335
5336 vmx->nested.vpid02 = allocate_vpid();
5337
5338 vmx->nested.vmcs02_initialized = false;
5339 vmx->nested.vmxon = true;
5340
5341 if (vmx_pt_mode_is_host_guest()) {
5342 vmx->pt_desc.guest.ctl = 0;
5343 pt_update_intercept_for_msr(vcpu);
5344 }
5345
5346 return 0;
5347
5348 out_shadow_vmcs:
5349 kfree(vmx->nested.cached_shadow_vmcs12);
5350
5351 out_cached_shadow_vmcs12:
5352 kfree(vmx->nested.cached_vmcs12);
5353
5354 out_cached_vmcs12:
5355 free_loaded_vmcs(&vmx->nested.vmcs02);
5356
5357 out_vmcs02:
5358 return -ENOMEM;
5359 }
5360
5361 /* Emulate the VMXON instruction. */
5362 static int handle_vmxon(struct kvm_vcpu *vcpu)
5363 {
5364 int ret;
5365 gpa_t vmptr;
5366 uint32_t revision;
5367 struct vcpu_vmx *vmx = to_vmx(vcpu);
5368 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
5369 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
5370
5371 /*
5372 * Manually perform the CR4.VMXE check; KVM must force CR4.VMXE=1 to enter
5373 * the guest and so cannot rely on hardware to perform the check,
5374 * which has higher priority than VM-Exit (see Intel SDM's pseudocode
5375 * for VMXON).
5376 *
5377 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
5378 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't
5379 * force any of the relevant guest state. For a restricted guest, KVM
5380 * does force CR0.PE=1, but only to also force VM86 in order to emulate
5381 * Real Mode, and so there's no need to check CR0.PE manually.
5382 */
5383 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
5384 kvm_queue_exception(vcpu, UD_VECTOR);
5385 return 1;
5386 }
5387
5388 /*
5389 * The CPL is checked for "not in VMX operation" and for "in VMX root",
5390 * and has higher priority than the VM-Fail due to being post-VMXON,
5391 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root,
5392 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
5393 * from L2 to L1, i.e. there's no need to check for the vCPU being in
5394 * VMX non-root.
5395 *
5396 * Forwarding the VM-Exit unconditionally, i.e. without performing the
5397 * #UD checks (see above), is functionally ok because KVM doesn't allow
5398 * L1 to run L2 without CR4.VMXE=1, and because KVM never modifies L2's
5399 * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are
5400 * missed by hardware due to shadowing CR0 and/or CR4.
5401 */ 5402 if (vmx_get_cpl(vcpu)) { 5403 kvm_inject_gp(vcpu, 0); 5404 return 1; 5405 } 5406 5407 if (vmx->nested.vmxon) 5408 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 5409 5410 /* 5411 * Invalid CR0/CR4 generates #GP. These checks are performed if and 5412 * only if the vCPU isn't already in VMX operation, i.e. effectively 5413 * have lower priority than the VM-Fail above. 5414 */ 5415 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || 5416 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { 5417 kvm_inject_gp(vcpu, 0); 5418 return 1; 5419 } 5420 5421 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 5422 != VMXON_NEEDED_FEATURES) { 5423 kvm_inject_gp(vcpu, 0); 5424 return 1; 5425 } 5426 5427 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 5428 return ret; 5429 5430 /* 5431 * SDM 3: 24.11.5 5432 * The first 4 bytes of VMXON region contain the supported 5433 * VMCS revision identifier 5434 * 5435 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 5436 * which replaces physical address width with 32 5437 */ 5438 if (!page_address_valid(vcpu, vmptr)) 5439 return nested_vmx_failInvalid(vcpu); 5440 5441 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 5442 revision != VMCS12_REVISION) 5443 return nested_vmx_failInvalid(vcpu); 5444 5445 vmx->nested.vmxon_ptr = vmptr; 5446 ret = enter_vmx_operation(vcpu); 5447 if (ret) 5448 return ret; 5449 5450 return nested_vmx_succeed(vcpu); 5451 } 5452 5453 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 5454 { 5455 struct vcpu_vmx *vmx = to_vmx(vcpu); 5456 5457 if (vmx->nested.current_vmptr == INVALID_GPA) 5458 return; 5459 5460 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 5461 5462 if (enable_shadow_vmcs) { 5463 /* copy to memory all shadowed fields in case 5464 they were modified */ 5465 copy_shadow_to_vmcs12(vmx); 5466 vmx_disable_shadow_vmcs(vmx); 5467 } 5468 vmx->nested.posted_intr_nv = -1; 5469 5470 /* Flush VMCS12 to guest memory */ 5471 kvm_vcpu_write_guest_page(vcpu, 5472 vmx->nested.current_vmptr >> PAGE_SHIFT, 5473 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 5474 5475 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 5476 5477 vmx->nested.current_vmptr = INVALID_GPA; 5478 } 5479 5480 /* Emulate the VMXOFF instruction */ 5481 static int handle_vmxoff(struct kvm_vcpu *vcpu) 5482 { 5483 if (!nested_vmx_check_permission(vcpu)) 5484 return 1; 5485 5486 free_nested(vcpu); 5487 5488 if (kvm_apic_has_pending_init_or_sipi(vcpu)) 5489 kvm_make_request(KVM_REQ_EVENT, vcpu); 5490 5491 return nested_vmx_succeed(vcpu); 5492 } 5493 5494 /* Emulate the VMCLEAR instruction */ 5495 static int handle_vmclear(struct kvm_vcpu *vcpu) 5496 { 5497 struct vcpu_vmx *vmx = to_vmx(vcpu); 5498 u32 zero = 0; 5499 gpa_t vmptr; 5500 int r; 5501 5502 if (!nested_vmx_check_permission(vcpu)) 5503 return 1; 5504 5505 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5506 return r; 5507 5508 if (!page_address_valid(vcpu, vmptr)) 5509 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 5510 5511 if (vmptr == vmx->nested.vmxon_ptr) 5512 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 5513 5514 if (likely(!nested_evmcs_handle_vmclear(vcpu, vmptr))) { 5515 if (vmptr == vmx->nested.current_vmptr) 5516 nested_release_vmcs12(vcpu); 5517 5518 /* 5519 * Silently ignore memory errors on VMCLEAR, Intel's pseudocode 5520 * for VMCLEAR includes a "ensure that data for VMCS referenced 5521 * by the operand is in memory" clause that guards writes to 5522 * memory, 
i.e. doing nothing for I/O is architecturally valid. 5523 * 5524 * FIXME: Suppress failures if and only if no memslot is found, 5525 * i.e. exit to userspace if __copy_to_user() fails. 5526 */ 5527 (void)kvm_vcpu_write_guest(vcpu, 5528 vmptr + offsetof(struct vmcs12, 5529 launch_state), 5530 &zero, sizeof(zero)); 5531 } 5532 5533 return nested_vmx_succeed(vcpu); 5534 } 5535 5536 /* Emulate the VMLAUNCH instruction */ 5537 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 5538 { 5539 return nested_vmx_run(vcpu, true); 5540 } 5541 5542 /* Emulate the VMRESUME instruction */ 5543 static int handle_vmresume(struct kvm_vcpu *vcpu) 5544 { 5545 5546 return nested_vmx_run(vcpu, false); 5547 } 5548 5549 static int handle_vmread(struct kvm_vcpu *vcpu) 5550 { 5551 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5552 : get_vmcs12(vcpu); 5553 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5554 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5555 struct vcpu_vmx *vmx = to_vmx(vcpu); 5556 struct x86_exception e; 5557 unsigned long field; 5558 u64 value; 5559 gva_t gva = 0; 5560 short offset; 5561 int len, r; 5562 5563 if (!nested_vmx_check_permission(vcpu)) 5564 return 1; 5565 5566 /* Decode instruction info and find the field to read */ 5567 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5568 5569 if (!nested_vmx_is_evmptr12_valid(vmx)) { 5570 /* 5571 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5572 * any VMREAD sets the ALU flags for VMfailInvalid. 5573 */ 5574 if (vmx->nested.current_vmptr == INVALID_GPA || 5575 (is_guest_mode(vcpu) && 5576 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5577 return nested_vmx_failInvalid(vcpu); 5578 5579 offset = get_vmcs12_field_offset(field); 5580 if (offset < 0) 5581 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5582 5583 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5584 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5585 5586 /* Read the field, zero-extended to a u64 value */ 5587 value = vmcs12_read_any(vmcs12, field, offset); 5588 } else { 5589 /* 5590 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an 5591 * enlightened VMCS is active VMREAD/VMWRITE instructions are 5592 * unsupported. Unfortunately, certain versions of Windows 11 5593 * don't comply with this requirement which is not enforced in 5594 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a 5595 * workaround, as misbehaving guests will panic on VM-Fail. 5596 * Note, enlightened VMCS is incompatible with shadow VMCS so 5597 * all VMREADs from L2 should go to L1. 5598 */ 5599 if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5600 return nested_vmx_failInvalid(vcpu); 5601 5602 offset = evmcs_field_offset(field, NULL); 5603 if (offset < 0) 5604 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5605 5606 /* Read the field, zero-extended to a u64 value */ 5607 value = evmcs_read_any(nested_vmx_evmcs(vmx), field, offset); 5608 } 5609 5610 /* 5611 * Now copy part of this value to register or memory, as requested. 5612 * Note that the number of bits actually copied is 32 or 64 depending 5613 * on the guest's mode (32 or 64 bit), not on the given field's length. 5614 */ 5615 if (instr_info & BIT(10)) { 5616 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); 5617 } else { 5618 len = is_64_bit_mode(vcpu) ? 
8 : 4; 5619 if (get_vmx_mem_address(vcpu, exit_qualification, 5620 instr_info, true, len, &gva)) 5621 return 1; 5622 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5623 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5624 if (r != X86EMUL_CONTINUE) 5625 return kvm_handle_memory_failure(vcpu, r, &e); 5626 } 5627 5628 return nested_vmx_succeed(vcpu); 5629 } 5630 5631 static bool is_shadow_field_rw(unsigned long field) 5632 { 5633 switch (field) { 5634 #define SHADOW_FIELD_RW(x, y) case x: 5635 #include "vmcs_shadow_fields.h" 5636 return true; 5637 default: 5638 break; 5639 } 5640 return false; 5641 } 5642 5643 static bool is_shadow_field_ro(unsigned long field) 5644 { 5645 switch (field) { 5646 #define SHADOW_FIELD_RO(x, y) case x: 5647 #include "vmcs_shadow_fields.h" 5648 return true; 5649 default: 5650 break; 5651 } 5652 return false; 5653 } 5654 5655 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5656 { 5657 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5658 : get_vmcs12(vcpu); 5659 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5660 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5661 struct vcpu_vmx *vmx = to_vmx(vcpu); 5662 struct x86_exception e; 5663 unsigned long field; 5664 short offset; 5665 gva_t gva; 5666 int len, r; 5667 5668 /* 5669 * The value to write might be 32 or 64 bits, depending on L1's long 5670 * mode, and eventually we need to write that into a field of several 5671 * possible lengths. The code below first zero-extends the value to 64 5672 * bit (value), and then copies only the appropriate number of 5673 * bits into the vmcs12 field. 5674 */ 5675 u64 value = 0; 5676 5677 if (!nested_vmx_check_permission(vcpu)) 5678 return 1; 5679 5680 /* 5681 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5682 * any VMWRITE sets the ALU flags for VMfailInvalid. 5683 */ 5684 if (vmx->nested.current_vmptr == INVALID_GPA || 5685 (is_guest_mode(vcpu) && 5686 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5687 return nested_vmx_failInvalid(vcpu); 5688 5689 if (instr_info & BIT(10)) 5690 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); 5691 else { 5692 len = is_64_bit_mode(vcpu) ? 8 : 4; 5693 if (get_vmx_mem_address(vcpu, exit_qualification, 5694 instr_info, false, len, &gva)) 5695 return 1; 5696 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5697 if (r != X86EMUL_CONTINUE) 5698 return kvm_handle_memory_failure(vcpu, r, &e); 5699 } 5700 5701 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5702 5703 offset = get_vmcs12_field_offset(field); 5704 if (offset < 0) 5705 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5706 5707 /* 5708 * If the vCPU supports "VMWRITE to any supported field in the 5709 * VMCS," then the "read-only" fields are actually read/write. 5710 */ 5711 if (vmcs_field_readonly(field) && 5712 !nested_cpu_has_vmwrite_any_field(vcpu)) 5713 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5714 5715 /* 5716 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5717 * vmcs12, else we may crush a field or consume a stale value. 5718 */ 5719 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5720 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5721 5722 /* 5723 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5724 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5725 * behavior regardless of the underlying hardware, e.g. 
if an AR_BYTE 5726 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5727 * from L1 will return a different value than VMREAD from L2 (L1 sees 5728 * the stripped down value, L2 sees the full value as stored by KVM). 5729 */ 5730 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5731 value &= 0x1f0ff; 5732 5733 vmcs12_write_any(vmcs12, field, offset, value); 5734 5735 /* 5736 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5737 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5738 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5739 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5740 */ 5741 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5742 /* 5743 * L1 can read these fields without exiting, ensure the 5744 * shadow VMCS is up-to-date. 5745 */ 5746 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5747 preempt_disable(); 5748 vmcs_load(vmx->vmcs01.shadow_vmcs); 5749 5750 __vmcs_writel(field, value); 5751 5752 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5753 vmcs_load(vmx->loaded_vmcs->vmcs); 5754 preempt_enable(); 5755 } 5756 vmx->nested.dirty_vmcs12 = true; 5757 } 5758 5759 return nested_vmx_succeed(vcpu); 5760 } 5761 5762 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5763 { 5764 vmx->nested.current_vmptr = vmptr; 5765 if (enable_shadow_vmcs) { 5766 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5767 vmcs_write64(VMCS_LINK_POINTER, 5768 __pa(vmx->vmcs01.shadow_vmcs)); 5769 vmx->nested.need_vmcs12_to_shadow_sync = true; 5770 } 5771 vmx->nested.dirty_vmcs12 = true; 5772 vmx->nested.force_msr_bitmap_recalc = true; 5773 } 5774 5775 /* Emulate the VMPTRLD instruction */ 5776 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5777 { 5778 struct vcpu_vmx *vmx = to_vmx(vcpu); 5779 gpa_t vmptr; 5780 int r; 5781 5782 if (!nested_vmx_check_permission(vcpu)) 5783 return 1; 5784 5785 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5786 return r; 5787 5788 if (!page_address_valid(vcpu, vmptr)) 5789 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5790 5791 if (vmptr == vmx->nested.vmxon_ptr) 5792 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5793 5794 /* Forbid normal VMPTRLD if Enlightened version was used */ 5795 if (nested_vmx_is_evmptr12_valid(vmx)) 5796 return 1; 5797 5798 if (vmx->nested.current_vmptr != vmptr) { 5799 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; 5800 struct vmcs_hdr hdr; 5801 5802 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { 5803 /* 5804 * Reads from an unbacked page return all 1s, 5805 * which means that the 32 bits located at the 5806 * given physical address won't match the required 5807 * VMCS12_REVISION identifier. 5808 */ 5809 return nested_vmx_fail(vcpu, 5810 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5811 } 5812 5813 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 5814 offsetof(struct vmcs12, hdr), 5815 sizeof(hdr))) { 5816 return nested_vmx_fail(vcpu, 5817 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5818 } 5819 5820 if (hdr.revision_id != VMCS12_REVISION || 5821 (hdr.shadow_vmcs && 5822 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5823 return nested_vmx_fail(vcpu, 5824 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5825 } 5826 5827 nested_release_vmcs12(vcpu); 5828 5829 /* 5830 * Load VMCS12 from guest memory since it is not already 5831 * cached. 
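*
* From L1's perspective the cached copy then stands in for the VMCS
* region in its memory; a typical (hypothetical) sequence is:
*
*	VMCLEAR  vmcs12_pa	; launch_state cleared in guest memory
*	VMPTRLD  vmcs12_pa	; vmcs12 copied into cached_vmcs12 below
*	VMWRITE  field, value	; updates the cached copy (handle_vmwrite)
*	VMLAUNCH		; cached copy drives the emulated VM-entry
*
* The cached copy is flushed back to guest memory when the vmcs12 is
* released, e.g. on VMCLEAR (see nested_release_vmcs12()).
*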
5832 */ 5833 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, 5834 VMCS12_SIZE)) { 5835 return nested_vmx_fail(vcpu, 5836 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5837 } 5838 5839 set_current_vmptr(vmx, vmptr); 5840 } 5841 5842 return nested_vmx_succeed(vcpu); 5843 } 5844 5845 /* Emulate the VMPTRST instruction */ 5846 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5847 { 5848 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5849 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5850 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5851 struct x86_exception e; 5852 gva_t gva; 5853 int r; 5854 5855 if (!nested_vmx_check_permission(vcpu)) 5856 return 1; 5857 5858 if (unlikely(nested_vmx_is_evmptr12_valid(to_vmx(vcpu)))) 5859 return 1; 5860 5861 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5862 true, sizeof(gpa_t), &gva)) 5863 return 1; 5864 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5865 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 5866 sizeof(gpa_t), &e); 5867 if (r != X86EMUL_CONTINUE) 5868 return kvm_handle_memory_failure(vcpu, r, &e); 5869 5870 return nested_vmx_succeed(vcpu); 5871 } 5872 5873 /* Emulate the INVEPT instruction */ 5874 static int handle_invept(struct kvm_vcpu *vcpu) 5875 { 5876 struct vcpu_vmx *vmx = to_vmx(vcpu); 5877 u32 vmx_instruction_info, types; 5878 unsigned long type, roots_to_free; 5879 struct kvm_mmu *mmu; 5880 gva_t gva; 5881 struct x86_exception e; 5882 struct { 5883 u64 eptp, gpa; 5884 } operand; 5885 int i, r, gpr_index; 5886 5887 if (!(vmx->nested.msrs.secondary_ctls_high & 5888 SECONDARY_EXEC_ENABLE_EPT) || 5889 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5890 kvm_queue_exception(vcpu, UD_VECTOR); 5891 return 1; 5892 } 5893 5894 if (!nested_vmx_check_permission(vcpu)) 5895 return 1; 5896 5897 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5898 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5899 type = kvm_register_read(vcpu, gpr_index); 5900 5901 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5902 5903 if (type >= 32 || !(types & (1 << type))) 5904 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5905 5906 /* According to the Intel VMX instruction reference, the memory 5907 * operand is read even if it isn't needed (e.g., for type==global) 5908 */ 5909 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5910 vmx_instruction_info, false, sizeof(operand), &gva)) 5911 return 1; 5912 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5913 if (r != X86EMUL_CONTINUE) 5914 return kvm_handle_memory_failure(vcpu, r, &e); 5915 5916 /* 5917 * Nested EPT roots are always held through guest_mmu, 5918 * not root_mmu. 
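*
* As a worked example of the type check above: if ept_caps advertises
* both single-context and global INVEPT, then
*
*	types == 0x6
*
* and only type 1 (single-context) or type 2 (global) satisfies
* "types & (1 << type)"; any other type VM-fails with
* VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID.
*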
5919 */ 5920 mmu = &vcpu->arch.guest_mmu; 5921 5922 switch (type) { 5923 case VMX_EPT_EXTENT_CONTEXT: 5924 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5925 return nested_vmx_fail(vcpu, 5926 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5927 5928 roots_to_free = 0; 5929 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd, 5930 operand.eptp)) 5931 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5932 5933 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5934 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5935 mmu->prev_roots[i].pgd, 5936 operand.eptp)) 5937 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5938 } 5939 break; 5940 case VMX_EPT_EXTENT_GLOBAL: 5941 roots_to_free = KVM_MMU_ROOTS_ALL; 5942 break; 5943 default: 5944 BUG(); 5945 break; 5946 } 5947 5948 if (roots_to_free) 5949 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); 5950 5951 return nested_vmx_succeed(vcpu); 5952 } 5953 5954 static int handle_invvpid(struct kvm_vcpu *vcpu) 5955 { 5956 struct vcpu_vmx *vmx = to_vmx(vcpu); 5957 u32 vmx_instruction_info; 5958 unsigned long type, types; 5959 gva_t gva; 5960 struct x86_exception e; 5961 struct { 5962 u64 vpid; 5963 u64 gla; 5964 } operand; 5965 u16 vpid02; 5966 int r, gpr_index; 5967 5968 if (!(vmx->nested.msrs.secondary_ctls_high & 5969 SECONDARY_EXEC_ENABLE_VPID) || 5970 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5971 kvm_queue_exception(vcpu, UD_VECTOR); 5972 return 1; 5973 } 5974 5975 if (!nested_vmx_check_permission(vcpu)) 5976 return 1; 5977 5978 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5979 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5980 type = kvm_register_read(vcpu, gpr_index); 5981 5982 types = (vmx->nested.msrs.vpid_caps & 5983 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5984 5985 if (type >= 32 || !(types & (1 << type))) 5986 return nested_vmx_fail(vcpu, 5987 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5988 5989 /* according to the intel vmx instruction reference, the memory 5990 * operand is read even if it isn't needed (e.g., for type==global) 5991 */ 5992 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5993 vmx_instruction_info, false, sizeof(operand), &gva)) 5994 return 1; 5995 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5996 if (r != X86EMUL_CONTINUE) 5997 return kvm_handle_memory_failure(vcpu, r, &e); 5998 5999 if (operand.vpid >> 16) 6000 return nested_vmx_fail(vcpu, 6001 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 6002 6003 /* 6004 * Always flush the effective vpid02, i.e. never flush the current VPID 6005 * and never explicitly flush vpid01. INVVPID targets a VPID, not a 6006 * VMCS, and so whether or not the current vmcs12 has VPID enabled is 6007 * irrelevant (and there may not be a loaded vmcs12). 6008 */ 6009 vpid02 = nested_get_vpid02(vcpu); 6010 switch (type) { 6011 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 6012 /* 6013 * LAM doesn't apply to addresses that are inputs to TLB 6014 * invalidation. 
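*
* For reference, the 16-byte INVVPID descriptor read into 'operand'
* above is laid out as:
*
*	bytes 0-7  : VPID in bits 15:0, bits 63:16 reserved (hence the
*		     "operand.vpid >> 16" VM-fail check above)
*	bytes 8-15 : linear address, only consulted for the
*		     individual-address type handled below
*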
6015 */ 6016 if (!operand.vpid || 6017 is_noncanonical_invlpg_address(operand.gla, vcpu)) 6018 return nested_vmx_fail(vcpu, 6019 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 6020 vpid_sync_vcpu_addr(vpid02, operand.gla); 6021 break; 6022 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 6023 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 6024 if (!operand.vpid) 6025 return nested_vmx_fail(vcpu, 6026 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 6027 vpid_sync_context(vpid02); 6028 break; 6029 case VMX_VPID_EXTENT_ALL_CONTEXT: 6030 vpid_sync_context(vpid02); 6031 break; 6032 default: 6033 WARN_ON_ONCE(1); 6034 return kvm_skip_emulated_instruction(vcpu); 6035 } 6036 6037 /* 6038 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 6039 * linear mappings for L2 (tagged with L2's VPID). Free all guest 6040 * roots as VPIDs are not tracked in the MMU role. 6041 * 6042 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 6043 * an MMU when EPT is disabled. 6044 * 6045 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 6046 */ 6047 if (!enable_ept) 6048 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu); 6049 6050 return nested_vmx_succeed(vcpu); 6051 } 6052 6053 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 6054 struct vmcs12 *vmcs12) 6055 { 6056 u32 index = kvm_rcx_read(vcpu); 6057 u64 new_eptp; 6058 6059 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12))) 6060 return 1; 6061 if (index >= VMFUNC_EPTP_ENTRIES) 6062 return 1; 6063 6064 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 6065 &new_eptp, index * 8, 8)) 6066 return 1; 6067 6068 /* 6069 * If the (L2) guest does a vmfunc to the currently 6070 * active ept pointer, we don't have to do anything else 6071 */ 6072 if (vmcs12->ept_pointer != new_eptp) { 6073 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 6074 return 1; 6075 6076 vmcs12->ept_pointer = new_eptp; 6077 nested_ept_new_eptp(vcpu); 6078 6079 if (!nested_cpu_has_vpid(vmcs12)) 6080 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 6081 } 6082 6083 return 0; 6084 } 6085 6086 static int handle_vmfunc(struct kvm_vcpu *vcpu) 6087 { 6088 struct vcpu_vmx *vmx = to_vmx(vcpu); 6089 struct vmcs12 *vmcs12; 6090 u32 function = kvm_rax_read(vcpu); 6091 6092 /* 6093 * VMFUNC should never execute cleanly while L1 is active; KVM supports 6094 * VMFUNC for nested VMs, but not for L1. 6095 */ 6096 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) { 6097 kvm_queue_exception(vcpu, UD_VECTOR); 6098 return 1; 6099 } 6100 6101 vmcs12 = get_vmcs12(vcpu); 6102 6103 /* 6104 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC 6105 * is enabled in vmcs02 if and only if it's enabled in vmcs12. 6106 */ 6107 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) { 6108 kvm_queue_exception(vcpu, UD_VECTOR); 6109 return 1; 6110 } 6111 6112 if (!(vmcs12->vm_function_control & BIT_ULL(function))) 6113 goto fail; 6114 6115 switch (function) { 6116 case 0: 6117 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 6118 goto fail; 6119 break; 6120 default: 6121 goto fail; 6122 } 6123 return kvm_skip_emulated_instruction(vcpu); 6124 6125 fail: 6126 /* 6127 * This is effectively a reflected VM-Exit, as opposed to a synthesized 6128 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 6129 * EXIT_REASON_VMFUNC as the exit reason. 
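*
* For reference, a (hypothetical) EPTP-switching request from L2 is:
*
*	RAX = 0		; VM function 0, EPTP switching
*	RCX = i		; index into the EPTP list page, i < VMFUNC_EPTP_ENTRIES
*	VMFUNC
*
* nested_vmx_eptp_switching() above reads the 8-byte entry at
* vmcs12->eptp_list_address + i * 8 and, if it passes
* nested_vmx_check_eptp() and differs from the current EPT pointer,
* installs it as the new vmcs12->ept_pointer.
*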
6130 */ 6131 nested_vmx_vmexit(vcpu, vmx->vt.exit_reason.full, 6132 vmx_get_intr_info(vcpu), 6133 vmx_get_exit_qual(vcpu)); 6134 return 1; 6135 } 6136 6137 /* 6138 * Return true if an IO instruction with the specified port and size should cause 6139 * a VM-exit into L1. 6140 */ 6141 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 6142 int size) 6143 { 6144 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6145 gpa_t bitmap, last_bitmap; 6146 u8 b; 6147 6148 last_bitmap = INVALID_GPA; 6149 b = -1; 6150 6151 while (size > 0) { 6152 if (port < 0x8000) 6153 bitmap = vmcs12->io_bitmap_a; 6154 else if (port < 0x10000) 6155 bitmap = vmcs12->io_bitmap_b; 6156 else 6157 return true; 6158 bitmap += (port & 0x7fff) / 8; 6159 6160 if (last_bitmap != bitmap) 6161 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 6162 return true; 6163 if (b & (1 << (port & 7))) 6164 return true; 6165 6166 port++; 6167 size--; 6168 last_bitmap = bitmap; 6169 } 6170 6171 return false; 6172 } 6173 6174 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 6175 struct vmcs12 *vmcs12) 6176 { 6177 unsigned long exit_qualification; 6178 unsigned short port; 6179 int size; 6180 6181 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 6182 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 6183 6184 exit_qualification = vmx_get_exit_qual(vcpu); 6185 6186 port = exit_qualification >> 16; 6187 size = (exit_qualification & 7) + 1; 6188 6189 return nested_vmx_check_io_bitmaps(vcpu, port, size); 6190 } 6191 6192 /* 6193 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 6194 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 6195 * disinterest in the current event (read or write a specific MSR) by using an 6196 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 6197 */ 6198 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 6199 struct vmcs12 *vmcs12, 6200 union vmx_exit_reason exit_reason) 6201 { 6202 u32 msr_index = kvm_rcx_read(vcpu); 6203 gpa_t bitmap; 6204 6205 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 6206 return true; 6207 6208 /* 6209 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 6210 * for the four combinations of read/write and low/high MSR numbers. 6211 * First we need to figure out which of the four to use: 6212 */ 6213 bitmap = vmcs12->msr_bitmap; 6214 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6215 bitmap += 2048; 6216 if (msr_index >= 0xc0000000) { 6217 msr_index -= 0xc0000000; 6218 bitmap += 1024; 6219 } 6220 6221 /* Then read the msr_index'th bit from this bitmap: */ 6222 if (msr_index < 1024*8) { 6223 unsigned char b; 6224 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 6225 return true; 6226 return 1 & (b >> (msr_index & 7)); 6227 } else 6228 return true; /* let L1 handle the wrong parameter */ 6229 } 6230 6231 /* 6232 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 6233 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 6234 * intercept (via guest_host_mask etc.) the current event. 
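*
* For example, if vmcs12->cr0_guest_host_mask is X86_CR0_TS and TS is
* clear in vmcs12->cr0_read_shadow, then a "mov to cr0" in L2 that sets
* TS satisfies
*
*	cr0_guest_host_mask & (val ^ cr0_read_shadow)
*
* and is reflected to L1, while writes that leave TS unchanged are
* handled by L0 without a nested VM-exit.
*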
6235 */ 6236 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 6237 struct vmcs12 *vmcs12) 6238 { 6239 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 6240 int cr = exit_qualification & 15; 6241 int reg; 6242 unsigned long val; 6243 6244 switch ((exit_qualification >> 4) & 3) { 6245 case 0: /* mov to cr */ 6246 reg = (exit_qualification >> 8) & 15; 6247 val = kvm_register_read(vcpu, reg); 6248 switch (cr) { 6249 case 0: 6250 if (vmcs12->cr0_guest_host_mask & 6251 (val ^ vmcs12->cr0_read_shadow)) 6252 return true; 6253 break; 6254 case 3: 6255 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 6256 return true; 6257 break; 6258 case 4: 6259 if (vmcs12->cr4_guest_host_mask & 6260 (vmcs12->cr4_read_shadow ^ val)) 6261 return true; 6262 break; 6263 case 8: 6264 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 6265 return true; 6266 break; 6267 } 6268 break; 6269 case 2: /* clts */ 6270 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 6271 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 6272 return true; 6273 break; 6274 case 1: /* mov from cr */ 6275 switch (cr) { 6276 case 3: 6277 if (vmcs12->cpu_based_vm_exec_control & 6278 CPU_BASED_CR3_STORE_EXITING) 6279 return true; 6280 break; 6281 case 8: 6282 if (vmcs12->cpu_based_vm_exec_control & 6283 CPU_BASED_CR8_STORE_EXITING) 6284 return true; 6285 break; 6286 } 6287 break; 6288 case 3: /* lmsw */ 6289 /* 6290 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 6291 * cr0. Other attempted changes are ignored, with no exit. 6292 */ 6293 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 6294 if (vmcs12->cr0_guest_host_mask & 0xe & 6295 (val ^ vmcs12->cr0_read_shadow)) 6296 return true; 6297 if ((vmcs12->cr0_guest_host_mask & 0x1) && 6298 !(vmcs12->cr0_read_shadow & 0x1) && 6299 (val & 0x1)) 6300 return true; 6301 break; 6302 } 6303 return false; 6304 } 6305 6306 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, 6307 struct vmcs12 *vmcs12) 6308 { 6309 u32 encls_leaf; 6310 6311 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX) || 6312 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) 6313 return false; 6314 6315 encls_leaf = kvm_rax_read(vcpu); 6316 if (encls_leaf > 62) 6317 encls_leaf = 63; 6318 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); 6319 } 6320 6321 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 6322 struct vmcs12 *vmcs12, gpa_t bitmap) 6323 { 6324 u32 vmx_instruction_info; 6325 unsigned long field; 6326 u8 b; 6327 6328 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 6329 return true; 6330 6331 /* Decode instruction info and find the field to access */ 6332 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 6333 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 6334 6335 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 6336 if (field >> 15) 6337 return true; 6338 6339 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 6340 return true; 6341 6342 return 1 & (b >> (field & 7)); 6343 } 6344 6345 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 6346 { 6347 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 6348 6349 if (nested_cpu_has_mtf(vmcs12)) 6350 return true; 6351 6352 /* 6353 * An MTF VM-exit may be injected into the guest by setting the 6354 * interruption-type to 7 (other event) and the vector field to 0. Such 6355 * is the case regardless of the 'monitor trap flag' VM-execution 6356 * control. 
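*
* Concretely, with INTR_INFO_VALID_MASK == BIT(31) and
* INTR_TYPE_OTHER_EVENT == (7 << 8), the comparison below matches
*
*	vm_entry_intr_info_field == 0x80000700
*
* i.e. valid=1, type=7 (other event), vector=0.
*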
6357 */ 6358 return entry_intr_info == (INTR_INFO_VALID_MASK 6359 | INTR_TYPE_OTHER_EVENT); 6360 } 6361 6362 /* 6363 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 6364 * L1 wants the exit. Only call this when in is_guest_mode (L2). 6365 */ 6366 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 6367 union vmx_exit_reason exit_reason) 6368 { 6369 u32 intr_info; 6370 6371 switch ((u16)exit_reason.basic) { 6372 case EXIT_REASON_EXCEPTION_NMI: 6373 intr_info = vmx_get_intr_info(vcpu); 6374 if (is_nmi(intr_info)) 6375 return true; 6376 else if (is_page_fault(intr_info)) 6377 return vcpu->arch.apf.host_apf_flags || 6378 vmx_need_pf_intercept(vcpu); 6379 else if (is_debug(intr_info) && 6380 vcpu->guest_debug & 6381 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 6382 return true; 6383 else if (is_breakpoint(intr_info) && 6384 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 6385 return true; 6386 else if (is_alignment_check(intr_info) && 6387 !vmx_guest_inject_ac(vcpu)) 6388 return true; 6389 else if (is_ve_fault(intr_info)) 6390 return true; 6391 return false; 6392 case EXIT_REASON_EXTERNAL_INTERRUPT: 6393 return true; 6394 case EXIT_REASON_MCE_DURING_VMENTRY: 6395 return true; 6396 case EXIT_REASON_EPT_VIOLATION: 6397 /* 6398 * L0 always deals with the EPT violation. If nested EPT is 6399 * used, and the nested mmu code discovers that the address is 6400 * missing in the guest EPT table (EPT12), the EPT violation 6401 * will be injected with nested_ept_inject_page_fault() 6402 */ 6403 return true; 6404 case EXIT_REASON_EPT_MISCONFIG: 6405 /* 6406 * L2 never uses directly L1's EPT, but rather L0's own EPT 6407 * table (shadow on EPT) or a merged EPT table that L0 built 6408 * (EPT on EPT). So any problems with the structure of the 6409 * table is L0's fault. 6410 */ 6411 return true; 6412 case EXIT_REASON_PREEMPTION_TIMER: 6413 return true; 6414 case EXIT_REASON_PML_FULL: 6415 /* 6416 * PML is emulated for an L1 VMM and should never be enabled in 6417 * vmcs02, always "handle" PML_FULL by exiting to userspace. 6418 */ 6419 return true; 6420 case EXIT_REASON_VMFUNC: 6421 /* VM functions are emulated through L2->L0 vmexits. */ 6422 return true; 6423 case EXIT_REASON_BUS_LOCK: 6424 /* 6425 * At present, bus lock VM exit is never exposed to L1. 6426 * Handle L2's bus locks in L0 directly. 6427 */ 6428 return true; 6429 #ifdef CONFIG_KVM_HYPERV 6430 case EXIT_REASON_VMCALL: 6431 /* Hyper-V L2 TLB flush hypercall is handled by L0 */ 6432 return guest_hv_cpuid_has_l2_tlb_flush(vcpu) && 6433 nested_evmcs_l2_tlb_flush_enabled(vcpu) && 6434 kvm_hv_is_tlb_flush_hcall(vcpu); 6435 #endif 6436 default: 6437 break; 6438 } 6439 return false; 6440 } 6441 6442 /* 6443 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 6444 * is_guest_mode (L2). 
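*
* For ordinary exceptions this mostly reduces to a bitmap test, e.g. a
* #GP (vector 13) raised in L2 is forwarded to L1 iff
*
*	vmcs12->exception_bitmap & (1u << 13)
*
* is set; NMIs and page faults get special-cased below.
*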
6445 */ 6446 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 6447 union vmx_exit_reason exit_reason) 6448 { 6449 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6450 u32 intr_info; 6451 6452 switch ((u16)exit_reason.basic) { 6453 case EXIT_REASON_EXCEPTION_NMI: 6454 intr_info = vmx_get_intr_info(vcpu); 6455 if (is_nmi(intr_info)) 6456 return true; 6457 else if (is_page_fault(intr_info)) 6458 return true; 6459 return vmcs12->exception_bitmap & 6460 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 6461 case EXIT_REASON_EXTERNAL_INTERRUPT: 6462 return nested_exit_on_intr(vcpu); 6463 case EXIT_REASON_TRIPLE_FAULT: 6464 return true; 6465 case EXIT_REASON_INTERRUPT_WINDOW: 6466 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 6467 case EXIT_REASON_NMI_WINDOW: 6468 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 6469 case EXIT_REASON_TASK_SWITCH: 6470 return true; 6471 case EXIT_REASON_CPUID: 6472 return true; 6473 case EXIT_REASON_HLT: 6474 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 6475 case EXIT_REASON_INVD: 6476 return true; 6477 case EXIT_REASON_INVLPG: 6478 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6479 case EXIT_REASON_RDPMC: 6480 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 6481 case EXIT_REASON_RDRAND: 6482 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 6483 case EXIT_REASON_RDSEED: 6484 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 6485 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 6486 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 6487 case EXIT_REASON_VMREAD: 6488 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6489 vmcs12->vmread_bitmap); 6490 case EXIT_REASON_VMWRITE: 6491 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6492 vmcs12->vmwrite_bitmap); 6493 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 6494 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 6495 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 6496 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 6497 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 6498 /* 6499 * VMX instructions trap unconditionally. This allows L1 to 6500 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
6501 */ 6502 return true; 6503 case EXIT_REASON_CR_ACCESS: 6504 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 6505 case EXIT_REASON_DR_ACCESS: 6506 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 6507 case EXIT_REASON_IO_INSTRUCTION: 6508 return nested_vmx_exit_handled_io(vcpu, vmcs12); 6509 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 6510 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 6511 case EXIT_REASON_MSR_READ: 6512 case EXIT_REASON_MSR_WRITE: 6513 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 6514 case EXIT_REASON_INVALID_STATE: 6515 return true; 6516 case EXIT_REASON_MWAIT_INSTRUCTION: 6517 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 6518 case EXIT_REASON_MONITOR_TRAP_FLAG: 6519 return nested_vmx_exit_handled_mtf(vmcs12); 6520 case EXIT_REASON_MONITOR_INSTRUCTION: 6521 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 6522 case EXIT_REASON_PAUSE_INSTRUCTION: 6523 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 6524 nested_cpu_has2(vmcs12, 6525 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 6526 case EXIT_REASON_MCE_DURING_VMENTRY: 6527 return true; 6528 case EXIT_REASON_TPR_BELOW_THRESHOLD: 6529 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 6530 case EXIT_REASON_APIC_ACCESS: 6531 case EXIT_REASON_APIC_WRITE: 6532 case EXIT_REASON_EOI_INDUCED: 6533 /* 6534 * The controls for "virtualize APIC accesses," "APIC- 6535 * register virtualization," and "virtual-interrupt 6536 * delivery" only come from vmcs12. 6537 */ 6538 return true; 6539 case EXIT_REASON_INVPCID: 6540 return 6541 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 6542 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6543 case EXIT_REASON_WBINVD: 6544 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 6545 case EXIT_REASON_XSETBV: 6546 return true; 6547 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 6548 /* 6549 * This should never happen, since it is not possible to 6550 * set XSS to a non-zero value---neither in L1 nor in L2. 6551 * If if it were, XSS would have to be checked against 6552 * the XSS exit bitmap in vmcs12. 6553 */ 6554 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES); 6555 case EXIT_REASON_UMWAIT: 6556 case EXIT_REASON_TPAUSE: 6557 return nested_cpu_has2(vmcs12, 6558 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 6559 case EXIT_REASON_ENCLS: 6560 return nested_vmx_exit_handled_encls(vcpu, vmcs12); 6561 case EXIT_REASON_NOTIFY: 6562 /* Notify VM exit is not exposed to L1 */ 6563 return false; 6564 default: 6565 return true; 6566 } 6567 } 6568 6569 /* 6570 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 6571 * reflected into L1. 6572 */ 6573 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 6574 { 6575 struct vcpu_vmx *vmx = to_vmx(vcpu); 6576 union vmx_exit_reason exit_reason = vmx->vt.exit_reason; 6577 unsigned long exit_qual; 6578 u32 exit_intr_info; 6579 6580 WARN_ON_ONCE(vmx->nested.nested_run_pending); 6581 6582 /* 6583 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 6584 * has already loaded L2's state. 6585 */ 6586 if (unlikely(vmx->fail)) { 6587 trace_kvm_nested_vmenter_failed( 6588 "hardware VM-instruction error: ", 6589 vmcs_read32(VM_INSTRUCTION_ERROR)); 6590 exit_intr_info = 0; 6591 exit_qual = 0; 6592 goto reflect_vmexit; 6593 } 6594 6595 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX); 6596 6597 /* If L0 (KVM) wants the exit, it trumps L1's desires. 
*/ 6598 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 6599 return false; 6600 6601 /* If L1 doesn't want the exit, handle it in L0. */ 6602 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 6603 return false; 6604 6605 /* 6606 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 6607 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 6608 * need to be synthesized by querying the in-kernel LAPIC, but external 6609 * interrupts are never reflected to L1 so it's a non-issue. 6610 */ 6611 exit_intr_info = vmx_get_intr_info(vcpu); 6612 if (is_exception_with_error_code(exit_intr_info)) { 6613 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6614 6615 vmcs12->vm_exit_intr_error_code = 6616 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 6617 } 6618 exit_qual = vmx_get_exit_qual(vcpu); 6619 6620 reflect_vmexit: 6621 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 6622 return true; 6623 } 6624 6625 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 6626 struct kvm_nested_state __user *user_kvm_nested_state, 6627 u32 user_data_size) 6628 { 6629 struct vcpu_vmx *vmx; 6630 struct vmcs12 *vmcs12; 6631 struct kvm_nested_state kvm_state = { 6632 .flags = 0, 6633 .format = KVM_STATE_NESTED_FORMAT_VMX, 6634 .size = sizeof(kvm_state), 6635 .hdr.vmx.flags = 0, 6636 .hdr.vmx.vmxon_pa = INVALID_GPA, 6637 .hdr.vmx.vmcs12_pa = INVALID_GPA, 6638 .hdr.vmx.preemption_timer_deadline = 0, 6639 }; 6640 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6641 &user_kvm_nested_state->data.vmx[0]; 6642 6643 if (!vcpu) 6644 return kvm_state.size + sizeof(*user_vmx_nested_state); 6645 6646 vmx = to_vmx(vcpu); 6647 vmcs12 = get_vmcs12(vcpu); 6648 6649 if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) && 6650 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6651 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6652 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6653 6654 if (vmx_has_valid_vmcs12(vcpu)) { 6655 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6656 6657 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */ 6658 if (nested_vmx_is_evmptr12_set(vmx)) 6659 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6660 6661 if (is_guest_mode(vcpu) && 6662 nested_cpu_has_shadow_vmcs(vmcs12) && 6663 vmcs12->vmcs_link_pointer != INVALID_GPA) 6664 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6665 } 6666 6667 if (vmx->nested.smm.vmxon) 6668 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6669 6670 if (vmx->nested.smm.guest_mode) 6671 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6672 6673 if (is_guest_mode(vcpu)) { 6674 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6675 6676 if (vmx->nested.nested_run_pending) 6677 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6678 6679 if (vmx->nested.mtf_pending) 6680 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6681 6682 if (nested_cpu_has_preemption_timer(vmcs12) && 6683 vmx->nested.has_preemption_timer_deadline) { 6684 kvm_state.hdr.vmx.flags |= 6685 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6686 kvm_state.hdr.vmx.preemption_timer_deadline = 6687 vmx->nested.preemption_timer_deadline; 6688 } 6689 } 6690 } 6691 6692 if (user_data_size < kvm_state.size) 6693 goto out; 6694 6695 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6696 return -EFAULT; 6697 6698 if (!vmx_has_valid_vmcs12(vcpu)) 6699 goto out; 6700 6701 /* 6702 * When running L2, the authoritative vmcs12 state is in the 6703 * vmcs02. 

static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_vmx *vmx;
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(kvm_state),
		.hdr.vmx.flags = 0,
		.hdr.vmx.vmxon_pa = INVALID_GPA,
		.hdr.vmx.vmcs12_pa = INVALID_GPA,
		.hdr.vmx.preemption_timer_deadline = 0,
	};
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];

	if (!vcpu)
		return kvm_state.size + sizeof(*user_vmx_nested_state);

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);

			/* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
			if (nested_vmx_is_evmptr12_set(vmx))
				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != INVALID_GPA)
				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
		}

		if (vmx->nested.smm.vmxon)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

			if (vmx->nested.nested_run_pending)
				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;

			if (vmx->nested.mtf_pending)
				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;

			if (nested_cpu_has_preemption_timer(vmcs12) &&
			    vmx->nested.has_preemption_timer_deadline) {
				kvm_state.hdr.vmx.flags |=
					KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
				kvm_state.hdr.vmx.preemption_timer_deadline =
					vmx->nested.preemption_timer_deadline;
			}
		}
	}

	if (user_data_size < kvm_state.size)
		goto out;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;

	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02.  When running L1, the authoritative vmcs12 state is
	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
	if (is_guest_mode(vcpu)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
	} else {
		copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
		if (!vmx->nested.need_vmcs12_to_shadow_sync) {
			if (nested_vmx_is_evmptr12_valid(vmx))
				/*
				 * The L1 hypervisor is not obliged to keep the
				 * eVMCS clean-fields data up to date while not
				 * in guest mode; 'hv_clean_fields' is only
				 * guaranteed to be valid at VM-entry, so
				 * ignore it here and do a full copy.
				 */
				copy_enlightened_to_vmcs12(vmx, 0);
			else if (enable_shadow_vmcs)
				copy_shadow_to_vmcs12(vmx);
		}
	}

	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);

	/*
	 * Copy over the full allocated size of vmcs12 rather than just the
	 * size of the struct.
	 */
	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != INVALID_GPA) {
		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}
out:
	return kvm_state.size;
}
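
/*
 * Illustrative userspace-side sketch (compiled out, not part of this file):
 * one common way to consume vmx_get_nested_state() via KVM_GET_NESTED_STATE.
 * KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) reports the maximum state size,
 * which is what the !vcpu branch above computes.  Field names and the exact
 * error contract should be taken from Documentation/virt/kvm/api.rst rather
 * than from this sketch.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_nested_state *get_nested_state(int kvm_fd, int vcpu_fd)
{
	int max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	struct kvm_nested_state *state;

	if (max_size <= 0)
		return NULL;	/* nested state not supported */

	state = calloc(1, max_size);
	if (!state)
		return NULL;

	/* Tell KVM how big the buffer is; it reports the actual size back. */
	state->size = max_size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;
}
#endif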

void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(vcpu);
}

static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	enum vm_entry_failure_code ignored;
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];
	int ret;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

	if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;

		if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
			return -EINVAL;

		/*
		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
		 * enable the eVMCS capability on the vCPU.  However, the code
		 * has since changed so that the flag signals that vmcs12
		 * should be copied into the eVMCS in guest memory.
		 *
		 * To preserve backwards compatibility, allow userspace to set
		 * this flag even when there is no VMXON region.
		 */
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
			return -EINVAL;

		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ?
		(kvm_state->flags &
		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
		: kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
	    (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) ||
	     !vmx->nested.enlightened_vmcs_enabled))
		return -EINVAL;

	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted if no VMCS loaded */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
		/* See vmx_has_valid_vmcs12. */
		if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
		    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
		    (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
			return -EINVAL;
		else
			return 0;
	}

	if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
#ifdef CONFIG_KVM_HYPERV
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * nested_vmx_handle_enlightened_vmptrld() cannot be called
		 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
		 * restored yet.  EVMCS will be mapped from
		 * nested_get_vmcs12_pages().
		 */
		vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
#endif
	} else {
		return -EINVAL;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	vmx->nested.mtf_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);

	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != INVALID_GPA) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	vmx->nested.has_preemption_timer_deadline = false;
	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
		vmx->nested.has_preemption_timer_deadline = true;
		vmx->nested.preemption_timer_deadline =
			kvm_state->hdr.vmx.preemption_timer_deadline;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	vmx->nested.force_msr_bitmap_recalc = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	if (vmx->nested.mtf_pending)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}
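
/*
 * Illustrative userspace-side counterpart (compiled out, not part of this
 * file): restoring the blob obtained earlier is a single KVM_SET_NESTED_STATE
 * call on the destination vCPU.  As the checks in vmx_set_nested_state()
 * show, the blob must be internally consistent (format, flags, SMM state,
 * vmcs12 revision, ...) or the ioctl fails with -EINVAL or -EFAULT.
 */
#if 0
static int set_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}
#endif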

void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}

/*
 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6.  Undo
 * that madness to get the encoding for comparison.
 */
#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))

static u64 nested_vmx_calc_vmcs_enum_msr(void)
{
	/*
	 * Note these are the so-called "index" of the VMCS field encoding, not
	 * the index into vmcs12.
	 */
	unsigned int max_idx, idx;
	int i;

	/*
	 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
	 * vmcs12, regardless of whether or not the associated feature is
	 * exposed to L1.  Simply find the field with the highest index.
	 */
	max_idx = 0;
	for (i = 0; i < nr_vmcs12_fields; i++) {
		/* The vmcs12 table is very, very sparsely populated. */
		if (!vmcs12_field_offsets[i])
			continue;

		idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
		if (idx > max_idx)
			max_idx = idx;
	}

	return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
}
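
/*
 * Sanity-check sketch for the rotation above: a full VMCS field encoding
 * rotated left by 6 bits yields the vmcs12 index, and VMCS12_IDX_TO_ENC()
 * rotates it back.  The helper macro and the sample value 0x4800 (the
 * encoding of GUEST_ES_LIMIT) are purely for illustration.
 */
#define EXAMPLE_ENC_TO_VMCS12_IDX(enc) \
	((u16)(((u16)(enc) << 6) | ((u16)(enc) >> 10)))
static_assert(VMCS12_IDX_TO_ENC(EXAMPLE_ENC_TO_VMCS12_IDX(0x4800)) == 0x4800);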

static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf,
					   struct nested_vmx_msrs *msrs)
{
	msrs->pinbased_ctls_low =
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf,
				       struct nested_vmx_msrs *msrs)
{
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl;
	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
		VM_EXIT_CLEAR_BNDCFGS;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
		VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
}

static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf,
					struct nested_vmx_msrs *msrs)
{
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
		 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
}

static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf,
					   struct nested_vmx_msrs *msrs)
{
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl;
	msrs->procbased_ctls_high &=
		CPU_BASED_INTR_WINDOW_EXITING |
		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware.  For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
}

static void nested_vmx_setup_secondary_ctls(u32 ept_caps,
					    struct vmcs_config *vmcs_conf,
					    struct nested_vmx_msrs *msrs)
{
	msrs->secondary_ctls_low = 0;

	msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_ENABLE_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_ENABLE_VMFUNC |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_ENABLE_XSAVES |
		SECONDARY_EXEC_TSC_SCALING |
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps =
			VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPT_PAGE_WALK_5_BIT |
			VMX_EPTP_WB_BIT |
			VMX_EPT_INVEPT_BIT |
			VMX_EPT_EXECUTE_ONLY_BIT;

		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}

		/*
		 * Advertise EPTP switching irrespective of hardware support,
		 * KVM emulates it in software so long as VMFUNC is supported.
		 */
		if (cpu_has_vmx_vmfunc())
			msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative,
	 * accepting single-context INVVPID without declaring it, is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	if (enable_sgx)
		msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
}

static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf,
				       struct nested_vmx_msrs *msrs)
{
	msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT |
		VMX_MISC_ACTIVITY_WAIT_SIPI;
	msrs->misc_high = 0;
}

static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
{
	/*
	 * This MSR reports some information about VMX support.  We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE,
						 X86_MEMTYPE_WB);

	msrs->basic |= VMX_BASIC_TRUE_CTLS;
	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;
}

static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
{
	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	if (vmx_umip_emulated())
		msrs->cr4_fixed1 |= X86_CR4_UMIP;
}
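
/*
 * Minimal sketch of how the fixed-bit values above are meant to be consumed
 * when validating a guest CR0/CR4 value (the helper name is illustrative
 * only; the real checks use these msrs fields elsewhere in nested entry
 * validation): every bit set in FIXED0 must be set in the register, and
 * every bit clear in FIXED1 must be clear.
 */
static inline bool example_cr_value_allowed(u64 val, u64 fixed0, u64 fixed1)
{
	return (val & fixed0) == fixed0 && (val & ~fixed1) == 0;
}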

/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on.  See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
{
	struct nested_vmx_msrs *msrs = &vmcs_conf->nested;

	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits.  The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
	 * fields of vmcs01 and vmcs12, will keep these bits set in vmcs02 -
	 * and nested_vmx_l1_wants_exit() will not pass the related exits to
	 * L1.  These rules have exceptions below.
	 */
	nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs);

	nested_vmx_setup_exit_ctls(vmcs_conf, msrs);

	nested_vmx_setup_entry_ctls(vmcs_conf, msrs);

	nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs);

	nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs);

	nested_vmx_setup_misc_data(vmcs_conf, msrs);

	nested_vmx_setup_basic(msrs);

	nested_vmx_setup_cr_fixed(msrs);

	msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}
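
/*
 * Minimal sketch of the low/high check described in the comment above (see
 * vmx_control_verify() for the real helper; the name below is illustrative
 * only): a 32-bit control field is acceptable iff every must-be-1 bit (low
 * half of the MSR) is set and no bit outside the may-be-1 mask (high half)
 * is set.
 */
static inline bool example_control_allowed(u32 control, u32 low, u32 high)
{
	return (control & low) == low && (control & ~high) == 0;
}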

void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}

__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}

	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff;
	exit_handlers[EXIT_REASON_VMON] = handle_vmxon;
	exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;

	return 0;
}

struct kvm_x86_nested_ops vmx_nested_ops = {
	.leave_nested = vmx_leave_nested,
	.is_exception_vmexit = nested_vmx_is_exception_vmexit,
	.check_events = vmx_check_nested_events,
	.has_events = vmx_has_nested_events,
	.triple_fault = nested_vmx_triple_fault,
	.get_state = vmx_get_nested_state,
	.set_state = vmx_set_nested_state,
	.get_nested_state_pages = vmx_get_nested_state_pages,
	.write_log_dirty = nested_vmx_write_pml_buffer,
#ifdef CONFIG_KVM_HYPERV
	.enable_evmcs = nested_enable_evmcs,
	.get_evmcs_version = nested_get_evmcs_version,
	.hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush,
#endif
};
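
/*
 * Context note (an assumption about the callers, which live in vmx.c and the
 * generic x86 code rather than in this file): the exit_handlers[] table
 * filled in by nested_vmx_hardware_setup() is the per-exit-reason dispatch
 * table used by the main VMX exit handler, and vmx_nested_ops is hooked up by
 * vmx.c so that the generic code can reach the nested state save/restore and
 * event handling entry points implemented above.  Handlers installed in the
 * table follow the usual KVM convention: return 1 to resume the guest, 0 to
 * exit to userspace, negative on error.
 */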