1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/objtool.h> 4 #include <linux/percpu.h> 5 6 #include <asm/debugreg.h> 7 #include <asm/mmu_context.h> 8 9 #include "cpuid.h" 10 #include "hyperv.h" 11 #include "mmu.h" 12 #include "nested.h" 13 #include "pmu.h" 14 #include "trace.h" 15 #include "vmx.h" 16 #include "x86.h" 17 18 static bool __read_mostly enable_shadow_vmcs = 1; 19 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); 20 21 static bool __read_mostly nested_early_check = 0; 22 module_param(nested_early_check, bool, S_IRUGO); 23 24 #define CC(consistency_check) \ 25 ({ \ 26 bool failed = (consistency_check); \ 27 if (failed) \ 28 trace_kvm_nested_vmenter_failed(#consistency_check, 0); \ 29 failed; \ 30 }) 31 32 /* 33 * Hyper-V requires all of these, so mark them as supported even though 34 * they are just treated the same as all-context. 35 */ 36 #define VMX_VPID_EXTENT_SUPPORTED_MASK \ 37 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ 38 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ 39 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ 40 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) 41 42 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 43 44 enum { 45 VMX_VMREAD_BITMAP, 46 VMX_VMWRITE_BITMAP, 47 VMX_BITMAP_NR 48 }; 49 static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; 50 51 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) 52 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) 53 54 struct shadow_vmcs_field { 55 u16 encoding; 56 u16 offset; 57 }; 58 static struct shadow_vmcs_field shadow_read_only_fields[] = { 59 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) }, 60 #include "vmcs_shadow_fields.h" 61 }; 62 static int max_shadow_read_only_fields = 63 ARRAY_SIZE(shadow_read_only_fields); 64 65 static struct shadow_vmcs_field shadow_read_write_fields[] = { 66 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) }, 67 #include "vmcs_shadow_fields.h" 68 }; 69 static int max_shadow_read_write_fields = 70 ARRAY_SIZE(shadow_read_write_fields); 71 72 static void init_vmcs_shadow_fields(void) 73 { 74 int i, j; 75 76 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 77 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 78 79 for (i = j = 0; i < max_shadow_read_only_fields; i++) { 80 struct shadow_vmcs_field entry = shadow_read_only_fields[i]; 81 u16 field = entry.encoding; 82 83 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 84 (i + 1 == max_shadow_read_only_fields || 85 shadow_read_only_fields[i + 1].encoding != field + 1)) 86 pr_err("Missing field from shadow_read_only_field %x\n", 87 field + 1); 88 89 clear_bit(field, vmx_vmread_bitmap); 90 if (field & 1) 91 #ifdef CONFIG_X86_64 92 continue; 93 #else 94 entry.offset += sizeof(u32); 95 #endif 96 shadow_read_only_fields[j++] = entry; 97 } 98 max_shadow_read_only_fields = j; 99 100 for (i = j = 0; i < max_shadow_read_write_fields; i++) { 101 struct shadow_vmcs_field entry = shadow_read_write_fields[i]; 102 u16 field = entry.encoding; 103 104 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 105 (i + 1 == max_shadow_read_write_fields || 106 shadow_read_write_fields[i + 1].encoding != field + 1)) 107 pr_err("Missing field from shadow_read_write_field %x\n", 108 field + 1); 109 110 WARN_ONCE(field >= GUEST_ES_AR_BYTES && 111 field <= GUEST_TR_AR_BYTES, 112 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES"); 113 114 /* 115 * PML and the preemption timer can be emulated, but the 116 * processor cannot vmwrite to fields that don't exist 117 * on bare metal. 
118 */ 119 switch (field) { 120 case GUEST_PML_INDEX: 121 if (!cpu_has_vmx_pml()) 122 continue; 123 break; 124 case VMX_PREEMPTION_TIMER_VALUE: 125 if (!cpu_has_vmx_preemption_timer()) 126 continue; 127 break; 128 case GUEST_INTR_STATUS: 129 if (!cpu_has_vmx_apicv()) 130 continue; 131 break; 132 default: 133 break; 134 } 135 136 clear_bit(field, vmx_vmwrite_bitmap); 137 clear_bit(field, vmx_vmread_bitmap); 138 if (field & 1) 139 #ifdef CONFIG_X86_64 140 continue; 141 #else 142 entry.offset += sizeof(u32); 143 #endif 144 shadow_read_write_fields[j++] = entry; 145 } 146 max_shadow_read_write_fields = j; 147 } 148 149 /* 150 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), 151 * set the success or error code of an emulated VMX instruction (as specified 152 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated 153 * instruction. 154 */ 155 static int nested_vmx_succeed(struct kvm_vcpu *vcpu) 156 { 157 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) 158 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 159 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); 160 return kvm_skip_emulated_instruction(vcpu); 161 } 162 163 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) 164 { 165 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 166 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 167 X86_EFLAGS_SF | X86_EFLAGS_OF)) 168 | X86_EFLAGS_CF); 169 return kvm_skip_emulated_instruction(vcpu); 170 } 171 172 static int nested_vmx_failValid(struct kvm_vcpu *vcpu, 173 u32 vm_instruction_error) 174 { 175 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 176 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 177 X86_EFLAGS_SF | X86_EFLAGS_OF)) 178 | X86_EFLAGS_ZF); 179 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; 180 /* 181 * We don't need to force a shadow sync because 182 * VM_INSTRUCTION_ERROR is not shadowed 183 */ 184 return kvm_skip_emulated_instruction(vcpu); 185 } 186 187 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error) 188 { 189 struct vcpu_vmx *vmx = to_vmx(vcpu); 190 191 /* 192 * failValid writes the error number to the current VMCS, which 193 * can't be done if there isn't a current VMCS. 194 */ 195 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) 196 return nested_vmx_failInvalid(vcpu); 197 198 return nested_vmx_failValid(vcpu, vm_instruction_error); 199 } 200 201 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) 202 { 203 /* TODO: not to reset guest simply here. 
*/ 204 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 205 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); 206 } 207 208 static inline bool vmx_control_verify(u32 control, u32 low, u32 high) 209 { 210 return fixed_bits_valid(control, low, high); 211 } 212 213 static inline u64 vmx_control_msr(u32 low, u32 high) 214 { 215 return low | ((u64)high << 32); 216 } 217 218 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) 219 { 220 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 221 vmcs_write64(VMCS_LINK_POINTER, -1ull); 222 vmx->nested.need_vmcs12_to_shadow_sync = false; 223 } 224 225 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) 226 { 227 struct vcpu_vmx *vmx = to_vmx(vcpu); 228 229 if (!vmx->nested.hv_evmcs) 230 return; 231 232 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); 233 vmx->nested.hv_evmcs_vmptr = 0; 234 vmx->nested.hv_evmcs = NULL; 235 } 236 237 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, 238 struct loaded_vmcs *prev) 239 { 240 struct vmcs_host_state *dest, *src; 241 242 if (unlikely(!vmx->guest_state_loaded)) 243 return; 244 245 src = &prev->host_state; 246 dest = &vmx->loaded_vmcs->host_state; 247 248 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); 249 dest->ldt_sel = src->ldt_sel; 250 #ifdef CONFIG_X86_64 251 dest->ds_sel = src->ds_sel; 252 dest->es_sel = src->es_sel; 253 #endif 254 } 255 256 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) 257 { 258 struct vcpu_vmx *vmx = to_vmx(vcpu); 259 struct loaded_vmcs *prev; 260 int cpu; 261 262 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) 263 return; 264 265 cpu = get_cpu(); 266 prev = vmx->loaded_vmcs; 267 vmx->loaded_vmcs = vmcs; 268 vmx_vcpu_load_vmcs(vcpu, cpu, prev); 269 vmx_sync_vmcs_host_state(vmx, prev); 270 put_cpu(); 271 272 vmx_register_cache_reset(vcpu); 273 } 274 275 /* 276 * Free whatever needs to be freed from vmx->nested when L1 goes down, or 277 * just stops using VMX. 278 */ 279 static void free_nested(struct kvm_vcpu *vcpu) 280 { 281 struct vcpu_vmx *vmx = to_vmx(vcpu); 282 283 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) 284 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 285 286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 287 return; 288 289 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 290 291 vmx->nested.vmxon = false; 292 vmx->nested.smm.vmxon = false; 293 free_vpid(vmx->nested.vpid02); 294 vmx->nested.posted_intr_nv = -1; 295 vmx->nested.current_vmptr = -1ull; 296 if (enable_shadow_vmcs) { 297 vmx_disable_shadow_vmcs(vmx); 298 vmcs_clear(vmx->vmcs01.shadow_vmcs); 299 free_vmcs(vmx->vmcs01.shadow_vmcs); 300 vmx->vmcs01.shadow_vmcs = NULL; 301 } 302 kfree(vmx->nested.cached_vmcs12); 303 vmx->nested.cached_vmcs12 = NULL; 304 kfree(vmx->nested.cached_shadow_vmcs12); 305 vmx->nested.cached_shadow_vmcs12 = NULL; 306 /* Unpin physical memory we referred to in the vmcs02 */ 307 if (vmx->nested.apic_access_page) { 308 kvm_release_page_clean(vmx->nested.apic_access_page); 309 vmx->nested.apic_access_page = NULL; 310 } 311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 313 vmx->nested.pi_desc = NULL; 314 315 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 316 317 nested_release_evmcs(vcpu); 318 319 free_loaded_vmcs(&vmx->nested.vmcs02); 320 } 321 322 /* 323 * Ensure that the current vmcs of the logical processor is the 324 * vmcs01 of the vcpu before calling free_nested(). 
325 */ 326 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) 327 { 328 vcpu_load(vcpu); 329 vmx_leave_nested(vcpu); 330 vcpu_put(vcpu); 331 } 332 333 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 334 struct x86_exception *fault) 335 { 336 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 337 struct vcpu_vmx *vmx = to_vmx(vcpu); 338 u32 vm_exit_reason; 339 unsigned long exit_qualification = vcpu->arch.exit_qualification; 340 341 if (vmx->nested.pml_full) { 342 vm_exit_reason = EXIT_REASON_PML_FULL; 343 vmx->nested.pml_full = false; 344 exit_qualification &= INTR_INFO_UNBLOCK_NMI; 345 } else if (fault->error_code & PFERR_RSVD_MASK) 346 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 347 else 348 vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 349 350 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); 351 vmcs12->guest_physical_address = fault->address; 352 } 353 354 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) 355 { 356 WARN_ON(mmu_is_nested(vcpu)); 357 358 vcpu->arch.mmu = &vcpu->arch.guest_mmu; 359 kvm_init_shadow_ept_mmu(vcpu, 360 to_vmx(vcpu)->nested.msrs.ept_caps & 361 VMX_EPT_EXECUTE_ONLY_BIT, 362 nested_ept_ad_enabled(vcpu), 363 nested_ept_get_eptp(vcpu)); 364 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp; 365 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; 366 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; 367 368 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; 369 } 370 371 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) 372 { 373 vcpu->arch.mmu = &vcpu->arch.root_mmu; 374 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 375 } 376 377 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 378 u16 error_code) 379 { 380 bool inequality, bit; 381 382 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; 383 inequality = 384 (error_code & vmcs12->page_fault_error_code_mask) != 385 vmcs12->page_fault_error_code_match; 386 return inequality ^ bit; 387 } 388 389 390 /* 391 * KVM wants to inject page-faults which it got to the guest. This function 392 * checks whether in a nested guest, we need to inject them to L1 or L2. 393 */ 394 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) 395 { 396 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 397 unsigned int nr = vcpu->arch.exception.nr; 398 bool has_payload = vcpu->arch.exception.has_payload; 399 unsigned long payload = vcpu->arch.exception.payload; 400 401 if (nr == PF_VECTOR) { 402 if (vcpu->arch.exception.nested_apf) { 403 *exit_qual = vcpu->arch.apf.nested_apf_token; 404 return 1; 405 } 406 if (nested_vmx_is_page_fault_vmexit(vmcs12, 407 vcpu->arch.exception.error_code)) { 408 *exit_qual = has_payload ? 
payload : vcpu->arch.cr2; 409 return 1; 410 } 411 } else if (vmcs12->exception_bitmap & (1u << nr)) { 412 if (nr == DB_VECTOR) { 413 if (!has_payload) { 414 payload = vcpu->arch.dr6; 415 payload &= ~DR6_BT; 416 payload ^= DR6_ACTIVE_LOW; 417 } 418 *exit_qual = payload; 419 } else 420 *exit_qual = 0; 421 return 1; 422 } 423 424 return 0; 425 } 426 427 428 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, 429 struct x86_exception *fault) 430 { 431 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 432 433 WARN_ON(!is_guest_mode(vcpu)); 434 435 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && 436 !to_vmx(vcpu)->nested.nested_run_pending) { 437 vmcs12->vm_exit_intr_error_code = fault->error_code; 438 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 439 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | 440 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, 441 fault->address); 442 } else { 443 kvm_inject_page_fault(vcpu, fault); 444 } 445 } 446 447 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, 448 struct vmcs12 *vmcs12) 449 { 450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 451 return 0; 452 453 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || 454 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) 455 return -EINVAL; 456 457 return 0; 458 } 459 460 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, 461 struct vmcs12 *vmcs12) 462 { 463 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 464 return 0; 465 466 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) 467 return -EINVAL; 468 469 return 0; 470 } 471 472 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, 473 struct vmcs12 *vmcs12) 474 { 475 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 476 return 0; 477 478 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) 479 return -EINVAL; 480 481 return 0; 482 } 483 484 /* 485 * Check if MSR is intercepted for L01 MSR bitmap. 486 */ 487 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) 488 { 489 unsigned long *msr_bitmap; 490 int f = sizeof(unsigned long); 491 492 if (!cpu_has_vmx_msr_bitmap()) 493 return true; 494 495 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 496 497 if (msr <= 0x1fff) { 498 return !!test_bit(msr, msr_bitmap + 0x800 / f); 499 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 500 msr &= 0x1fff; 501 return !!test_bit(msr, msr_bitmap + 0xc00 / f); 502 } 503 504 return true; 505 } 506 507 /* 508 * If a msr is allowed by L0, we should check whether it is allowed by L1. 509 * The corresponding bit will be cleared unless both of L0 and L1 allow it. 510 */ 511 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, 512 unsigned long *msr_bitmap_nested, 513 u32 msr, int type) 514 { 515 int f = sizeof(unsigned long); 516 517 /* 518 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 519 * have the write-low and read-high bitmap offsets the wrong way round. 520 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
521 */ 522 if (msr <= 0x1fff) { 523 if (type & MSR_TYPE_R && 524 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) 525 /* read-low */ 526 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); 527 528 if (type & MSR_TYPE_W && 529 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) 530 /* write-low */ 531 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); 532 533 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 534 msr &= 0x1fff; 535 if (type & MSR_TYPE_R && 536 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) 537 /* read-high */ 538 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); 539 540 if (type & MSR_TYPE_W && 541 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) 542 /* write-high */ 543 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); 544 545 } 546 } 547 548 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) 549 { 550 int msr; 551 552 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 553 unsigned word = msr / BITS_PER_LONG; 554 555 msr_bitmap[word] = ~0; 556 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 557 } 558 } 559 560 /* 561 * Merge L0's and L1's MSR bitmap, return false to indicate that 562 * we do not use the hardware. 563 */ 564 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 565 struct vmcs12 *vmcs12) 566 { 567 int msr; 568 unsigned long *msr_bitmap_l1; 569 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; 570 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; 571 572 /* Nothing to do if the MSR bitmap is not in use. */ 573 if (!cpu_has_vmx_msr_bitmap() || 574 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 575 return false; 576 577 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) 578 return false; 579 580 msr_bitmap_l1 = (unsigned long *)map->hva; 581 582 /* 583 * To keep the control flow simple, pay eight 8-byte writes (sixteen 584 * 4-byte writes on 32-bit systems) up front to enable intercepts for 585 * the x2APIC MSR range and selectively disable them below. 586 */ 587 enable_x2apic_msr_intercepts(msr_bitmap_l0); 588 589 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 590 if (nested_cpu_has_apic_reg_virt(vmcs12)) { 591 /* 592 * L0 need not intercept reads for MSRs between 0x800 593 * and 0x8ff, it just lets the processor take the value 594 * from the virtual-APIC page; take those 256 bits 595 * directly from the L1 bitmap. 596 */ 597 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 598 unsigned word = msr / BITS_PER_LONG; 599 600 msr_bitmap_l0[word] = msr_bitmap_l1[word]; 601 } 602 } 603 604 nested_vmx_disable_intercept_for_msr( 605 msr_bitmap_l1, msr_bitmap_l0, 606 X2APIC_MSR(APIC_TASKPRI), 607 MSR_TYPE_R | MSR_TYPE_W); 608 609 if (nested_cpu_has_vid(vmcs12)) { 610 nested_vmx_disable_intercept_for_msr( 611 msr_bitmap_l1, msr_bitmap_l0, 612 X2APIC_MSR(APIC_EOI), 613 MSR_TYPE_W); 614 nested_vmx_disable_intercept_for_msr( 615 msr_bitmap_l1, msr_bitmap_l0, 616 X2APIC_MSR(APIC_SELF_IPI), 617 MSR_TYPE_W); 618 } 619 } 620 621 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */ 622 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 623 MSR_FS_BASE, MSR_TYPE_RW); 624 625 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 626 MSR_GS_BASE, MSR_TYPE_RW); 627 628 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 629 MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 630 631 /* 632 * Checking the L0->L1 bitmap is trying to verify two things: 633 * 634 * 1. L0 gave a permission to L1 to actually passthrough the MSR. 
This 635 * ensures that we do not accidentally generate an L02 MSR bitmap 636 * from the L12 MSR bitmap that is too permissive. 637 * 2. That L1 or L2s have actually used the MSR. This avoids 638 * unnecessarily merging of the bitmap if the MSR is unused. This 639 * works properly because we only update the L01 MSR bitmap lazily. 640 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only 641 * updated to reflect this when L1 (or its L2s) actually write to 642 * the MSR. 643 */ 644 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL)) 645 nested_vmx_disable_intercept_for_msr( 646 msr_bitmap_l1, msr_bitmap_l0, 647 MSR_IA32_SPEC_CTRL, 648 MSR_TYPE_R | MSR_TYPE_W); 649 650 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD)) 651 nested_vmx_disable_intercept_for_msr( 652 msr_bitmap_l1, msr_bitmap_l0, 653 MSR_IA32_PRED_CMD, 654 MSR_TYPE_W); 655 656 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false); 657 658 return true; 659 } 660 661 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, 662 struct vmcs12 *vmcs12) 663 { 664 struct kvm_host_map map; 665 struct vmcs12 *shadow; 666 667 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 668 vmcs12->vmcs_link_pointer == -1ull) 669 return; 670 671 shadow = get_shadow_vmcs12(vcpu); 672 673 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) 674 return; 675 676 memcpy(shadow, map.hva, VMCS12_SIZE); 677 kvm_vcpu_unmap(vcpu, &map, false); 678 } 679 680 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, 681 struct vmcs12 *vmcs12) 682 { 683 struct vcpu_vmx *vmx = to_vmx(vcpu); 684 685 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 686 vmcs12->vmcs_link_pointer == -1ull) 687 return; 688 689 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, 690 get_shadow_vmcs12(vcpu), VMCS12_SIZE); 691 } 692 693 /* 694 * In nested virtualization, check if L1 has set 695 * VM_EXIT_ACK_INTR_ON_EXIT 696 */ 697 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) 698 { 699 return get_vmcs12(vcpu)->vm_exit_controls & 700 VM_EXIT_ACK_INTR_ON_EXIT; 701 } 702 703 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, 704 struct vmcs12 *vmcs12) 705 { 706 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 707 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) 708 return -EINVAL; 709 else 710 return 0; 711 } 712 713 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 714 struct vmcs12 *vmcs12) 715 { 716 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && 717 !nested_cpu_has_apic_reg_virt(vmcs12) && 718 !nested_cpu_has_vid(vmcs12) && 719 !nested_cpu_has_posted_intr(vmcs12)) 720 return 0; 721 722 /* 723 * If virtualize x2apic mode is enabled, 724 * virtualize apic access must be disabled. 725 */ 726 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) && 727 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))) 728 return -EINVAL; 729 730 /* 731 * If virtual interrupt delivery is enabled, 732 * we must exit on external interrupts. 733 */ 734 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu))) 735 return -EINVAL; 736 737 /* 738 * bits 15:8 should be zero in posted_intr_nv, 739 * the descriptor address has been already checked 740 * in nested_get_vmcs12_pages. 741 * 742 * bits 5:0 of posted_intr_desc_addr should be zero. 
743 */ 744 if (nested_cpu_has_posted_intr(vmcs12) && 745 (CC(!nested_cpu_has_vid(vmcs12)) || 746 CC(!nested_exit_intr_ack_set(vcpu)) || 747 CC((vmcs12->posted_intr_nv & 0xff00)) || 748 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) 749 return -EINVAL; 750 751 /* tpr shadow is needed by all apicv features. */ 752 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))) 753 return -EINVAL; 754 755 return 0; 756 } 757 758 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, 759 u32 count, u64 addr) 760 { 761 if (count == 0) 762 return 0; 763 764 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) || 765 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) 766 return -EINVAL; 767 768 return 0; 769 } 770 771 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, 772 struct vmcs12 *vmcs12) 773 { 774 if (CC(nested_vmx_check_msr_switch(vcpu, 775 vmcs12->vm_exit_msr_load_count, 776 vmcs12->vm_exit_msr_load_addr)) || 777 CC(nested_vmx_check_msr_switch(vcpu, 778 vmcs12->vm_exit_msr_store_count, 779 vmcs12->vm_exit_msr_store_addr))) 780 return -EINVAL; 781 782 return 0; 783 } 784 785 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, 786 struct vmcs12 *vmcs12) 787 { 788 if (CC(nested_vmx_check_msr_switch(vcpu, 789 vmcs12->vm_entry_msr_load_count, 790 vmcs12->vm_entry_msr_load_addr))) 791 return -EINVAL; 792 793 return 0; 794 } 795 796 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, 797 struct vmcs12 *vmcs12) 798 { 799 if (!nested_cpu_has_pml(vmcs12)) 800 return 0; 801 802 if (CC(!nested_cpu_has_ept(vmcs12)) || 803 CC(!page_address_valid(vcpu, vmcs12->pml_address))) 804 return -EINVAL; 805 806 return 0; 807 } 808 809 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, 810 struct vmcs12 *vmcs12) 811 { 812 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && 813 !nested_cpu_has_ept(vmcs12))) 814 return -EINVAL; 815 return 0; 816 } 817 818 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, 819 struct vmcs12 *vmcs12) 820 { 821 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && 822 !nested_cpu_has_ept(vmcs12))) 823 return -EINVAL; 824 return 0; 825 } 826 827 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, 828 struct vmcs12 *vmcs12) 829 { 830 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 831 return 0; 832 833 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || 834 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) 835 return -EINVAL; 836 837 return 0; 838 } 839 840 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, 841 struct vmx_msr_entry *e) 842 { 843 /* x2APIC MSR accesses are not allowed */ 844 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) 845 return -EINVAL; 846 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ 847 CC(e->index == MSR_IA32_UCODE_REV)) 848 return -EINVAL; 849 if (CC(e->reserved != 0)) 850 return -EINVAL; 851 return 0; 852 } 853 854 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, 855 struct vmx_msr_entry *e) 856 { 857 if (CC(e->index == MSR_FS_BASE) || 858 CC(e->index == MSR_GS_BASE) || 859 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ 860 nested_vmx_msr_check_common(vcpu, e)) 861 return -EINVAL; 862 return 0; 863 } 864 865 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, 866 struct vmx_msr_entry *e) 867 { 868 if (CC(e->index == MSR_IA32_SMBASE) 
|| /* SMM is not supported */ 869 nested_vmx_msr_check_common(vcpu, e)) 870 return -EINVAL; 871 return 0; 872 } 873 874 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu) 875 { 876 struct vcpu_vmx *vmx = to_vmx(vcpu); 877 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 878 vmx->nested.msrs.misc_high); 879 880 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER; 881 } 882 883 /* 884 * Load guest's/host's msr at nested entry/exit. 885 * return 0 for success, entry index for failure. 886 * 887 * One of the failure modes for MSR load/store is when a list exceeds the 888 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch 889 * as possible, process all valid entries before failing rather than precheck 890 * for a capacity violation. 891 */ 892 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 893 { 894 u32 i; 895 struct vmx_msr_entry e; 896 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 897 898 for (i = 0; i < count; i++) { 899 if (unlikely(i >= max_msr_list_size)) 900 goto fail; 901 902 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), 903 &e, sizeof(e))) { 904 pr_debug_ratelimited( 905 "%s cannot read MSR entry (%u, 0x%08llx)\n", 906 __func__, i, gpa + i * sizeof(e)); 907 goto fail; 908 } 909 if (nested_vmx_load_msr_check(vcpu, &e)) { 910 pr_debug_ratelimited( 911 "%s check failed (%u, 0x%x, 0x%x)\n", 912 __func__, i, e.index, e.reserved); 913 goto fail; 914 } 915 if (kvm_set_msr(vcpu, e.index, e.value)) { 916 pr_debug_ratelimited( 917 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 918 __func__, i, e.index, e.value); 919 goto fail; 920 } 921 } 922 return 0; 923 fail: 924 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */ 925 return i + 1; 926 } 927 928 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, 929 u32 msr_index, 930 u64 *data) 931 { 932 struct vcpu_vmx *vmx = to_vmx(vcpu); 933 934 /* 935 * If the L0 hypervisor stored a more accurate value for the TSC that 936 * does not include the time taken for emulation of the L2->L1 937 * VM-exit in L0, use the more accurate value. 
938 */ 939 if (msr_index == MSR_IA32_TSC) { 940 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, 941 MSR_IA32_TSC); 942 943 if (i >= 0) { 944 u64 val = vmx->msr_autostore.guest.val[i].value; 945 946 *data = kvm_read_l1_tsc(vcpu, val); 947 return true; 948 } 949 } 950 951 if (kvm_get_msr(vcpu, msr_index, data)) { 952 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, 953 msr_index); 954 return false; 955 } 956 return true; 957 } 958 959 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, 960 struct vmx_msr_entry *e) 961 { 962 if (kvm_vcpu_read_guest(vcpu, 963 gpa + i * sizeof(*e), 964 e, 2 * sizeof(u32))) { 965 pr_debug_ratelimited( 966 "%s cannot read MSR entry (%u, 0x%08llx)\n", 967 __func__, i, gpa + i * sizeof(*e)); 968 return false; 969 } 970 if (nested_vmx_store_msr_check(vcpu, e)) { 971 pr_debug_ratelimited( 972 "%s check failed (%u, 0x%x, 0x%x)\n", 973 __func__, i, e->index, e->reserved); 974 return false; 975 } 976 return true; 977 } 978 979 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 980 { 981 u64 data; 982 u32 i; 983 struct vmx_msr_entry e; 984 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 985 986 for (i = 0; i < count; i++) { 987 if (unlikely(i >= max_msr_list_size)) 988 return -EINVAL; 989 990 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 991 return -EINVAL; 992 993 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) 994 return -EINVAL; 995 996 if (kvm_vcpu_write_guest(vcpu, 997 gpa + i * sizeof(e) + 998 offsetof(struct vmx_msr_entry, value), 999 &data, sizeof(data))) { 1000 pr_debug_ratelimited( 1001 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 1002 __func__, i, e.index, data); 1003 return -EINVAL; 1004 } 1005 } 1006 return 0; 1007 } 1008 1009 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index) 1010 { 1011 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1012 u32 count = vmcs12->vm_exit_msr_store_count; 1013 u64 gpa = vmcs12->vm_exit_msr_store_addr; 1014 struct vmx_msr_entry e; 1015 u32 i; 1016 1017 for (i = 0; i < count; i++) { 1018 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 1019 return false; 1020 1021 if (e.index == msr_index) 1022 return true; 1023 } 1024 return false; 1025 } 1026 1027 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, 1028 u32 msr_index) 1029 { 1030 struct vcpu_vmx *vmx = to_vmx(vcpu); 1031 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; 1032 bool in_vmcs12_store_list; 1033 int msr_autostore_slot; 1034 bool in_autostore_list; 1035 int last; 1036 1037 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index); 1038 in_autostore_list = msr_autostore_slot >= 0; 1039 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); 1040 1041 if (in_vmcs12_store_list && !in_autostore_list) { 1042 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) { 1043 /* 1044 * Emulated VMEntry does not fail here. Instead a less 1045 * accurate value will be returned by 1046 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() 1047 * instead of reading the value from the vmcs02 VMExit 1048 * MSR-store area. 1049 */ 1050 pr_warn_ratelimited( 1051 "Not enough msr entries in msr_autostore. 
Can't add msr %x\n", 1052 msr_index); 1053 return; 1054 } 1055 last = autostore->nr++; 1056 autostore->val[last].index = msr_index; 1057 } else if (!in_vmcs12_store_list && in_autostore_list) { 1058 last = --autostore->nr; 1059 autostore->val[msr_autostore_slot] = autostore->val[last]; 1060 } 1061 } 1062 1063 /* 1064 * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit. 1065 * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't 1066 * enable VPID for L2 (implying it expects a TLB flush on VMX transitions). 1067 * Here's why. 1068 * 1069 * If EPT is enabled by L0 a sync is never needed: 1070 * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there 1071 * cannot be unsync'd SPTEs for either L1 or L2. 1072 * 1073 * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter 1074 * VM-Enter as VM-Enter isn't required to invalidate guest-physical mappings 1075 * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush 1076 * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't 1077 * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit. 1078 * 1079 * If EPT is disabled by L0: 1080 * - if VPID is enabled by L1 (for L2), the situation is similar to when L1 1081 * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't 1082 * required to invalidate linear mappings (EPT is disabled so there are 1083 * no combined or guest-physical mappings), i.e. L1 can't rely on the 1084 * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1). 1085 * 1086 * - however if VPID is disabled by L1, then a sync is needed as L1 expects all 1087 * linear mappings (EPT is disabled so there are no combined or guest-physical 1088 * mappings) to be invalidated on both VM-Enter and VM-Exit. 1089 * 1090 * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which 1091 * additionally checks that L2 has been assigned a VPID (when EPT is disabled). 1092 * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect 1093 * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2 1094 * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has 1095 * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1 1096 * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't 1097 * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush 1098 * stale TLB entries, at which point L0 will sync L2's MMU. 1099 */ 1100 static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu) 1101 { 1102 return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu)); 1103 } 1104 1105 /* 1106 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are 1107 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected 1108 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to 1109 * @entry_failure_code. 1110 */ 1111 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, 1112 enum vm_entry_failure_code *entry_failure_code) 1113 { 1114 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { 1115 *entry_failure_code = ENTRY_FAIL_DEFAULT; 1116 return -EINVAL; 1117 } 1118 1119 /* 1120 * If PAE paging and EPT are both on, CR3 is not used by the CPU and 1121 * must not be dereferenced. 
1122 */ 1123 if (!nested_ept && is_pae_paging(vcpu) && 1124 (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) { 1125 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) { 1126 *entry_failure_code = ENTRY_FAIL_PDPTE; 1127 return -EINVAL; 1128 } 1129 } 1130 1131 /* 1132 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB 1133 * flushes are handled by nested_vmx_transition_tlb_flush(). See 1134 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync. 1135 */ 1136 if (!nested_ept) 1137 kvm_mmu_new_pgd(vcpu, cr3, true, 1138 !nested_vmx_transition_mmu_sync(vcpu)); 1139 1140 vcpu->arch.cr3 = cr3; 1141 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 1142 1143 kvm_init_mmu(vcpu, false); 1144 1145 return 0; 1146 } 1147 1148 /* 1149 * Returns if KVM is able to config CPU to tag TLB entries 1150 * populated by L2 differently than TLB entries populated 1151 * by L1. 1152 * 1153 * If L0 uses EPT, L1 and L2 run with different EPTP because 1154 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries 1155 * are tagged with different EPTP. 1156 * 1157 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged 1158 * with different VPID (L1 entries are tagged with vmx->vpid 1159 * while L2 entries are tagged with vmx->nested.vpid02). 1160 */ 1161 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) 1162 { 1163 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1164 1165 return enable_ept || 1166 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); 1167 } 1168 1169 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, 1170 struct vmcs12 *vmcs12, 1171 bool is_vmenter) 1172 { 1173 struct vcpu_vmx *vmx = to_vmx(vcpu); 1174 1175 /* 1176 * If VPID is disabled, linear and combined mappings are flushed on 1177 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for 1178 * their associated EPTP. 1179 */ 1180 if (!enable_vpid) 1181 return; 1182 1183 /* 1184 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings 1185 * for *all* contexts to be flushed on VM-Enter/VM-Exit. 1186 * 1187 * If VPID is enabled and used by vmc12, but L2 does not have a unique 1188 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate 1189 * a VPID for L2, flush the current context as the effective ASID is 1190 * common to both L1 and L2. 1191 * 1192 * Defer the flush so that it runs after vmcs02.EPTP has been set by 1193 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid 1194 * redundant flushes further down the nested pipeline. 1195 * 1196 * If a TLB flush isn't required due to any of the above, and vpid12 is 1197 * changing then the new "virtual" VPID (vpid12) will reuse the same 1198 * "real" VPID (vpid02), and so needs to be sync'd. There is no direct 1199 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for 1200 * all nested vCPUs. 
1201 */ 1202 if (!nested_cpu_has_vpid(vmcs12)) { 1203 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1204 } else if (!nested_has_guest_tlb_tag(vcpu)) { 1205 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1206 } else if (is_vmenter && 1207 vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 1208 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 1209 vpid_sync_context(nested_get_vpid02(vcpu)); 1210 } 1211 } 1212 1213 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) 1214 { 1215 superset &= mask; 1216 subset &= mask; 1217 1218 return (superset | subset) == superset; 1219 } 1220 1221 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) 1222 { 1223 const u64 feature_and_reserved = 1224 /* feature (except bit 48; see below) */ 1225 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | 1226 /* reserved */ 1227 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); 1228 u64 vmx_basic = vmx->nested.msrs.basic; 1229 1230 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) 1231 return -EINVAL; 1232 1233 /* 1234 * KVM does not emulate a version of VMX that constrains physical 1235 * addresses of VMX structures (e.g. VMCS) to 32-bits. 1236 */ 1237 if (data & BIT_ULL(48)) 1238 return -EINVAL; 1239 1240 if (vmx_basic_vmcs_revision_id(vmx_basic) != 1241 vmx_basic_vmcs_revision_id(data)) 1242 return -EINVAL; 1243 1244 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) 1245 return -EINVAL; 1246 1247 vmx->nested.msrs.basic = data; 1248 return 0; 1249 } 1250 1251 static int 1252 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1253 { 1254 u64 supported; 1255 u32 *lowp, *highp; 1256 1257 switch (msr_index) { 1258 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1259 lowp = &vmx->nested.msrs.pinbased_ctls_low; 1260 highp = &vmx->nested.msrs.pinbased_ctls_high; 1261 break; 1262 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1263 lowp = &vmx->nested.msrs.procbased_ctls_low; 1264 highp = &vmx->nested.msrs.procbased_ctls_high; 1265 break; 1266 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1267 lowp = &vmx->nested.msrs.exit_ctls_low; 1268 highp = &vmx->nested.msrs.exit_ctls_high; 1269 break; 1270 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1271 lowp = &vmx->nested.msrs.entry_ctls_low; 1272 highp = &vmx->nested.msrs.entry_ctls_high; 1273 break; 1274 case MSR_IA32_VMX_PROCBASED_CTLS2: 1275 lowp = &vmx->nested.msrs.secondary_ctls_low; 1276 highp = &vmx->nested.msrs.secondary_ctls_high; 1277 break; 1278 default: 1279 BUG(); 1280 } 1281 1282 supported = vmx_control_msr(*lowp, *highp); 1283 1284 /* Check must-be-1 bits are still 1. */ 1285 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) 1286 return -EINVAL; 1287 1288 /* Check must-be-0 bits are still 0. 
*/ 1289 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) 1290 return -EINVAL; 1291 1292 *lowp = data; 1293 *highp = data >> 32; 1294 return 0; 1295 } 1296 1297 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) 1298 { 1299 const u64 feature_and_reserved_bits = 1300 /* feature */ 1301 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | 1302 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | 1303 /* reserved */ 1304 GENMASK_ULL(13, 9) | BIT_ULL(31); 1305 u64 vmx_misc; 1306 1307 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 1308 vmx->nested.msrs.misc_high); 1309 1310 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) 1311 return -EINVAL; 1312 1313 if ((vmx->nested.msrs.pinbased_ctls_high & 1314 PIN_BASED_VMX_PREEMPTION_TIMER) && 1315 vmx_misc_preemption_timer_rate(data) != 1316 vmx_misc_preemption_timer_rate(vmx_misc)) 1317 return -EINVAL; 1318 1319 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) 1320 return -EINVAL; 1321 1322 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) 1323 return -EINVAL; 1324 1325 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) 1326 return -EINVAL; 1327 1328 vmx->nested.msrs.misc_low = data; 1329 vmx->nested.msrs.misc_high = data >> 32; 1330 1331 return 0; 1332 } 1333 1334 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) 1335 { 1336 u64 vmx_ept_vpid_cap; 1337 1338 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, 1339 vmx->nested.msrs.vpid_caps); 1340 1341 /* Every bit is either reserved or a feature bit. */ 1342 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) 1343 return -EINVAL; 1344 1345 vmx->nested.msrs.ept_caps = data; 1346 vmx->nested.msrs.vpid_caps = data >> 32; 1347 return 0; 1348 } 1349 1350 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1351 { 1352 u64 *msr; 1353 1354 switch (msr_index) { 1355 case MSR_IA32_VMX_CR0_FIXED0: 1356 msr = &vmx->nested.msrs.cr0_fixed0; 1357 break; 1358 case MSR_IA32_VMX_CR4_FIXED0: 1359 msr = &vmx->nested.msrs.cr4_fixed0; 1360 break; 1361 default: 1362 BUG(); 1363 } 1364 1365 /* 1366 * 1 bits (which indicates bits which "must-be-1" during VMX operation) 1367 * must be 1 in the restored value. 1368 */ 1369 if (!is_bitwise_subset(data, *msr, -1ULL)) 1370 return -EINVAL; 1371 1372 *msr = data; 1373 return 0; 1374 } 1375 1376 /* 1377 * Called when userspace is restoring VMX MSRs. 1378 * 1379 * Returns 0 on success, non-0 otherwise. 1380 */ 1381 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 1382 { 1383 struct vcpu_vmx *vmx = to_vmx(vcpu); 1384 1385 /* 1386 * Don't allow changes to the VMX capability MSRs while the vCPU 1387 * is in VMX operation. 1388 */ 1389 if (vmx->nested.vmxon) 1390 return -EBUSY; 1391 1392 switch (msr_index) { 1393 case MSR_IA32_VMX_BASIC: 1394 return vmx_restore_vmx_basic(vmx, data); 1395 case MSR_IA32_VMX_PINBASED_CTLS: 1396 case MSR_IA32_VMX_PROCBASED_CTLS: 1397 case MSR_IA32_VMX_EXIT_CTLS: 1398 case MSR_IA32_VMX_ENTRY_CTLS: 1399 /* 1400 * The "non-true" VMX capability MSRs are generated from the 1401 * "true" MSRs, so we do not support restoring them directly. 1402 * 1403 * If userspace wants to emulate VMX_BASIC[55]=0, userspace 1404 * should restore the "true" MSRs with the must-be-1 bits 1405 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND 1406 * DEFAULT SETTINGS". 
1407 */ 1408 return -EINVAL; 1409 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1410 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1411 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1412 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1413 case MSR_IA32_VMX_PROCBASED_CTLS2: 1414 return vmx_restore_control_msr(vmx, msr_index, data); 1415 case MSR_IA32_VMX_MISC: 1416 return vmx_restore_vmx_misc(vmx, data); 1417 case MSR_IA32_VMX_CR0_FIXED0: 1418 case MSR_IA32_VMX_CR4_FIXED0: 1419 return vmx_restore_fixed0_msr(vmx, msr_index, data); 1420 case MSR_IA32_VMX_CR0_FIXED1: 1421 case MSR_IA32_VMX_CR4_FIXED1: 1422 /* 1423 * These MSRs are generated based on the vCPU's CPUID, so we 1424 * do not support restoring them directly. 1425 */ 1426 return -EINVAL; 1427 case MSR_IA32_VMX_EPT_VPID_CAP: 1428 return vmx_restore_vmx_ept_vpid_cap(vmx, data); 1429 case MSR_IA32_VMX_VMCS_ENUM: 1430 vmx->nested.msrs.vmcs_enum = data; 1431 return 0; 1432 case MSR_IA32_VMX_VMFUNC: 1433 if (data & ~vmx->nested.msrs.vmfunc_controls) 1434 return -EINVAL; 1435 vmx->nested.msrs.vmfunc_controls = data; 1436 return 0; 1437 default: 1438 /* 1439 * The rest of the VMX capability MSRs do not support restore. 1440 */ 1441 return -EINVAL; 1442 } 1443 } 1444 1445 /* Returns 0 on success, non-0 otherwise. */ 1446 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) 1447 { 1448 switch (msr_index) { 1449 case MSR_IA32_VMX_BASIC: 1450 *pdata = msrs->basic; 1451 break; 1452 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1453 case MSR_IA32_VMX_PINBASED_CTLS: 1454 *pdata = vmx_control_msr( 1455 msrs->pinbased_ctls_low, 1456 msrs->pinbased_ctls_high); 1457 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) 1458 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1459 break; 1460 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1461 case MSR_IA32_VMX_PROCBASED_CTLS: 1462 *pdata = vmx_control_msr( 1463 msrs->procbased_ctls_low, 1464 msrs->procbased_ctls_high); 1465 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) 1466 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1467 break; 1468 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1469 case MSR_IA32_VMX_EXIT_CTLS: 1470 *pdata = vmx_control_msr( 1471 msrs->exit_ctls_low, 1472 msrs->exit_ctls_high); 1473 if (msr_index == MSR_IA32_VMX_EXIT_CTLS) 1474 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 1475 break; 1476 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1477 case MSR_IA32_VMX_ENTRY_CTLS: 1478 *pdata = vmx_control_msr( 1479 msrs->entry_ctls_low, 1480 msrs->entry_ctls_high); 1481 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) 1482 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 1483 break; 1484 case MSR_IA32_VMX_MISC: 1485 *pdata = vmx_control_msr( 1486 msrs->misc_low, 1487 msrs->misc_high); 1488 break; 1489 case MSR_IA32_VMX_CR0_FIXED0: 1490 *pdata = msrs->cr0_fixed0; 1491 break; 1492 case MSR_IA32_VMX_CR0_FIXED1: 1493 *pdata = msrs->cr0_fixed1; 1494 break; 1495 case MSR_IA32_VMX_CR4_FIXED0: 1496 *pdata = msrs->cr4_fixed0; 1497 break; 1498 case MSR_IA32_VMX_CR4_FIXED1: 1499 *pdata = msrs->cr4_fixed1; 1500 break; 1501 case MSR_IA32_VMX_VMCS_ENUM: 1502 *pdata = msrs->vmcs_enum; 1503 break; 1504 case MSR_IA32_VMX_PROCBASED_CTLS2: 1505 *pdata = vmx_control_msr( 1506 msrs->secondary_ctls_low, 1507 msrs->secondary_ctls_high); 1508 break; 1509 case MSR_IA32_VMX_EPT_VPID_CAP: 1510 *pdata = msrs->ept_caps | 1511 ((u64)msrs->vpid_caps << 32); 1512 break; 1513 case MSR_IA32_VMX_VMFUNC: 1514 *pdata = msrs->vmfunc_controls; 1515 break; 1516 default: 1517 return 1; 1518 } 1519 1520 return 0; 1521 } 1522 1523 /* 1524 * Copy the writable VMCS shadow fields back to the VMCS12, in case they 
have 1525 * been modified by the L1 guest. Note, "writable" in this context means 1526 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of 1527 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only" 1528 * VM-exit information fields (which are actually writable if the vCPU is 1529 * configured to support "VMWRITE to any supported field in the VMCS"). 1530 */ 1531 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 1532 { 1533 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1534 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1535 struct shadow_vmcs_field field; 1536 unsigned long val; 1537 int i; 1538 1539 if (WARN_ON(!shadow_vmcs)) 1540 return; 1541 1542 preempt_disable(); 1543 1544 vmcs_load(shadow_vmcs); 1545 1546 for (i = 0; i < max_shadow_read_write_fields; i++) { 1547 field = shadow_read_write_fields[i]; 1548 val = __vmcs_readl(field.encoding); 1549 vmcs12_write_any(vmcs12, field.encoding, field.offset, val); 1550 } 1551 1552 vmcs_clear(shadow_vmcs); 1553 vmcs_load(vmx->loaded_vmcs->vmcs); 1554 1555 preempt_enable(); 1556 } 1557 1558 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 1559 { 1560 const struct shadow_vmcs_field *fields[] = { 1561 shadow_read_write_fields, 1562 shadow_read_only_fields 1563 }; 1564 const int max_fields[] = { 1565 max_shadow_read_write_fields, 1566 max_shadow_read_only_fields 1567 }; 1568 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1569 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1570 struct shadow_vmcs_field field; 1571 unsigned long val; 1572 int i, q; 1573 1574 if (WARN_ON(!shadow_vmcs)) 1575 return; 1576 1577 vmcs_load(shadow_vmcs); 1578 1579 for (q = 0; q < ARRAY_SIZE(fields); q++) { 1580 for (i = 0; i < max_fields[q]; i++) { 1581 field = fields[q][i]; 1582 val = vmcs12_read_any(vmcs12, field.encoding, 1583 field.offset); 1584 __vmcs_writel(field.encoding, val); 1585 } 1586 } 1587 1588 vmcs_clear(shadow_vmcs); 1589 vmcs_load(vmx->loaded_vmcs->vmcs); 1590 } 1591 1592 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) 1593 { 1594 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1595 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1596 1597 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 1598 vmcs12->tpr_threshold = evmcs->tpr_threshold; 1599 vmcs12->guest_rip = evmcs->guest_rip; 1600 1601 if (unlikely(!(evmcs->hv_clean_fields & 1602 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { 1603 vmcs12->guest_rsp = evmcs->guest_rsp; 1604 vmcs12->guest_rflags = evmcs->guest_rflags; 1605 vmcs12->guest_interruptibility_info = 1606 evmcs->guest_interruptibility_info; 1607 } 1608 1609 if (unlikely(!(evmcs->hv_clean_fields & 1610 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { 1611 vmcs12->cpu_based_vm_exec_control = 1612 evmcs->cpu_based_vm_exec_control; 1613 } 1614 1615 if (unlikely(!(evmcs->hv_clean_fields & 1616 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) { 1617 vmcs12->exception_bitmap = evmcs->exception_bitmap; 1618 } 1619 1620 if (unlikely(!(evmcs->hv_clean_fields & 1621 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { 1622 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; 1623 } 1624 1625 if (unlikely(!(evmcs->hv_clean_fields & 1626 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { 1627 vmcs12->vm_entry_intr_info_field = 1628 evmcs->vm_entry_intr_info_field; 1629 vmcs12->vm_entry_exception_error_code = 1630 evmcs->vm_entry_exception_error_code; 1631 vmcs12->vm_entry_instruction_len = 1632 evmcs->vm_entry_instruction_len; 1633 } 1634 1635 if (unlikely(!(evmcs->hv_clean_fields & 
1636 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { 1637 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; 1638 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; 1639 vmcs12->host_cr0 = evmcs->host_cr0; 1640 vmcs12->host_cr3 = evmcs->host_cr3; 1641 vmcs12->host_cr4 = evmcs->host_cr4; 1642 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; 1643 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; 1644 vmcs12->host_rip = evmcs->host_rip; 1645 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; 1646 vmcs12->host_es_selector = evmcs->host_es_selector; 1647 vmcs12->host_cs_selector = evmcs->host_cs_selector; 1648 vmcs12->host_ss_selector = evmcs->host_ss_selector; 1649 vmcs12->host_ds_selector = evmcs->host_ds_selector; 1650 vmcs12->host_fs_selector = evmcs->host_fs_selector; 1651 vmcs12->host_gs_selector = evmcs->host_gs_selector; 1652 vmcs12->host_tr_selector = evmcs->host_tr_selector; 1653 } 1654 1655 if (unlikely(!(evmcs->hv_clean_fields & 1656 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) { 1657 vmcs12->pin_based_vm_exec_control = 1658 evmcs->pin_based_vm_exec_control; 1659 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; 1660 vmcs12->secondary_vm_exec_control = 1661 evmcs->secondary_vm_exec_control; 1662 } 1663 1664 if (unlikely(!(evmcs->hv_clean_fields & 1665 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { 1666 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; 1667 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; 1668 } 1669 1670 if (unlikely(!(evmcs->hv_clean_fields & 1671 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { 1672 vmcs12->msr_bitmap = evmcs->msr_bitmap; 1673 } 1674 1675 if (unlikely(!(evmcs->hv_clean_fields & 1676 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { 1677 vmcs12->guest_es_base = evmcs->guest_es_base; 1678 vmcs12->guest_cs_base = evmcs->guest_cs_base; 1679 vmcs12->guest_ss_base = evmcs->guest_ss_base; 1680 vmcs12->guest_ds_base = evmcs->guest_ds_base; 1681 vmcs12->guest_fs_base = evmcs->guest_fs_base; 1682 vmcs12->guest_gs_base = evmcs->guest_gs_base; 1683 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; 1684 vmcs12->guest_tr_base = evmcs->guest_tr_base; 1685 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; 1686 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; 1687 vmcs12->guest_es_limit = evmcs->guest_es_limit; 1688 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; 1689 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; 1690 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; 1691 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; 1692 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; 1693 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; 1694 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; 1695 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; 1696 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; 1697 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; 1698 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1699 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1700 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1701 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1702 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1703 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1704 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1705 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1706 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1707 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1708 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1709 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 
1710 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; 1711 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1712 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1713 } 1714 1715 if (unlikely(!(evmcs->hv_clean_fields & 1716 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1717 vmcs12->tsc_offset = evmcs->tsc_offset; 1718 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; 1719 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1720 } 1721 1722 if (unlikely(!(evmcs->hv_clean_fields & 1723 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1724 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1725 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1726 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1727 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1728 vmcs12->guest_cr0 = evmcs->guest_cr0; 1729 vmcs12->guest_cr3 = evmcs->guest_cr3; 1730 vmcs12->guest_cr4 = evmcs->guest_cr4; 1731 vmcs12->guest_dr7 = evmcs->guest_dr7; 1732 } 1733 1734 if (unlikely(!(evmcs->hv_clean_fields & 1735 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1736 vmcs12->host_fs_base = evmcs->host_fs_base; 1737 vmcs12->host_gs_base = evmcs->host_gs_base; 1738 vmcs12->host_tr_base = evmcs->host_tr_base; 1739 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1740 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1741 vmcs12->host_rsp = evmcs->host_rsp; 1742 } 1743 1744 if (unlikely(!(evmcs->hv_clean_fields & 1745 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1746 vmcs12->ept_pointer = evmcs->ept_pointer; 1747 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1748 } 1749 1750 if (unlikely(!(evmcs->hv_clean_fields & 1751 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1752 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1753 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1754 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1755 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1756 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1757 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1758 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1759 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1760 vmcs12->guest_pending_dbg_exceptions = 1761 evmcs->guest_pending_dbg_exceptions; 1762 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1763 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1764 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1765 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1766 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1767 } 1768 1769 /* 1770 * Not used? 
1771 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1772 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1773 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1774 * vmcs12->page_fault_error_code_mask = 1775 * evmcs->page_fault_error_code_mask; 1776 * vmcs12->page_fault_error_code_match = 1777 * evmcs->page_fault_error_code_match; 1778 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1779 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1780 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1781 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1782 */ 1783 1784 /* 1785 * Read only fields: 1786 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1787 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1788 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1789 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1790 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1791 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1792 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1793 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1794 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1795 * vmcs12->exit_qualification = evmcs->exit_qualification; 1796 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1797 * 1798 * Not present in struct vmcs12: 1799 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1800 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1801 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1802 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1803 */ 1804 1805 return 0; 1806 } 1807 1808 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1809 { 1810 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1811 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1812 1813 /* 1814 * Should not be changed by KVM: 1815 * 1816 * evmcs->host_es_selector = vmcs12->host_es_selector; 1817 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1818 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1819 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1820 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1821 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1822 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1823 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1824 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1825 * evmcs->host_cr0 = vmcs12->host_cr0; 1826 * evmcs->host_cr3 = vmcs12->host_cr3; 1827 * evmcs->host_cr4 = vmcs12->host_cr4; 1828 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1829 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1830 * evmcs->host_rip = vmcs12->host_rip; 1831 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1832 * evmcs->host_fs_base = vmcs12->host_fs_base; 1833 * evmcs->host_gs_base = vmcs12->host_gs_base; 1834 * evmcs->host_tr_base = vmcs12->host_tr_base; 1835 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1836 * evmcs->host_idtr_base = vmcs12->host_idtr_base; 1837 * evmcs->host_rsp = vmcs12->host_rsp; 1838 * sync_vmcs02_to_vmcs12() doesn't read these: 1839 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1840 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1841 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1842 * evmcs->ept_pointer = vmcs12->ept_pointer; 1843 * 
evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1844 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1845 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1846 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1847 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1848 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1849 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1850 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1851 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1852 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1853 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1854 * evmcs->page_fault_error_code_mask = 1855 * vmcs12->page_fault_error_code_mask; 1856 * evmcs->page_fault_error_code_match = 1857 * vmcs12->page_fault_error_code_match; 1858 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1859 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1860 * evmcs->tsc_offset = vmcs12->tsc_offset; 1861 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1862 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1863 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1864 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1865 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1866 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1867 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1868 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1869 * 1870 * Not present in struct vmcs12: 1871 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1872 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1873 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1874 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1875 */ 1876 1877 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1878 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1879 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1880 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1881 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1882 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1883 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1884 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1885 1886 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1887 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1888 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1889 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1890 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1891 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1892 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1893 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1894 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1895 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1896 1897 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1898 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 1899 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1900 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1901 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1902 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1903 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1904 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1905 1906 evmcs->guest_es_base = vmcs12->guest_es_base; 1907 evmcs->guest_cs_base = vmcs12->guest_cs_base; 
1908 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1909 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1910 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1911 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1912 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1913 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1914 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1915 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1916 1917 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1918 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1919 1920 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1921 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1922 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1923 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1924 1925 evmcs->guest_pending_dbg_exceptions = 1926 vmcs12->guest_pending_dbg_exceptions; 1927 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1928 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1929 1930 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1931 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1932 1933 evmcs->guest_cr0 = vmcs12->guest_cr0; 1934 evmcs->guest_cr3 = vmcs12->guest_cr3; 1935 evmcs->guest_cr4 = vmcs12->guest_cr4; 1936 evmcs->guest_dr7 = vmcs12->guest_dr7; 1937 1938 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1939 1940 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1941 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1942 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1943 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1944 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1945 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1946 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1947 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1948 1949 evmcs->exit_qualification = vmcs12->exit_qualification; 1950 1951 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1952 evmcs->guest_rsp = vmcs12->guest_rsp; 1953 evmcs->guest_rflags = vmcs12->guest_rflags; 1954 1955 evmcs->guest_interruptibility_info = 1956 vmcs12->guest_interruptibility_info; 1957 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1958 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1959 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1960 evmcs->vm_entry_exception_error_code = 1961 vmcs12->vm_entry_exception_error_code; 1962 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1963 1964 evmcs->guest_rip = vmcs12->guest_rip; 1965 1966 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1967 1968 return 0; 1969 } 1970 1971 /* 1972 * This is an equivalent of the nested hypervisor executing the vmptrld 1973 * instruction. 
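 *
 * Roughly: if L1 has enabled enlightened VMCS and nested_enlightened_vmentry()
 * reports an eVMCS GPA, map that page and adopt it as the current eVMCS.  A
 * new or changed GPA releases the old mapping, validates the version field of
 * the new page and, on VMLAUNCH, wipes the cached vmcs12 so that no stale
 * state survives.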
 */
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
        struct kvm_vcpu *vcpu, bool from_launch)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        bool evmcs_gpa_changed = false;
        u64 evmcs_gpa;

        if (likely(!vmx->nested.enlightened_vmcs_enabled))
                return EVMPTRLD_DISABLED;

        if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
                return EVMPTRLD_DISABLED;

        if (unlikely(!vmx->nested.hv_evmcs ||
                     evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
                if (!vmx->nested.hv_evmcs)
                        vmx->nested.current_vmptr = -1ull;

                nested_release_evmcs(vcpu);

                if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
                                 &vmx->nested.hv_evmcs_map))
                        return EVMPTRLD_ERROR;

                vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

                /*
                 * Currently, KVM only supports eVMCS version 1
                 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
                 * that value in the first u32 field of the eVMCS, which
                 * specifies the eVMCS VersionNumber.
                 *
                 * The guest learns which eVMCS versions the host supports by
                 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace
                 * VMM is expected to set this CPUID leaf according to the
                 * value returned in vmcs_version from nested_enable_evmcs().
                 *
                 * However, it turns out that Microsoft Hyper-V fails to
                 * comply with its own invented interface: when Hyper-V uses
                 * eVMCS, it just sets the first u32 field of the eVMCS to the
                 * revision_id specified in MSR_IA32_VMX_BASIC, instead of the
                 * eVMCS version number, which is one of the supported
                 * versions specified in CPUID.0x4000000A.EAX[0:15].
                 *
                 * To work around this Hyper-V bug, we accept here either a
                 * supported eVMCS version or the VMCS12 revision_id as valid
                 * values for the first u32 field of the eVMCS.
                 */
                if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
                    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
                        nested_release_evmcs(vcpu);
                        return EVMPTRLD_VMFAIL;
                }

                vmx->nested.dirty_vmcs12 = true;
                vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

                evmcs_gpa_changed = true;
                /*
                 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
                 * reloaded from guest memory (read-only fields, fields not
                 * present in struct hv_enlightened_vmcs, ...). Make sure
                 * there are no leftovers.
                 */
                if (from_launch) {
                        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

                        memset(vmcs12, 0, sizeof(*vmcs12));
                        vmcs12->hdr.revision_id = VMCS12_REVISION;
                }

        }

        /*
         * Clean fields data can't be used on VMLAUNCH or when we switch
         * between different L2 guests, as KVM keeps a single VMCS12 per L1.
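         *
         * Clearing every clean bit below forces copy_enlightened_to_vmcs12()
         * to re-read the whole eVMCS.  In the steady state L1 is expected to
         * mark only what it touched, e.g. (sketch):
         *
         *      evmcs->guest_cs_selector = sel;
         *      evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2;
         *
         * but that information is worthless right after VMLAUNCH, when the
         * cached vmcs12 has just been zeroed, or when the eVMCS GPA changed
         * and the cached state may belong to a different L2.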
2050 */ 2051 if (from_launch || evmcs_gpa_changed) 2052 vmx->nested.hv_evmcs->hv_clean_fields &= 2053 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2054 2055 return EVMPTRLD_SUCCEEDED; 2056 } 2057 2058 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2059 { 2060 struct vcpu_vmx *vmx = to_vmx(vcpu); 2061 2062 if (vmx->nested.hv_evmcs) { 2063 copy_vmcs12_to_enlightened(vmx); 2064 /* All fields are clean */ 2065 vmx->nested.hv_evmcs->hv_clean_fields |= 2066 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2067 } else { 2068 copy_vmcs12_to_shadow(vmx); 2069 } 2070 2071 vmx->nested.need_vmcs12_to_shadow_sync = false; 2072 } 2073 2074 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2075 { 2076 struct vcpu_vmx *vmx = 2077 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2078 2079 vmx->nested.preemption_timer_expired = true; 2080 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2081 kvm_vcpu_kick(&vmx->vcpu); 2082 2083 return HRTIMER_NORESTART; 2084 } 2085 2086 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2087 { 2088 struct vcpu_vmx *vmx = to_vmx(vcpu); 2089 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2090 2091 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2092 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2093 2094 if (!vmx->nested.has_preemption_timer_deadline) { 2095 vmx->nested.preemption_timer_deadline = 2096 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2097 vmx->nested.has_preemption_timer_deadline = true; 2098 } 2099 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2100 } 2101 2102 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2103 u64 preemption_timeout) 2104 { 2105 struct vcpu_vmx *vmx = to_vmx(vcpu); 2106 2107 /* 2108 * A timer value of zero is architecturally guaranteed to cause 2109 * a VMExit prior to executing any instructions in the guest. 2110 */ 2111 if (preemption_timeout == 0) { 2112 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2113 return; 2114 } 2115 2116 if (vcpu->arch.virtual_tsc_khz == 0) 2117 return; 2118 2119 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2120 preemption_timeout *= 1000000; 2121 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2122 hrtimer_start(&vmx->nested.preemption_timer, 2123 ktime_add_ns(ktime_get(), preemption_timeout), 2124 HRTIMER_MODE_ABS_PINNED); 2125 } 2126 2127 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2128 { 2129 if (vmx->nested.nested_run_pending && 2130 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2131 return vmcs12->guest_ia32_efer; 2132 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2133 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2134 else 2135 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2136 } 2137 2138 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2139 { 2140 /* 2141 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2142 * according to L0's settings (vmcs12 is irrelevant here). Host 2143 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2144 * will be set as needed prior to VMLAUNCH/VMRESUME. 2145 */ 2146 if (vmx->nested.vmcs02_initialized) 2147 return; 2148 vmx->nested.vmcs02_initialized = true; 2149 2150 /* 2151 * We don't care what the EPTP value is we just need to guarantee 2152 * it's valid so we don't get a false positive when doing early 2153 * consistency checks. 
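         *
         * The dummy EPTP written below only has to satisfy the hardware
         * checks performed during nested_vmx_check_vmentry_hw(); the EPTP
         * that L2 actually runs with is installed later, once the nested MMU
         * has been set up.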
2154 */ 2155 if (enable_ept && nested_early_check) 2156 vmcs_write64(EPT_POINTER, 2157 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2158 2159 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2160 if (cpu_has_vmx_vmfunc()) 2161 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2162 2163 if (cpu_has_vmx_posted_intr()) 2164 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2165 2166 if (cpu_has_vmx_msr_bitmap()) 2167 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2168 2169 /* 2170 * The PML address never changes, so it is constant in vmcs02. 2171 * Conceptually we want to copy the PML index from vmcs01 here, 2172 * and then back to vmcs01 on nested vmexit. But since we flush 2173 * the log and reset GUEST_PML_INDEX on each vmexit, the PML 2174 * index is also effectively constant in vmcs02. 2175 */ 2176 if (enable_pml) { 2177 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 2178 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 2179 } 2180 2181 if (cpu_has_vmx_encls_vmexit()) 2182 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2183 2184 /* 2185 * Set the MSR load/store lists to match L0's settings. Only the 2186 * addresses are constant (for vmcs02), the counts can change based 2187 * on L2's behavior, e.g. switching to/from long mode. 2188 */ 2189 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2190 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2191 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2192 2193 vmx_set_constant_host_state(vmx); 2194 } 2195 2196 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2197 struct vmcs12 *vmcs12) 2198 { 2199 prepare_vmcs02_constant_state(vmx); 2200 2201 vmcs_write64(VMCS_LINK_POINTER, -1ull); 2202 2203 if (enable_vpid) { 2204 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2205 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2206 else 2207 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2208 } 2209 } 2210 2211 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2212 { 2213 u32 exec_control, vmcs12_exec_ctrl; 2214 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2215 2216 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) 2217 prepare_vmcs02_early_rare(vmx, vmcs12); 2218 2219 /* 2220 * PIN CONTROLS 2221 */ 2222 exec_control = vmx_pin_based_exec_ctrl(vmx); 2223 exec_control |= (vmcs12->pin_based_vm_exec_control & 2224 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2225 2226 /* Posted interrupts setting is only taken from vmcs12. */ 2227 if (nested_cpu_has_posted_intr(vmcs12)) { 2228 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2229 vmx->nested.pi_pending = false; 2230 } else { 2231 exec_control &= ~PIN_BASED_POSTED_INTR; 2232 } 2233 pin_controls_set(vmx, exec_control); 2234 2235 /* 2236 * EXEC CONTROLS 2237 */ 2238 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2239 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2240 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2241 exec_control &= ~CPU_BASED_TPR_SHADOW; 2242 exec_control |= vmcs12->cpu_based_vm_exec_control; 2243 2244 vmx->nested.l1_tpr_threshold = -1; 2245 if (exec_control & CPU_BASED_TPR_SHADOW) 2246 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2247 #ifdef CONFIG_X86_64 2248 else 2249 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2250 CPU_BASED_CR8_STORE_EXITING; 2251 #endif 2252 2253 /* 2254 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2255 * for I/O port accesses. 
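         *
         * Rather than pointing vmcs02 at L1's I/O bitmaps, exit on every I/O
         * access and let the L0 exit handler decide whether the exit needs
         * to be reflected to L1.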
2256 */ 2257 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2258 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2259 2260 /* 2261 * This bit will be computed in nested_get_vmcs12_pages, because 2262 * we do not have access to L1's MSR bitmap yet. For now, keep 2263 * the same bit as before, hoping to avoid multiple VMWRITEs that 2264 * only set/clear this bit. 2265 */ 2266 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2267 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2268 2269 exec_controls_set(vmx, exec_control); 2270 2271 /* 2272 * SECONDARY EXEC CONTROLS 2273 */ 2274 if (cpu_has_secondary_exec_ctrls()) { 2275 exec_control = vmx->secondary_exec_control; 2276 2277 /* Take the following fields only from vmcs12 */ 2278 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2279 SECONDARY_EXEC_ENABLE_INVPCID | 2280 SECONDARY_EXEC_ENABLE_RDTSCP | 2281 SECONDARY_EXEC_XSAVES | 2282 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2283 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2284 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2285 SECONDARY_EXEC_ENABLE_VMFUNC); 2286 if (nested_cpu_has(vmcs12, 2287 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { 2288 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & 2289 ~SECONDARY_EXEC_ENABLE_PML; 2290 exec_control |= vmcs12_exec_ctrl; 2291 } 2292 2293 /* VMCS shadowing for L2 is emulated for now */ 2294 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2295 2296 /* 2297 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2298 * will not have to rewrite the controls just for this bit. 2299 */ 2300 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && 2301 (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2302 exec_control |= SECONDARY_EXEC_DESC; 2303 2304 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2305 vmcs_write16(GUEST_INTR_STATUS, 2306 vmcs12->guest_intr_status); 2307 2308 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2309 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2310 2311 secondary_exec_controls_set(vmx, exec_control); 2312 } 2313 2314 /* 2315 * ENTRY CONTROLS 2316 * 2317 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2318 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2319 * on the related bits (if supported by the CPU) in the hope that 2320 * we can avoid VMWrites during vmx_set_efer(). 2321 */ 2322 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2323 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2324 if (cpu_has_load_ia32_efer()) { 2325 if (guest_efer & EFER_LMA) 2326 exec_control |= VM_ENTRY_IA32E_MODE; 2327 if (guest_efer != host_efer) 2328 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2329 } 2330 vm_entry_controls_set(vmx, exec_control); 2331 2332 /* 2333 * EXIT CONTROLS 2334 * 2335 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2336 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2337 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
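         *
         * vmcs12's exit controls are still validated (see
         * nested_check_vm_exit_controls()) and consulted when the L2->L1
         * exit is emulated, they are simply never programmed into vmcs02.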
2338 */ 2339 exec_control = vmx_vmexit_ctrl(); 2340 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2341 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2342 vm_exit_controls_set(vmx, exec_control); 2343 2344 /* 2345 * Interrupt/Exception Fields 2346 */ 2347 if (vmx->nested.nested_run_pending) { 2348 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2349 vmcs12->vm_entry_intr_info_field); 2350 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2351 vmcs12->vm_entry_exception_error_code); 2352 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2353 vmcs12->vm_entry_instruction_len); 2354 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2355 vmcs12->guest_interruptibility_info); 2356 vmx->loaded_vmcs->nmi_known_unmasked = 2357 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2358 } else { 2359 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2360 } 2361 } 2362 2363 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2364 { 2365 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2366 2367 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2368 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2369 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2370 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2371 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2372 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2373 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2374 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2375 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2376 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2377 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2378 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2379 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2380 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2381 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2382 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2383 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2384 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2385 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2386 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2387 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2388 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2389 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2390 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2391 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2392 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2393 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2394 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2395 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2396 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2397 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2398 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2399 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2400 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2401 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2402 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2403 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2404 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2405 2406 vmx->segment_cache.bitmask = 0; 2407 } 2408 2409 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2410 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2411 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2412 
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2413 vmcs12->guest_pending_dbg_exceptions); 2414 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2415 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2416 2417 /* 2418 * L1 may access the L2's PDPTR, so save them to construct 2419 * vmcs12 2420 */ 2421 if (enable_ept) { 2422 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2423 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2424 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2425 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2426 } 2427 2428 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2429 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2430 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2431 } 2432 2433 if (nested_cpu_has_xsaves(vmcs12)) 2434 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2435 2436 /* 2437 * Whether page-faults are trapped is determined by a combination of 2438 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2439 * doesn't care about page faults then we should set all of these to 2440 * L1's desires. However, if L0 does care about (some) page faults, it 2441 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2442 * simply ask to exit on each and every L2 page fault. This is done by 2443 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2444 * Note that below we don't need special code to set EB.PF beyond the 2445 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2446 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2447 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2448 */ 2449 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2450 /* 2451 * TODO: if both L0 and L1 need the same MASK and MATCH, 2452 * go ahead and use it? 2453 */ 2454 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2455 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2456 } else { 2457 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2458 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2459 } 2460 2461 if (cpu_has_vmx_apicv()) { 2462 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2463 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2464 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2465 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2466 } 2467 2468 /* 2469 * Make sure the msr_autostore list is up to date before we set the 2470 * count in the vmcs02. 2471 */ 2472 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2473 2474 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2475 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2476 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2477 2478 set_cr4_guest_host_mask(vmx); 2479 } 2480 2481 /* 2482 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2483 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2484 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2485 * guest in a way that will both be appropriate to L1's requests, and our 2486 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2487 * function also has additional necessary side-effects, like setting various 2488 * vcpu->arch fields. 2489 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2490 * is assigned to entry_failure_code on failure. 
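 *
 * Rarely-changing fields are handled by prepare_vmcs02_rare(), which is only
 * invoked when vmcs12 is dirty or an enlightened VMCS is in use; the
 * remaining fields are refreshed on every nested entry.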
2491 */ 2492 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2493 enum vm_entry_failure_code *entry_failure_code) 2494 { 2495 struct vcpu_vmx *vmx = to_vmx(vcpu); 2496 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2497 bool load_guest_pdptrs_vmcs12 = false; 2498 2499 if (vmx->nested.dirty_vmcs12 || hv_evmcs) { 2500 prepare_vmcs02_rare(vmx, vmcs12); 2501 vmx->nested.dirty_vmcs12 = false; 2502 2503 load_guest_pdptrs_vmcs12 = !hv_evmcs || 2504 !(hv_evmcs->hv_clean_fields & 2505 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); 2506 } 2507 2508 if (vmx->nested.nested_run_pending && 2509 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2510 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2511 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2512 } else { 2513 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2514 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); 2515 } 2516 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || 2517 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 2518 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); 2519 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2520 2521 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2522 * bitwise-or of what L1 wants to trap for L2, and what we want to 2523 * trap. Note that CR0.TS also needs updating - we do this later. 2524 */ 2525 vmx_update_exception_bitmap(vcpu); 2526 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2527 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2528 2529 if (vmx->nested.nested_run_pending && 2530 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2531 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2532 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2533 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2534 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2535 } 2536 2537 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2538 2539 if (kvm_has_tsc_control) 2540 decache_tsc_multiplier(vmx); 2541 2542 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true); 2543 2544 if (nested_cpu_has_ept(vmcs12)) 2545 nested_ept_init_mmu_context(vcpu); 2546 2547 /* 2548 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those 2549 * bits which we consider mandatory enabled. 2550 * The CR0_READ_SHADOW is what L2 should have expected to read given 2551 * the specifications by L1; It's not enough to take 2552 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we 2553 * have more bits than L1 expected. 2554 */ 2555 vmx_set_cr0(vcpu, vmcs12->guest_cr0); 2556 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); 2557 2558 vmx_set_cr4(vcpu, vmcs12->guest_cr4); 2559 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); 2560 2561 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); 2562 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ 2563 vmx_set_efer(vcpu, vcpu->arch.efer); 2564 2565 /* 2566 * Guest state is invalid and unrestricted guest is disabled, 2567 * which means L1 attempted VMEntry to L2 with invalid state. 2568 * Fail the VMEntry. 2569 */ 2570 if (CC(!vmx_guest_state_valid(vcpu))) { 2571 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2572 return -EINVAL; 2573 } 2574 2575 /* Shadow page tables on either EPT or shadow page tables. */ 2576 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 2577 entry_failure_code)) 2578 return -EINVAL; 2579 2580 /* 2581 * Immediately write vmcs02.GUEST_CR3. 
It will be propagated to vmcs12 2582 * on nested VM-Exit, which can occur without actually running L2 and 2583 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with 2584 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the 2585 * transition to HLT instead of running L2. 2586 */ 2587 if (enable_ept) 2588 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); 2589 2590 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ 2591 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2592 is_pae_paging(vcpu)) { 2593 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2594 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2595 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2596 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2597 } 2598 2599 if (!enable_ept) 2600 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 2601 2602 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2603 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2604 vmcs12->guest_ia32_perf_global_ctrl))) 2605 return -EINVAL; 2606 2607 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2608 kvm_rip_write(vcpu, vmcs12->guest_rip); 2609 return 0; 2610 } 2611 2612 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2613 { 2614 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2615 nested_cpu_has_virtual_nmis(vmcs12))) 2616 return -EINVAL; 2617 2618 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2619 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2620 return -EINVAL; 2621 2622 return 0; 2623 } 2624 2625 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2626 { 2627 struct vcpu_vmx *vmx = to_vmx(vcpu); 2628 2629 /* Check for memory type validity */ 2630 switch (new_eptp & VMX_EPTP_MT_MASK) { 2631 case VMX_EPTP_MT_UC: 2632 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2633 return false; 2634 break; 2635 case VMX_EPTP_MT_WB: 2636 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2637 return false; 2638 break; 2639 default: 2640 return false; 2641 } 2642 2643 /* Page-walk levels validity. 
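         * (EPTP bits 5:3 hold the page-walk length minus one; only the
         * 4-level and 5-level encodings may be used by L1, and only when the
         * matching EPT capability bit is exposed.)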
*/ 2644 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2645 case VMX_EPTP_PWL_5: 2646 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2647 return false; 2648 break; 2649 case VMX_EPTP_PWL_4: 2650 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2651 return false; 2652 break; 2653 default: 2654 return false; 2655 } 2656 2657 /* Reserved bits should not be set */ 2658 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2659 return false; 2660 2661 /* AD, if set, should be supported */ 2662 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2663 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2664 return false; 2665 } 2666 2667 return true; 2668 } 2669 2670 /* 2671 * Checks related to VM-Execution Control Fields 2672 */ 2673 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2674 struct vmcs12 *vmcs12) 2675 { 2676 struct vcpu_vmx *vmx = to_vmx(vcpu); 2677 2678 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2679 vmx->nested.msrs.pinbased_ctls_low, 2680 vmx->nested.msrs.pinbased_ctls_high)) || 2681 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2682 vmx->nested.msrs.procbased_ctls_low, 2683 vmx->nested.msrs.procbased_ctls_high))) 2684 return -EINVAL; 2685 2686 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2687 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2688 vmx->nested.msrs.secondary_ctls_low, 2689 vmx->nested.msrs.secondary_ctls_high))) 2690 return -EINVAL; 2691 2692 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2693 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2694 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2695 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2696 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2697 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2698 nested_vmx_check_nmi_controls(vmcs12) || 2699 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2700 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2701 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2702 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2703 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2704 return -EINVAL; 2705 2706 if (!nested_cpu_has_preemption_timer(vmcs12) && 2707 nested_cpu_has_save_preemption_timer(vmcs12)) 2708 return -EINVAL; 2709 2710 if (nested_cpu_has_ept(vmcs12) && 2711 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2712 return -EINVAL; 2713 2714 if (nested_cpu_has_vmfunc(vmcs12)) { 2715 if (CC(vmcs12->vm_function_control & 2716 ~vmx->nested.msrs.vmfunc_controls)) 2717 return -EINVAL; 2718 2719 if (nested_cpu_has_eptp_switching(vmcs12)) { 2720 if (CC(!nested_cpu_has_ept(vmcs12)) || 2721 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2722 return -EINVAL; 2723 } 2724 } 2725 2726 return 0; 2727 } 2728 2729 /* 2730 * Checks related to VM-Exit Control Fields 2731 */ 2732 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2733 struct vmcs12 *vmcs12) 2734 { 2735 struct vcpu_vmx *vmx = to_vmx(vcpu); 2736 2737 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2738 vmx->nested.msrs.exit_ctls_low, 2739 vmx->nested.msrs.exit_ctls_high)) || 2740 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2741 return -EINVAL; 2742 2743 return 0; 2744 } 2745 2746 /* 2747 * Checks related to VM-Entry Control Fields 2748 */ 2749 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2750 struct vmcs12 *vmcs12) 2751 { 2752 struct 
vcpu_vmx *vmx = to_vmx(vcpu); 2753 2754 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2755 vmx->nested.msrs.entry_ctls_low, 2756 vmx->nested.msrs.entry_ctls_high))) 2757 return -EINVAL; 2758 2759 /* 2760 * From the Intel SDM, volume 3: 2761 * Fields relevant to VM-entry event injection must be set properly. 2762 * These fields are the VM-entry interruption-information field, the 2763 * VM-entry exception error code, and the VM-entry instruction length. 2764 */ 2765 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2766 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2767 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2768 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2769 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2770 bool should_have_error_code; 2771 bool urg = nested_cpu_has2(vmcs12, 2772 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2773 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2774 2775 /* VM-entry interruption-info field: interruption type */ 2776 if (CC(intr_type == INTR_TYPE_RESERVED) || 2777 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2778 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2779 return -EINVAL; 2780 2781 /* VM-entry interruption-info field: vector */ 2782 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2783 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2784 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2785 return -EINVAL; 2786 2787 /* VM-entry interruption-info field: deliver error code */ 2788 should_have_error_code = 2789 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2790 x86_exception_has_error_code(vector); 2791 if (CC(has_error_code != should_have_error_code)) 2792 return -EINVAL; 2793 2794 /* VM-entry exception error code */ 2795 if (CC(has_error_code && 2796 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2797 return -EINVAL; 2798 2799 /* VM-entry interruption-info field: reserved bits */ 2800 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2801 return -EINVAL; 2802 2803 /* VM-entry instruction length */ 2804 switch (intr_type) { 2805 case INTR_TYPE_SOFT_EXCEPTION: 2806 case INTR_TYPE_SOFT_INTR: 2807 case INTR_TYPE_PRIV_SW_EXCEPTION: 2808 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2809 CC(vmcs12->vm_entry_instruction_len == 0 && 2810 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2811 return -EINVAL; 2812 } 2813 } 2814 2815 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2816 return -EINVAL; 2817 2818 return 0; 2819 } 2820 2821 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2822 struct vmcs12 *vmcs12) 2823 { 2824 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2825 nested_check_vm_exit_controls(vcpu, vmcs12) || 2826 nested_check_vm_entry_controls(vcpu, vmcs12)) 2827 return -EINVAL; 2828 2829 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) 2830 return nested_evmcs_check_controls(vmcs12); 2831 2832 return 0; 2833 } 2834 2835 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2836 struct vmcs12 *vmcs12) 2837 { 2838 bool ia32e; 2839 2840 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2841 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2842 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) 2843 return -EINVAL; 2844 2845 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2846 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2847 return -EINVAL; 2848 2849 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2850 
CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2851 return -EINVAL; 2852 2853 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2854 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2855 vmcs12->host_ia32_perf_global_ctrl))) 2856 return -EINVAL; 2857 2858 #ifdef CONFIG_X86_64 2859 ia32e = !!(vcpu->arch.efer & EFER_LMA); 2860 #else 2861 ia32e = false; 2862 #endif 2863 2864 if (ia32e) { 2865 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || 2866 CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2867 return -EINVAL; 2868 } else { 2869 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || 2870 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2871 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2872 CC((vmcs12->host_rip) >> 32)) 2873 return -EINVAL; 2874 } 2875 2876 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2877 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2878 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2879 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2880 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2881 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2882 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2883 CC(vmcs12->host_cs_selector == 0) || 2884 CC(vmcs12->host_tr_selector == 0) || 2885 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2886 return -EINVAL; 2887 2888 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2889 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2890 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2891 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2892 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2893 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2894 return -EINVAL; 2895 2896 /* 2897 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2898 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2899 * the values of the LMA and LME bits in the field must each be that of 2900 * the host address-space size VM-exit control. 
2901 */ 2902 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2903 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2904 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2905 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2906 return -EINVAL; 2907 } 2908 2909 return 0; 2910 } 2911 2912 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2913 struct vmcs12 *vmcs12) 2914 { 2915 int r = 0; 2916 struct vmcs12 *shadow; 2917 struct kvm_host_map map; 2918 2919 if (vmcs12->vmcs_link_pointer == -1ull) 2920 return 0; 2921 2922 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2923 return -EINVAL; 2924 2925 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) 2926 return -EINVAL; 2927 2928 shadow = map.hva; 2929 2930 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || 2931 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 2932 r = -EINVAL; 2933 2934 kvm_vcpu_unmap(vcpu, &map, false); 2935 return r; 2936 } 2937 2938 /* 2939 * Checks related to Guest Non-register State 2940 */ 2941 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2942 { 2943 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2944 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 2945 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 2946 return -EINVAL; 2947 2948 return 0; 2949 } 2950 2951 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 2952 struct vmcs12 *vmcs12, 2953 enum vm_entry_failure_code *entry_failure_code) 2954 { 2955 bool ia32e; 2956 2957 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2958 2959 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 2960 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 2961 return -EINVAL; 2962 2963 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 2964 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 2965 return -EINVAL; 2966 2967 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 2968 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 2969 return -EINVAL; 2970 2971 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2972 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 2973 return -EINVAL; 2974 } 2975 2976 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2977 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2978 vmcs12->guest_ia32_perf_global_ctrl))) 2979 return -EINVAL; 2980 2981 /* 2982 * If the load IA32_EFER VM-entry control is 1, the following checks 2983 * are performed on the field for the IA32_EFER MSR: 2984 * - Bits reserved in the IA32_EFER MSR must be 0. 2985 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 2986 * the IA-32e mode guest VM-exit control. It must also be identical 2987 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 2988 * CR0.PG) is 1. 
2989 */ 2990 if (to_vmx(vcpu)->nested.nested_run_pending && 2991 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 2992 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 2993 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 2994 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 2995 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 2996 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 2997 return -EINVAL; 2998 } 2999 3000 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3001 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3002 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3003 return -EINVAL; 3004 3005 if (nested_check_guest_non_reg_state(vmcs12)) 3006 return -EINVAL; 3007 3008 return 0; 3009 } 3010 3011 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3012 { 3013 struct vcpu_vmx *vmx = to_vmx(vcpu); 3014 unsigned long cr3, cr4; 3015 bool vm_fail; 3016 3017 if (!nested_early_check) 3018 return 0; 3019 3020 if (vmx->msr_autoload.host.nr) 3021 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3022 if (vmx->msr_autoload.guest.nr) 3023 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3024 3025 preempt_disable(); 3026 3027 vmx_prepare_switch_to_guest(vcpu); 3028 3029 /* 3030 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3031 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3032 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3033 * there is no need to preserve other bits or save/restore the field. 3034 */ 3035 vmcs_writel(GUEST_RFLAGS, 0); 3036 3037 cr3 = __get_current_cr3_fast(); 3038 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3039 vmcs_writel(HOST_CR3, cr3); 3040 vmx->loaded_vmcs->host_state.cr3 = cr3; 3041 } 3042 3043 cr4 = cr4_read_shadow(); 3044 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3045 vmcs_writel(HOST_CR4, cr4); 3046 vmx->loaded_vmcs->host_state.cr4 = cr4; 3047 } 3048 3049 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3050 vmx->loaded_vmcs->launched); 3051 3052 if (vmx->msr_autoload.host.nr) 3053 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3054 if (vmx->msr_autoload.guest.nr) 3055 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3056 3057 if (vm_fail) { 3058 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3059 3060 preempt_enable(); 3061 3062 trace_kvm_nested_vmenter_failed( 3063 "early hardware check VM-instruction error: ", error); 3064 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3065 return 1; 3066 } 3067 3068 /* 3069 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3070 */ 3071 if (hw_breakpoint_active()) 3072 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3073 local_irq_enable(); 3074 preempt_enable(); 3075 3076 /* 3077 * A non-failing VMEntry means we somehow entered guest mode with 3078 * an illegal RIP, and that's just the tip of the iceberg. There 3079 * is no telling what memory has been modified or what state has 3080 * been exposed to unknown code. Hitting this all but guarantees 3081 * a (very critical) hardware issue. 3082 */ 3083 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3084 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3085 3086 return 0; 3087 } 3088 3089 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3090 { 3091 struct vcpu_vmx *vmx = to_vmx(vcpu); 3092 3093 /* 3094 * hv_evmcs may end up being not mapped after migration (when 3095 * L2 was running), map it here to make sure vmcs12 changes are 3096 * properly reflected. 
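         *
         * This runs from the KVM_REQ_GET_NESTED_STATE_PAGES request that is
         * raised when nested state is restored, i.e. before the vCPU
         * re-enters L2 on the destination.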
3097 */ 3098 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { 3099 enum nested_evmptrld_status evmptrld_status = 3100 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3101 3102 if (evmptrld_status == EVMPTRLD_VMFAIL || 3103 evmptrld_status == EVMPTRLD_ERROR) { 3104 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3105 __func__); 3106 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3107 vcpu->run->internal.suberror = 3108 KVM_INTERNAL_ERROR_EMULATION; 3109 vcpu->run->internal.ndata = 0; 3110 return false; 3111 } 3112 } 3113 3114 return true; 3115 } 3116 3117 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3118 { 3119 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3120 struct vcpu_vmx *vmx = to_vmx(vcpu); 3121 struct kvm_host_map *map; 3122 struct page *page; 3123 u64 hpa; 3124 3125 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3126 /* 3127 * Translate L1 physical address to host physical 3128 * address for vmcs02. Keep the page pinned, so this 3129 * physical address remains valid. We keep a reference 3130 * to it so we can release it later. 3131 */ 3132 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 3133 kvm_release_page_clean(vmx->nested.apic_access_page); 3134 vmx->nested.apic_access_page = NULL; 3135 } 3136 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 3137 if (!is_error_page(page)) { 3138 vmx->nested.apic_access_page = page; 3139 hpa = page_to_phys(vmx->nested.apic_access_page); 3140 vmcs_write64(APIC_ACCESS_ADDR, hpa); 3141 } else { 3142 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", 3143 __func__); 3144 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3145 vcpu->run->internal.suberror = 3146 KVM_INTERNAL_ERROR_EMULATION; 3147 vcpu->run->internal.ndata = 0; 3148 return false; 3149 } 3150 } 3151 3152 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3153 map = &vmx->nested.virtual_apic_map; 3154 3155 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3156 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3157 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3158 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3159 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3160 /* 3161 * The processor will never use the TPR shadow, simply 3162 * clear the bit from the execution control. Such a 3163 * configuration is useless, but it happens in tests. 3164 * For any other configuration, failing the vm entry is 3165 * _not_ what the processor does but it's basically the 3166 * only possibility we have. 3167 */ 3168 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3169 } else { 3170 /* 3171 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3172 * force VM-Entry to fail. 
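                         *
                         * (-1ull is neither 4KiB aligned nor within the
                         * guest-physical address width, so the VM-entry
                         * checks reject it instead of the CPU silently using
                         * a stale mapping.)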
3173 */ 3174 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 3175 } 3176 } 3177 3178 if (nested_cpu_has_posted_intr(vmcs12)) { 3179 map = &vmx->nested.pi_desc_map; 3180 3181 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3182 vmx->nested.pi_desc = 3183 (struct pi_desc *)(((void *)map->hva) + 3184 offset_in_page(vmcs12->posted_intr_desc_addr)); 3185 vmcs_write64(POSTED_INTR_DESC_ADDR, 3186 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3187 } 3188 } 3189 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3190 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3191 else 3192 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3193 3194 return true; 3195 } 3196 3197 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3198 { 3199 if (!nested_get_evmcs_page(vcpu)) 3200 return false; 3201 3202 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3203 return false; 3204 3205 return true; 3206 } 3207 3208 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3209 { 3210 struct vmcs12 *vmcs12; 3211 struct vcpu_vmx *vmx = to_vmx(vcpu); 3212 gpa_t dst; 3213 3214 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3215 return 0; 3216 3217 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3218 return 1; 3219 3220 /* 3221 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3222 * set is already checked as part of A/D emulation. 3223 */ 3224 vmcs12 = get_vmcs12(vcpu); 3225 if (!nested_cpu_has_pml(vmcs12)) 3226 return 0; 3227 3228 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3229 vmx->nested.pml_full = true; 3230 return 1; 3231 } 3232 3233 gpa &= ~0xFFFull; 3234 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3235 3236 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3237 offset_in_page(dst), sizeof(gpa))) 3238 return 0; 3239 3240 vmcs12->guest_pml_index--; 3241 3242 return 0; 3243 } 3244 3245 /* 3246 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3247 * for running VMX instructions (except VMXON, whose prerequisites are 3248 * slightly different). It also specifies what exception to inject otherwise. 3249 * Note that many of these exceptions have priority over VM exits, so they 3250 * don't have to be checked again here. 3251 */ 3252 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3253 { 3254 if (!to_vmx(vcpu)->nested.vmxon) { 3255 kvm_queue_exception(vcpu, UD_VECTOR); 3256 return 0; 3257 } 3258 3259 if (vmx_get_cpl(vcpu)) { 3260 kvm_inject_gp(vcpu, 0); 3261 return 0; 3262 } 3263 3264 return 1; 3265 } 3266 3267 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3268 { 3269 u8 rvi = vmx_get_rvi(); 3270 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3271 3272 return ((rvi & 0xf0) > (vppr & 0xf0)); 3273 } 3274 3275 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3276 struct vmcs12 *vmcs12); 3277 3278 /* 3279 * If from_vmentry is false, this is being called from state restore (either RSM 3280 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 
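 *
 * In broad strokes: switch to vmcs02, build its control fields from a merge
 * of vmcs12 and L0's own requirements, run the consistency checks that are
 * not deferred to hardware, load the guest state described by vmcs12 and,
 * for a real VMLAUNCH/VMRESUME, process the VM-entry MSR load list.  Any
 * failure unwinds back to vmcs01.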
3281 * 3282 * Returns: 3283 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3284 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3285 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3286 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3287 */ 3288 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3289 bool from_vmentry) 3290 { 3291 struct vcpu_vmx *vmx = to_vmx(vcpu); 3292 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3293 enum vm_entry_failure_code entry_failure_code; 3294 bool evaluate_pending_interrupts; 3295 union vmx_exit_reason exit_reason = { 3296 .basic = EXIT_REASON_INVALID_STATE, 3297 .failed_vmentry = 1, 3298 }; 3299 u32 failed_index; 3300 3301 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3302 kvm_vcpu_flush_tlb_current(vcpu); 3303 3304 evaluate_pending_interrupts = exec_controls_get(vmx) & 3305 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3306 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3307 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3308 3309 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3310 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3311 if (kvm_mpx_supported() && 3312 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 3313 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3314 3315 /* 3316 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3317 * nested early checks are disabled. In the event of a "late" VM-Fail, 3318 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3319 * software model to the pre-VMEntry host state. When EPT is disabled, 3320 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3321 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3322 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3323 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3324 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3325 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3326 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3327 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3328 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3329 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3330 */ 3331 if (!enable_ept && !nested_early_check) 3332 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3333 3334 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3335 3336 prepare_vmcs02_early(vmx, vmcs12); 3337 3338 if (from_vmentry) { 3339 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3340 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3341 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3342 } 3343 3344 if (nested_vmx_check_vmentry_hw(vcpu)) { 3345 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3346 return NVMX_VMENTRY_VMFAIL; 3347 } 3348 3349 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3350 &entry_failure_code)) { 3351 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3352 vmcs12->exit_qualification = entry_failure_code; 3353 goto vmentry_fail_vmexit; 3354 } 3355 } 3356 3357 enter_guest_mode(vcpu); 3358 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3359 vcpu->arch.tsc_offset += vmcs12->tsc_offset; 3360 3361 if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) { 3362 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3363 vmcs12->exit_qualification = entry_failure_code; 3364 goto vmentry_fail_vmexit_guest_mode; 3365 } 3366 3367 if (from_vmentry) { 3368 failed_index = nested_vmx_load_msr(vcpu, 3369 vmcs12->vm_entry_msr_load_addr, 3370 vmcs12->vm_entry_msr_load_count); 3371 if (failed_index) { 3372 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3373 vmcs12->exit_qualification = failed_index; 3374 goto vmentry_fail_vmexit_guest_mode; 3375 } 3376 } else { 3377 /* 3378 * The MMU is not initialized to point at the right entities yet and 3379 * "get pages" would need to read data from the guest (i.e. we will 3380 * need to perform gpa to hpa translation). Request a call 3381 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3382 * have already been set at vmentry time and should not be reset. 3383 */ 3384 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3385 } 3386 3387 /* 3388 * If L1 had a pending IRQ/NMI until it executed 3389 * VMLAUNCH/VMRESUME which wasn't delivered because it was 3390 * disallowed (e.g. interrupts disabled), L0 needs to 3391 * evaluate if this pending event should cause an exit from L2 3392 * to L1 or delivered directly to L2 (e.g. In case L1 don't 3393 * intercept EXTERNAL_INTERRUPT). 3394 * 3395 * Usually this would be handled by the processor noticing an 3396 * IRQ/NMI window request, or checking RVI during evaluation of 3397 * pending virtual interrupts. However, this setting was done 3398 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 3399 * to perform pending event evaluation by requesting a KVM_REQ_EVENT. 3400 */ 3401 if (unlikely(evaluate_pending_interrupts)) 3402 kvm_make_request(KVM_REQ_EVENT, vcpu); 3403 3404 /* 3405 * Do not start the preemption timer hrtimer until after we know 3406 * we are successful, so that only nested_vmx_vmexit needs to cancel 3407 * the timer. 3408 */ 3409 vmx->nested.preemption_timer_expired = false; 3410 if (nested_cpu_has_preemption_timer(vmcs12)) { 3411 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3412 vmx_start_preemption_timer(vcpu, timer_value); 3413 } 3414 3415 /* 3416 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3417 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3418 * returned as far as L1 is concerned. It will only return (and set 3419 * the success flag) when L2 exits (see nested_vmx_vmexit()). 
3420 */ 3421 return NVMX_VMENTRY_SUCCESS; 3422 3423 /* 3424 * A failed consistency check that leads to a VMExit during L1's 3425 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3426 * 26.7 "VM-entry failures during or after loading guest state". 3427 */ 3428 vmentry_fail_vmexit_guest_mode: 3429 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3430 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3431 leave_guest_mode(vcpu); 3432 3433 vmentry_fail_vmexit: 3434 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3435 3436 if (!from_vmentry) 3437 return NVMX_VMENTRY_VMEXIT; 3438 3439 load_vmcs12_host_state(vcpu, vmcs12); 3440 vmcs12->vm_exit_reason = exit_reason.full; 3441 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) 3442 vmx->nested.need_vmcs12_to_shadow_sync = true; 3443 return NVMX_VMENTRY_VMEXIT; 3444 } 3445 3446 /* 3447 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3448 * for running an L2 nested guest. 3449 */ 3450 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3451 { 3452 struct vmcs12 *vmcs12; 3453 enum nvmx_vmentry_status status; 3454 struct vcpu_vmx *vmx = to_vmx(vcpu); 3455 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3456 enum nested_evmptrld_status evmptrld_status; 3457 3458 if (!nested_vmx_check_permission(vcpu)) 3459 return 1; 3460 3461 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3462 if (evmptrld_status == EVMPTRLD_ERROR) { 3463 kvm_queue_exception(vcpu, UD_VECTOR); 3464 return 1; 3465 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) { 3466 return nested_vmx_failInvalid(vcpu); 3467 } 3468 3469 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)) 3470 return nested_vmx_failInvalid(vcpu); 3471 3472 vmcs12 = get_vmcs12(vcpu); 3473 3474 /* 3475 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3476 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3477 * rather than RFLAGS.ZF, and no error number is stored to the 3478 * VM-instruction error field. 3479 */ 3480 if (CC(vmcs12->hdr.shadow_vmcs)) 3481 return nested_vmx_failInvalid(vcpu); 3482 3483 if (vmx->nested.hv_evmcs) { 3484 copy_enlightened_to_vmcs12(vmx); 3485 /* Enlightened VMCS doesn't have launch state */ 3486 vmcs12->launch_state = !launch; 3487 } else if (enable_shadow_vmcs) { 3488 copy_shadow_to_vmcs12(vmx); 3489 } 3490 3491 /* 3492 * The nested entry process starts with enforcing various prerequisites 3493 * on vmcs12 as required by the Intel SDM, and acting appropriately when 3494 * they fail: as the SDM explains, some conditions should cause the 3495 * instruction to fail, while others will cause the instruction to seem 3496 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3497 * To speed up the normal (success) code path, we should avoid checking 3498 * for misconfigurations which will be caught by the processor anyway 3499 * when using the merged vmcs02. 3500 */ 3501 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3502 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3503 3504 if (CC(vmcs12->launch_state == launch)) 3505 return nested_vmx_fail(vcpu, 3506 launch ?
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3507 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3508 3509 if (nested_vmx_check_controls(vcpu, vmcs12)) 3510 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3511 3512 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3513 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3514 3515 /* 3516 * We're finally done with prerequisite checking, and can start with 3517 * the nested entry. 3518 */ 3519 vmx->nested.nested_run_pending = 1; 3520 vmx->nested.has_preemption_timer_deadline = false; 3521 status = nested_vmx_enter_non_root_mode(vcpu, true); 3522 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3523 goto vmentry_failed; 3524 3525 /* Emulate processing of posted interrupts on VM-Enter. */ 3526 if (nested_cpu_has_posted_intr(vmcs12) && 3527 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3528 vmx->nested.pi_pending = true; 3529 kvm_make_request(KVM_REQ_EVENT, vcpu); 3530 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3531 } 3532 3533 /* Hide L1D cache contents from the nested guest. */ 3534 vmx->vcpu.arch.l1tf_flush_l1d = true; 3535 3536 /* 3537 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3538 * also be used as part of restoring nVMX state for 3539 * snapshot restore (migration). 3540 * 3541 * In this flow, it is assumed that vmcs12 cache was 3542 * transferred as part of captured nVMX state and should 3543 * therefore not be read from guest memory (which may not 3544 * exist on destination host yet). 3545 */ 3546 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3547 3548 switch (vmcs12->guest_activity_state) { 3549 case GUEST_ACTIVITY_HLT: 3550 /* 3551 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3552 * awakened by event injection or by an NMI-window VM-exit or 3553 * by an interrupt-window VM-exit, halt the vcpu. 3554 */ 3555 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3556 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3557 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3558 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3559 vmx->nested.nested_run_pending = 0; 3560 return kvm_vcpu_halt(vcpu); 3561 } 3562 break; 3563 case GUEST_ACTIVITY_WAIT_SIPI: 3564 vmx->nested.nested_run_pending = 0; 3565 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3566 break; 3567 default: 3568 break; 3569 } 3570 3571 return 1; 3572 3573 vmentry_failed: 3574 vmx->nested.nested_run_pending = 0; 3575 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3576 return 0; 3577 if (status == NVMX_VMENTRY_VMEXIT) 3578 return 1; 3579 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3580 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3581 } 3582 3583 /* 3584 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3585 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3586 * This function returns the new value we should put in vmcs12.guest_cr0. 3587 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3588 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3589 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3590 * didn't trap the bit, because if L1 did, so would L0). 3591 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3592 * been modified by L2, and L1 knows it. So just leave the old value of 3593 * the bit from vmcs12.guest_cr0.
Note that the bit from vmcs02 GUEST_CR0 3594 * isn't relevant, because if L0 traps this bit it can set it to anything. 3595 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3596 * changed these bits, and therefore they need to be updated, but L0 3597 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3598 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3599 */ 3600 static inline unsigned long 3601 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3602 { 3603 return 3604 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3605 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3606 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3607 vcpu->arch.cr0_guest_owned_bits)); 3608 } 3609 3610 static inline unsigned long 3611 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3612 { 3613 return 3614 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3615 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3616 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3617 vcpu->arch.cr4_guest_owned_bits)); 3618 } 3619 3620 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3621 struct vmcs12 *vmcs12) 3622 { 3623 u32 idt_vectoring; 3624 unsigned int nr; 3625 3626 if (vcpu->arch.exception.injected) { 3627 nr = vcpu->arch.exception.nr; 3628 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3629 3630 if (kvm_exception_is_soft(nr)) { 3631 vmcs12->vm_exit_instruction_len = 3632 vcpu->arch.event_exit_inst_len; 3633 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3634 } else 3635 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3636 3637 if (vcpu->arch.exception.has_error_code) { 3638 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3639 vmcs12->idt_vectoring_error_code = 3640 vcpu->arch.exception.error_code; 3641 } 3642 3643 vmcs12->idt_vectoring_info_field = idt_vectoring; 3644 } else if (vcpu->arch.nmi_injected) { 3645 vmcs12->idt_vectoring_info_field = 3646 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3647 } else if (vcpu->arch.interrupt.injected) { 3648 nr = vcpu->arch.interrupt.nr; 3649 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3650 3651 if (vcpu->arch.interrupt.soft) { 3652 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3653 vmcs12->vm_entry_instruction_len = 3654 vcpu->arch.event_exit_inst_len; 3655 } else 3656 idt_vectoring |= INTR_TYPE_EXT_INTR; 3657 3658 vmcs12->idt_vectoring_info_field = idt_vectoring; 3659 } 3660 } 3661 3662 3663 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3664 { 3665 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3666 gfn_t gfn; 3667 3668 /* 3669 * Don't need to mark the APIC access page dirty; it is never 3670 * written to by the CPU during APIC virtualization. 
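* The virtual-APIC page and the posted-interrupt descriptor, by contrast, can be written by the CPU while L2 runs, which is presumably why they are marked dirty below (so dirty logging and migration do not miss those CPU-initiated writes).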
3671 */ 3672 3673 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3674 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3675 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3676 } 3677 3678 if (nested_cpu_has_posted_intr(vmcs12)) { 3679 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3680 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3681 } 3682 } 3683 3684 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3685 { 3686 struct vcpu_vmx *vmx = to_vmx(vcpu); 3687 int max_irr; 3688 void *vapic_page; 3689 u16 status; 3690 3691 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) 3692 return; 3693 3694 vmx->nested.pi_pending = false; 3695 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3696 return; 3697 3698 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3699 if (max_irr != 256) { 3700 vapic_page = vmx->nested.virtual_apic_map.hva; 3701 if (!vapic_page) 3702 return; 3703 3704 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3705 vapic_page, &max_irr); 3706 status = vmcs_read16(GUEST_INTR_STATUS); 3707 if ((u8)max_irr > ((u8)status & 0xff)) { 3708 status &= ~0xff; 3709 status |= (u8)max_irr; 3710 vmcs_write16(GUEST_INTR_STATUS, status); 3711 } 3712 } 3713 3714 nested_mark_vmcs12_pages_dirty(vcpu); 3715 } 3716 3717 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3718 unsigned long exit_qual) 3719 { 3720 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3721 unsigned int nr = vcpu->arch.exception.nr; 3722 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3723 3724 if (vcpu->arch.exception.has_error_code) { 3725 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3726 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3727 } 3728 3729 if (kvm_exception_is_soft(nr)) 3730 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3731 else 3732 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3733 3734 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3735 vmx_get_nmi_mask(vcpu)) 3736 intr_info |= INTR_INFO_UNBLOCK_NMI; 3737 3738 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3739 } 3740 3741 /* 3742 * Returns true if a debug trap is pending delivery. 3743 * 3744 * In KVM, debug traps bear an exception payload. As such, the class of a #DB 3745 * exception may be inferred from the presence of an exception payload. 3746 */ 3747 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) 3748 { 3749 return vcpu->arch.exception.pending && 3750 vcpu->arch.exception.nr == DB_VECTOR && 3751 vcpu->arch.exception.payload; 3752 } 3753 3754 /* 3755 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3756 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3757 * represents these debug traps with a payload that is said to be compatible 3758 * with the 'pending debug exceptions' field, write the payload to the VMCS 3759 * field if a VM-exit is delivered before the debug trap. 
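* (vmx_pending_dbg_trap() above keys off exactly that payload to tell such trap-like #DBs apart from fault-like ones.)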
3760 */ 3761 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3762 { 3763 if (vmx_pending_dbg_trap(vcpu)) 3764 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 3765 vcpu->arch.exception.payload); 3766 } 3767 3768 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 3769 { 3770 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3771 to_vmx(vcpu)->nested.preemption_timer_expired; 3772 } 3773 3774 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 3775 { 3776 struct vcpu_vmx *vmx = to_vmx(vcpu); 3777 unsigned long exit_qual; 3778 bool block_nested_events = 3779 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3780 bool mtf_pending = vmx->nested.mtf_pending; 3781 struct kvm_lapic *apic = vcpu->arch.apic; 3782 3783 /* 3784 * Clear the MTF state. If a higher priority VM-exit is delivered first, 3785 * this state is discarded. 3786 */ 3787 if (!block_nested_events) 3788 vmx->nested.mtf_pending = false; 3789 3790 if (lapic_in_kernel(vcpu) && 3791 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 3792 if (block_nested_events) 3793 return -EBUSY; 3794 nested_vmx_update_pending_dbg(vcpu); 3795 clear_bit(KVM_APIC_INIT, &apic->pending_events); 3796 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 3797 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 3798 return 0; 3799 } 3800 3801 if (lapic_in_kernel(vcpu) && 3802 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 3803 if (block_nested_events) 3804 return -EBUSY; 3805 3806 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 3807 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 3808 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 3809 apic->sipi_vector & 0xFFUL); 3810 return 0; 3811 } 3812 3813 /* 3814 * Process any exceptions that are not debug traps before MTF. 3815 */ 3816 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) { 3817 if (block_nested_events) 3818 return -EBUSY; 3819 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3820 goto no_vmexit; 3821 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3822 return 0; 3823 } 3824 3825 if (mtf_pending) { 3826 if (block_nested_events) 3827 return -EBUSY; 3828 nested_vmx_update_pending_dbg(vcpu); 3829 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 3830 return 0; 3831 } 3832 3833 if (vcpu->arch.exception.pending) { 3834 if (block_nested_events) 3835 return -EBUSY; 3836 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3837 goto no_vmexit; 3838 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3839 return 0; 3840 } 3841 3842 if (nested_vmx_preemption_timer_pending(vcpu)) { 3843 if (block_nested_events) 3844 return -EBUSY; 3845 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3846 return 0; 3847 } 3848 3849 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 3850 if (block_nested_events) 3851 return -EBUSY; 3852 goto no_vmexit; 3853 } 3854 3855 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 3856 if (block_nested_events) 3857 return -EBUSY; 3858 if (!nested_exit_on_nmi(vcpu)) 3859 goto no_vmexit; 3860 3861 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3862 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3863 INTR_INFO_VALID_MASK, 0); 3864 /* 3865 * The NMI-triggered VM exit counts as injection: 3866 * clear this one and block further NMIs. 
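* (In other words, the NMI is treated as having been delivered, so KVM masks NMIs until the guest unmasks them again, typically via IRET.)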
3867 */ 3868 vcpu->arch.nmi_pending = 0; 3869 vmx_set_nmi_mask(vcpu, true); 3870 return 0; 3871 } 3872 3873 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 3874 if (block_nested_events) 3875 return -EBUSY; 3876 if (!nested_exit_on_intr(vcpu)) 3877 goto no_vmexit; 3878 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3879 return 0; 3880 } 3881 3882 no_vmexit: 3883 vmx_complete_nested_posted_interrupt(vcpu); 3884 return 0; 3885 } 3886 3887 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3888 { 3889 ktime_t remaining = 3890 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3891 u64 value; 3892 3893 if (ktime_to_ns(remaining) <= 0) 3894 return 0; 3895 3896 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3897 do_div(value, 1000000); 3898 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3899 } 3900 3901 static bool is_vmcs12_ext_field(unsigned long field) 3902 { 3903 switch (field) { 3904 case GUEST_ES_SELECTOR: 3905 case GUEST_CS_SELECTOR: 3906 case GUEST_SS_SELECTOR: 3907 case GUEST_DS_SELECTOR: 3908 case GUEST_FS_SELECTOR: 3909 case GUEST_GS_SELECTOR: 3910 case GUEST_LDTR_SELECTOR: 3911 case GUEST_TR_SELECTOR: 3912 case GUEST_ES_LIMIT: 3913 case GUEST_CS_LIMIT: 3914 case GUEST_SS_LIMIT: 3915 case GUEST_DS_LIMIT: 3916 case GUEST_FS_LIMIT: 3917 case GUEST_GS_LIMIT: 3918 case GUEST_LDTR_LIMIT: 3919 case GUEST_TR_LIMIT: 3920 case GUEST_GDTR_LIMIT: 3921 case GUEST_IDTR_LIMIT: 3922 case GUEST_ES_AR_BYTES: 3923 case GUEST_DS_AR_BYTES: 3924 case GUEST_FS_AR_BYTES: 3925 case GUEST_GS_AR_BYTES: 3926 case GUEST_LDTR_AR_BYTES: 3927 case GUEST_TR_AR_BYTES: 3928 case GUEST_ES_BASE: 3929 case GUEST_CS_BASE: 3930 case GUEST_SS_BASE: 3931 case GUEST_DS_BASE: 3932 case GUEST_FS_BASE: 3933 case GUEST_GS_BASE: 3934 case GUEST_LDTR_BASE: 3935 case GUEST_TR_BASE: 3936 case GUEST_GDTR_BASE: 3937 case GUEST_IDTR_BASE: 3938 case GUEST_PENDING_DBG_EXCEPTIONS: 3939 case GUEST_BNDCFGS: 3940 return true; 3941 default: 3942 break; 3943 } 3944 3945 return false; 3946 } 3947 3948 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3949 struct vmcs12 *vmcs12) 3950 { 3951 struct vcpu_vmx *vmx = to_vmx(vcpu); 3952 3953 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 3954 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 3955 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 3956 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 3957 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 3958 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 3959 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 3960 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 3961 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 3962 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 3963 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 3964 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 3965 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 3966 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 3967 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 3968 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 3969 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 3970 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 3971 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 3972 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 3973 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 3974 
vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 3975 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 3976 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 3977 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 3978 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 3979 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 3980 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 3981 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 3982 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 3983 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 3984 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 3985 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 3986 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 3987 vmcs12->guest_pending_dbg_exceptions = 3988 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 3989 if (kvm_mpx_supported()) 3990 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3991 3992 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 3993 } 3994 3995 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3996 struct vmcs12 *vmcs12) 3997 { 3998 struct vcpu_vmx *vmx = to_vmx(vcpu); 3999 int cpu; 4000 4001 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4002 return; 4003 4004 4005 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4006 4007 cpu = get_cpu(); 4008 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4009 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4010 4011 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4012 4013 vmx->loaded_vmcs = &vmx->vmcs01; 4014 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4015 put_cpu(); 4016 } 4017 4018 /* 4019 * Update the guest state fields of vmcs12 to reflect changes that 4020 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4021 * VM-entry controls is also updated, since this is really a guest 4022 * state bit.) 4023 */ 4024 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4025 { 4026 struct vcpu_vmx *vmx = to_vmx(vcpu); 4027 4028 if (vmx->nested.hv_evmcs) 4029 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4030 4031 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; 4032 4033 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4034 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4035 4036 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4037 vmcs12->guest_rip = kvm_rip_read(vcpu); 4038 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4039 4040 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4041 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4042 4043 vmcs12->guest_interruptibility_info = 4044 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4045 4046 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4047 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4048 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4049 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4050 else 4051 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4052 4053 if (nested_cpu_has_preemption_timer(vmcs12) && 4054 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4055 !vmx->nested.nested_run_pending) 4056 vmcs12->vmx_preemption_timer_value = 4057 vmx_get_preemption_timer_value(vcpu); 4058 4059 /* 4060 * In some cases (usually, nested EPT), L2 is allowed to change its 4061 * own CR3 without exiting. If it has changed it, we must keep it. 4062 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4063 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 
4064 * 4065 * Additionally, restore L2's PDPTR to vmcs12. 4066 */ 4067 if (enable_ept) { 4068 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4069 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4070 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4071 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4072 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4073 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4074 } 4075 } 4076 4077 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4078 4079 if (nested_cpu_has_vid(vmcs12)) 4080 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4081 4082 vmcs12->vm_entry_controls = 4083 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4084 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4085 4086 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4087 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 4088 4089 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4090 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4091 } 4092 4093 /* 4094 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4095 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4096 * and this function updates it to reflect the changes to the guest state while 4097 * L2 was running (and perhaps made some exits which were handled directly by L0 4098 * without going back to L1), and to reflect the exit reason. 4099 * Note that we do not have to copy all VMCS fields here, just those that 4100 * could have been changed by the L2 guest or the exit - i.e., the guest-state and 4101 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4102 * which already writes to vmcs12 directly. 4103 */ 4104 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 4105 u32 vm_exit_reason, u32 exit_intr_info, 4106 unsigned long exit_qualification) 4107 { 4108 /* update exit information fields: */ 4109 vmcs12->vm_exit_reason = vm_exit_reason; 4110 vmcs12->exit_qualification = exit_qualification; 4111 vmcs12->vm_exit_intr_info = exit_intr_info; 4112 4113 vmcs12->idt_vectoring_info_field = 0; 4114 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4115 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4116 4117 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 4118 vmcs12->launch_state = 1; 4119 4120 /* vm_entry_intr_info_field is cleared on exit. Emulate this 4121 * instead of reading the real value. */ 4122 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 4123 4124 /* 4125 * Transfer the event that L0 or L1 may have wanted to inject into 4126 * L2 to IDT_VECTORING_INFO_FIELD. 4127 */ 4128 vmcs12_save_pending_event(vcpu, vmcs12); 4129 4130 /* 4131 * According to the spec, there's no need to store the guest's 4132 * MSRs if the exit is due to a VM-entry failure that occurs 4133 * during or after loading the guest state. Since this exit 4134 * does not fall in that category, we need to save the MSRs. 4135 */ 4136 if (nested_vmx_store_msr(vcpu, 4137 vmcs12->vm_exit_msr_store_addr, 4138 vmcs12->vm_exit_msr_store_count)) 4139 nested_vmx_abort(vcpu, 4140 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 4141 } 4142 4143 /* 4144 * Drop what we picked up for L2 via vmx_complete_interrupts. It is 4145 * preserved above and would only end up incorrectly in L1.
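* ("Preserved above" refers to vmcs12_save_pending_event(), which has already recorded any such event in vmcs12->idt_vectoring_info_field.)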
4146 */ 4147 vcpu->arch.nmi_injected = false; 4148 kvm_clear_exception_queue(vcpu); 4149 kvm_clear_interrupt_queue(vcpu); 4150 } 4151 4152 /* 4153 * Part of what we need to do when the nested L2 guest exits and we want to 4154 * run its L1 parent is to reset L1's guest state to the host state specified 4155 * in vmcs12. 4156 * This function is to be called not only on normal nested exit, but also on 4157 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 4158 * Failures During or After Loading Guest State"). 4159 * This function should be called when the active VMCS is L1's (vmcs01). 4160 */ 4161 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 4162 struct vmcs12 *vmcs12) 4163 { 4164 enum vm_entry_failure_code ignored; 4165 struct kvm_segment seg; 4166 4167 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 4168 vcpu->arch.efer = vmcs12->host_ia32_efer; 4169 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4170 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 4171 else 4172 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 4173 vmx_set_efer(vcpu, vcpu->arch.efer); 4174 4175 kvm_rsp_write(vcpu, vmcs12->host_rsp); 4176 kvm_rip_write(vcpu, vmcs12->host_rip); 4177 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 4178 vmx_set_interrupt_shadow(vcpu, 0); 4179 4180 /* 4181 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 4182 * actually changed, because vmx_set_cr0 refers to efer set above. 4183 * 4184 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4185 * (KVM doesn't change it). 4186 */ 4187 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4188 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4189 4190 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 4191 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4192 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4193 4194 nested_ept_uninit_mmu_context(vcpu); 4195 4196 /* 4197 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4198 * couldn't have changed. 4199 */ 4200 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored)) 4201 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4202 4203 if (!enable_ept) 4204 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; 4205 4206 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4207 4208 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4209 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4210 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4211 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4212 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4213 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4214 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4215 4216 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.
*/ 4217 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4218 vmcs_write64(GUEST_BNDCFGS, 0); 4219 4220 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4221 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4222 vcpu->arch.pat = vmcs12->host_ia32_pat; 4223 } 4224 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 4225 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4226 vmcs12->host_ia32_perf_global_ctrl)); 4227 4228 /* Set L1 segment info according to Intel SDM 4229 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4230 seg = (struct kvm_segment) { 4231 .base = 0, 4232 .limit = 0xFFFFFFFF, 4233 .selector = vmcs12->host_cs_selector, 4234 .type = 11, 4235 .present = 1, 4236 .s = 1, 4237 .g = 1 4238 }; 4239 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4240 seg.l = 1; 4241 else 4242 seg.db = 1; 4243 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4244 seg = (struct kvm_segment) { 4245 .base = 0, 4246 .limit = 0xFFFFFFFF, 4247 .type = 3, 4248 .present = 1, 4249 .s = 1, 4250 .db = 1, 4251 .g = 1 4252 }; 4253 seg.selector = vmcs12->host_ds_selector; 4254 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4255 seg.selector = vmcs12->host_es_selector; 4256 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4257 seg.selector = vmcs12->host_ss_selector; 4258 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4259 seg.selector = vmcs12->host_fs_selector; 4260 seg.base = vmcs12->host_fs_base; 4261 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4262 seg.selector = vmcs12->host_gs_selector; 4263 seg.base = vmcs12->host_gs_base; 4264 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4265 seg = (struct kvm_segment) { 4266 .base = vmcs12->host_tr_base, 4267 .limit = 0x67, 4268 .selector = vmcs12->host_tr_selector, 4269 .type = 11, 4270 .present = 1 4271 }; 4272 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4273 4274 kvm_set_dr(vcpu, 7, 0x400); 4275 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4276 4277 if (cpu_has_vmx_msr_bitmap()) 4278 vmx_update_msr_bitmap(vcpu); 4279 4280 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4281 vmcs12->vm_exit_msr_load_count)) 4282 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4283 } 4284 4285 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4286 { 4287 struct vmx_uret_msr *efer_msr; 4288 unsigned int i; 4289 4290 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4291 return vmcs_read64(GUEST_IA32_EFER); 4292 4293 if (cpu_has_load_ia32_efer()) 4294 return host_efer; 4295 4296 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4297 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4298 return vmx->msr_autoload.guest.val[i].value; 4299 } 4300 4301 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4302 if (efer_msr) 4303 return efer_msr->data; 4304 4305 return host_efer; 4306 } 4307 4308 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4309 { 4310 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4311 struct vcpu_vmx *vmx = to_vmx(vcpu); 4312 struct vmx_msr_entry g, h; 4313 gpa_t gpa; 4314 u32 i, j; 4315 4316 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4317 4318 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4319 /* 4320 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4321 * as vmcs01.GUEST_DR7 contains a userspace defined value 4322 * and vcpu->arch.dr7 is not squirreled away before the 4323 * nested VMENTER (not worth adding a variable in nested_vmx). 
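* Falling back to DR7_FIXED_1 (all breakpoints disabled) below is the conservative choice in that case; the userspace debugger that set KVM_GUESTDBG_USE_HW_BP is expected to reprogram DR7 as needed.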
4324 */ 4325 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4326 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4327 else 4328 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4329 } 4330 4331 /* 4332 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4333 * handle a variety of side effects to KVM's software model. 4334 */ 4335 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4336 4337 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4338 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4339 4340 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4341 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4342 4343 nested_ept_uninit_mmu_context(vcpu); 4344 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4345 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4346 4347 /* 4348 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4349 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4350 * VMFail, like everything else we just need to ensure our 4351 * software model is up-to-date. 4352 */ 4353 if (enable_ept && is_pae_paging(vcpu)) 4354 ept_save_pdptrs(vcpu); 4355 4356 kvm_mmu_reset_context(vcpu); 4357 4358 if (cpu_has_vmx_msr_bitmap()) 4359 vmx_update_msr_bitmap(vcpu); 4360 4361 /* 4362 * This nasty bit of open coding is a compromise between blindly 4363 * loading L1's MSRs using the exit load lists (incorrect emulation 4364 * of VMFail), leaving the nested VM's MSRs in the software model 4365 * (incorrect behavior) and snapshotting the modified MSRs (too 4366 * expensive since the lists are unbound by hardware). For each 4367 * MSR that was (prematurely) loaded from the nested VMEntry load 4368 * list, reload it from the exit load list if it exists and differs 4369 * from the guest value. The intent is to stuff host state as 4370 * silently as possible, not to fully process the exit load list. 4371 */ 4372 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4373 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4374 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4375 pr_debug_ratelimited( 4376 "%s read MSR index failed (%u, 0x%08llx)\n", 4377 __func__, i, gpa); 4378 goto vmabort; 4379 } 4380 4381 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4382 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4383 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4384 pr_debug_ratelimited( 4385 "%s read MSR failed (%u, 0x%08llx)\n", 4386 __func__, j, gpa); 4387 goto vmabort; 4388 } 4389 if (h.index != g.index) 4390 continue; 4391 if (h.value == g.value) 4392 break; 4393 4394 if (nested_vmx_load_msr_check(vcpu, &h)) { 4395 pr_debug_ratelimited( 4396 "%s check failed (%u, 0x%x, 0x%x)\n", 4397 __func__, j, h.index, h.reserved); 4398 goto vmabort; 4399 } 4400 4401 if (kvm_set_msr(vcpu, h.index, h.value)) { 4402 pr_debug_ratelimited( 4403 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4404 __func__, j, h.index, h.value); 4405 goto vmabort; 4406 } 4407 } 4408 } 4409 4410 return; 4411 4412 vmabort: 4413 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4414 } 4415 4416 /* 4417 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4418 * and modify vmcs12 to make it see what it would expect to see there if 4419 * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) 4420 */ 4421 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4422 u32 exit_intr_info, unsigned long exit_qualification) 4423 { 4424 struct vcpu_vmx *vmx = to_vmx(vcpu); 4425 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4426 4427 /* trying to cancel vmlaunch/vmresume is a bug */ 4428 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4429 4430 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 4431 4432 /* Service the TLB flush request for L2 before switching to L1. */ 4433 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 4434 kvm_vcpu_flush_tlb_current(vcpu); 4435 4436 /* 4437 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4438 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4439 * up-to-date before switching to L1. 4440 */ 4441 if (enable_ept && is_pae_paging(vcpu)) 4442 vmx_ept_load_pdptrs(vcpu); 4443 4444 leave_guest_mode(vcpu); 4445 4446 if (nested_cpu_has_preemption_timer(vmcs12)) 4447 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4448 4449 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 4450 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 4451 4452 if (likely(!vmx->fail)) { 4453 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4454 4455 if (vm_exit_reason != -1) 4456 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4457 exit_intr_info, exit_qualification); 4458 4459 /* 4460 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4461 * also be used to capture vmcs12 cache as part of 4462 * capturing nVMX state for snapshot (migration). 4463 * 4464 * Otherwise, this flush will dirty guest memory at a 4465 * point it is already assumed by user-space to be 4466 * immutable. 4467 */ 4468 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4469 } else { 4470 /* 4471 * The only expected VM-instruction error is "VM entry with 4472 * invalid control field(s)." Anything else indicates a 4473 * problem with L0. And we should never get here with a 4474 * VMFail of any type if early consistency checks are enabled. 
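* Hence the two WARNs below: one if the instruction error is anything else, and one if nested_early_check was enabled and should have prevented reaching this point at all.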
4475 */ 4476 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4477 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4478 WARN_ON_ONCE(nested_early_check); 4479 } 4480 4481 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4482 4483 /* Update any VMCS fields that might have changed while L2 ran */ 4484 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4485 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4486 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4487 if (vmx->nested.l1_tpr_threshold != -1) 4488 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4489 4490 if (kvm_has_tsc_control) 4491 decache_tsc_multiplier(vmx); 4492 4493 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4494 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4495 vmx_set_virtual_apic_mode(vcpu); 4496 } 4497 4498 /* Unpin physical memory we referred to in vmcs02 */ 4499 if (vmx->nested.apic_access_page) { 4500 kvm_release_page_clean(vmx->nested.apic_access_page); 4501 vmx->nested.apic_access_page = NULL; 4502 } 4503 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4504 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4505 vmx->nested.pi_desc = NULL; 4506 4507 if (vmx->nested.reload_vmcs01_apic_access_page) { 4508 vmx->nested.reload_vmcs01_apic_access_page = false; 4509 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4510 } 4511 4512 if ((vm_exit_reason != -1) && 4513 (enable_shadow_vmcs || vmx->nested.hv_evmcs)) 4514 vmx->nested.need_vmcs12_to_shadow_sync = true; 4515 4516 /* in case we halted in L2 */ 4517 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4518 4519 if (likely(!vmx->fail)) { 4520 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4521 nested_exit_intr_ack_set(vcpu)) { 4522 int irq = kvm_cpu_get_interrupt(vcpu); 4523 WARN_ON(irq < 0); 4524 vmcs12->vm_exit_intr_info = irq | 4525 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4526 } 4527 4528 if (vm_exit_reason != -1) 4529 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4530 vmcs12->exit_qualification, 4531 vmcs12->idt_vectoring_info_field, 4532 vmcs12->vm_exit_intr_info, 4533 vmcs12->vm_exit_intr_error_code, 4534 KVM_ISA_VMX); 4535 4536 load_vmcs12_host_state(vcpu, vmcs12); 4537 4538 return; 4539 } 4540 4541 /* 4542 * After an early L2 VM-entry failure, we're now back 4543 * in L1 which thinks it just finished a VMLAUNCH or 4544 * VMRESUME instruction, so we need to set the failure 4545 * flag and the VM-instruction error field of the VMCS 4546 * accordingly, and skip the emulated instruction. 4547 */ 4548 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4549 4550 /* 4551 * Restore L1's host state to KVM's software model. We're here 4552 * because a consistency check was caught by hardware, which 4553 * means some amount of guest state has been propagated to KVM's 4554 * model and needs to be unwound to the host's state. 4555 */ 4556 nested_vmx_restore_host_state(vcpu); 4557 4558 vmx->fail = 0; 4559 } 4560 4561 /* 4562 * Decode the memory-address operand of a vmx instruction, as recorded on an 4563 * exit caused by such an instruction (run by a guest hypervisor). 4564 * On success, returns 0. When the operand is invalid, returns 1 and throws 4565 * #UD, #GP, or #SS. 4566 */ 4567 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4568 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4569 { 4570 gva_t off; 4571 bool exn; 4572 struct kvm_segment s; 4573 4574 /* 4575 * According to Vol. 
3B, "Information for VM Exits Due to Instruction 4576 * Execution", on an exit, vmx_instruction_info holds most of the 4577 * addressing components of the operand. Only the displacement part 4578 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4579 * For how an actual address is calculated from all these components, 4580 * refer to Vol. 1, "Operand Addressing". 4581 */ 4582 int scaling = vmx_instruction_info & 3; 4583 int addr_size = (vmx_instruction_info >> 7) & 7; 4584 bool is_reg = vmx_instruction_info & (1u << 10); 4585 int seg_reg = (vmx_instruction_info >> 15) & 7; 4586 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4587 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4588 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4589 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4590 4591 if (is_reg) { 4592 kvm_queue_exception(vcpu, UD_VECTOR); 4593 return 1; 4594 } 4595 4596 /* Addr = segment_base + offset */ 4597 /* offset = base + [index * scale] + displacement */ 4598 off = exit_qualification; /* holds the displacement */ 4599 if (addr_size == 1) 4600 off = (gva_t)sign_extend64(off, 31); 4601 else if (addr_size == 0) 4602 off = (gva_t)sign_extend64(off, 15); 4603 if (base_is_valid) 4604 off += kvm_register_read(vcpu, base_reg); 4605 if (index_is_valid) 4606 off += kvm_register_read(vcpu, index_reg) << scaling; 4607 vmx_get_segment(vcpu, &s, seg_reg); 4608 4609 /* 4610 * The effective address, i.e. @off, of a memory operand is truncated 4611 * based on the address size of the instruction. Note that this is 4612 * the *effective address*, i.e. the address prior to accounting for 4613 * the segment's base. 4614 */ 4615 if (addr_size == 1) /* 32 bit */ 4616 off &= 0xffffffff; 4617 else if (addr_size == 0) /* 16 bit */ 4618 off &= 0xffff; 4619 4620 /* Checks for #GP/#SS exceptions. */ 4621 exn = false; 4622 if (is_long_mode(vcpu)) { 4623 /* 4624 * The virtual/linear address is never truncated in 64-bit 4625 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4626 * address when using FS/GS with a non-zero base. 4627 */ 4628 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4629 *ret = s.base + off; 4630 else 4631 *ret = off; 4632 4633 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4634 * non-canonical form. This is the only check on the memory 4635 * destination for long mode! 4636 */ 4637 exn = is_noncanonical_address(*ret, vcpu); 4638 } else { 4639 /* 4640 * When not in long mode, the virtual/linear address is 4641 * unconditionally truncated to 32 bits regardless of the 4642 * address size. 4643 */ 4644 *ret = (s.base + off) & 0xffffffff; 4645 4646 /* Protected mode: apply checks for segment validity in the 4647 * following order: 4648 * - segment type check (#GP(0) may be thrown) 4649 * - usability check (#GP(0)/#SS(0)) 4650 * - limit check (#GP(0)/#SS(0)) 4651 */ 4652 if (wr) 4653 /* #GP(0) if the destination operand is located in a 4654 * read-only data segment or any code segment. 4655 */ 4656 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4657 else 4658 /* #GP(0) if the source operand is located in an 4659 * execute-only code segment 4660 */ 4661 exn = ((s.type & 0xa) == 8); 4662 if (exn) { 4663 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4664 return 1; 4665 } 4666 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4667 */ 4668 exn = (s.unusable != 0); 4669 4670 /* 4671 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4672 * outside the segment limit. 
All CPUs that support VMX ignore 4673 * limit checks for flat segments, i.e. segments with base==0, 4674 * limit==0xffffffff and of type expand-up data or code. 4675 */ 4676 if (!(s.base == 0 && s.limit == 0xffffffff && 4677 ((s.type & 8) || !(s.type & 4)))) 4678 exn = exn || ((u64)off + len - 1 > s.limit); 4679 } 4680 if (exn) { 4681 kvm_queue_exception_e(vcpu, 4682 seg_reg == VCPU_SREG_SS ? 4683 SS_VECTOR : GP_VECTOR, 4684 0); 4685 return 1; 4686 } 4687 4688 return 0; 4689 } 4690 4691 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 4692 { 4693 struct vcpu_vmx *vmx; 4694 4695 if (!nested_vmx_allowed(vcpu)) 4696 return; 4697 4698 vmx = to_vmx(vcpu); 4699 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { 4700 vmx->nested.msrs.entry_ctls_high |= 4701 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4702 vmx->nested.msrs.exit_ctls_high |= 4703 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4704 } else { 4705 vmx->nested.msrs.entry_ctls_high &= 4706 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4707 vmx->nested.msrs.exit_ctls_high &= 4708 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4709 } 4710 } 4711 4712 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 4713 int *ret) 4714 { 4715 gva_t gva; 4716 struct x86_exception e; 4717 int r; 4718 4719 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 4720 vmcs_read32(VMX_INSTRUCTION_INFO), false, 4721 sizeof(*vmpointer), &gva)) { 4722 *ret = 1; 4723 return -EINVAL; 4724 } 4725 4726 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 4727 if (r != X86EMUL_CONTINUE) { 4728 *ret = kvm_handle_memory_failure(vcpu, r, &e); 4729 return -EINVAL; 4730 } 4731 4732 return 0; 4733 } 4734 4735 /* 4736 * Allocate a shadow VMCS and associate it with the currently loaded 4737 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4738 * VMCS is also VMCLEARed, so that it is ready for use. 4739 */ 4740 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4741 { 4742 struct vcpu_vmx *vmx = to_vmx(vcpu); 4743 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4744 4745 /* 4746 * We should allocate a shadow vmcs for vmcs01 only when L1 4747 * executes VMXON and free it when L1 executes VMXOFF. 4748 * As it is invalid to execute VMXON twice, we shouldn't reach 4749 * here when vmcs01 already has an allocated shadow vmcs.
4750 */ 4751 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4752 4753 if (!loaded_vmcs->shadow_vmcs) { 4754 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4755 if (loaded_vmcs->shadow_vmcs) 4756 vmcs_clear(loaded_vmcs->shadow_vmcs); 4757 } 4758 return loaded_vmcs->shadow_vmcs; 4759 } 4760 4761 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4762 { 4763 struct vcpu_vmx *vmx = to_vmx(vcpu); 4764 int r; 4765 4766 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4767 if (r < 0) 4768 goto out_vmcs02; 4769 4770 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4771 if (!vmx->nested.cached_vmcs12) 4772 goto out_cached_vmcs12; 4773 4774 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4775 if (!vmx->nested.cached_shadow_vmcs12) 4776 goto out_cached_shadow_vmcs12; 4777 4778 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4779 goto out_shadow_vmcs; 4780 4781 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4782 HRTIMER_MODE_ABS_PINNED); 4783 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4784 4785 vmx->nested.vpid02 = allocate_vpid(); 4786 4787 vmx->nested.vmcs02_initialized = false; 4788 vmx->nested.vmxon = true; 4789 4790 if (vmx_pt_mode_is_host_guest()) { 4791 vmx->pt_desc.guest.ctl = 0; 4792 pt_update_intercept_for_msr(vcpu); 4793 } 4794 4795 return 0; 4796 4797 out_shadow_vmcs: 4798 kfree(vmx->nested.cached_shadow_vmcs12); 4799 4800 out_cached_shadow_vmcs12: 4801 kfree(vmx->nested.cached_vmcs12); 4802 4803 out_cached_vmcs12: 4804 free_loaded_vmcs(&vmx->nested.vmcs02); 4805 4806 out_vmcs02: 4807 return -ENOMEM; 4808 } 4809 4810 /* 4811 * Emulate the VMXON instruction. 4812 * Currently, we just remember that VMX is active, and do not save or even 4813 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4814 * do not currently need to store anything in that guest-allocated memory 4815 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their 4816 * argument is different from the VMXON pointer (which the spec says they do). 4817 */ 4818 static int handle_vmon(struct kvm_vcpu *vcpu) 4819 { 4820 int ret; 4821 gpa_t vmptr; 4822 uint32_t revision; 4823 struct vcpu_vmx *vmx = to_vmx(vcpu); 4824 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 4825 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 4826 4827 /* 4828 * The Intel VMX Instruction Reference lists a bunch of bits that are 4829 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4830 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this). 4831 * Otherwise, we should fail with #UD. But most faulting conditions 4832 * have already been checked by hardware, prior to the VM-exit for 4833 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4834 * that bit set to 1 in non-root mode. 4835 */ 4836 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4837 kvm_queue_exception(vcpu, UD_VECTOR); 4838 return 1; 4839 } 4840 4841 /* CPL=0 must be checked manually.
*/ 4842 if (vmx_get_cpl(vcpu)) { 4843 kvm_inject_gp(vcpu, 0); 4844 return 1; 4845 } 4846 4847 if (vmx->nested.vmxon) 4848 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4849 4850 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4851 != VMXON_NEEDED_FEATURES) { 4852 kvm_inject_gp(vcpu, 0); 4853 return 1; 4854 } 4855 4856 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 4857 return ret; 4858 4859 /* 4860 * SDM 3: 24.11.5 4861 * The first 4 bytes of VMXON region contain the supported 4862 * VMCS revision identifier 4863 * 4864 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case, 4865 * i.e. the bit that would limit the physical address width to 32 bits 4866 */ 4867 if (!page_address_valid(vcpu, vmptr)) 4868 return nested_vmx_failInvalid(vcpu); 4869 4870 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 4871 revision != VMCS12_REVISION) 4872 return nested_vmx_failInvalid(vcpu); 4873 4874 vmx->nested.vmxon_ptr = vmptr; 4875 ret = enter_vmx_operation(vcpu); 4876 if (ret) 4877 return ret; 4878 4879 return nested_vmx_succeed(vcpu); 4880 } 4881 4882 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4883 { 4884 struct vcpu_vmx *vmx = to_vmx(vcpu); 4885 4886 if (vmx->nested.current_vmptr == -1ull) 4887 return; 4888 4889 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 4890 4891 if (enable_shadow_vmcs) { 4892 /* copy to memory all shadowed fields in case 4893 they were modified */ 4894 copy_shadow_to_vmcs12(vmx); 4895 vmx_disable_shadow_vmcs(vmx); 4896 } 4897 vmx->nested.posted_intr_nv = -1; 4898 4899 /* Flush VMCS12 to guest memory */ 4900 kvm_vcpu_write_guest_page(vcpu, 4901 vmx->nested.current_vmptr >> PAGE_SHIFT, 4902 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4903 4904 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4905 4906 vmx->nested.current_vmptr = -1ull; 4907 } 4908 4909 /* Emulate the VMXOFF instruction */ 4910 static int handle_vmoff(struct kvm_vcpu *vcpu) 4911 { 4912 if (!nested_vmx_check_permission(vcpu)) 4913 return 1; 4914 4915 free_nested(vcpu); 4916 4917 /* Process a latched INIT during time CPU was in VMX operation */ 4918 kvm_make_request(KVM_REQ_EVENT, vcpu); 4919 4920 return nested_vmx_succeed(vcpu); 4921 } 4922 4923 /* Emulate the VMCLEAR instruction */ 4924 static int handle_vmclear(struct kvm_vcpu *vcpu) 4925 { 4926 struct vcpu_vmx *vmx = to_vmx(vcpu); 4927 u32 zero = 0; 4928 gpa_t vmptr; 4929 u64 evmcs_gpa; 4930 int r; 4931 4932 if (!nested_vmx_check_permission(vcpu)) 4933 return 1; 4934 4935 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 4936 return r; 4937 4938 if (!page_address_valid(vcpu, vmptr)) 4939 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 4940 4941 if (vmptr == vmx->nested.vmxon_ptr) 4942 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 4943 4944 /* 4945 * When Enlightened VMEntry is enabled on the calling CPU we treat the 4946 * memory area pointed to by vmptr as Enlightened VMCS (as there's no good 4947 * way to distinguish it from VMCS12) and we must not corrupt it by 4948 * writing to the non-existent 'launch_state' field. The area doesn't 4949 * have to be the currently active EVMCS on the calling CPU and there's 4950 * nothing KVM has to do to transition it from 'active' to 'non-active' 4951 * state. It is possible that the area will stay mapped as 4952 * vmx->nested.hv_evmcs but this shouldn't be a problem.
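* In short, when an enlightened VMCS is in use the emulated VMCLEAR below is skipped entirely; only a regular vmcs12 gets its launch_state zeroed in guest memory.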
4953 */ 4954 if (likely(!vmx->nested.enlightened_vmcs_enabled || 4955 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { 4956 if (vmptr == vmx->nested.current_vmptr) 4957 nested_release_vmcs12(vcpu); 4958 4959 kvm_vcpu_write_guest(vcpu, 4960 vmptr + offsetof(struct vmcs12, 4961 launch_state), 4962 &zero, sizeof(zero)); 4963 } 4964 4965 return nested_vmx_succeed(vcpu); 4966 } 4967 4968 /* Emulate the VMLAUNCH instruction */ 4969 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 4970 { 4971 return nested_vmx_run(vcpu, true); 4972 } 4973 4974 /* Emulate the VMRESUME instruction */ 4975 static int handle_vmresume(struct kvm_vcpu *vcpu) 4976 { 4977 4978 return nested_vmx_run(vcpu, false); 4979 } 4980 4981 static int handle_vmread(struct kvm_vcpu *vcpu) 4982 { 4983 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 4984 : get_vmcs12(vcpu); 4985 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 4986 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4987 struct vcpu_vmx *vmx = to_vmx(vcpu); 4988 struct x86_exception e; 4989 unsigned long field; 4990 u64 value; 4991 gva_t gva = 0; 4992 short offset; 4993 int len, r; 4994 4995 if (!nested_vmx_check_permission(vcpu)) 4996 return 1; 4997 4998 /* 4999 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5000 * any VMREAD sets the ALU flags for VMfailInvalid. 5001 */ 5002 if (vmx->nested.current_vmptr == -1ull || 5003 (is_guest_mode(vcpu) && 5004 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5005 return nested_vmx_failInvalid(vcpu); 5006 5007 /* Decode instruction info and find the field to read */ 5008 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 5009 5010 offset = vmcs_field_to_offset(field); 5011 if (offset < 0) 5012 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5013 5014 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5015 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5016 5017 /* Read the field, zero-extended to a u64 value */ 5018 value = vmcs12_read_any(vmcs12, field, offset); 5019 5020 /* 5021 * Now copy part of this value to register or memory, as requested. 5022 * Note that the number of bits actually copied is 32 or 64 depending 5023 * on the guest's mode (32 or 64 bit), not on the given field's length. 5024 */ 5025 if (instr_info & BIT(10)) { 5026 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); 5027 } else { 5028 len = is_64_bit_mode(vcpu) ? 8 : 4; 5029 if (get_vmx_mem_address(vcpu, exit_qualification, 5030 instr_info, true, len, &gva)) 5031 return 1; 5032 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5033 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5034 if (r != X86EMUL_CONTINUE) 5035 return kvm_handle_memory_failure(vcpu, r, &e); 5036 } 5037 5038 return nested_vmx_succeed(vcpu); 5039 } 5040 5041 static bool is_shadow_field_rw(unsigned long field) 5042 { 5043 switch (field) { 5044 #define SHADOW_FIELD_RW(x, y) case x: 5045 #include "vmcs_shadow_fields.h" 5046 return true; 5047 default: 5048 break; 5049 } 5050 return false; 5051 } 5052 5053 static bool is_shadow_field_ro(unsigned long field) 5054 { 5055 switch (field) { 5056 #define SHADOW_FIELD_RO(x, y) case x: 5057 #include "vmcs_shadow_fields.h" 5058 return true; 5059 default: 5060 break; 5061 } 5062 return false; 5063 } 5064 5065 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5066 { 5067 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? 
get_shadow_vmcs12(vcpu) 5068 : get_vmcs12(vcpu); 5069 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5070 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5071 struct vcpu_vmx *vmx = to_vmx(vcpu); 5072 struct x86_exception e; 5073 unsigned long field; 5074 short offset; 5075 gva_t gva; 5076 int len, r; 5077 5078 /* 5079 * The value to write might be 32 or 64 bits, depending on L1's long 5080 * mode, and eventually we need to write that into a field of several 5081 * possible lengths. The code below first zero-extends the value to 64 5082 * bit (value), and then copies only the appropriate number of 5083 * bits into the vmcs12 field. 5084 */ 5085 u64 value = 0; 5086 5087 if (!nested_vmx_check_permission(vcpu)) 5088 return 1; 5089 5090 /* 5091 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5092 * any VMWRITE sets the ALU flags for VMfailInvalid. 5093 */ 5094 if (vmx->nested.current_vmptr == -1ull || 5095 (is_guest_mode(vcpu) && 5096 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5097 return nested_vmx_failInvalid(vcpu); 5098 5099 if (instr_info & BIT(10)) 5100 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); 5101 else { 5102 len = is_64_bit_mode(vcpu) ? 8 : 4; 5103 if (get_vmx_mem_address(vcpu, exit_qualification, 5104 instr_info, false, len, &gva)) 5105 return 1; 5106 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5107 if (r != X86EMUL_CONTINUE) 5108 return kvm_handle_memory_failure(vcpu, r, &e); 5109 } 5110 5111 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 5112 5113 offset = vmcs_field_to_offset(field); 5114 if (offset < 0) 5115 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5116 5117 /* 5118 * If the vCPU supports "VMWRITE to any supported field in the 5119 * VMCS," then the "read-only" fields are actually read/write. 5120 */ 5121 if (vmcs_field_readonly(field) && 5122 !nested_cpu_has_vmwrite_any_field(vcpu)) 5123 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5124 5125 /* 5126 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5127 * vmcs12, else we may crush a field or consume a stale value. 5128 */ 5129 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5130 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5131 5132 /* 5133 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5134 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5135 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE 5136 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5137 * from L1 will return a different value than VMREAD from L2 (L1 sees 5138 * the stripped down value, L2 sees the full value as stored by KVM). 5139 */ 5140 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5141 value &= 0x1f0ff; 5142 5143 vmcs12_write_any(vmcs12, field, offset, value); 5144 5145 /* 5146 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5147 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5148 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5149 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5150 */ 5151 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5152 /* 5153 * L1 can read these fields without exiting, ensure the 5154 * shadow VMCS is up-to-date. 
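		 *
		 * The write-through below briefly loads vmcs01's shadow VMCS
		 * on this CPU, updates the field with __vmcs_writel() and then
		 * restores the previously loaded VMCS, all with preemption
		 * disabled so the current-VMCS pointer cannot change under us.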
5155 */ 5156 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5157 preempt_disable(); 5158 vmcs_load(vmx->vmcs01.shadow_vmcs); 5159 5160 __vmcs_writel(field, value); 5161 5162 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5163 vmcs_load(vmx->loaded_vmcs->vmcs); 5164 preempt_enable(); 5165 } 5166 vmx->nested.dirty_vmcs12 = true; 5167 } 5168 5169 return nested_vmx_succeed(vcpu); 5170 } 5171 5172 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5173 { 5174 vmx->nested.current_vmptr = vmptr; 5175 if (enable_shadow_vmcs) { 5176 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5177 vmcs_write64(VMCS_LINK_POINTER, 5178 __pa(vmx->vmcs01.shadow_vmcs)); 5179 vmx->nested.need_vmcs12_to_shadow_sync = true; 5180 } 5181 vmx->nested.dirty_vmcs12 = true; 5182 } 5183 5184 /* Emulate the VMPTRLD instruction */ 5185 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5186 { 5187 struct vcpu_vmx *vmx = to_vmx(vcpu); 5188 gpa_t vmptr; 5189 int r; 5190 5191 if (!nested_vmx_check_permission(vcpu)) 5192 return 1; 5193 5194 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5195 return r; 5196 5197 if (!page_address_valid(vcpu, vmptr)) 5198 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5199 5200 if (vmptr == vmx->nested.vmxon_ptr) 5201 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5202 5203 /* Forbid normal VMPTRLD if Enlightened version was used */ 5204 if (vmx->nested.hv_evmcs) 5205 return 1; 5206 5207 if (vmx->nested.current_vmptr != vmptr) { 5208 struct kvm_host_map map; 5209 struct vmcs12 *new_vmcs12; 5210 5211 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { 5212 /* 5213 * Reads from an unbacked page return all 1s, 5214 * which means that the 32 bits located at the 5215 * given physical address won't match the required 5216 * VMCS12_REVISION identifier. 5217 */ 5218 return nested_vmx_fail(vcpu, 5219 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5220 } 5221 5222 new_vmcs12 = map.hva; 5223 5224 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 5225 (new_vmcs12->hdr.shadow_vmcs && 5226 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5227 kvm_vcpu_unmap(vcpu, &map, false); 5228 return nested_vmx_fail(vcpu, 5229 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5230 } 5231 5232 nested_release_vmcs12(vcpu); 5233 5234 /* 5235 * Load VMCS12 from guest memory since it is not already 5236 * cached. 
5237 */ 5238 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); 5239 kvm_vcpu_unmap(vcpu, &map, false); 5240 5241 set_current_vmptr(vmx, vmptr); 5242 } 5243 5244 return nested_vmx_succeed(vcpu); 5245 } 5246 5247 /* Emulate the VMPTRST instruction */ 5248 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5249 { 5250 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5251 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5252 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5253 struct x86_exception e; 5254 gva_t gva; 5255 int r; 5256 5257 if (!nested_vmx_check_permission(vcpu)) 5258 return 1; 5259 5260 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) 5261 return 1; 5262 5263 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5264 true, sizeof(gpa_t), &gva)) 5265 return 1; 5266 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5267 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 5268 sizeof(gpa_t), &e); 5269 if (r != X86EMUL_CONTINUE) 5270 return kvm_handle_memory_failure(vcpu, r, &e); 5271 5272 return nested_vmx_succeed(vcpu); 5273 } 5274 5275 #define EPTP_PA_MASK GENMASK_ULL(51, 12) 5276 5277 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) 5278 { 5279 return VALID_PAGE(root_hpa) && 5280 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); 5281 } 5282 5283 /* Emulate the INVEPT instruction */ 5284 static int handle_invept(struct kvm_vcpu *vcpu) 5285 { 5286 struct vcpu_vmx *vmx = to_vmx(vcpu); 5287 u32 vmx_instruction_info, types; 5288 unsigned long type, roots_to_free; 5289 struct kvm_mmu *mmu; 5290 gva_t gva; 5291 struct x86_exception e; 5292 struct { 5293 u64 eptp, gpa; 5294 } operand; 5295 int i, r; 5296 5297 if (!(vmx->nested.msrs.secondary_ctls_high & 5298 SECONDARY_EXEC_ENABLE_EPT) || 5299 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5300 kvm_queue_exception(vcpu, UD_VECTOR); 5301 return 1; 5302 } 5303 5304 if (!nested_vmx_check_permission(vcpu)) 5305 return 1; 5306 5307 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5308 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5309 5310 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5311 5312 if (type >= 32 || !(types & (1 << type))) 5313 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5314 5315 /* According to the Intel VMX instruction reference, the memory 5316 * operand is read even if it isn't needed (e.g., for type==global) 5317 */ 5318 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5319 vmx_instruction_info, false, sizeof(operand), &gva)) 5320 return 1; 5321 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5322 if (r != X86EMUL_CONTINUE) 5323 return kvm_handle_memory_failure(vcpu, r, &e); 5324 5325 /* 5326 * Nested EPT roots are always held through guest_mmu, 5327 * not root_mmu. 
5328 */ 5329 mmu = &vcpu->arch.guest_mmu; 5330 5331 switch (type) { 5332 case VMX_EPT_EXTENT_CONTEXT: 5333 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5334 return nested_vmx_fail(vcpu, 5335 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5336 5337 roots_to_free = 0; 5338 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd, 5339 operand.eptp)) 5340 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5341 5342 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5343 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5344 mmu->prev_roots[i].pgd, 5345 operand.eptp)) 5346 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5347 } 5348 break; 5349 case VMX_EPT_EXTENT_GLOBAL: 5350 roots_to_free = KVM_MMU_ROOTS_ALL; 5351 break; 5352 default: 5353 BUG(); 5354 break; 5355 } 5356 5357 if (roots_to_free) 5358 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); 5359 5360 return nested_vmx_succeed(vcpu); 5361 } 5362 5363 static int handle_invvpid(struct kvm_vcpu *vcpu) 5364 { 5365 struct vcpu_vmx *vmx = to_vmx(vcpu); 5366 u32 vmx_instruction_info; 5367 unsigned long type, types; 5368 gva_t gva; 5369 struct x86_exception e; 5370 struct { 5371 u64 vpid; 5372 u64 gla; 5373 } operand; 5374 u16 vpid02; 5375 int r; 5376 5377 if (!(vmx->nested.msrs.secondary_ctls_high & 5378 SECONDARY_EXEC_ENABLE_VPID) || 5379 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5380 kvm_queue_exception(vcpu, UD_VECTOR); 5381 return 1; 5382 } 5383 5384 if (!nested_vmx_check_permission(vcpu)) 5385 return 1; 5386 5387 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5388 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5389 5390 types = (vmx->nested.msrs.vpid_caps & 5391 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5392 5393 if (type >= 32 || !(types & (1 << type))) 5394 return nested_vmx_fail(vcpu, 5395 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5396 5397 /* according to the intel vmx instruction reference, the memory 5398 * operand is read even if it isn't needed (e.g., for type==global) 5399 */ 5400 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5401 vmx_instruction_info, false, sizeof(operand), &gva)) 5402 return 1; 5403 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5404 if (r != X86EMUL_CONTINUE) 5405 return kvm_handle_memory_failure(vcpu, r, &e); 5406 5407 if (operand.vpid >> 16) 5408 return nested_vmx_fail(vcpu, 5409 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5410 5411 vpid02 = nested_get_vpid02(vcpu); 5412 switch (type) { 5413 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5414 if (!operand.vpid || 5415 is_noncanonical_address(operand.gla, vcpu)) 5416 return nested_vmx_fail(vcpu, 5417 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5418 vpid_sync_vcpu_addr(vpid02, operand.gla); 5419 break; 5420 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5421 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5422 if (!operand.vpid) 5423 return nested_vmx_fail(vcpu, 5424 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5425 vpid_sync_context(vpid02); 5426 break; 5427 case VMX_VPID_EXTENT_ALL_CONTEXT: 5428 vpid_sync_context(vpid02); 5429 break; 5430 default: 5431 WARN_ON_ONCE(1); 5432 return kvm_skip_emulated_instruction(vcpu); 5433 } 5434 5435 /* 5436 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5437 * linear mappings for L2 (tagged with L2's VPID). Free all roots as 5438 * VPIDs are not tracked in the MMU role. 5439 * 5440 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5441 * an MMU when EPT is disabled. 5442 * 5443 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 
5444 */ 5445 if (!enable_ept) 5446 kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, 5447 KVM_MMU_ROOTS_ALL); 5448 5449 return nested_vmx_succeed(vcpu); 5450 } 5451 5452 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5453 struct vmcs12 *vmcs12) 5454 { 5455 u32 index = kvm_rcx_read(vcpu); 5456 u64 new_eptp; 5457 bool accessed_dirty; 5458 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 5459 5460 if (!nested_cpu_has_eptp_switching(vmcs12) || 5461 !nested_cpu_has_ept(vmcs12)) 5462 return 1; 5463 5464 if (index >= VMFUNC_EPTP_ENTRIES) 5465 return 1; 5466 5467 5468 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5469 &new_eptp, index * 8, 8)) 5470 return 1; 5471 5472 accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT); 5473 5474 /* 5475 * If the (L2) guest does a vmfunc to the currently 5476 * active ept pointer, we don't have to do anything else 5477 */ 5478 if (vmcs12->ept_pointer != new_eptp) { 5479 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 5480 return 1; 5481 5482 kvm_mmu_unload(vcpu); 5483 mmu->ept_ad = accessed_dirty; 5484 mmu->mmu_role.base.ad_disabled = !accessed_dirty; 5485 vmcs12->ept_pointer = new_eptp; 5486 /* 5487 * TODO: Check what's the correct approach in case 5488 * mmu reload fails. Currently, we just let the next 5489 * reload potentially fail 5490 */ 5491 kvm_mmu_reload(vcpu); 5492 } 5493 5494 return 0; 5495 } 5496 5497 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5498 { 5499 struct vcpu_vmx *vmx = to_vmx(vcpu); 5500 struct vmcs12 *vmcs12; 5501 u32 function = kvm_rax_read(vcpu); 5502 5503 /* 5504 * VMFUNC is only supported for nested guests, but we always enable the 5505 * secondary control for simplicity; for non-nested mode, fake that we 5506 * didn't by injecting #UD. 5507 */ 5508 if (!is_guest_mode(vcpu)) { 5509 kvm_queue_exception(vcpu, UD_VECTOR); 5510 return 1; 5511 } 5512 5513 vmcs12 = get_vmcs12(vcpu); 5514 if ((vmcs12->vm_function_control & (1 << function)) == 0) 5515 goto fail; 5516 5517 switch (function) { 5518 case 0: 5519 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5520 goto fail; 5521 break; 5522 default: 5523 goto fail; 5524 } 5525 return kvm_skip_emulated_instruction(vcpu); 5526 5527 fail: 5528 /* 5529 * This is effectively a reflected VM-Exit, as opposed to a synthesized 5530 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 5531 * EXIT_REASON_VMFUNC as the exit reason. 5532 */ 5533 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 5534 vmx_get_intr_info(vcpu), 5535 vmx_get_exit_qual(vcpu)); 5536 return 1; 5537 } 5538 5539 /* 5540 * Return true if an IO instruction with the specified port and size should cause 5541 * a VM-exit into L1. 
5542 */ 5543 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5544 int size) 5545 { 5546 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5547 gpa_t bitmap, last_bitmap; 5548 u8 b; 5549 5550 last_bitmap = (gpa_t)-1; 5551 b = -1; 5552 5553 while (size > 0) { 5554 if (port < 0x8000) 5555 bitmap = vmcs12->io_bitmap_a; 5556 else if (port < 0x10000) 5557 bitmap = vmcs12->io_bitmap_b; 5558 else 5559 return true; 5560 bitmap += (port & 0x7fff) / 8; 5561 5562 if (last_bitmap != bitmap) 5563 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5564 return true; 5565 if (b & (1 << (port & 7))) 5566 return true; 5567 5568 port++; 5569 size--; 5570 last_bitmap = bitmap; 5571 } 5572 5573 return false; 5574 } 5575 5576 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5577 struct vmcs12 *vmcs12) 5578 { 5579 unsigned long exit_qualification; 5580 unsigned short port; 5581 int size; 5582 5583 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5584 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5585 5586 exit_qualification = vmx_get_exit_qual(vcpu); 5587 5588 port = exit_qualification >> 16; 5589 size = (exit_qualification & 7) + 1; 5590 5591 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5592 } 5593 5594 /* 5595 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5596 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5597 * disinterest in the current event (read or write a specific MSR) by using an 5598 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5599 */ 5600 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5601 struct vmcs12 *vmcs12, 5602 union vmx_exit_reason exit_reason) 5603 { 5604 u32 msr_index = kvm_rcx_read(vcpu); 5605 gpa_t bitmap; 5606 5607 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5608 return true; 5609 5610 /* 5611 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5612 * for the four combinations of read/write and low/high MSR numbers. 5613 * First we need to figure out which of the four to use: 5614 */ 5615 bitmap = vmcs12->msr_bitmap; 5616 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 5617 bitmap += 2048; 5618 if (msr_index >= 0xc0000000) { 5619 msr_index -= 0xc0000000; 5620 bitmap += 1024; 5621 } 5622 5623 /* Then read the msr_index'th bit from this bitmap: */ 5624 if (msr_index < 1024*8) { 5625 unsigned char b; 5626 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 5627 return true; 5628 return 1 & (b >> (msr_index & 7)); 5629 } else 5630 return true; /* let L1 handle the wrong parameter */ 5631 } 5632 5633 /* 5634 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 5635 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 5636 * intercept (via guest_host_mask etc.) the current event. 
5637 */ 5638 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 5639 struct vmcs12 *vmcs12) 5640 { 5641 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5642 int cr = exit_qualification & 15; 5643 int reg; 5644 unsigned long val; 5645 5646 switch ((exit_qualification >> 4) & 3) { 5647 case 0: /* mov to cr */ 5648 reg = (exit_qualification >> 8) & 15; 5649 val = kvm_register_readl(vcpu, reg); 5650 switch (cr) { 5651 case 0: 5652 if (vmcs12->cr0_guest_host_mask & 5653 (val ^ vmcs12->cr0_read_shadow)) 5654 return true; 5655 break; 5656 case 3: 5657 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5658 return true; 5659 break; 5660 case 4: 5661 if (vmcs12->cr4_guest_host_mask & 5662 (vmcs12->cr4_read_shadow ^ val)) 5663 return true; 5664 break; 5665 case 8: 5666 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5667 return true; 5668 break; 5669 } 5670 break; 5671 case 2: /* clts */ 5672 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5673 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5674 return true; 5675 break; 5676 case 1: /* mov from cr */ 5677 switch (cr) { 5678 case 3: 5679 if (vmcs12->cpu_based_vm_exec_control & 5680 CPU_BASED_CR3_STORE_EXITING) 5681 return true; 5682 break; 5683 case 8: 5684 if (vmcs12->cpu_based_vm_exec_control & 5685 CPU_BASED_CR8_STORE_EXITING) 5686 return true; 5687 break; 5688 } 5689 break; 5690 case 3: /* lmsw */ 5691 /* 5692 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5693 * cr0. Other attempted changes are ignored, with no exit. 5694 */ 5695 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5696 if (vmcs12->cr0_guest_host_mask & 0xe & 5697 (val ^ vmcs12->cr0_read_shadow)) 5698 return true; 5699 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5700 !(vmcs12->cr0_read_shadow & 0x1) && 5701 (val & 0x1)) 5702 return true; 5703 break; 5704 } 5705 return false; 5706 } 5707 5708 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5709 struct vmcs12 *vmcs12, gpa_t bitmap) 5710 { 5711 u32 vmx_instruction_info; 5712 unsigned long field; 5713 u8 b; 5714 5715 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5716 return true; 5717 5718 /* Decode instruction info and find the field to access */ 5719 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5720 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5721 5722 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5723 if (field >> 15) 5724 return true; 5725 5726 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5727 return true; 5728 5729 return 1 & (b >> (field & 7)); 5730 } 5731 5732 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 5733 { 5734 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 5735 5736 if (nested_cpu_has_mtf(vmcs12)) 5737 return true; 5738 5739 /* 5740 * An MTF VM-exit may be injected into the guest by setting the 5741 * interruption-type to 7 (other event) and the vector field to 0. Such 5742 * is the case regardless of the 'monitor trap flag' VM-execution 5743 * control. 5744 */ 5745 return entry_intr_info == (INTR_INFO_VALID_MASK 5746 | INTR_TYPE_OTHER_EVENT); 5747 } 5748 5749 /* 5750 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 5751 * L1 wants the exit. Only call this when in is_guest_mode (L2). 
5752 */ 5753 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 5754 union vmx_exit_reason exit_reason) 5755 { 5756 u32 intr_info; 5757 5758 switch ((u16)exit_reason.basic) { 5759 case EXIT_REASON_EXCEPTION_NMI: 5760 intr_info = vmx_get_intr_info(vcpu); 5761 if (is_nmi(intr_info)) 5762 return true; 5763 else if (is_page_fault(intr_info)) 5764 return vcpu->arch.apf.host_apf_flags || !enable_ept; 5765 else if (is_debug(intr_info) && 5766 vcpu->guest_debug & 5767 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5768 return true; 5769 else if (is_breakpoint(intr_info) && 5770 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5771 return true; 5772 return false; 5773 case EXIT_REASON_EXTERNAL_INTERRUPT: 5774 return true; 5775 case EXIT_REASON_MCE_DURING_VMENTRY: 5776 return true; 5777 case EXIT_REASON_EPT_VIOLATION: 5778 /* 5779 * L0 always deals with the EPT violation. If nested EPT is 5780 * used, and the nested mmu code discovers that the address is 5781 * missing in the guest EPT table (EPT12), the EPT violation 5782 * will be injected with nested_ept_inject_page_fault() 5783 */ 5784 return true; 5785 case EXIT_REASON_EPT_MISCONFIG: 5786 /* 5787 * L2 never uses directly L1's EPT, but rather L0's own EPT 5788 * table (shadow on EPT) or a merged EPT table that L0 built 5789 * (EPT on EPT). So any problems with the structure of the 5790 * table is L0's fault. 5791 */ 5792 return true; 5793 case EXIT_REASON_PREEMPTION_TIMER: 5794 return true; 5795 case EXIT_REASON_PML_FULL: 5796 /* We emulate PML support to L1. */ 5797 return true; 5798 case EXIT_REASON_VMFUNC: 5799 /* VM functions are emulated through L2->L0 vmexits. */ 5800 return true; 5801 case EXIT_REASON_ENCLS: 5802 /* SGX is never exposed to L1 */ 5803 return true; 5804 default: 5805 break; 5806 } 5807 return false; 5808 } 5809 5810 /* 5811 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 5812 * is_guest_mode (L2). 
5813 */ 5814 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 5815 union vmx_exit_reason exit_reason) 5816 { 5817 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5818 u32 intr_info; 5819 5820 switch ((u16)exit_reason.basic) { 5821 case EXIT_REASON_EXCEPTION_NMI: 5822 intr_info = vmx_get_intr_info(vcpu); 5823 if (is_nmi(intr_info)) 5824 return true; 5825 else if (is_page_fault(intr_info)) 5826 return true; 5827 return vmcs12->exception_bitmap & 5828 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5829 case EXIT_REASON_EXTERNAL_INTERRUPT: 5830 return nested_exit_on_intr(vcpu); 5831 case EXIT_REASON_TRIPLE_FAULT: 5832 return true; 5833 case EXIT_REASON_INTERRUPT_WINDOW: 5834 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 5835 case EXIT_REASON_NMI_WINDOW: 5836 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 5837 case EXIT_REASON_TASK_SWITCH: 5838 return true; 5839 case EXIT_REASON_CPUID: 5840 return true; 5841 case EXIT_REASON_HLT: 5842 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5843 case EXIT_REASON_INVD: 5844 return true; 5845 case EXIT_REASON_INVLPG: 5846 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5847 case EXIT_REASON_RDPMC: 5848 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5849 case EXIT_REASON_RDRAND: 5850 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5851 case EXIT_REASON_RDSEED: 5852 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5853 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5854 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5855 case EXIT_REASON_VMREAD: 5856 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5857 vmcs12->vmread_bitmap); 5858 case EXIT_REASON_VMWRITE: 5859 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5860 vmcs12->vmwrite_bitmap); 5861 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5862 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5863 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5864 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5865 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5866 /* 5867 * VMX instructions trap unconditionally. This allows L1 to 5868 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
5869 */ 5870 return true; 5871 case EXIT_REASON_CR_ACCESS: 5872 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5873 case EXIT_REASON_DR_ACCESS: 5874 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5875 case EXIT_REASON_IO_INSTRUCTION: 5876 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5877 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5878 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5879 case EXIT_REASON_MSR_READ: 5880 case EXIT_REASON_MSR_WRITE: 5881 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5882 case EXIT_REASON_INVALID_STATE: 5883 return true; 5884 case EXIT_REASON_MWAIT_INSTRUCTION: 5885 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5886 case EXIT_REASON_MONITOR_TRAP_FLAG: 5887 return nested_vmx_exit_handled_mtf(vmcs12); 5888 case EXIT_REASON_MONITOR_INSTRUCTION: 5889 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5890 case EXIT_REASON_PAUSE_INSTRUCTION: 5891 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5892 nested_cpu_has2(vmcs12, 5893 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5894 case EXIT_REASON_MCE_DURING_VMENTRY: 5895 return true; 5896 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5897 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5898 case EXIT_REASON_APIC_ACCESS: 5899 case EXIT_REASON_APIC_WRITE: 5900 case EXIT_REASON_EOI_INDUCED: 5901 /* 5902 * The controls for "virtualize APIC accesses," "APIC- 5903 * register virtualization," and "virtual-interrupt 5904 * delivery" only come from vmcs12. 5905 */ 5906 return true; 5907 case EXIT_REASON_INVPCID: 5908 return 5909 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 5910 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5911 case EXIT_REASON_WBINVD: 5912 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 5913 case EXIT_REASON_XSETBV: 5914 return true; 5915 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 5916 /* 5917 * This should never happen, since it is not possible to 5918 * set XSS to a non-zero value---neither in L1 nor in L2. 5919 * If if it were, XSS would have to be checked against 5920 * the XSS exit bitmap in vmcs12. 5921 */ 5922 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 5923 case EXIT_REASON_UMWAIT: 5924 case EXIT_REASON_TPAUSE: 5925 return nested_cpu_has2(vmcs12, 5926 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 5927 default: 5928 return true; 5929 } 5930 } 5931 5932 /* 5933 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 5934 * reflected into L1. 5935 */ 5936 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 5937 { 5938 struct vcpu_vmx *vmx = to_vmx(vcpu); 5939 union vmx_exit_reason exit_reason = vmx->exit_reason; 5940 unsigned long exit_qual; 5941 u32 exit_intr_info; 5942 5943 WARN_ON_ONCE(vmx->nested.nested_run_pending); 5944 5945 /* 5946 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 5947 * has already loaded L2's state. 5948 */ 5949 if (unlikely(vmx->fail)) { 5950 trace_kvm_nested_vmenter_failed( 5951 "hardware VM-instruction error: ", 5952 vmcs_read32(VM_INSTRUCTION_ERROR)); 5953 exit_intr_info = 0; 5954 exit_qual = 0; 5955 goto reflect_vmexit; 5956 } 5957 5958 trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); 5959 5960 /* If L0 (KVM) wants the exit, it trumps L1's desires. */ 5961 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 5962 return false; 5963 5964 /* If L1 doesn't want the exit, handle it in L0. 
*/ 5965 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 5966 return false; 5967 5968 /* 5969 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 5970 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 5971 * need to be synthesized by querying the in-kernel LAPIC, but external 5972 * interrupts are never reflected to L1 so it's a non-issue. 5973 */ 5974 exit_intr_info = vmx_get_intr_info(vcpu); 5975 if (is_exception_with_error_code(exit_intr_info)) { 5976 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5977 5978 vmcs12->vm_exit_intr_error_code = 5979 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5980 } 5981 exit_qual = vmx_get_exit_qual(vcpu); 5982 5983 reflect_vmexit: 5984 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 5985 return true; 5986 } 5987 5988 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 5989 struct kvm_nested_state __user *user_kvm_nested_state, 5990 u32 user_data_size) 5991 { 5992 struct vcpu_vmx *vmx; 5993 struct vmcs12 *vmcs12; 5994 struct kvm_nested_state kvm_state = { 5995 .flags = 0, 5996 .format = KVM_STATE_NESTED_FORMAT_VMX, 5997 .size = sizeof(kvm_state), 5998 .hdr.vmx.flags = 0, 5999 .hdr.vmx.vmxon_pa = -1ull, 6000 .hdr.vmx.vmcs12_pa = -1ull, 6001 .hdr.vmx.preemption_timer_deadline = 0, 6002 }; 6003 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6004 &user_kvm_nested_state->data.vmx[0]; 6005 6006 if (!vcpu) 6007 return kvm_state.size + sizeof(*user_vmx_nested_state); 6008 6009 vmx = to_vmx(vcpu); 6010 vmcs12 = get_vmcs12(vcpu); 6011 6012 if (nested_vmx_allowed(vcpu) && 6013 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6014 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6015 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6016 6017 if (vmx_has_valid_vmcs12(vcpu)) { 6018 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6019 6020 if (vmx->nested.hv_evmcs) 6021 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6022 6023 if (is_guest_mode(vcpu) && 6024 nested_cpu_has_shadow_vmcs(vmcs12) && 6025 vmcs12->vmcs_link_pointer != -1ull) 6026 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6027 } 6028 6029 if (vmx->nested.smm.vmxon) 6030 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6031 6032 if (vmx->nested.smm.guest_mode) 6033 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6034 6035 if (is_guest_mode(vcpu)) { 6036 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6037 6038 if (vmx->nested.nested_run_pending) 6039 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6040 6041 if (vmx->nested.mtf_pending) 6042 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6043 6044 if (nested_cpu_has_preemption_timer(vmcs12) && 6045 vmx->nested.has_preemption_timer_deadline) { 6046 kvm_state.hdr.vmx.flags |= 6047 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6048 kvm_state.hdr.vmx.preemption_timer_deadline = 6049 vmx->nested.preemption_timer_deadline; 6050 } 6051 } 6052 } 6053 6054 if (user_data_size < kvm_state.size) 6055 goto out; 6056 6057 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6058 return -EFAULT; 6059 6060 if (!vmx_has_valid_vmcs12(vcpu)) 6061 goto out; 6062 6063 /* 6064 * When running L2, the authoritative vmcs12 state is in the 6065 * vmcs02. When running L1, the authoritative vmcs12 state is 6066 * in the shadow or enlightened vmcs linked to vmcs01, unless 6067 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 6068 * vmcs12 state is in the vmcs12 already. 
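	 *
	 * Hence the code below syncs from the vmcs02 when in guest mode, and
	 * otherwise pulls the state out of the enlightened or shadow VMCS,
	 * unless a sync back into vmcs12 is already pending.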
6069 */ 6070 if (is_guest_mode(vcpu)) { 6071 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6072 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6073 } else { 6074 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6075 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6076 if (vmx->nested.hv_evmcs) 6077 copy_enlightened_to_vmcs12(vmx); 6078 else if (enable_shadow_vmcs) 6079 copy_shadow_to_vmcs12(vmx); 6080 } 6081 } 6082 6083 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6084 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6085 6086 /* 6087 * Copy over the full allocated size of vmcs12 rather than just the size 6088 * of the struct. 6089 */ 6090 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6091 return -EFAULT; 6092 6093 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6094 vmcs12->vmcs_link_pointer != -1ull) { 6095 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6096 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6097 return -EFAULT; 6098 } 6099 out: 6100 return kvm_state.size; 6101 } 6102 6103 /* 6104 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 6105 */ 6106 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6107 { 6108 if (is_guest_mode(vcpu)) { 6109 to_vmx(vcpu)->nested.nested_run_pending = 0; 6110 nested_vmx_vmexit(vcpu, -1, 0, 0); 6111 } 6112 free_nested(vcpu); 6113 } 6114 6115 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6116 struct kvm_nested_state __user *user_kvm_nested_state, 6117 struct kvm_nested_state *kvm_state) 6118 { 6119 struct vcpu_vmx *vmx = to_vmx(vcpu); 6120 struct vmcs12 *vmcs12; 6121 enum vm_entry_failure_code ignored; 6122 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6123 &user_kvm_nested_state->data.vmx[0]; 6124 int ret; 6125 6126 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6127 return -EINVAL; 6128 6129 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { 6130 if (kvm_state->hdr.vmx.smm.flags) 6131 return -EINVAL; 6132 6133 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) 6134 return -EINVAL; 6135 6136 /* 6137 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6138 * enable eVMCS capability on vCPU. However, since then 6139 * code was changed such that flag signals vmcs12 should 6140 * be copied into eVMCS in guest memory. 6141 * 6142 * To preserve backwards compatability, allow user 6143 * to set this flag even when there is no VMXON region. 6144 */ 6145 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6146 return -EINVAL; 6147 } else { 6148 if (!nested_vmx_allowed(vcpu)) 6149 return -EINVAL; 6150 6151 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6152 return -EINVAL; 6153 } 6154 6155 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6156 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6157 return -EINVAL; 6158 6159 if (kvm_state->hdr.vmx.smm.flags & 6160 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6161 return -EINVAL; 6162 6163 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6164 return -EINVAL; 6165 6166 /* 6167 * SMM temporarily disables VMX, so we cannot be in guest mode, 6168 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6169 * must be zero. 6170 */ 6171 if (is_smm(vcpu) ? 
6172 (kvm_state->flags & 6173 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6174 : kvm_state->hdr.vmx.smm.flags) 6175 return -EINVAL; 6176 6177 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6178 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6179 return -EINVAL; 6180 6181 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6182 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) 6183 return -EINVAL; 6184 6185 vmx_leave_nested(vcpu); 6186 6187 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) 6188 return 0; 6189 6190 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6191 ret = enter_vmx_operation(vcpu); 6192 if (ret) 6193 return ret; 6194 6195 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6196 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6197 /* See vmx_has_valid_vmcs12. */ 6198 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6199 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6200 (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) 6201 return -EINVAL; 6202 else 6203 return 0; 6204 } 6205 6206 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { 6207 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6208 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6209 return -EINVAL; 6210 6211 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6212 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6213 /* 6214 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6215 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6216 * restored yet. EVMCS will be mapped from 6217 * nested_get_vmcs12_pages(). 6218 */ 6219 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6220 } else { 6221 return -EINVAL; 6222 } 6223 6224 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6225 vmx->nested.smm.vmxon = true; 6226 vmx->nested.vmxon = false; 6227 6228 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6229 vmx->nested.smm.guest_mode = true; 6230 } 6231 6232 vmcs12 = get_vmcs12(vcpu); 6233 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6234 return -EFAULT; 6235 6236 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6237 return -EINVAL; 6238 6239 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6240 return 0; 6241 6242 vmx->nested.nested_run_pending = 6243 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6244 6245 vmx->nested.mtf_pending = 6246 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6247 6248 ret = -EINVAL; 6249 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6250 vmcs12->vmcs_link_pointer != -1ull) { 6251 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6252 6253 if (kvm_state->size < 6254 sizeof(*kvm_state) + 6255 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6256 goto error_guest_mode; 6257 6258 if (copy_from_user(shadow_vmcs12, 6259 user_vmx_nested_state->shadow_vmcs12, 6260 sizeof(*shadow_vmcs12))) { 6261 ret = -EFAULT; 6262 goto error_guest_mode; 6263 } 6264 6265 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6266 !shadow_vmcs12->hdr.shadow_vmcs) 6267 goto error_guest_mode; 6268 } 6269 6270 vmx->nested.has_preemption_timer_deadline = false; 6271 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6272 vmx->nested.has_preemption_timer_deadline = true; 6273 vmx->nested.preemption_timer_deadline = 6274 kvm_state->hdr.vmx.preemption_timer_deadline; 6275 } 6276 6277 if (nested_vmx_check_controls(vcpu, vmcs12) || 6278 
nested_vmx_check_host_state(vcpu, vmcs12) || 6279 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6280 goto error_guest_mode; 6281 6282 vmx->nested.dirty_vmcs12 = true; 6283 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6284 if (ret) 6285 goto error_guest_mode; 6286 6287 return 0; 6288 6289 error_guest_mode: 6290 vmx->nested.nested_run_pending = 0; 6291 return ret; 6292 } 6293 6294 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6295 { 6296 if (enable_shadow_vmcs) { 6297 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6298 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6299 } 6300 } 6301 6302 /* 6303 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 6304 * returned for the various VMX controls MSRs when nested VMX is enabled. 6305 * The same values should also be used to verify that vmcs12 control fields are 6306 * valid during nested entry from L1 to L2. 6307 * Each of these control msrs has a low and high 32-bit half: A low bit is on 6308 * if the corresponding bit in the (32-bit) control field *must* be on, and a 6309 * bit in the high half is on if the corresponding bit in the control field 6310 * may be on. See also vmx_control_verify(). 6311 */ 6312 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) 6313 { 6314 /* 6315 * Note that as a general rule, the high half of the MSRs (bits in 6316 * the control fields which may be 1) should be initialized by the 6317 * intersection of the underlying hardware's MSR (i.e., features which 6318 * can be supported) and the list of features we want to expose - 6319 * because they are known to be properly supported in our code. 6320 * Also, usually, the low half of the MSRs (bits which must be 1) can 6321 * be set to 0, meaning that L1 may turn off any of these bits. The 6322 * reason is that if one of these bits is necessary, it will appear 6323 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 6324 * fields of vmcs01 and vmcs02, will turn these bits off - and 6325 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 6326 * These rules have exceptions below. 6327 */ 6328 6329 /* pin-based controls */ 6330 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 6331 msrs->pinbased_ctls_low, 6332 msrs->pinbased_ctls_high); 6333 msrs->pinbased_ctls_low |= 6334 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6335 msrs->pinbased_ctls_high &= 6336 PIN_BASED_EXT_INTR_MASK | 6337 PIN_BASED_NMI_EXITING | 6338 PIN_BASED_VIRTUAL_NMIS | 6339 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6340 msrs->pinbased_ctls_high |= 6341 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6342 PIN_BASED_VMX_PREEMPTION_TIMER; 6343 6344 /* exit controls */ 6345 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 6346 msrs->exit_ctls_low, 6347 msrs->exit_ctls_high); 6348 msrs->exit_ctls_low = 6349 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6350 6351 msrs->exit_ctls_high &= 6352 #ifdef CONFIG_X86_64 6353 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6354 #endif 6355 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6356 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6357 msrs->exit_ctls_high |= 6358 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6359 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6360 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 6361 6362 /* We support free control of debug control saving. 
*/ 6363 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6364 6365 /* entry controls */ 6366 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 6367 msrs->entry_ctls_low, 6368 msrs->entry_ctls_high); 6369 msrs->entry_ctls_low = 6370 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6371 msrs->entry_ctls_high &= 6372 #ifdef CONFIG_X86_64 6373 VM_ENTRY_IA32E_MODE | 6374 #endif 6375 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS | 6376 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 6377 msrs->entry_ctls_high |= 6378 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 6379 6380 /* We support free control of debug control loading. */ 6381 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6382 6383 /* cpu-based controls */ 6384 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 6385 msrs->procbased_ctls_low, 6386 msrs->procbased_ctls_high); 6387 msrs->procbased_ctls_low = 6388 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6389 msrs->procbased_ctls_high &= 6390 CPU_BASED_INTR_WINDOW_EXITING | 6391 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6392 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6393 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6394 CPU_BASED_CR3_STORE_EXITING | 6395 #ifdef CONFIG_X86_64 6396 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6397 #endif 6398 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6399 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6400 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6401 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6402 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6403 /* 6404 * We can allow some features even when not supported by the 6405 * hardware. For example, L1 can specify an MSR bitmap - and we 6406 * can use it to avoid exits to L1 - even when L0 runs L2 6407 * without MSR bitmaps. 6408 */ 6409 msrs->procbased_ctls_high |= 6410 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6411 CPU_BASED_USE_MSR_BITMAPS; 6412 6413 /* We support free control of CR3 access interception. */ 6414 msrs->procbased_ctls_low &= 6415 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6416 6417 /* 6418 * secondary cpu-based controls. Do not include those that 6419 * depend on CPUID bits, they are added later by 6420 * vmx_vcpu_after_set_cpuid. 6421 */ 6422 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 6423 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 6424 msrs->secondary_ctls_low, 6425 msrs->secondary_ctls_high); 6426 6427 msrs->secondary_ctls_low = 0; 6428 msrs->secondary_ctls_high &= 6429 SECONDARY_EXEC_DESC | 6430 SECONDARY_EXEC_ENABLE_RDTSCP | 6431 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6432 SECONDARY_EXEC_WBINVD_EXITING | 6433 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6434 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6435 SECONDARY_EXEC_RDRAND_EXITING | 6436 SECONDARY_EXEC_ENABLE_INVPCID | 6437 SECONDARY_EXEC_RDSEED_EXITING | 6438 SECONDARY_EXEC_XSAVES; 6439 6440 /* 6441 * We can emulate "VMCS shadowing," even if the hardware 6442 * doesn't support it. 
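	 *
	 * (When L1 uses it for L2, VMREAD/VMWRITE exits come to L0 and are
	 * emulated against the cached shadow vmcs12 rather than a hardware
	 * shadow VMCS; see handle_vmread()/handle_vmwrite().)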
6443 */ 6444 msrs->secondary_ctls_high |= 6445 SECONDARY_EXEC_SHADOW_VMCS; 6446 6447 if (enable_ept) { 6448 /* nested EPT: emulate EPT also to L1 */ 6449 msrs->secondary_ctls_high |= 6450 SECONDARY_EXEC_ENABLE_EPT; 6451 msrs->ept_caps = 6452 VMX_EPT_PAGE_WALK_4_BIT | 6453 VMX_EPT_PAGE_WALK_5_BIT | 6454 VMX_EPTP_WB_BIT | 6455 VMX_EPT_INVEPT_BIT | 6456 VMX_EPT_EXECUTE_ONLY_BIT; 6457 6458 msrs->ept_caps &= ept_caps; 6459 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6460 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6461 VMX_EPT_1GB_PAGE_BIT; 6462 if (enable_ept_ad_bits) { 6463 msrs->secondary_ctls_high |= 6464 SECONDARY_EXEC_ENABLE_PML; 6465 msrs->ept_caps |= VMX_EPT_AD_BIT; 6466 } 6467 } 6468 6469 if (cpu_has_vmx_vmfunc()) { 6470 msrs->secondary_ctls_high |= 6471 SECONDARY_EXEC_ENABLE_VMFUNC; 6472 /* 6473 * Advertise EPTP switching unconditionally 6474 * since we emulate it 6475 */ 6476 if (enable_ept) 6477 msrs->vmfunc_controls = 6478 VMX_VMFUNC_EPTP_SWITCHING; 6479 } 6480 6481 /* 6482 * Old versions of KVM use the single-context version without 6483 * checking for support, so declare that it is supported even 6484 * though it is treated as global context. The alternative is 6485 * not failing the single-context invvpid, and it is worse. 6486 */ 6487 if (enable_vpid) { 6488 msrs->secondary_ctls_high |= 6489 SECONDARY_EXEC_ENABLE_VPID; 6490 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6491 VMX_VPID_EXTENT_SUPPORTED_MASK; 6492 } 6493 6494 if (enable_unrestricted_guest) 6495 msrs->secondary_ctls_high |= 6496 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6497 6498 if (flexpriority_enabled) 6499 msrs->secondary_ctls_high |= 6500 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6501 6502 /* miscellaneous data */ 6503 rdmsr(MSR_IA32_VMX_MISC, 6504 msrs->misc_low, 6505 msrs->misc_high); 6506 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 6507 msrs->misc_low |= 6508 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6509 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6510 VMX_MISC_ACTIVITY_HLT | 6511 VMX_MISC_ACTIVITY_WAIT_SIPI; 6512 msrs->misc_high = 0; 6513 6514 /* 6515 * This MSR reports some information about VMX support. We 6516 * should return information about the VMX we emulate for the 6517 * guest, and the VMCS structure we give it - not about the 6518 * VMX support of the underlying hardware. 6519 */ 6520 msrs->basic = 6521 VMCS12_REVISION | 6522 VMX_BASIC_TRUE_CTLS | 6523 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6524 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6525 6526 if (cpu_has_vmx_basic_inout()) 6527 msrs->basic |= VMX_BASIC_INOUT; 6528 6529 /* 6530 * These MSRs specify bits which the guest must keep fixed on 6531 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6532 * We picked the standard core2 setting. 6533 */ 6534 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6535 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6536 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6537 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6538 6539 /* These MSRs specify bits which the guest must keep fixed off. 
*/ 6540 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 6541 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 6542 6543 /* highest index: VMX_PREEMPTION_TIMER_VALUE */ 6544 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; 6545 } 6546 6547 void nested_vmx_hardware_unsetup(void) 6548 { 6549 int i; 6550 6551 if (enable_shadow_vmcs) { 6552 for (i = 0; i < VMX_BITMAP_NR; i++) 6553 free_page((unsigned long)vmx_bitmap[i]); 6554 } 6555 } 6556 6557 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 6558 { 6559 int i; 6560 6561 if (!cpu_has_vmx_shadow_vmcs()) 6562 enable_shadow_vmcs = 0; 6563 if (enable_shadow_vmcs) { 6564 for (i = 0; i < VMX_BITMAP_NR; i++) { 6565 /* 6566 * The vmx_bitmap is not tied to a VM and so should 6567 * not be charged to a memcg. 6568 */ 6569 vmx_bitmap[i] = (unsigned long *) 6570 __get_free_page(GFP_KERNEL); 6571 if (!vmx_bitmap[i]) { 6572 nested_vmx_hardware_unsetup(); 6573 return -ENOMEM; 6574 } 6575 } 6576 6577 init_vmcs_shadow_fields(); 6578 } 6579 6580 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 6581 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 6582 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 6583 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 6584 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 6585 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 6586 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 6587 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 6588 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 6589 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 6590 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 6591 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 6592 6593 return 0; 6594 } 6595 6596 struct kvm_x86_nested_ops vmx_nested_ops = { 6597 .check_events = vmx_check_nested_events, 6598 .hv_timer_pending = nested_vmx_preemption_timer_pending, 6599 .get_state = vmx_get_nested_state, 6600 .set_state = vmx_set_nested_state, 6601 .get_nested_state_pages = vmx_get_nested_state_pages, 6602 .write_log_dirty = nested_vmx_write_pml_buffer, 6603 .enable_evmcs = nested_enable_evmcs, 6604 .get_evmcs_version = nested_get_evmcs_version, 6605 }; 6606