// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define	IOPM_SIZE PAGE_SIZE * 3
#define	MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
				   pause filter count */
	VMCB_PERM_MAP,		/* IOPM Base and MSRPM Base */
	VMCB_ASID,		/* ASID */
	VMCB_INTR,		/* int_ctl, int_vector */
	VMCB_NPT,		/* npt_en, nCR3, gPAT */
	VMCB_CR,		/* CR0, CR3, CR4, EFER */
	VMCB_DR,		/* DR6, DR7 */
	VMCB_DT,		/* GDT, IDT */
	VMCB_SEG,		/* CS, DS, SS, ES, CPL */
	VMCB_CR2,		/* CR2 only */
	VMCB_LBR,		/* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,		/* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
				 * AVIC PHYSICAL_TABLE pointer,
				 * AVIC LOGICAL_TABLE pointer
				 */
	VMCB_SW = 31,		/* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};
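
/*
 * Usage sketch (illustrative): the *_cached structures above and below hold
 * KVM's private copy of selected vmcb12 fields.  They are filled once per
 * nested VMRUN by nested_copy_vmcb_save_to_cache() and
 * nested_copy_vmcb_control_to_cache() (declared later in this header), and
 * later checks read the cache rather than guest-writable memory, e.g.:
 *
 *	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
 *		...	// L1 asked for V_INTR_MASKING while L2 runs
 *
 * so L1 cannot change a value after KVM has already acted upon it.
 */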

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate LS_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	/* optional nested SVM features that are enabled for this guest */
	bool vgif_enabled : 1;
	bool vnmi_enabled : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan
	 * IRTEs and match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
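
/*
 * Usage sketch (illustrative): a VMCB field update is normally paired with
 * the matching clean bit so that hardware re-reads that guest state area on
 * the next VMRUN, e.g.:
 *
 *	svm->vmcb->control.tsc_offset = offset;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 *
 * vmcb_mark_all_clean() is used after VMRUN, leaving only
 * VMCB_ALWAYS_DIRTY_MASK (TPR and CR2) marked dirty.
 */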

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
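
/*
 * Usage sketch (illustrative): the helpers above always operate on vmcb01;
 * recalc_intercepts() then folds the result together with L1's requested
 * intercepts into vmcb02 when a nested guest is active.  Typical calls:
 *
 *	set_exception_intercept(svm, GP_VECTOR);
 *	svm_clr_intercept(svm, INTERCEPT_IRET);
 */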

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return svm->vnmi_enabled &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

/* svm.c */
#define MSR_INVALID	0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);
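
/*
 * Usage sketch (illustrative): for set_msr_interception(), read/write of 1
 * means "pass the MSR through" (no intercept) and 0 means "intercept".
 * For example, giving the guest direct access to SPEC_CTRL:
 *
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */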

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
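
/*
 * Usage sketch (illustrative): while L2 is active, event injection first
 * checks whether L1 intercepts the event and, if so, a nested #VMEXIT is
 * synthesized instead of injecting into L2, roughly:
 *
 *	if (is_guest_mode(vcpu) && nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */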

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLE) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#endif