// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware.  Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	48
#define MSRPM_OFFSETS		32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
				   pause filter count */
	VMCB_PERM_MAP,		/* IOPM Base and MSRPM Base */
	VMCB_ASID,		/* ASID */
	VMCB_INTR,		/* int_ctl, int_vector */
	VMCB_NPT,		/* npt_en, nCR3, gPAT */
	VMCB_CR,		/* CR0, CR3, CR4, EFER */
	VMCB_DR,		/* DR6, DR7 */
	VMCB_DT,		/* GDT, IDT */
	VMCB_SEG,		/* CS, DS, SS, ES, CPL */
	VMCB_CR2,		/* CR2 only */
	VMCB_LBR,		/* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,		/* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
				 * AVIC PHYSICAL_TABLE pointer,
				 * AVIC LOGICAL_TABLE pointer
				 */
	VMCB_SW = 31,		/* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked;	/* Number of pages locked */
	struct list_head regions_list;	/* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner;	/* Owner of copied encryption context */
	struct list_head mirror_vms;	/* List of VMs mirroring */
	struct list_head mirror_entry;	/* Use as a list entry of mirrors */
	struct misc_cg *misc_cg;	/* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;	/* SNP guest context page */
	void *guest_req_buf;	/* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;	/* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex;	/* Must acquire before using bounce buffers */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2.  Note,
	 * this flag can only be used reliably in conjunction with a paravirt
	 * L1 which informs L0 whether any changes to MSR bitmap for L2 were
	 * done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex;	/* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;

	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
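	/*
	 * current_vmcb itself is switched between vmcb01 and vmcb02 by
	 * svm_switch_vmcb() when entering or leaving nested guest mode.
	 */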
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate LS_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!sev_es_guest(kvm));
}
#else
#define sev_guest(kvm) false
#define sev_es_guest(kvm) false
#define sev_snp_guest(kvm) false
#endif

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS		(~DEBUGCTLMSR_LBR)

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
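
/*
 * Example (illustrative): synthesize a #VMEXIT that carries no exit
 * qualification, e.g. reflecting a pending interrupt to L1 when L1
 * intercepts INTR:
 *
 *	if (nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */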

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
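
/* Top-level handler for VMGEXIT exits, i.e. GHCB protocol requests from SEV-ES/SNP guests. */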
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
{
	return 0;
}

#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{										\
		return test_bit(GHCB_BITMAP_IDX(field),					\
				(unsigned long *)&svm->sev_es.valid_bitmap);		\
	}										\
											\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{										\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)

#endif