// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;

enum avic_modes {
        AVIC_MODE_NONE = 0,
        AVIC_MODE_X1,
        AVIC_MODE_X2,
};

extern enum avic_modes avic_mode;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        bool es_active;         /* SEV-ES enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
        struct list_head mirror_vms; /* List of VMs mirroring */
        struct list_head mirror_entry; /* Use as a list entry of mirrors */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
        atomic_t migration_in_progress;
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
        struct vmcb *ptr;
        unsigned long pa;
        int cpu;
        uint64_t asid_generation;
};

struct vmcb_save_area_cached {
        u64 efer;
        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
};

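/*
 * Cached copy of the control area of L1's vmcb12, kept (together with
 * struct vmcb_save_area_cached above) so that nested SVM consistency checks
 * and nested #VMEXIT handling do not have to re-read guest memory.
 * Illustrative sketch of how a nested VMRUN fills the caches, using the
 * helpers declared further down in this header:
 *
 *	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
 *	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
 */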
struct vmcb_ctrl_area_cached {
        u32 intercepts[MAX_INTERCEPT];
        u16 pause_filter_thresh;
        u16 pause_filter_count;
        u64 iopm_base_pa;
        u64 msrpm_base_pa;
        u64 tsc_offset;
        u32 asid;
        u8 tlb_ctl;
        u32 int_ctl;
        u32 int_vector;
        u32 int_state;
        u32 exit_code;
        u32 exit_code_hi;
        u64 exit_info_1;
        u64 exit_info_2;
        u32 exit_int_info;
        u32 exit_int_info_err;
        u64 nested_ctl;
        u32 event_inj;
        u32 event_inj_err;
        u64 next_rip;
        u64 nested_cr3;
        u64 virt_ext;
        u32 clean;
        u8 reserved_sw[32];
};

struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb12_gpa;
        u64 last_vmcb12_gpa;

        /* These are the merged vectors */
        u32 *msrpm;

        /* A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.  */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_ctrl_area_cached ctl;

        /*
         * Note: this struct is not kept up-to-date while L2 runs; it is only
         * valid within nested_svm_vmrun.
         */
        struct vmcb_save_area_cached save;

        bool initialized;

        /*
         * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
         * changes in MSR bitmap for L1 or switching to a different L2. Note,
         * this flag can only be used reliably in conjunction with a paravirt L1
         * which informs L0 whether any changes to MSR bitmap for L2 were done
         * on its side.
         */
        bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
        /* SEV-ES support */
        struct sev_es_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
        u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;
        u32 asid;
        u32 sysenter_esp_hi;
        u32 sysenter_eip_hi;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 spec_ctrl;

        u64 tsc_ratio_msr;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct svm_nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;
        bool nmi_l1_to_l2;

        unsigned long soft_int_csbase;
        unsigned long soft_int_old_rip;
        unsigned long soft_int_next_rip;
        bool soft_int_injected;

        /* optional nested SVM features that are enabled for this guest */
        bool nrips_enabled : 1;
        bool tsc_scaling_enabled : 1;
        bool v_vmload_vmsave_enabled : 1;
        bool lbrv_enabled : 1;
        bool pause_filter_enabled : 1;
        bool pause_threshold_enabled : 1;
        bool vgif_enabled : 1;

        u32 ldr_reg;
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
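         * Entries are maintained from the avic_pi_update_irte() path, and the
         * list is protected by ir_list_lock.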
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;

        struct vcpu_sev_es_state sev_es;

        bool guest_state_loaded;

        bool x2avic_msrs_intercepted;
};

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
        return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = VMCB_ALL_CLEAN_MASK
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

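/*
 * The set/clr helpers above always edit vmcb01 (L1's VMCB) and then call
 * recalc_intercepts(), which rebuilds the intercepts of the currently active
 * VMCB; with L2 running that is roughly the union of vmcb01's intercepts and
 * the cached vmcb12 intercepts.  svm_is_intercept() below instead queries the
 * currently active VMCB.  Illustrative use:
 *
 *	svm_set_intercept(svm, INTERCEPT_IRET);
 */
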
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
        return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
        if (!vgif)
                return NULL;

        if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
                return svm->nested.vmcb02.ptr;
        else
                return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_vgif_vmcb(svm);

        if (vmcb)
                vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_vgif_vmcb(svm);

        if (vmcb)
                vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_vgif_vmcb(svm);

        if (vmcb)
                return !!(vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
        return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
        /* 4 msrs per u8, and 4 u8 in u32 */
        u32 msr = offset * 16;

        return (msr >= APIC_BASE_MSR) &&
               (msr < (APIC_BASE_MSR + 0x100));
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

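/*
 * The nested_exit_on_*() predicates above test L1's cached vmcb12 intercepts.
 * If such an event (SMI, INTR, NMI) becomes pending while L2 is running and
 * the predicate is true, the event is reflected to L1 as a synthetic #VMEXIT
 * rather than injected into L2; roughly:
 *
 *	if (nested_exit_on_intr(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */
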
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

bool avic_hardware_setup(struct kvm_x86_ops *ops);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif