/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 400000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER      8
#define KVM_REQ_REPORT_TPR_ACCESS  9
#define KVM_REQ_TRIPLE_FAULT      10
#define KVM_REQ_MMU_SYNC          11
#define KVM_REQ_CLOCK_UPDATE      12
#define KVM_REQ_DEACTIVATE_FPU    13
#define KVM_REQ_EVENT             14
#define KVM_REQ_APF_HALT          15
#define KVM_REQ_STEAL_UPDATE      16
#define KVM_REQ_NMI               17
#define KVM_REQ_PMU               18
#define KVM_REQ_PMI               19
#define KVM_REQ_SMI               20
#define KVM_REQ_MASTERCLOCK_UPDATE 21
#define KVM_REQ_MCLOCK_INPROGRESS 22
#define KVM_REQ_SCAN_IOAPIC       23
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
#define KVM_REQ_APIC_PAGE_RELOAD  25
#define KVM_REQ_HV_CRASH          26
#define KVM_REQ_IOAPIC_EOI_EXIT   27
#define KVM_REQ_HV_RESET          28
#define KVM_REQ_HV_EXIT           29
#define KVM_REQ_HV_STIMER         30

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD		 BIT_64(63)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
			  | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
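/*
 * Worked example (illustrative only, not used by the code): with 4KiB base
 * pages, each level adds 9 bits of gfn, so level 2 maps 2MiB and level 3
 * maps 1GiB:
 *
 *	KVM_HPAGE_SHIFT(2) == 21, KVM_PAGES_PER_HPAGE(2) == 512
 *	KVM_HPAGE_SHIFT(3) == 30, KVM_PAGES_PER_HPAGE(3) == 262144
 *
 * and gfn_to_index() then returns the index, in units of level-(x-1)
 * pages, of @gfn within the huge page that starts at @base_gfn.
 */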
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_USER_MASK |		\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently, we use 14 bits: @level, @cr4_pae, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned :8;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
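/*
 * Illustrative sketch (not part of KVM; the real consumer is the
 * kvm_memslots_for_spte_role() macro later in this header): because @smm
 * occupies the top byte of @word, the address-space index can be
 * recovered with a single shift instead of a bitfield extraction.
 * The helper name is hypothetical.
 */
static inline unsigned kvm_mmu_role_smm_sketch(union kvm_mmu_page_role role)
{
	return role.word >> 24;	/* @word is 32 bits; @smm is bits 31:24 */
}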
struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */

	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
	unsigned long mmu_valid_gen;

	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][4];
	u64 bad_mt_xwr;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits on shadow page table entries; these bits include
	 * not only hardware-reserved bits but also bits that sptes never use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};
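/*
 * Illustrative sketch (simplified; the authoritative check is
 * permission_fault() in arch/x86/kvm/mmu.h, which also folds in SMAP and
 * protection-key state): the page fault error code bits [4:1] select one
 * of the 16 bytes of @permissions, and the pte's ACC_* bits select a bit
 * within that byte; a set bit means "permission fault".  @pkru_mask
 * refines the result in the same indexed fashion, two bits (AD/WD) per
 * domain.  The helper name is hypothetical.
 */
static inline bool kvm_mmu_permissions_sketch(struct kvm_mmu *mmu,
					      unsigned pfec,
					      unsigned pte_access)
{
	return (mmu->permissions[(pfec >> 1) & 15] >> pte_access) & 1;
}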
enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};
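/*
 * Illustrative sketch (an assumption based on the SDM's variable-MTRR
 * semantics, not a quote of KVM's matching code in arch/x86/kvm/mtrr.c):
 * a variable range covers a physical address when the address agrees with
 * @base on every address bit selected by @mask; bit 11 of the PHYSMASK
 * MSR image is the valid bit.  The helper name is hypothetical.
 */
static inline bool kvm_mtrr_range_covers_sketch(struct kvm_mtrr_range *range,
						u64 pa)
{
	if (!(range->mask & (1ULL << 11)))	/* range not enabled */
		return false;
	return (pa & range->mask & PAGE_MASK) ==
	       (range->base & range->mask & PAGE_MASK);
}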
/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	u64 config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions; a sketch of the
	 * caching protocol follows this struct.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	bool tpr_access_reporting;
	u64 ia32_xss;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging this still
	 * saves the paging mode of the L1 guest.  This context is always
	 * used to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page
	 * tables of an L2 guest.  This context is only initialized for page
	 * table walking and not for faulting since we never handle L2 page
	 * faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;     /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	/*
	 * Indicates whether the access faulted on its guest page table;
	 * set when fixing a page fault and used to detect unhandleable
	 * instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;
};
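/*
 * Illustrative sketch of the lazy register-caching protocol referred to
 * above @regs (close to kvm_register_read() in
 * arch/x86/kvm/kvm_cache_regs.h; shown as a comment because struct
 * kvm_vcpu is not yet defined at this point in the header):
 *
 *	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 *		kvm_x86_ops->cache_reg(vcpu, reg);
 *	return vcpu->arch.regs[reg];
 *
 * Writes set the register's bit in both @regs_avail and @regs_dirty so
 * the value is flushed back to hardware state before the next vmentry.
 */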
struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use the number of bits allocated in the LDR for the logical
 * processor ID as the mode.  It happens that these are all powers of
 * two.  This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
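/*
 * Worked example (illustrative): in xAPIC cluster mode the 8-bit logical
 * ID is a 4-bit cluster number plus a 4-bit bitmap of CPUs within the
 * cluster, hence xapic_cluster_map[16][4] and a mode value of 4; flat
 * mode uses all 8 LDR bits as one bitmap, hence xapic_flat_map[8] and a
 * mode value of 8; x2APIC uses a 16-bit bitmap within each cluster,
 * hence a mode value of 16.
 */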
/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-V based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	unsigned long mmu_valid_gen;
	/* Hash table of struct kvm_mmu_page. */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	unsigned int tss_addr;
	bool apic_access_page_done;

	gpa_t wall_clock;

	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif

	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	bool irqchip_split;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	u32 ldr_mode;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 efer_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};
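/*
 * Illustrative usage (an assumption about a typical caller, not code
 * from this file): @host_initiated distinguishes userspace/host MSR
 * accesses from guest-originated ones, letting kvm_set_msr() (declared
 * below) relax guest-visible checks during state save/restore:
 *
 *	struct msr_data msr = {
 *		.host_initiated = true,
 *		.index = MSR_IA32_TSC,
 *		.data = 0,
 *	};
 *	kvm_set_msr(vcpu, &msr);
 */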
struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*cpu_has_high_real_mode_segbase)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	u32 (*get_pkru)(struct kvm_vcpu *vcpu);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(void);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks.  These hooks are only supposed
	 * to be valid if the specific arch has a hardware-accelerated dirty
	 * logging mechanism.  Currently only for PML on VMX.  (A usage
	 * sketch follows this struct.)
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask
	 *	after corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture-specific hooks for vCPU blocking due to the HLT
	 * instruction.
	 * Returns for .pre_block():
	 *  - 0 means continue to block the vCPU.
	 *  - 1 means we cannot block the vCPU because some event happened
	 *    during this period, e.g. the 'ON' bit in the posted-interrupt
	 *    descriptor was set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);
};
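/*
 * Illustrative sketch (an assumption about the generic call site, not a
 * definitive quote of arch/x86/kvm/x86.c): the dirty-logging hooks above
 * are optional, so common code checks for NULL before harvesting the
 * dirty bitmap:
 *
 *	if (kvm_x86_ops->flush_log_dirty)
 *		kvm_x86_ops->flush_log_dirty(kvm);
 */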
struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;
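/*
 * Worked example (illustrative): the scaling ratio is a fixed-point
 * number with kvm_tsc_scaling_ratio_frac_bits fractional bits, so a
 * scaled TSC read is computed as
 *
 *	guest_tsc = (host_tsc * ratio) >> kvm_tsc_scaling_ratio_frac_bits;
 *
 * and kvm_default_tsc_scaling_ratio is simply 1.0 in that format, i.e.
 * no scaling.  See kvm_scale_tsc(), declared later in this header.
 */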
extern u64 kvm_mce_cap_supported;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
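/*
 * Illustrative usage (an assumption about the shape of a #UD intercept
 * handler, not code from this file): EMULTYPE_TRAP_UD asks the emulator
 * to handle an intercepted undefined opcode, with the caller re-injecting
 * #UD if emulation is not possible:
 *
 *	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 *	if (er != EMULATE_DONE)
 *		kvm_queue_exception(vcpu, UD_VECTOR);
 */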
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level-triggered interrupts */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
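/*
 * Worked example (illustrative): with two sources sharing a
 * level-triggered pin, the line only drops once both have deasserted:
 *
 *	__kvm_irq_line_state(&state, 0, 1);	// returns 1 (asserted)
 *	__kvm_irq_line_state(&state, 1, 1);	// returns 1
 *	__kvm_irq_line_state(&state, 0, 0);	// still 1: source 1 holds it
 *	__kvm_irq_line_state(&state, 1, 0);	// returns 0 (released)
 */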
int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
	return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;
#endif
}
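/*
 * Worked example (illustrative): with 48-bit linear addresses,
 * get_canonical() sign-extends bit 47 over bits 63:48, so
 *
 *	get_canonical(0x0000800000000000) == 0xffff800000000000
 *	get_canonical(0xffff800000000000) == 0xffff800000000000
 *
 * and is_noncanonical_address() reports true only for the first input,
 * which differs from its canonical form.
 */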
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t"                           \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t"		      \
	"cmpb $0, kvm_rebooting \n\t"	      \
	"jne 668b \n\t"      		      \
	__ASM_SIZE(push) " $666b \n\t"	      \
	"call kvm_spurious_fault \n\t"	      \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")
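/*
 * Illustrative usage (an assumption about the call sites in vmx.c/svm.c,
 * which wrap this in their own helper macros): an individual
 * virtualization instruction is emitted through the macro so that a
 * fault caused by a concurrent reboot is absorbed instead of crashing
 * the host:
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 */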
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
					   unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return __default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#endif /* _ASM_X86_KVM_HOST_H */