/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>

#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
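
/*
 * Worked example (editorial illustration, assuming the usual x86
 * PAGE_SHIFT of 12): each hugepage level adds 9 bits, matching the
 * 512 entries per page table, so the macros above expand to
 *
 *	KVM_HPAGE_SHIFT(1) = 12  ->  4 KiB pages
 *	KVM_HPAGE_SHIFT(2) = 21  ->  2 MiB pages
 *	KVM_HPAGE_SHIFT(3) = 30  ->  1 GiB pages
 *
 * i.e. KVM_PAGES_PER_HPAGE(2) == 512 and KVM_PAGES_PER_HPAGE(3) == 262144.
 */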

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - direct mapping of virtual to physical at gfn
 *              (used for real mode and two-dimensional paging)
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned cr4_pge:1;
		unsigned nxe:1;
	};
};
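
/*
 * Editorial example (illustrative sketch, not from the original source):
 * because 'word' overlays all of the role bitfields, a shadow page's
 * role can be matched in one integer comparison, roughly as the
 * shadow-page hash lookup does:
 *
 *	if (sp->gfn == gfn && sp->role.word == role.word)
 *		return sp;	existing shadow page can be reused
 */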

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	struct list_head oos_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* holds the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	bool unsync;
	unsigned int unsync_children;
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);
};

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	gva_t guest_gva;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    u32 *error);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;

	u64 *pae_root;
	u64 rsvd_bits_mask[2][4];
};

struct kvm_vcpu_arch {
	u64 host_tsc;
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 pdptrs[4]; /* pae */
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	struct kvm_mmu mmu;
	/*
	 * Only needed in the kvm_pv_mmu_op() path, but it's hot, so
	 * keep it here to avoid allocation.
	 */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	gfn_t last_pte_gfn;

	struct {
		gfn_t gfn;	/* presumed gfn during guest pte update */
		pfn_t pfn;	/* pfn corresponding to that gfn */
		unsigned long mmu_seq;
	} update_pte;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hv_clock_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	bool nmi_pending;
	bool nmi_injected;

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* used for guest single stepping over the given code position */
	u16 singlestep_cs;
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;
};

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
#define KVM_ALIAS_INVALID     1UL
	unsigned long flags;
};

#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION

struct kvm_mem_aliases {
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int naliases;
};
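
/*
 * Editorial note (illustrative, not from the original source): each
 * kvm_mem_alias redirects guest accesses in the npages-long gfn range
 * starting at base_gfn to the range starting at target_gfn, so a gfn
 * that falls inside an alias resolves as
 *
 *	gfn = alias->target_gfn + (gfn - alias->base_gfn);
 *
 * which is roughly how the unaliasing helper walks the aliases[] array.
 */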

struct kvm_arch {
	struct kvm_mem_aliases *aliases;

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	u64 vm_init_tsc;
	s64 kvmclock_offset;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
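
/*
 * Editorial note (illustrative): descriptor_table is packed so that it
 * matches the in-memory operand layout expected by the sgdt/sidt and
 * lgdt/lidt instructions: a 16-bit limit immediately followed by the
 * base address, with no compiler-inserted padding between the fields.
 */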

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);	/* __init */
	int (*disabled_by_bios)(void);		/* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);		/* __init */
	void (*hardware_unsetup)(void);		/* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
				struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
	int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);

	const struct trace_print_flags *exit_reasons_str;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
#define EMULTYPE_SKIP		(1 << 2)
int emulate_instruction(struct kvm_vcpu *vcpu,
			unsigned long cr2, u16 error_code, int emulation_type);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
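
/*
 * Editorial sketch (illustrative; the real call sites live in the
 * vendor exit handlers): callers dispatch on the emulation_result
 * returned by emulate_instruction(), roughly:
 *
 *	switch (emulate_instruction(vcpu, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;	handled entirely in the kernel
 *	case EMULATE_DO_MMIO:
 *		return 0;	kvm_run carries an mmio request to userspace
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, ...); and give up
 *	}
 */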

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_fs(void)
{
	u16 seg;

	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_gs(void)
{
	u16 seg;

	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;

	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void kvm_load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void kvm_get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void kvm_get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;

	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r" (image));
}

static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r" (image));
}

static inline void kvm_fx_finit(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn)		\
	"666: " insn "\n\t"				\
	".pushsection .fixup, \"ax\" \n"		\
	"667: \n\t"					\
	__ASM_SIZE(push) " $666b \n\t"			\
	"jmp kvm_handle_fault_on_reboot \n\t"		\
	".popsection \n\t"				\
	".pushsection __ex_table, \"a\" \n\t"		\
	_ASM_PTR " 666b, 667b \n\t"			\
	".popsection"
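
/*
 * Editorial example (illustrative; the vendor modules wrap this in a
 * local helper macro): emitting a virtualization instruction through
 * __kvm_handle_fault_on_reboot() turns a fault during reboot into a
 * jump to kvm_handle_fault_on_reboot() via the exception table, e.g.:
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 */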

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

#endif /* _ASM_X86_KVM_HOST_H */