/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define KVM_MAX_VCPUS 16
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* shadow tables are PAE even on non-PAE hosts */
#define KVM_HPAGE_SHIFT 21
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))

#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>

#define KVM_NR_MEM_OBJS 40

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
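
/*
 * Illustration only, a minimal sketch of the consume side; the helper name
 * is modelled on the mmu code and is shown here as an assumption, not a
 * declaration of this header.  The cache is topped up in a context where
 * sleeping is allowed, then drained on the page-fault path where an
 * allocation must not fail:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */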
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned metaphysical:1;
		unsigned access:3;
		unsigned invalid:1;
	};
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	struct list_head oos_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	bool unsync;
	bool global;
	unsigned int unsync_children;
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);
};

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
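
/*
 * Illustration only: callers never branch on the active paging mode; they
 * dispatch through the hooks above.  handle_guest_fault() is a hypothetical
 * wrapper, not a function declared anywhere in kvm:
 *
 *	static int handle_guest_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 err)
 *	{
 *		return vcpu->arch.mmu.page_fault(vcpu, gva, err);
 *	}
 */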
struct kvm_vcpu_arch {
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	struct kvm_mmu mmu;
	/* only needed in kvm_pv_mmu_op() path, but it's hot so
	 * put it here to avoid allocation */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	gfn_t last_pte_gfn;

	struct {
		gfn_t gfn;	/* presumed gfn during guest pte update */
		pfn_t pfn;	/* pfn corresponding to that gfn */
		int largepage;
		unsigned long mmu_seq;
	} update_pte;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		u8 nr;
	} interrupt;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hv_clock_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	bool nmi_pending;
	bool nmi_injected;
	bool nmi_window_open;

	struct mtrr_state_type mtrr_state;
	u32 pat;
};
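
/*
 * Illustration only, a sketch of the read-side accessor referred to by the
 * comment on regs[] above, assuming the kvm_cache_regs.h helpers of this
 * era.  regs_avail lets the vendor module populate a register lazily on
 * first use; regs_dirty marks registers that must be written back:
 *
 *	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 *						      enum kvm_reg reg)
 *	{
 *		if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 *			kvm_x86_ops->cache_reg(vcpu, reg);
 *		return vcpu->arch.regs[reg];
 *	}
 */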
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct list_head oos_global_pages;
	struct iommu_domain *iommu_domain;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	struct hlist_head irq_ack_notifier_list;
	int vapics_in_nmi_mode;

	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 mmu_unsync_global;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 request_nmi_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	bool (*exception_injected)(struct kvm_vcpu *vcpu);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	int (*get_mt_mask_shift)(void);
};

extern struct kvm_x86_ops *kvm_x86_ops;
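
/*
 * Illustration only: arch-neutral x86 code reaches the vendor module
 * (vmx.c or svm.c) exclusively through this ops table.  A hypothetical
 * call site, shown as a sketch rather than code from this tree:
 *
 *	if (kvm_x86_ops->cpu_has_accelerated_tpr())
 *		kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
 */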
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);

extern bool tdp_enabled;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int emulation_type);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
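
/*
 * Illustration only, a hypothetical exit handler acting on the
 * emulation_result values above, following kvm's exit-handler convention
 * that returning 1 resumes the guest in kernel mode and returning 0 exits
 * to userspace (run, cr2 and error_code are assumed to be in scope):
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;
 *	case EMULATE_DO_MMIO:
 *		return 0;
 *	default:
 *		kvm_report_emulation_failure(vcpu, "example");
 *		return 0;
 *	}
 */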
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);

void kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_global(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
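
/*
 * Illustration only: the lookup above relies on the mmu pairing each
 * shadow page with its backing struct page at allocation time, along the
 * lines of the following (a sketch, assuming sp->spt has just been
 * allocated):
 *
 *	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 */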
static inline u16 kvm_read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void kvm_load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void kvm_get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void kvm_get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}

static inline void kvm_fx_finit(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define MSR_IA32_TIME_STAMP_COUNTER	0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn) \
	"666: " insn "\n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"jmp kvm_handle_fault_on_reboot \n\t" \
	".popsection \n\t" \
	".pushsection __ex_table, \"a\" \n\t" \
	_ASM_PTR " 666b, 667b \n\t" \
	".popsection"

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);

#endif /* _ASM_X86_KVM_HOST_H */