/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_INSTRUCTION_EMUL_H_
#define _VMM_INSTRUCTION_EMUL_H_

#include <sys/mman.h>

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
    uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
    uint64_t wval, int wsize, void *arg);

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
 * callback functions.
 *
 * 'void *vm' should be 'struct vm *' when called from kernel context and
 * 'struct vmctx *' when called from user context.
 */
int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t mrr,
    mem_region_write_t mrw, void *mrarg);

int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0
 * otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
    uint64_t rflags, uint64_t gla);

/* Returns 1 if the 'gla' is not canonical and 0 otherwise. */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);

uint64_t vie_size2mask(int size);

int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
    uint64_t *gla);
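
/*
 * Illustrative sketch (not part of this header): a minimal read/write
 * callback pair matching the typedefs above, backed by a hypothetical
 * device exposing an array of 8-byte registers at guest physical
 * address 'base'. 'struct mydev_softc', 'mmio_read', 'mmio_write' and
 * the final call are assumptions for illustration only; bounds and
 * access-size checks are elided.
 *
 *	static int
 *	mmio_read(void *vm, int cpuid, uint64_t gpa, uint64_t *rval,
 *	    int rsize, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		*rval = sc->regs[(gpa - sc->base) / 8];
 *		return (0);
 *	}
 *
 *	static int
 *	mmio_write(void *vm, int cpuid, uint64_t gpa, uint64_t wval,
 *	    int wsize, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		sc->regs[(gpa - sc->base) / 8] = wval;
 *		return (0);
 *	}
 *
 *	error = vmm_emulate_instruction(vm, cpuid, gpa, vie, &paging,
 *	    mmio_read, mmio_write, sc);
 */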

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from the nested page fault
 * handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'.
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
    struct vm_guest_paging *guest_paging,
    uint64_t rip, int inst_length, struct vie *vie);

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * Returns 0 on success and '*gpa' contains the result of the translation.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa);

void vie_init(struct vie *vie);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason, pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#endif	/* _KERNEL */

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */
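
/*
 * Illustrative kernel-side flow (a sketch, not part of this header):
 * how the fetch/decode/emulate APIs above typically chain together
 * after a nested page fault exits to the hypervisor. 'rip',
 * 'inst_length', 'gla', 'gpa', 'cpu_mode', 'csd' and 'paging' are
 * assumed to come from the hardware exit state; 'mmio_read',
 * 'mmio_write' and 'sc' are hypothetical callbacks as sketched
 * earlier, and error handling is elided.
 *
 *	struct vie vie;
 *
 *	vie_init(&vie);
 *	error = vmm_fetch_instruction(vm, vcpuid, &paging, rip,
 *	    inst_length, &vie);
 *	if (error == 0)
 *		error = vmm_decode_instruction(vm, vcpuid, gla, cpu_mode,
 *		    csd, &vie);
 *	if (error == 0)
 *		error = vmm_emulate_instruction(vm, vcpuid, gpa, &vie,
 *		    &paging, mmio_read, mmio_write, sc);
 */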