/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

#include <sys/mman.h>

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_SUB,
	VIE_OP_TYPE_TWO_BYTE,
	VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_CMP,
	VIE_OP_TYPE_POP,
	VIE_OP_TYPE_MOVS,
	VIE_OP_TYPE_GROUP1,
	VIE_OP_TYPE_STOS,
	VIE_OP_TYPE_BITTEST,
	VIE_OP_TYPE_TWOB_GRP15,
	VIE_OP_TYPE_ADD,
	VIE_OP_TYPE_TEST,
	VIE_OP_TYPE_BEXTR,
	VIE_OP_TYPE_OUTS,
	VIE_OP_TYPE_LAST
};

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
    uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
    uint64_t wval, int wsize, void *arg);

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory
 * region containing 'gpa'.  'mrarg' is an opaque argument that is passed
 * into the callback functions.
 */
int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t mrr,
    mem_region_write_t mrw, void *mrarg);
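
/*
 * Example: a minimal sketch of wiring the callbacks above into an MMIO
 * emulation path, shown for illustration only.  'struct my_dev',
 * 'MY_DEV_BASE' and the register accessors are hypothetical, not part of
 * this API; 'vie' and 'paging' are assumed to hold the decoded instruction
 * and the guest paging state.
 *
 *	static int
 *	my_dev_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
 *	    int rsize, void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		*rval = my_dev_reg_read(dev, gpa - MY_DEV_BASE, rsize);
 *		return (0);
 *	}
 *
 *	static int
 *	my_dev_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
 *	    int wsize, void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		my_dev_reg_write(dev, gpa - MY_DEV_BASE, wval, wsize);
 *		return (0);
 *	}
 *
 * An instruction that faulted on 'gpa' would then be emulated with:
 *
 *	error = vmm_emulate_instruction(vcpu, gpa, vie, paging,
 *	    my_dev_read, my_dev_write, dev);
 */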

int vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
    uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0
 * otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
    uint64_t rflags, uint64_t gla);

/*
 * Returns 1 if the 'gla' is not canonical and 0 otherwise.
 */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);

uint64_t vie_size2mask(int size);

int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
    uint64_t *gla);

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from the nested page fault
 * handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'.
 */
int vmm_fetch_instruction(struct vcpu *vcpu,
    struct vm_guest_paging *guest_paging,
    uint64_t rip, int inst_length, struct vie *vie,
    int *is_fault);

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * retval	is_fault	Interpretation
 *   0		   0		'gpa' contains the result of the translation
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		An unrecoverable hypervisor error occurred
 */
int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);

/*
 * Like vm_gla2gpa, but no exceptions are injected into the guest and
 * PTEs are not changed.
 */
int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
#endif /* _KERNEL */

void vie_restart(struct vie *vie);
void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault.  It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason, pass
 * in VIE_INVALID_GLA instead.
 */
#ifdef _KERNEL
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#else /* !_KERNEL */
/*
 * Permit instruction decoding logic to be compiled outside of the kernel for
 * rapid iteration and validation.  No GLA validation is performed, obviously.
 */
int vmm_decode_instruction(enum vm_cpu_mode cpu_mode, int csd,
    struct vie *vie);
#endif /* _KERNEL */

#endif /* _VMM_INSTRUCTION_EMUL_H_ */
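
/*
 * Example: the end-to-end fetch/decode/emulate flow, sketched under the
 * assumption that the caller has captured 'paging', 'rip', 'inst_length',
 * 'gla', 'cpu_mode', 'csd' and 'gpa' from the VM exit information.
 * 'mmio_read', 'mmio_write' and 'mmio_arg' stand in for callbacks like
 * those sketched earlier; error and fault handling is elided for brevity.
 *
 *	struct vie vie;
 *	int error, is_fault;
 *
 *	vie_init(&vie, NULL, 0);
 *	error = vmm_fetch_instruction(vcpu, &paging, rip, inst_length,
 *	    &vie, &is_fault);
 *	if (error == 0 && !is_fault)
 *		error = vmm_decode_instruction(vcpu, gla, cpu_mode, csd,
 *		    &vie);
 *	if (error == 0)
 *		error = vmm_emulate_instruction(vcpu, gpa, &vie, &paging,
 *		    mmio_read, mmio_write, mmio_arg);
 *
 * Pass VIE_INVALID_GLA as 'gla' when the hardware assist does not supply
 * one.
 */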