/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

#include <sys/mman.h>

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);
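
/*
 * Example (illustrative sketch): a callback pair for a hypothetical MMIO
 * register.  'struct mydev_softc' and its 'status' field are invented for
 * this sketch; a real callback would typically dispatch on 'gpa' and honor
 * the access size.
 *
 *	struct mydev_softc {
 *		uint64_t status;
 *	};
 *
 *	static int
 *	mydev_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
 *	    int rsize, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		// Return the register value truncated to the access size.
 *		*rval = sc->status & vie_size2mask(rsize);
 *		return (0);
 *	}
 *
 *	static int
 *	mydev_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
 *	    int wsize, void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		sc->status = wval & vie_size2mask(wsize);
 *		return (0);
 *	}
 */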

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
 * callback functions.
 *
 * 'vcpu' refers to the kernel's 'struct vcpu' when called from kernel
 * context and to libvmmapi's 'struct vcpu' when called from user context.
 */
int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t mrr,
    mem_region_write_t mrw, void *mrarg);
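
/*
 * Example (sketch): emulating a decoded MMIO access with the hypothetical
 * callbacks above.  'vcpu', 'gpa', 'vie', 'paging' and 'sc' are assumed to
 * come from the surrounding exit-handling code.
 *
 *	error = vmm_emulate_instruction(vcpu, gpa, vie, paging,
 *	    mydev_read, mydev_write, sc);
 *	if (error != 0) {
 *		// The access could not be emulated (e.g. an unhandled
 *		// opcode); the caller decides how to report this.
 *	}
 */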

int vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
    uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0 otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
    uint64_t rflags, uint64_t gla);
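
/*
 * Example (sketch): an alignment check exception is only warranted when
 * alignment checking is enabled (CR0.AM and RFLAGS.AC both set) at CPL 3
 * and 'gla' is not aligned to the operand size, e.g.:
 *
 *	vie_alignment_check(3, 4, CR0_AM, PSL_AC, 0x1000)	-> 0
 *	vie_alignment_check(3, 4, CR0_AM, PSL_AC, 0x1001)	-> 1
 *	vie_alignment_check(0, 4, CR0_AM, PSL_AC, 0x1001)	-> 0
 */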

/* Returns 1 if the 'gla' is not canonical and 0 otherwise. */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);
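
/*
 * Example: in 64-bit mode bits 63:48 of 'gla' must be a sign-extension of
 * bit 47; other CPU modes are not checked.
 *
 *	vie_canonical_check(CPU_MODE_64BIT, 0xffff800000000000UL)	-> 0
 *	vie_canonical_check(CPU_MODE_64BIT, 0x0000800000000000UL)	-> 1
 */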

uint64_t vie_size2mask(int size);
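
/*
 * Example: vie_size2mask(1) == 0xff, vie_size2mask(2) == 0xffff,
 * vie_size2mask(4) == 0xffffffff and vie_size2mask(8) == ~0UL.
 */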

int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
    uint64_t *gla);

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from the nested page fault handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'.
 */
int vmm_fetch_instruction(struct vcpu *vcpu,
			  struct vm_guest_paging *guest_paging,
			  uint64_t rip, int inst_length, struct vie *vie,
			  int *is_fault);
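
/*
 * Example (sketch, kernel context): fetching the faulting instruction.
 * 'paging', 'rip' and 'inst_length' are assumed to come from the VM exit
 * information; error handling follows the vm_gla2gpa() convention below.
 *
 *	vie_init(vie, NULL, 0);
 *	error = vmm_fetch_instruction(vcpu, &paging, rip, inst_length,
 *	    vie, &is_fault);
 *	if (error != 0)
 *		return (error);		// unrecoverable error
 *	if (is_fault)
 *		return (0);		// exception injected; resume the guest
 *	// 'vie' now holds the instruction bytes and can be decoded.
 */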

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * retval	is_fault	Interpretation
 *   0		   0		'gpa' contains result of the translation
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		An unrecoverable hypervisor error occurred
 */
int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
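
/*
 * Example (sketch) of acting on the table above:
 *
 *	error = vm_gla2gpa(vcpu, &paging, gla, PROT_READ, &gpa, &is_fault);
 *	if (error == EFAULT)
 *		return (EFAULT);	// unrecoverable hypervisor error
 *	if (is_fault)
 *		return (0);		// exception injected; resume the guest
 *	// Translation succeeded; 'gpa' is valid here.
 */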

/*
 * Like vm_gla2gpa, but no exceptions are injected into the guest and
 * PTEs are not changed.
 */
int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
#endif /* _KERNEL */

void vie_restart(struct vie *vie);
void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
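
/*
 * Example (sketch): seeding a 'vie' with instruction bytes that are already
 * known, e.g. supplied by the hardware assist, so the fetch step can be
 * skipped.  The bytes encode "mov (%rax),%eax" and are only illustrative.
 *
 *	const uint8_t bytes[] = { 0x8b, 0x00 };
 *
 *	vie_init(vie, (const char *)bytes, sizeof(bytes));
 *
 * vie_restart() keeps any fetched bytes but clears the decoded state so the
 * same instruction can be decoded and emulated again.
 */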

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason, pass
 * in VIE_INVALID_GLA instead.
 */
#ifdef _KERNEL
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
			   enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#else /* !_KERNEL */
/*
 * Permit instruction decoding logic to be compiled outside of the kernel for
 * rapid iteration and validation.  No GLA validation is performed, obviously.
 */
int vmm_decode_instruction(enum vm_cpu_mode cpu_mode, int csd,
    struct vie *vie);
#endif	/* _KERNEL */
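
/*
 * Example (sketch): decoding outside the kernel, e.g. in a test harness
 * built against the userspace copy of the emulation code.  'csd' is the
 * CS descriptor D bit, which is 0 in 64-bit mode.
 *
 *	struct vie vie;
 *	const uint8_t bytes[] = { 0x8b, 0x00 };	// mov (%rax),%eax
 *
 *	vie_init(&vie, (const char *)bytes, sizeof(bytes));
 *	error = vmm_decode_instruction(CPU_MODE_64BIT, 0, &vie);
 *	// error == 0 means 'vie' is decoded and ready for emulation.
 */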

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */