1a2da7af6SNeel Natu /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
3c49761ddSPedro F. Giffuni *
4a2da7af6SNeel Natu * Copyright (c) 2012 Sandvine, Inc.
5a2da7af6SNeel Natu * Copyright (c) 2012 NetApp, Inc.
6a2da7af6SNeel Natu * All rights reserved.
7a2da7af6SNeel Natu *
8a2da7af6SNeel Natu * Redistribution and use in source and binary forms, with or without
9a2da7af6SNeel Natu * modification, are permitted provided that the following conditions
10a2da7af6SNeel Natu * are met:
11a2da7af6SNeel Natu * 1. Redistributions of source code must retain the above copyright
12a2da7af6SNeel Natu * notice, this list of conditions and the following disclaimer.
13a2da7af6SNeel Natu * 2. Redistributions in binary form must reproduce the above copyright
14a2da7af6SNeel Natu * notice, this list of conditions and the following disclaimer in the
15a2da7af6SNeel Natu * documentation and/or other materials provided with the distribution.
16a2da7af6SNeel Natu *
17f5efbffeSEd Maste * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18a2da7af6SNeel Natu * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19a2da7af6SNeel Natu * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20f5efbffeSEd Maste * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21a2da7af6SNeel Natu * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22a2da7af6SNeel Natu * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23a2da7af6SNeel Natu * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24a2da7af6SNeel Natu * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25a2da7af6SNeel Natu * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26a2da7af6SNeel Natu * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27a2da7af6SNeel Natu * SUCH DAMAGE.
28a2da7af6SNeel Natu */
29a2da7af6SNeel Natu
30a2da7af6SNeel Natu #include <sys/cdefs.h>
31ba9b7bf7SNeel Natu #ifdef _KERNEL
32a2da7af6SNeel Natu #include <sys/param.h>
33a2da7af6SNeel Natu #include <sys/pcpu.h>
34a2da7af6SNeel Natu #include <sys/systm.h>
35f888763dSNeel Natu #include <sys/proc.h>
36a2da7af6SNeel Natu
37a2da7af6SNeel Natu #include <vm/vm.h>
38a2da7af6SNeel Natu #include <vm/pmap.h>
39a2da7af6SNeel Natu
40a2da7af6SNeel Natu #include <machine/vmparam.h>
41a2da7af6SNeel Natu #include <machine/vmm.h>
42c76c2a19SMark Johnston
43c76c2a19SMark Johnston #include <dev/vmm/vmm_mem.h>
44ba9b7bf7SNeel Natu #else /* !_KERNEL */
45ba9b7bf7SNeel Natu #include <sys/types.h>
46ba9b7bf7SNeel Natu #include <sys/errno.h>
47d665d229SNeel Natu #include <sys/_iovec.h>
48a2da7af6SNeel Natu
49ba9b7bf7SNeel Natu #include <machine/vmm.h>
50ba9b7bf7SNeel Natu
51b645fd45SConrad Meyer #include <err.h>
52a7424861SNeel Natu #include <assert.h>
53b645fd45SConrad Meyer #include <stdbool.h>
544daa95f8SConrad Meyer #include <stddef.h>
55b645fd45SConrad Meyer #include <stdio.h>
564daa95f8SConrad Meyer #include <string.h>
57b645fd45SConrad Meyer #include <strings.h>
58ba9b7bf7SNeel Natu #include <vmmapi.h>
59d4e82073SRobert Wing #define __diagused
60a7424861SNeel Natu #define KASSERT(exp,msg) assert((exp))
61b645fd45SConrad Meyer #define panic(...) errx(4, __VA_ARGS__)
62ba9b7bf7SNeel Natu #endif /* _KERNEL */
63ba9b7bf7SNeel Natu
64e813a873SNeel Natu #include <machine/vmm_instruction_emul.h>
65a7424861SNeel Natu #include <x86/psl.h>
66a7424861SNeel Natu #include <x86/specialreg.h>
67ba9b7bf7SNeel Natu
/*
 * struct vie_op.op_type
 *
 * Classifies a decoded opcode so the emulation dispatcher can select the
 * handler (one per instruction family) for the faulting instruction.
 */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_SUB,
	VIE_OP_TYPE_TWO_BYTE,	/* 0x0F escape: look up in two_byte_opcodes */
	VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_CMP,
	VIE_OP_TYPE_POP,
	VIE_OP_TYPE_MOVS,
	VIE_OP_TYPE_GROUP1,	/* ModRM:reg selects the actual operation */
	VIE_OP_TYPE_STOS,
	VIE_OP_TYPE_BITTEST,
	VIE_OP_TYPE_TWOB_GRP15,
	VIE_OP_TYPE_ADD,
	VIE_OP_TYPE_TEST,
	VIE_OP_TYPE_BEXTR,
	VIE_OP_TYPE_LAST
};
91ba9b7bf7SNeel Natu
/* struct vie_op.op_flags: per-opcode decode hints */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)  /* instruction has no ModRM byte */
#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4) /* skip linear addr check */
98ba9b7bf7SNeel Natu
/*
 * Decode table for three-byte opcodes behind the 0F 38 escape, indexed by
 * the third opcode byte.  Unlisted entries are zero-initialized
 * (op_type VIE_OP_TYPE_NONE == 0).
 */
static const struct vie_op three_byte_opcodes_0f38[256] = {
	[0xF7] = {
		.op_byte = 0xF7,
		.op_type = VIE_OP_TYPE_BEXTR,
	},
};
105cfdea69dSConrad Meyer
/*
 * Decode table for two-byte opcodes behind the 0F escape, indexed by the
 * second opcode byte.  Unlisted entries are zero-initialized
 * (op_type VIE_OP_TYPE_NONE == 0).
 */
static const struct vie_op two_byte_opcodes[256] = {
	[0xAE] = {
		/* Group 15 extended opcode */
		.op_byte = 0xAE,
		.op_type = VIE_OP_TYPE_TWOB_GRP15,
	},
	[0xB6] = {
		/* movzx r, r/m8 */
		.op_byte = 0xB6,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xB7] = {
		/* movzx r, r/m16 */
		.op_byte = 0xB7,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xBA] = {
		/* Group 8 extended opcode: bt/bts/btr/btc r/m, imm8 */
		.op_byte = 0xBA,
		.op_type = VIE_OP_TYPE_BITTEST,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xBE] = {
		/* movsx r, r/m8 */
		.op_byte = 0xBE,
		.op_type = VIE_OP_TYPE_MOVSX,
	},
};
12954e03e07STycho Nightingale
/*
 * Decode table for single-byte opcodes, indexed by the opcode byte.
 * Unlisted entries are zero-initialized (op_type VIE_OP_TYPE_NONE == 0).
 * Entry 0x0F redirects decoding to two_byte_opcodes above.
 */
static const struct vie_op one_byte_opcodes[256] = {
	[0x03] = {
		.op_byte = 0x03,
		.op_type = VIE_OP_TYPE_ADD,
	},
	[0x0F] = {
		.op_byte = 0x0F,
		.op_type = VIE_OP_TYPE_TWO_BYTE
	},
	[0x0B] = {
		.op_byte = 0x0B,
		.op_type = VIE_OP_TYPE_OR,
	},
	[0x2B] = {
		.op_byte = 0x2B,
		.op_type = VIE_OP_TYPE_SUB,
	},
	[0x39] = {
		.op_byte = 0x39,
		.op_type = VIE_OP_TYPE_CMP,
	},
	[0x3B] = {
		.op_byte = 0x3B,
		.op_type = VIE_OP_TYPE_CMP,
	},
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8A] = {
		.op_byte = 0x8A,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xA1] = {
		.op_byte = 0xA1,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA3] = {
		.op_byte = 0xA3,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA4] = {
		.op_byte = 0xA4,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xA5] = {
		.op_byte = 0xA5,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAA] = {
		.op_byte = 0xAA,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAB] = {
		.op_byte = 0xAB,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xC6] = {
		/* XXX Group 11 extended opcode - not just MOV */
		.op_byte = 0xC6,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x80] = {
		/* Group 1 extended opcode */
		.op_byte = 0x80,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0x81] = {
		/* Group 1 extended opcode */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* Group 1 extended opcode */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0x8F] = {
		/* XXX Group 1A extended opcode - not just POP */
		.op_byte = 0x8F,
		.op_type = VIE_OP_TYPE_POP,
	},
	[0xF6] = {
		/* XXX Group 3 extended opcode - not just TEST */
		.op_byte = 0xF6,
		.op_type = VIE_OP_TYPE_TEST,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xF7] = {
		/* XXX Group 3 extended opcode - not just TEST */
		.op_byte = 0xF7,
		.op_type = VIE_OP_TYPE_TEST,
		.op_flags = VIE_OP_F_IMM,
	},
	[0xFF] = {
		/* XXX Group 5 extended opcode - not just PUSH */
		.op_byte = 0xFF,
		.op_type = VIE_OP_TYPE_PUSH,
	}
};
257ba9b7bf7SNeel Natu
/*
 * struct vie.mod: values of the ModRM byte's 'mod' field (bits 7:6).
 */
#define	VIE_MOD_INDIRECT	0	/* [reg], no displacement */
#define	VIE_MOD_INDIRECT_DISP8	1	/* [reg] + disp8 */
#define	VIE_MOD_INDIRECT_DISP32	2	/* [reg] + disp32 */
#define	VIE_MOD_DIRECT		3	/* register operand, no memory */

/*
 * struct vie.rm: special encodings of the ModRM byte's 'r/m' field
 * (bits 2:0) when mod != VIE_MOD_DIRECT.
 */
#define	VIE_RM_SIB		4	/* SIB byte follows ModRM */
#define	VIE_RM_DISP32		5	/* disp32 (RIP-relative in 64-bit mode) */

#define	GB			(1024 * 1024 * 1024)
269a2da7af6SNeel Natu
/*
 * Map a REX-extended ModRM register number (0-15) to the corresponding
 * guest general-purpose register identifier.
 */
static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};
288a2da7af6SNeel Natu
/* Mask covering the low 'size' bytes, indexed by operand size in bytes. */
static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};
295ba9b7bf7SNeel Natu
296ba9b7bf7SNeel Natu static int
vie_read_register(struct vcpu * vcpu,enum vm_reg_name reg,uint64_t * rval)2977d9ef309SJohn Baldwin vie_read_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t *rval)
298ba9b7bf7SNeel Natu {
299ba9b7bf7SNeel Natu int error;
300ba9b7bf7SNeel Natu
3017d9ef309SJohn Baldwin error = vm_get_register(vcpu, reg, rval);
302ba9b7bf7SNeel Natu
303ba9b7bf7SNeel Natu return (error);
304ba9b7bf7SNeel Natu }
305ba9b7bf7SNeel Natu
306f7a9f178SNeel Natu static void
vie_calc_bytereg(struct vie * vie,enum vm_reg_name * reg,int * lhbr)307f7a9f178SNeel Natu vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
3088faceb32SNeel Natu {
309f7a9f178SNeel Natu *lhbr = 0;
310f7a9f178SNeel Natu *reg = gpr_map[vie->reg];
3118faceb32SNeel Natu
3128faceb32SNeel Natu /*
313f7a9f178SNeel Natu * 64-bit mode imposes limitations on accessing legacy high byte
314f7a9f178SNeel Natu * registers (lhbr).
3158faceb32SNeel Natu *
3168faceb32SNeel Natu * The legacy high-byte registers cannot be addressed if the REX
3178faceb32SNeel Natu * prefix is present. In this case the values 4, 5, 6 and 7 of the
3188faceb32SNeel Natu * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
3198faceb32SNeel Natu *
3208faceb32SNeel Natu * If the REX prefix is not present then the values 4, 5, 6 and 7
3218faceb32SNeel Natu * of the 'ModRM:reg' field address the legacy high-byte registers,
3228faceb32SNeel Natu * %ah, %ch, %dh and %bh respectively.
3238faceb32SNeel Natu */
3248faceb32SNeel Natu if (!vie->rex_present) {
3258faceb32SNeel Natu if (vie->reg & 0x4) {
326f7a9f178SNeel Natu *lhbr = 1;
327f7a9f178SNeel Natu *reg = gpr_map[vie->reg & 0x3];
328f7a9f178SNeel Natu }
3298faceb32SNeel Natu }
3308faceb32SNeel Natu }
3318faceb32SNeel Natu
332f7a9f178SNeel Natu static int
vie_read_bytereg(struct vcpu * vcpu,struct vie * vie,uint8_t * rval)3337d9ef309SJohn Baldwin vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval)
334f7a9f178SNeel Natu {
335f7a9f178SNeel Natu uint64_t val;
336f7a9f178SNeel Natu int error, lhbr;
337f7a9f178SNeel Natu enum vm_reg_name reg;
338f7a9f178SNeel Natu
339f7a9f178SNeel Natu vie_calc_bytereg(vie, ®, &lhbr);
3407d9ef309SJohn Baldwin error = vm_get_register(vcpu, reg, &val);
341f7a9f178SNeel Natu
342f7a9f178SNeel Natu /*
343f7a9f178SNeel Natu * To obtain the value of a legacy high byte register shift the
344f7a9f178SNeel Natu * base register right by 8 bits (%ah = %rax >> 8).
345f7a9f178SNeel Natu */
346f7a9f178SNeel Natu if (lhbr)
347f7a9f178SNeel Natu *rval = val >> 8;
348f7a9f178SNeel Natu else
349f7a9f178SNeel Natu *rval = val;
350f7a9f178SNeel Natu return (error);
351f7a9f178SNeel Natu }
352f7a9f178SNeel Natu
353f7a9f178SNeel Natu static int
vie_write_bytereg(struct vcpu * vcpu,struct vie * vie,uint8_t byte)3547d9ef309SJohn Baldwin vie_write_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t byte)
355f7a9f178SNeel Natu {
356f7a9f178SNeel Natu uint64_t origval, val, mask;
357f7a9f178SNeel Natu int error, lhbr;
358f7a9f178SNeel Natu enum vm_reg_name reg;
359f7a9f178SNeel Natu
360f7a9f178SNeel Natu vie_calc_bytereg(vie, ®, &lhbr);
3617d9ef309SJohn Baldwin error = vm_get_register(vcpu, reg, &origval);
362f7a9f178SNeel Natu if (error == 0) {
363f7a9f178SNeel Natu val = byte;
364f7a9f178SNeel Natu mask = 0xff;
365f7a9f178SNeel Natu if (lhbr) {
366f7a9f178SNeel Natu /*
367f7a9f178SNeel Natu * Shift left by 8 to store 'byte' in a legacy high
368f7a9f178SNeel Natu * byte register.
369f7a9f178SNeel Natu */
370f7a9f178SNeel Natu val <<= 8;
371f7a9f178SNeel Natu mask <<= 8;
372f7a9f178SNeel Natu }
373f7a9f178SNeel Natu val |= origval & ~mask;
3747d9ef309SJohn Baldwin error = vm_set_register(vcpu, reg, val);
375f7a9f178SNeel Natu }
3768faceb32SNeel Natu return (error);
3778faceb32SNeel Natu }
3788faceb32SNeel Natu
379d17b5104SNeel Natu int
vie_update_register(struct vcpu * vcpu,enum vm_reg_name reg,uint64_t val,int size)3807d9ef309SJohn Baldwin vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
381ba9b7bf7SNeel Natu uint64_t val, int size)
382ba9b7bf7SNeel Natu {
383ba9b7bf7SNeel Natu int error;
384ba9b7bf7SNeel Natu uint64_t origval;
385ba9b7bf7SNeel Natu
386ba9b7bf7SNeel Natu switch (size) {
387ba9b7bf7SNeel Natu case 1:
388ba9b7bf7SNeel Natu case 2:
3897d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, &origval);
390ba9b7bf7SNeel Natu if (error)
391ba9b7bf7SNeel Natu return (error);
392ba9b7bf7SNeel Natu val &= size2mask[size];
393ba9b7bf7SNeel Natu val |= origval & ~size2mask[size];
394ba9b7bf7SNeel Natu break;
395ba9b7bf7SNeel Natu case 4:
396ba9b7bf7SNeel Natu val &= 0xffffffffUL;
397ba9b7bf7SNeel Natu break;
398ba9b7bf7SNeel Natu case 8:
399ba9b7bf7SNeel Natu break;
400ba9b7bf7SNeel Natu default:
401ba9b7bf7SNeel Natu return (EINVAL);
402ba9b7bf7SNeel Natu }
403ba9b7bf7SNeel Natu
4047d9ef309SJohn Baldwin error = vm_set_register(vcpu, reg, val);
405ba9b7bf7SNeel Natu return (error);
406ba9b7bf7SNeel Natu }
407ba9b7bf7SNeel Natu
/* RFLAGS bits that arithmetic instructions may modify. */
#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)

/*
 * Return the status flags that would result from doing (x - y).
 *
 * Each expansion defines getcc{8,16,32,64}(): the host executes the
 * same-width 'sub' and captures the resulting RFLAGS via pushfq/popq.
 * The trailing 'struct __hack' forces a semicolon at the use site.
 */
#define	GETCC(sz)							\
static u_long								\
getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
	u_long rflags;							\
									\
	__asm __volatile("sub %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
	return (rflags);						\
} struct __hack

GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);
428d665d229SNeel Natu
429d665d229SNeel Natu static u_long
getcc(int opsize,uint64_t x,uint64_t y)430d665d229SNeel Natu getcc(int opsize, uint64_t x, uint64_t y)
431d665d229SNeel Natu {
4324c98655eSNeel Natu KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
433d665d229SNeel Natu ("getcc: invalid operand size %d", opsize));
434d665d229SNeel Natu
4354c98655eSNeel Natu if (opsize == 1)
4364c98655eSNeel Natu return (getcc8(x, y));
4374c98655eSNeel Natu else if (opsize == 2)
438d665d229SNeel Natu return (getcc16(x, y));
439d665d229SNeel Natu else if (opsize == 4)
440d665d229SNeel Natu return (getcc32(x, y));
441d665d229SNeel Natu else
442d665d229SNeel Natu return (getcc64(x, y));
443d665d229SNeel Natu }
444d665d229SNeel Natu
/*
 * Macro creation of functions getaddflags{8,16,32,64}
 *
 * Each expansion returns the status flags that would result from doing
 * (x + y) at the given width, captured from the host's RFLAGS via
 * pushfq/popq after a same-width 'add'.
 */
#define	GETADDFLAGS(sz)							\
static u_long								\
getaddflags##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
	u_long rflags;							\
									\
	__asm __volatile("add %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
	return (rflags);						\
} struct __hack

GETADDFLAGS(8);
GETADDFLAGS(16);
GETADDFLAGS(32);
GETADDFLAGS(64);
463c2b4ceddSJohn Baldwin
464c2b4ceddSJohn Baldwin static u_long
getaddflags(int opsize,uint64_t x,uint64_t y)465c2b4ceddSJohn Baldwin getaddflags(int opsize, uint64_t x, uint64_t y)
466c2b4ceddSJohn Baldwin {
467c2b4ceddSJohn Baldwin KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
468c2b4ceddSJohn Baldwin ("getaddflags: invalid operand size %d", opsize));
469c2b4ceddSJohn Baldwin
470c2b4ceddSJohn Baldwin if (opsize == 1)
471c2b4ceddSJohn Baldwin return (getaddflags8(x, y));
472c2b4ceddSJohn Baldwin else if (opsize == 2)
473c2b4ceddSJohn Baldwin return (getaddflags16(x, y));
474c2b4ceddSJohn Baldwin else if (opsize == 4)
475c2b4ceddSJohn Baldwin return (getaddflags32(x, y));
476c2b4ceddSJohn Baldwin else
477c2b4ceddSJohn Baldwin return (getaddflags64(x, y));
478c2b4ceddSJohn Baldwin }
479c2b4ceddSJohn Baldwin
/*
 * Return the status flags that would result from doing (x & y).
 *
 * Each expansion defines getandflags{8,16,32,64}(): the host executes a
 * same-width 'and' and captures the resulting RFLAGS via pushfq/popq.
 */
#define	GETANDFLAGS(sz)							\
static u_long								\
getandflags##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
	u_long rflags;							\
									\
	__asm __volatile("and %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
	return (rflags);						\
} struct __hack

GETANDFLAGS(8);
GETANDFLAGS(16);
GETANDFLAGS(32);
GETANDFLAGS(64);
498e4da41f9SRodney W. Grimes
499e4da41f9SRodney W. Grimes static u_long
getandflags(int opsize,uint64_t x,uint64_t y)500e4da41f9SRodney W. Grimes getandflags(int opsize, uint64_t x, uint64_t y)
501e4da41f9SRodney W. Grimes {
502e4da41f9SRodney W. Grimes KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
503e4da41f9SRodney W. Grimes ("getandflags: invalid operand size %d", opsize));
504e4da41f9SRodney W. Grimes
505e4da41f9SRodney W. Grimes if (opsize == 1)
506e4da41f9SRodney W. Grimes return (getandflags8(x, y));
507e4da41f9SRodney W. Grimes else if (opsize == 2)
508e4da41f9SRodney W. Grimes return (getandflags16(x, y));
509e4da41f9SRodney W. Grimes else if (opsize == 4)
510e4da41f9SRodney W. Grimes return (getandflags32(x, y));
511e4da41f9SRodney W. Grimes else
512e4da41f9SRodney W. Grimes return (getandflags64(x, y));
513e4da41f9SRodney W. Grimes }
514e4da41f9SRodney W. Grimes
/*
 * Emulate a MOV instruction that faulted while accessing guest physical
 * address 'gpa'.  The decoded instruction is in 'vie'; guest memory is
 * accessed through the 'memread'/'memwrite' callbacks with opaque
 * argument 'arg'.  Returns 0 on success, EINVAL for an opcode this
 * function does not handle, or the error from a failing callback or
 * register access.
 */
static int
emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vcpu, vie, &byte);
		if (error == 0)
			error = memwrite(vcpu, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r	mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vcpu, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vcpu, gpa, val, size, arg);
		}
		break;
	case 0x8A:
		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vcpu, gpa, &val, size, arg);
		if (error == 0)
			error = vie_write_bytereg(vcpu, vie, val);
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		error = memread(vcpu, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vcpu, reg, val, size);
		}
		break;
	case 0xA1:
		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vcpu, gpa, &val, size, arg);
		if (error == 0) {
			reg = VM_REG_GUEST_RAX;
			error = vie_update_register(vcpu, reg, val, size);
		}
		break;
	case 0xA3:
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vcpu, gpa, val, size, arg);
		}
		break;
	case 0xC6:
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vcpu, gpa, vie->immediate, size, arg);
		break;
	case 0xC7:
		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m16, imm16
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vcpu, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
628ba9b7bf7SNeel Natu
/*
 * Emulate a MOVZX/MOVSX (0F B6/B7/BE) instruction that faulted while
 * reading guest physical address 'gpa'.  The source operand is fetched
 * via 'memread', extended to the destination operand size, and written
 * to the destination register.  'memwrite' is unused.  Returns 0 on
 * success, EINVAL for an unhandled opcode, or the error from a failing
 * callback or register access.
 */
static int
emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0xB6:
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vcpu, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val = (uint8_t)val;

		/* write the result */
		error = vie_update_register(vcpu, reg, val, size);
		break;
	case 0xB7:
		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vcpu, gpa, &val, 2, arg);
		if (error)
			return (error);

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val = (uint16_t)val;

		error = vie_update_register(vcpu, reg, val, size);
		break;
	case 0xBE:
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vcpu, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vcpu, reg, val, size);
		break;
	default:
		break;
	}
	return (error);
}
71354e03e07STycho Nightingale
71475346353SNeel Natu /*
71575346353SNeel Natu * Helper function to calculate and validate a linear address.
71675346353SNeel Natu */
71775346353SNeel Natu static int
get_gla(struct vcpu * vcpu,struct vie * vie __unused,struct vm_guest_paging * paging,int opsize,int addrsize,int prot,enum vm_reg_name seg,enum vm_reg_name gpr,uint64_t * gla,int * fault)7187d9ef309SJohn Baldwin get_gla(struct vcpu *vcpu, struct vie *vie __unused,
71998d920d9SMark Johnston struct vm_guest_paging *paging, int opsize, int addrsize, int prot,
72098d920d9SMark Johnston enum vm_reg_name seg, enum vm_reg_name gpr, uint64_t *gla, int *fault)
72175346353SNeel Natu {
72275346353SNeel Natu struct seg_desc desc;
72375346353SNeel Natu uint64_t cr0, val, rflags;
7245241577aSRobert Wing int error __diagused;
72575346353SNeel Natu
7267d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
72775346353SNeel Natu KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
72875346353SNeel Natu
7297d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
73075346353SNeel Natu KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
73175346353SNeel Natu
7327d9ef309SJohn Baldwin error = vm_get_seg_desc(vcpu, seg, &desc);
73375346353SNeel Natu KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
73475346353SNeel Natu __func__, error, seg));
73575346353SNeel Natu
7367d9ef309SJohn Baldwin error = vie_read_register(vcpu, gpr, &val);
73775346353SNeel Natu KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
73875346353SNeel Natu error, gpr));
73975346353SNeel Natu
74075346353SNeel Natu if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
74175346353SNeel Natu addrsize, prot, gla)) {
74275346353SNeel Natu if (seg == VM_REG_GUEST_SS)
7437d9ef309SJohn Baldwin vm_inject_ss(vcpu, 0);
74475346353SNeel Natu else
7457d9ef309SJohn Baldwin vm_inject_gp(vcpu);
7469c4d5478SNeel Natu goto guest_fault;
74775346353SNeel Natu }
74875346353SNeel Natu
74975346353SNeel Natu if (vie_canonical_check(paging->cpu_mode, *gla)) {
75075346353SNeel Natu if (seg == VM_REG_GUEST_SS)
7517d9ef309SJohn Baldwin vm_inject_ss(vcpu, 0);
75275346353SNeel Natu else
7537d9ef309SJohn Baldwin vm_inject_gp(vcpu);
7549c4d5478SNeel Natu goto guest_fault;
75575346353SNeel Natu }
75675346353SNeel Natu
75775346353SNeel Natu if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
7587d9ef309SJohn Baldwin vm_inject_ac(vcpu, 0);
7599c4d5478SNeel Natu goto guest_fault;
76075346353SNeel Natu }
76175346353SNeel Natu
7629c4d5478SNeel Natu *fault = 0;
7639c4d5478SNeel Natu return (0);
7649c4d5478SNeel Natu
7659c4d5478SNeel Natu guest_fault:
7669c4d5478SNeel Natu *fault = 1;
76775346353SNeel Natu return (0);
76875346353SNeel Natu }
76975346353SNeel Natu
/*
 * Emulate the MOVS (string move) instruction for an access that trapped
 * into the hypervisor at guest-physical address 'gpa'.  Handles the
 * memory<->MMIO combinations described in the table below, advances
 * %rsi/%rdi per the direction flag, and re-arms the instruction for
 * "rep"-prefixed forms.  Returns 0 or EFAULT.
 */
static int
emulate_movs(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
	uint64_t rcx, rdi, rsi, rflags;
	int error, fault, opsize, seg, repeat;

	/* MOVSB (opcode 0xA4) always moves one byte regardless of opsize. */
	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
	val = 0;
	error = 0;

	/*
	 * XXX although the MOVS instruction is only supposed to be used with
	 * the "rep" prefix some guests like FreeBSD will use "repnz" instead.
	 *
	 * Empirically the "repnz" prefix has identical behavior to "rep"
	 * and the zero flag does not make a difference.
	 */
	repeat = vie->repz_present | vie->repnz_present;

	if (repeat) {
		error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

		/*
		 * The count register is %rcx, %ecx or %cx depending on the
		 * address size of the instruction.  A zero count means the
		 * instruction is a no-op.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) == 0) {
			error = 0;
			goto done;
		}
	}

	/*
	 *	Source		Destination	Comments
	 *	--------------------------------------------
	 * (1)	memory		memory		n/a
	 * (2)	memory		mmio		emulated
	 * (3)	mmio		memory		emulated
	 * (4)	mmio		mmio		emulated
	 *
	 * At this point we don't have sufficient information to distinguish
	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
	 * out because it will succeed only when operating on regular memory.
	 *
	 * XXX the emulation doesn't properly handle the case where 'gpa'
	 * is straddling the boundary between the normal memory and MMIO.
	 */

	/* The source segment defaults to %ds unless overridden by a prefix. */
	seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
	error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
	if (error || fault)
		goto done;

	error = vm_copy_setup(vcpu, paging, srcaddr, opsize, PROT_READ,
	    copyinfo, nitems(copyinfo), &fault);
	if (error == 0) {
		if (fault)
			goto done;	/* Resume guest to handle fault */

		/*
		 * case (2): read from system memory and write to mmio.
		 */
		vm_copyin(copyinfo, &val, opsize);
		vm_copy_teardown(copyinfo, nitems(copyinfo));
		error = memwrite(vcpu, gpa, val, opsize, arg);
		if (error)
			goto done;
	} else {
		/*
		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
		 * if 'srcaddr' is in the mmio space.
		 */

		/* Destination of MOVS is always %es:%rdi (not overridable). */
		error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
		    &fault);
		if (error || fault)
			goto done;

		error = vm_copy_setup(vcpu, paging, dstaddr, opsize,
		    PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
		if (error == 0) {
			if (fault)
				goto done;    /* Resume guest to handle fault */

			/*
			 * case (3): read from MMIO and write to system memory.
			 *
			 * A MMIO read can have side-effects so we
			 * commit to it only after vm_copy_setup() is
			 * successful. If a page-fault needs to be
			 * injected into the guest then it will happen
			 * before the MMIO read is attempted.
			 */
			error = memread(vcpu, gpa, &val, opsize, arg);
			if (error)
				goto done;

			vm_copyout(&val, copyinfo, opsize);
			vm_copy_teardown(copyinfo, nitems(copyinfo));
		} else {
			/*
			 * Case (4): read from and write to mmio.
			 *
			 * Commit to the MMIO read/write (with potential
			 * side-effects) only after we are sure that the
			 * instruction is not going to be restarted due
			 * to address translation faults.
			 */
			error = vm_gla2gpa(vcpu, paging, srcaddr,
			    PROT_READ, &srcgpa, &fault);
			if (error || fault)
				goto done;

			error = vm_gla2gpa(vcpu, paging, dstaddr,
			    PROT_WRITE, &dstgpa, &fault);
			if (error || fault)
				goto done;

			error = memread(vcpu, srcgpa, &val, opsize, arg);
			if (error)
				goto done;

			error = memwrite(vcpu, dstgpa, val, opsize, arg);
			if (error)
				goto done;
		}
	}

	error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi);
	KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));

	error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	/* The direction flag selects auto-decrement vs. auto-increment. */
	if (rflags & PSL_D) {
		rsi -= opsize;
		rdi -= opsize;
	} else {
		rsi += opsize;
		rdi += opsize;
	}

	error = vie_update_register(vcpu, VM_REG_GUEST_RSI, rsi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));

	error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	if (repeat) {
		rcx = rcx - 1;
		error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
		    rcx, vie->addrsize);
		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

		/*
		 * Repeat the instruction if the count register is not zero.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
			vm_restart_instruction(vcpu);
	}
done:
	KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
	    __func__, error));
	return (error);
}
95175346353SNeel Natu
952ba9b7bf7SNeel Natu static int
emulate_stos(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,struct vm_guest_paging * paging __unused,mem_region_read_t memread __unused,mem_region_write_t memwrite,void * arg)9537d9ef309SJohn Baldwin emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
95498d920d9SMark Johnston struct vm_guest_paging *paging __unused, mem_region_read_t memread __unused,
95557f7026cSTycho Nightingale mem_region_write_t memwrite, void *arg)
95657f7026cSTycho Nightingale {
95757f7026cSTycho Nightingale int error, opsize, repeat;
95857f7026cSTycho Nightingale uint64_t val;
95957f7026cSTycho Nightingale uint64_t rcx, rdi, rflags;
96057f7026cSTycho Nightingale
96157f7026cSTycho Nightingale opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
96257f7026cSTycho Nightingale repeat = vie->repz_present | vie->repnz_present;
96357f7026cSTycho Nightingale
96457f7026cSTycho Nightingale if (repeat) {
9657d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
96657f7026cSTycho Nightingale KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
96757f7026cSTycho Nightingale
96857f7026cSTycho Nightingale /*
96957f7026cSTycho Nightingale * The count register is %rcx, %ecx or %cx depending on the
97057f7026cSTycho Nightingale * address size of the instruction.
97157f7026cSTycho Nightingale */
97257f7026cSTycho Nightingale if ((rcx & vie_size2mask(vie->addrsize)) == 0)
97357f7026cSTycho Nightingale return (0);
97457f7026cSTycho Nightingale }
97557f7026cSTycho Nightingale
9767d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
97757f7026cSTycho Nightingale KASSERT(!error, ("%s: error %d getting rax", __func__, error));
97857f7026cSTycho Nightingale
9797d9ef309SJohn Baldwin error = memwrite(vcpu, gpa, val, opsize, arg);
98057f7026cSTycho Nightingale if (error)
98157f7026cSTycho Nightingale return (error);
98257f7026cSTycho Nightingale
9837d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
98457f7026cSTycho Nightingale KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
98557f7026cSTycho Nightingale
9867d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
98757f7026cSTycho Nightingale KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
98857f7026cSTycho Nightingale
98957f7026cSTycho Nightingale if (rflags & PSL_D)
99057f7026cSTycho Nightingale rdi -= opsize;
99157f7026cSTycho Nightingale else
99257f7026cSTycho Nightingale rdi += opsize;
99357f7026cSTycho Nightingale
9947d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
99557f7026cSTycho Nightingale vie->addrsize);
99657f7026cSTycho Nightingale KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
99757f7026cSTycho Nightingale
99857f7026cSTycho Nightingale if (repeat) {
99957f7026cSTycho Nightingale rcx = rcx - 1;
10007d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
100157f7026cSTycho Nightingale rcx, vie->addrsize);
100257f7026cSTycho Nightingale KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
100357f7026cSTycho Nightingale
100457f7026cSTycho Nightingale /*
100557f7026cSTycho Nightingale * Repeat the instruction if the count register is not zero.
100657f7026cSTycho Nightingale */
100757f7026cSTycho Nightingale if ((rcx & vie_size2mask(vie->addrsize)) != 0)
10087d9ef309SJohn Baldwin vm_restart_instruction(vcpu);
100957f7026cSTycho Nightingale }
101057f7026cSTycho Nightingale
101157f7026cSTycho Nightingale return (0);
101257f7026cSTycho Nightingale }
101357f7026cSTycho Nightingale
101457f7026cSTycho Nightingale static int
emulate_and(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite,void * arg)10157d9ef309SJohn Baldwin emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
1016ba9b7bf7SNeel Natu mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
1017ba9b7bf7SNeel Natu {
1018ba9b7bf7SNeel Natu int error, size;
1019ba9b7bf7SNeel Natu enum vm_reg_name reg;
10204c98655eSNeel Natu uint64_t result, rflags, rflags2, val1, val2;
1021ba9b7bf7SNeel Natu
1022f7a9f178SNeel Natu size = vie->opsize;
1023ba9b7bf7SNeel Natu error = EINVAL;
1024ba9b7bf7SNeel Natu
1025ba9b7bf7SNeel Natu switch (vie->op.op_byte) {
1026ba9b7bf7SNeel Natu case 0x23:
1027ba9b7bf7SNeel Natu /*
1028ba9b7bf7SNeel Natu * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
1029ba9b7bf7SNeel Natu * result in reg.
1030ba9b7bf7SNeel Natu *
1031f7a9f178SNeel Natu * 23/r and r16, r/m16
1032ba9b7bf7SNeel Natu * 23/r and r32, r/m32
1033ba9b7bf7SNeel Natu * REX.W + 23/r and r64, r/m64
1034ba9b7bf7SNeel Natu */
1035ba9b7bf7SNeel Natu
1036ba9b7bf7SNeel Natu /* get the first operand */
1037ba9b7bf7SNeel Natu reg = gpr_map[vie->reg];
10387d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, &val1);
1039ba9b7bf7SNeel Natu if (error)
1040ba9b7bf7SNeel Natu break;
1041ba9b7bf7SNeel Natu
1042ba9b7bf7SNeel Natu /* get the second operand */
10437d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val2, size, arg);
1044ba9b7bf7SNeel Natu if (error)
1045ba9b7bf7SNeel Natu break;
1046ba9b7bf7SNeel Natu
1047ba9b7bf7SNeel Natu /* perform the operation and write the result */
10484c98655eSNeel Natu result = val1 & val2;
10497d9ef309SJohn Baldwin error = vie_update_register(vcpu, reg, result, size);
1050ba9b7bf7SNeel Natu break;
10513b2b0011SPeter Grehan case 0x81:
1052fb5e95b4STycho Nightingale case 0x83:
10533b2b0011SPeter Grehan /*
1054fb5e95b4STycho Nightingale * AND mem (ModRM:r/m) with immediate and store the
105584e169c6SNeel Natu * result in mem.
10563b2b0011SPeter Grehan *
1057fb5e95b4STycho Nightingale * 81 /4 and r/m16, imm16
1058fb5e95b4STycho Nightingale * 81 /4 and r/m32, imm32
1059fb5e95b4STycho Nightingale * REX.W + 81 /4 and r/m64, imm32 sign-extended to 64
1060e6f1f347SPeter Grehan *
1061fb5e95b4STycho Nightingale * 83 /4 and r/m16, imm8 sign-extended to 16
1062fb5e95b4STycho Nightingale * 83 /4 and r/m32, imm8 sign-extended to 32
1063fb5e95b4STycho Nightingale * REX.W + 83/4 and r/m64, imm8 sign-extended to 64
10643b2b0011SPeter Grehan */
1065e6f1f347SPeter Grehan
10663b2b0011SPeter Grehan /* get the first operand */
10677d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val1, size, arg);
10683b2b0011SPeter Grehan if (error)
10693b2b0011SPeter Grehan break;
10703b2b0011SPeter Grehan
10713b2b0011SPeter Grehan /*
10723b2b0011SPeter Grehan * perform the operation with the pre-fetched immediate
10733b2b0011SPeter Grehan * operand and write the result
10743b2b0011SPeter Grehan */
10754c98655eSNeel Natu result = val1 & vie->immediate;
10767d9ef309SJohn Baldwin error = memwrite(vcpu, gpa, result, size, arg);
10773b2b0011SPeter Grehan break;
1078ba9b7bf7SNeel Natu default:
1079ba9b7bf7SNeel Natu break;
1080ba9b7bf7SNeel Natu }
10814c98655eSNeel Natu if (error)
10824c98655eSNeel Natu return (error);
10834c98655eSNeel Natu
10847d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
10854c98655eSNeel Natu if (error)
10864c98655eSNeel Natu return (error);
10874c98655eSNeel Natu
10884c98655eSNeel Natu /*
10894c98655eSNeel Natu * OF and CF are cleared; the SF, ZF and PF flags are set according
10904c98655eSNeel Natu * to the result; AF is undefined.
10914c98655eSNeel Natu *
10924c98655eSNeel Natu * The updated status flags are obtained by subtracting 0 from 'result'.
10934c98655eSNeel Natu */
10944c98655eSNeel Natu rflags2 = getcc(size, result, 0);
10954c98655eSNeel Natu rflags &= ~RFLAGS_STATUS_BITS;
10964c98655eSNeel Natu rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
10974c98655eSNeel Natu
10987d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
1099ba9b7bf7SNeel Natu return (error);
1100ba9b7bf7SNeel Natu }
1101ba9b7bf7SNeel Natu
110284e169c6SNeel Natu static int
emulate_or(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite,void * arg)11037d9ef309SJohn Baldwin emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
110484e169c6SNeel Natu mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
110584e169c6SNeel Natu {
110684e169c6SNeel Natu int error, size;
11079d210a4aSPeter Grehan enum vm_reg_name reg;
11089d210a4aSPeter Grehan uint64_t result, rflags, rflags2, val1, val2;
110984e169c6SNeel Natu
1110f7a9f178SNeel Natu size = vie->opsize;
111184e169c6SNeel Natu error = EINVAL;
111284e169c6SNeel Natu
111384e169c6SNeel Natu switch (vie->op.op_byte) {
11149d210a4aSPeter Grehan case 0x0B:
11159d210a4aSPeter Grehan /*
11169d210a4aSPeter Grehan * OR reg (ModRM:reg) and mem (ModRM:r/m) and store the
11179d210a4aSPeter Grehan * result in reg.
11189d210a4aSPeter Grehan *
11199d210a4aSPeter Grehan * 0b/r or r16, r/m16
11209d210a4aSPeter Grehan * 0b/r or r32, r/m32
11219d210a4aSPeter Grehan * REX.W + 0b/r or r64, r/m64
11229d210a4aSPeter Grehan */
11239d210a4aSPeter Grehan
11249d210a4aSPeter Grehan /* get the first operand */
11259d210a4aSPeter Grehan reg = gpr_map[vie->reg];
11267d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, &val1);
11279d210a4aSPeter Grehan if (error)
11289d210a4aSPeter Grehan break;
11299d210a4aSPeter Grehan
11309d210a4aSPeter Grehan /* get the second operand */
11317d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val2, size, arg);
11329d210a4aSPeter Grehan if (error)
11339d210a4aSPeter Grehan break;
11349d210a4aSPeter Grehan
11359d210a4aSPeter Grehan /* perform the operation and write the result */
11369d210a4aSPeter Grehan result = val1 | val2;
11377d9ef309SJohn Baldwin error = vie_update_register(vcpu, reg, result, size);
11389d210a4aSPeter Grehan break;
1139fb5e95b4STycho Nightingale case 0x81:
114084e169c6SNeel Natu case 0x83:
114184e169c6SNeel Natu /*
114284e169c6SNeel Natu * OR mem (ModRM:r/m) with immediate and store the
114384e169c6SNeel Natu * result in mem.
114484e169c6SNeel Natu *
1145fb5e95b4STycho Nightingale * 81 /1 or r/m16, imm16
1146fb5e95b4STycho Nightingale * 81 /1 or r/m32, imm32
1147fb5e95b4STycho Nightingale * REX.W + 81 /1 or r/m64, imm32 sign-extended to 64
114884e169c6SNeel Natu *
1149fb5e95b4STycho Nightingale * 83 /1 or r/m16, imm8 sign-extended to 16
1150fb5e95b4STycho Nightingale * 83 /1 or r/m32, imm8 sign-extended to 32
1151fb5e95b4STycho Nightingale * REX.W + 83/1 or r/m64, imm8 sign-extended to 64
115284e169c6SNeel Natu */
115384e169c6SNeel Natu
115484e169c6SNeel Natu /* get the first operand */
11557d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val1, size, arg);
115684e169c6SNeel Natu if (error)
115784e169c6SNeel Natu break;
115884e169c6SNeel Natu
115984e169c6SNeel Natu /*
116084e169c6SNeel Natu * perform the operation with the pre-fetched immediate
116184e169c6SNeel Natu * operand and write the result
116284e169c6SNeel Natu */
11634c98655eSNeel Natu result = val1 | vie->immediate;
11647d9ef309SJohn Baldwin error = memwrite(vcpu, gpa, result, size, arg);
116584e169c6SNeel Natu break;
116684e169c6SNeel Natu default:
116784e169c6SNeel Natu break;
116884e169c6SNeel Natu }
11694c98655eSNeel Natu if (error)
11704c98655eSNeel Natu return (error);
11714c98655eSNeel Natu
11727d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
11734c98655eSNeel Natu if (error)
11744c98655eSNeel Natu return (error);
11754c98655eSNeel Natu
11764c98655eSNeel Natu /*
11774c98655eSNeel Natu * OF and CF are cleared; the SF, ZF and PF flags are set according
11784c98655eSNeel Natu * to the result; AF is undefined.
11794c98655eSNeel Natu *
11804c98655eSNeel Natu * The updated status flags are obtained by subtracting 0 from 'result'.
11814c98655eSNeel Natu */
11824c98655eSNeel Natu rflags2 = getcc(size, result, 0);
11834c98655eSNeel Natu rflags &= ~RFLAGS_STATUS_BITS;
11844c98655eSNeel Natu rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
11854c98655eSNeel Natu
11867d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
118784e169c6SNeel Natu return (error);
118884e169c6SNeel Natu }
118984e169c6SNeel Natu
1190d665d229SNeel Natu static int
emulate_cmp(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * arg)11917d9ef309SJohn Baldwin emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
119298d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
1193d665d229SNeel Natu {
1194d665d229SNeel Natu int error, size;
1195b32d1908SNeel Natu uint64_t regop, memop, op1, op2, rflags, rflags2;
1196d665d229SNeel Natu enum vm_reg_name reg;
1197d665d229SNeel Natu
1198d665d229SNeel Natu size = vie->opsize;
1199d665d229SNeel Natu switch (vie->op.op_byte) {
1200b32d1908SNeel Natu case 0x39:
1201d665d229SNeel Natu case 0x3B:
1202d665d229SNeel Natu /*
1203b32d1908SNeel Natu * 39/r CMP r/m16, r16
1204b32d1908SNeel Natu * 39/r CMP r/m32, r32
1205b32d1908SNeel Natu * REX.W 39/r CMP r/m64, r64
1206b32d1908SNeel Natu *
1207d665d229SNeel Natu * 3B/r CMP r16, r/m16
1208d665d229SNeel Natu * 3B/r CMP r32, r/m32
1209d665d229SNeel Natu * REX.W + 3B/r CMP r64, r/m64
1210d665d229SNeel Natu *
1211b32d1908SNeel Natu * Compare the first operand with the second operand and
1212d665d229SNeel Natu * set status flags in EFLAGS register. The comparison is
1213d665d229SNeel Natu * performed by subtracting the second operand from the first
1214d665d229SNeel Natu * operand and then setting the status flags.
1215d665d229SNeel Natu */
1216d665d229SNeel Natu
1217b32d1908SNeel Natu /* Get the register operand */
1218d665d229SNeel Natu reg = gpr_map[vie->reg];
12197d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, ®op);
1220d665d229SNeel Natu if (error)
1221d665d229SNeel Natu return (error);
1222d665d229SNeel Natu
1223b32d1908SNeel Natu /* Get the memory operand */
12247d9ef309SJohn Baldwin error = memread(vcpu, gpa, &memop, size, arg);
1225d665d229SNeel Natu if (error)
1226d665d229SNeel Natu return (error);
1227d665d229SNeel Natu
1228b32d1908SNeel Natu if (vie->op.op_byte == 0x3B) {
1229b32d1908SNeel Natu op1 = regop;
1230b32d1908SNeel Natu op2 = memop;
1231b32d1908SNeel Natu } else {
1232b32d1908SNeel Natu op1 = memop;
1233b32d1908SNeel Natu op2 = regop;
1234b32d1908SNeel Natu }
1235fb5e95b4STycho Nightingale rflags2 = getcc(size, op1, op2);
1236fb5e95b4STycho Nightingale break;
12376a273d5eSNeel Natu case 0x80:
1238fb5e95b4STycho Nightingale case 0x81:
1239fb5e95b4STycho Nightingale case 0x83:
1240fb5e95b4STycho Nightingale /*
12416a273d5eSNeel Natu * 80 /7 cmp r/m8, imm8
12426a273d5eSNeel Natu * REX + 80 /7 cmp r/m8, imm8
12436a273d5eSNeel Natu *
1244fb5e95b4STycho Nightingale * 81 /7 cmp r/m16, imm16
1245fb5e95b4STycho Nightingale * 81 /7 cmp r/m32, imm32
1246fb5e95b4STycho Nightingale * REX.W + 81 /7 cmp r/m64, imm32 sign-extended to 64
1247fb5e95b4STycho Nightingale *
1248fb5e95b4STycho Nightingale * 83 /7 cmp r/m16, imm8 sign-extended to 16
1249fb5e95b4STycho Nightingale * 83 /7 cmp r/m32, imm8 sign-extended to 32
1250fb5e95b4STycho Nightingale * REX.W + 83 /7 cmp r/m64, imm8 sign-extended to 64
1251fb5e95b4STycho Nightingale *
1252fb5e95b4STycho Nightingale * Compare mem (ModRM:r/m) with immediate and set
1253fb5e95b4STycho Nightingale * status flags according to the results. The
1254fb5e95b4STycho Nightingale * comparison is performed by subtracting the
1255fb5e95b4STycho Nightingale * immediate from the first operand and then setting
1256fb5e95b4STycho Nightingale * the status flags.
1257fb5e95b4STycho Nightingale *
1258fb5e95b4STycho Nightingale */
12596a273d5eSNeel Natu if (vie->op.op_byte == 0x80)
12606a273d5eSNeel Natu size = 1;
1261fb5e95b4STycho Nightingale
1262fb5e95b4STycho Nightingale /* get the first operand */
12637d9ef309SJohn Baldwin error = memread(vcpu, gpa, &op1, size, arg);
1264fb5e95b4STycho Nightingale if (error)
1265fb5e95b4STycho Nightingale return (error);
1266fb5e95b4STycho Nightingale
1267fb5e95b4STycho Nightingale rflags2 = getcc(size, op1, vie->immediate);
1268d665d229SNeel Natu break;
1269d665d229SNeel Natu default:
1270d665d229SNeel Natu return (EINVAL);
1271d665d229SNeel Natu }
12727d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1273d665d229SNeel Natu if (error)
1274d665d229SNeel Natu return (error);
1275d665d229SNeel Natu rflags &= ~RFLAGS_STATUS_BITS;
1276d665d229SNeel Natu rflags |= rflags2 & RFLAGS_STATUS_BITS;
1277d665d229SNeel Natu
12787d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
1279d665d229SNeel Natu return (error);
1280d665d229SNeel Natu }
1281d665d229SNeel Natu
1282d665d229SNeel Natu static int
emulate_test(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * arg)12837d9ef309SJohn Baldwin emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
128498d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
1285e4da41f9SRodney W. Grimes {
1286e4da41f9SRodney W. Grimes int error, size;
1287e4da41f9SRodney W. Grimes uint64_t op1, rflags, rflags2;
1288e4da41f9SRodney W. Grimes
1289e4da41f9SRodney W. Grimes size = vie->opsize;
1290e4da41f9SRodney W. Grimes error = EINVAL;
1291e4da41f9SRodney W. Grimes
1292e4da41f9SRodney W. Grimes switch (vie->op.op_byte) {
1293*49a4838aSJose Luis Duran case 0xF6:
1294*49a4838aSJose Luis Duran /*
1295*49a4838aSJose Luis Duran * F6 /0 test r/m8, imm8
1296*49a4838aSJose Luis Duran */
1297*49a4838aSJose Luis Duran size = 1; /* override for byte operation */
1298*49a4838aSJose Luis Duran /* FALLTHROUGH */
1299e4da41f9SRodney W. Grimes case 0xF7:
1300e4da41f9SRodney W. Grimes /*
1301e4da41f9SRodney W. Grimes * F7 /0 test r/m16, imm16
1302e4da41f9SRodney W. Grimes * F7 /0 test r/m32, imm32
1303e4da41f9SRodney W. Grimes * REX.W + F7 /0 test r/m64, imm32 sign-extended to 64
1304e4da41f9SRodney W. Grimes *
1305e4da41f9SRodney W. Grimes * Test mem (ModRM:r/m) with immediate and set status
1306e4da41f9SRodney W. Grimes * flags according to the results. The comparison is
1307e4da41f9SRodney W. Grimes * performed by anding the immediate from the first
1308e4da41f9SRodney W. Grimes * operand and then setting the status flags.
1309e4da41f9SRodney W. Grimes */
1310e4da41f9SRodney W. Grimes if ((vie->reg & 7) != 0)
1311e4da41f9SRodney W. Grimes return (EINVAL);
1312e4da41f9SRodney W. Grimes
13137d9ef309SJohn Baldwin error = memread(vcpu, gpa, &op1, size, arg);
1314e4da41f9SRodney W. Grimes if (error)
1315e4da41f9SRodney W. Grimes return (error);
1316e4da41f9SRodney W. Grimes
1317e4da41f9SRodney W. Grimes rflags2 = getandflags(size, op1, vie->immediate);
1318e4da41f9SRodney W. Grimes break;
1319e4da41f9SRodney W. Grimes default:
1320e4da41f9SRodney W. Grimes return (EINVAL);
1321e4da41f9SRodney W. Grimes }
13227d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1323e4da41f9SRodney W. Grimes if (error)
1324e4da41f9SRodney W. Grimes return (error);
1325e4da41f9SRodney W. Grimes
1326e4da41f9SRodney W. Grimes /*
1327e4da41f9SRodney W. Grimes * OF and CF are cleared; the SF, ZF and PF flags are set according
1328e4da41f9SRodney W. Grimes * to the result; AF is undefined.
1329e4da41f9SRodney W. Grimes */
1330e4da41f9SRodney W. Grimes rflags &= ~RFLAGS_STATUS_BITS;
1331e4da41f9SRodney W. Grimes rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
1332e4da41f9SRodney W. Grimes
13337d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
1334e4da41f9SRodney W. Grimes return (error);
1335e4da41f9SRodney W. Grimes }
1336e4da41f9SRodney W. Grimes
/*
 * Emulate BEXTR (bit-field extract).
 *
 * Reads the first source operand from guest memory at 'gpa', takes the
 * start/length control value from the VEX.vvvv register, and writes the
 * extracted field to the ModRM:reg destination register.  ZF is set from
 * the result; all other status flags are cleared (see note at bottom).
 *
 * Returns 0 on success or an errno value if a register/memory access fails.
 */
static int
emulate_bextr(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite __unused, void *arg)
{
	uint64_t src1, src2, dst, rflags;
	unsigned start, len, size;
	int error;

	size = vie->opsize;
	error = EINVAL;		/* dead store: overwritten by memread() below */

	/*
	 * VEX.LZ.0F38.W0 F7 /r		BEXTR r32a, r/m32, r32b
	 * VEX.LZ.0F38.W1 F7 /r		BEXTR r64a, r/m64, r64b
	 *
	 * Destination operand is ModRM:reg.  Source operands are ModRM:r/m and
	 * Vex.vvvv.
	 *
	 * Operand size is always 32-bit if not in 64-bit mode (W1 is ignored).
	 */
	if (size != 4 && paging->cpu_mode != CPU_MODE_64BIT)
		size = 4;

	/*
	 * Extracts contiguous bits from the first /source/ operand (second
	 * operand) using an index and length specified in the second /source/
	 * operand (third operand).
	 */
	error = memread(vcpu, gpa, &src1, size, arg);
	if (error)
		return (error);
	error = vie_read_register(vcpu, gpr_map[vie->vex_reg], &src2);
	if (error)
		return (error);
	error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);

	/* Control operand: bits 7:0 are the start index, 15:8 the length. */
	start = (src2 & 0xff);
	len = (src2 & 0xff00) >> 8;

	/* If no bits are extracted, the destination register is cleared. */
	dst = 0;

	/* If START exceeds the operand size, no bits are extracted. */
	if (start > size * 8)
		goto done;
	/*
	 * Length is bounded by both the destination size and start offset.
	 * (start == size * 8 falls through here with len clamped to 0.)
	 */
	if (start + len > size * 8)
		len = (size * 8) - start;
	if (len == 0)
		goto done;

	/* Shift the field down, then mask it to 'len' bits. */
	if (start > 0)
		src1 = (src1 >> start);
	if (len < 64)		/* 1ULL << 64 would be undefined behavior */
		src1 = src1 & ((1ull << len) - 1);
	dst = src1;

done:
	error = vie_update_register(vcpu, gpr_map[vie->reg], dst, size);
	if (error)
		return (error);

	/*
	 * AMD: OF, CF cleared; SF/AF/PF undefined; ZF set by result.
	 * Intel: ZF is set by result; AF/SF/PF undefined; all others cleared.
	 */
	rflags &= ~RFLAGS_STATUS_BITS;
	if (dst == 0)
		rflags |= PSL_Z;
	error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags,
	    8);
	return (error);
}
141347332982SConrad Meyer
141447332982SConrad Meyer static int
emulate_add(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * arg)14157d9ef309SJohn Baldwin emulate_add(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
141698d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
1417c2b4ceddSJohn Baldwin {
1418c2b4ceddSJohn Baldwin int error, size;
1419c2b4ceddSJohn Baldwin uint64_t nval, rflags, rflags2, val1, val2;
1420c2b4ceddSJohn Baldwin enum vm_reg_name reg;
1421c2b4ceddSJohn Baldwin
1422c2b4ceddSJohn Baldwin size = vie->opsize;
1423c2b4ceddSJohn Baldwin error = EINVAL;
1424c2b4ceddSJohn Baldwin
1425c2b4ceddSJohn Baldwin switch (vie->op.op_byte) {
1426c2b4ceddSJohn Baldwin case 0x03:
1427c2b4ceddSJohn Baldwin /*
1428c2b4ceddSJohn Baldwin * ADD r/m to r and store the result in r
1429c2b4ceddSJohn Baldwin *
1430c2b4ceddSJohn Baldwin * 03/r ADD r16, r/m16
1431c2b4ceddSJohn Baldwin * 03/r ADD r32, r/m32
1432c2b4ceddSJohn Baldwin * REX.W + 03/r ADD r64, r/m64
1433c2b4ceddSJohn Baldwin */
1434c2b4ceddSJohn Baldwin
1435c2b4ceddSJohn Baldwin /* get the first operand */
1436c2b4ceddSJohn Baldwin reg = gpr_map[vie->reg];
14377d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, &val1);
1438c2b4ceddSJohn Baldwin if (error)
1439c2b4ceddSJohn Baldwin break;
1440c2b4ceddSJohn Baldwin
1441c2b4ceddSJohn Baldwin /* get the second operand */
14427d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val2, size, arg);
1443c2b4ceddSJohn Baldwin if (error)
1444c2b4ceddSJohn Baldwin break;
1445c2b4ceddSJohn Baldwin
1446c2b4ceddSJohn Baldwin /* perform the operation and write the result */
1447c2b4ceddSJohn Baldwin nval = val1 + val2;
14487d9ef309SJohn Baldwin error = vie_update_register(vcpu, reg, nval, size);
1449c2b4ceddSJohn Baldwin break;
1450c2b4ceddSJohn Baldwin default:
1451c2b4ceddSJohn Baldwin break;
1452c2b4ceddSJohn Baldwin }
1453c2b4ceddSJohn Baldwin
1454c2b4ceddSJohn Baldwin if (!error) {
1455c2b4ceddSJohn Baldwin rflags2 = getaddflags(size, val1, val2);
14567d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
1457c2b4ceddSJohn Baldwin &rflags);
1458c2b4ceddSJohn Baldwin if (error)
1459c2b4ceddSJohn Baldwin return (error);
1460c2b4ceddSJohn Baldwin
1461c2b4ceddSJohn Baldwin rflags &= ~RFLAGS_STATUS_BITS;
1462c2b4ceddSJohn Baldwin rflags |= rflags2 & RFLAGS_STATUS_BITS;
14637d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
1464c2b4ceddSJohn Baldwin rflags, 8);
1465c2b4ceddSJohn Baldwin }
1466c2b4ceddSJohn Baldwin
1467c2b4ceddSJohn Baldwin return (error);
1468c2b4ceddSJohn Baldwin }
1469c2b4ceddSJohn Baldwin
1470c2b4ceddSJohn Baldwin static int
emulate_sub(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * arg)14717d9ef309SJohn Baldwin emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
147298d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
1473fc3dde90SPeter Grehan {
1474fc3dde90SPeter Grehan int error, size;
1475fc3dde90SPeter Grehan uint64_t nval, rflags, rflags2, val1, val2;
1476fc3dde90SPeter Grehan enum vm_reg_name reg;
1477fc3dde90SPeter Grehan
1478fc3dde90SPeter Grehan size = vie->opsize;
1479fc3dde90SPeter Grehan error = EINVAL;
1480fc3dde90SPeter Grehan
1481fc3dde90SPeter Grehan switch (vie->op.op_byte) {
1482fc3dde90SPeter Grehan case 0x2B:
1483fc3dde90SPeter Grehan /*
1484fc3dde90SPeter Grehan * SUB r/m from r and store the result in r
1485fc3dde90SPeter Grehan *
1486fc3dde90SPeter Grehan * 2B/r SUB r16, r/m16
1487fc3dde90SPeter Grehan * 2B/r SUB r32, r/m32
1488fc3dde90SPeter Grehan * REX.W + 2B/r SUB r64, r/m64
1489fc3dde90SPeter Grehan */
1490fc3dde90SPeter Grehan
1491fc3dde90SPeter Grehan /* get the first operand */
1492fc3dde90SPeter Grehan reg = gpr_map[vie->reg];
14937d9ef309SJohn Baldwin error = vie_read_register(vcpu, reg, &val1);
1494fc3dde90SPeter Grehan if (error)
1495fc3dde90SPeter Grehan break;
1496fc3dde90SPeter Grehan
1497fc3dde90SPeter Grehan /* get the second operand */
14987d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val2, size, arg);
1499fc3dde90SPeter Grehan if (error)
1500fc3dde90SPeter Grehan break;
1501fc3dde90SPeter Grehan
1502fc3dde90SPeter Grehan /* perform the operation and write the result */
1503fc3dde90SPeter Grehan nval = val1 - val2;
15047d9ef309SJohn Baldwin error = vie_update_register(vcpu, reg, nval, size);
1505fc3dde90SPeter Grehan break;
1506fc3dde90SPeter Grehan default:
1507fc3dde90SPeter Grehan break;
1508fc3dde90SPeter Grehan }
1509fc3dde90SPeter Grehan
1510fc3dde90SPeter Grehan if (!error) {
1511fc3dde90SPeter Grehan rflags2 = getcc(size, val1, val2);
15127d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
1513fc3dde90SPeter Grehan &rflags);
1514fc3dde90SPeter Grehan if (error)
1515fc3dde90SPeter Grehan return (error);
1516fc3dde90SPeter Grehan
1517fc3dde90SPeter Grehan rflags &= ~RFLAGS_STATUS_BITS;
1518fc3dde90SPeter Grehan rflags |= rflags2 & RFLAGS_STATUS_BITS;
15197d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
1520fc3dde90SPeter Grehan rflags, 8);
1521fc3dde90SPeter Grehan }
1522fc3dde90SPeter Grehan
1523fc3dde90SPeter Grehan return (error);
1524fc3dde90SPeter Grehan }
1525fc3dde90SPeter Grehan
/*
 * Common worker for PUSH and POP emulation.
 *
 * Computes the guest stack address for the access, performs segment,
 * canonical and alignment checks (injecting #SS or #AC on failure), then
 * moves 'size' bytes between the guest stack and the MMIO location at
 * 'mmio_gpa', finally adjusting %rsp.  Returns 0 on success (including
 * the exception-injected cases) or an errno value.
 */
static int
emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, fault, size, stackaddrsize, pushop;

	val = 0;
	size = vie->opsize;
	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDL, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
		stackaddrsize = 2;
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		stackaddrsize = 8;
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vcpu, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	/* CR0 and RFLAGS are needed below for the alignment check. */
	error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vcpu, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
	/* A push writes below the current stack pointer. */
	if (pushop) {
		rsp -= size;
	}

	/*
	 * NOTE(review): ss_desc is only filled in by the protected/compat
	 * branch above; in real and 64-bit mode it is passed uninitialized.
	 * Presumably vie_calculate_gla() does not consult it in those modes
	 * — confirm against its definition.
	 */
	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
	    &stack_gla)) {
		vm_inject_ss(vcpu, 0);
		return (0);
	}

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vcpu, 0);
		return (0);
	}

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vcpu, 0);
		return (0);
	}

	/* Map the guest stack bytes for copying; a guest page fault was
	 * already injected by vm_copy_setup() when 'fault' is set. */
	error = vm_copy_setup(vcpu, paging, stack_gla, size,
	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
	    &fault);
	if (error || fault)
		return (error);

	if (pushop) {
		/* PUSH: read the MMIO source and copy it onto the stack. */
		error = memread(vcpu, mmio_gpa, &val, size, arg);
		if (error == 0)
			vm_copyout(&val, copyinfo, size);
	} else {
		/* POP: copy from the stack, then write the MMIO destination. */
		vm_copyin(copyinfo, &val, size);
		error = memwrite(vcpu, mmio_gpa, val, size, arg);
		rsp += size;
	}
	vm_copy_teardown(copyinfo, nitems(copyinfo));

	/* Commit the new stack pointer only if the data move succeeded. */
	if (error == 0) {
		error = vie_update_register(vcpu, VM_REG_GUEST_RSP, rsp,
		    stackaddrsize);
		KASSERT(error == 0, ("error %d updating rsp", error));
	}
	return (error);
}
1627d63e02eaSNeel Natu
1628d63e02eaSNeel Natu static int
emulate_push(struct vcpu * vcpu,uint64_t mmio_gpa,struct vie * vie,struct vm_guest_paging * paging,mem_region_read_t memread,mem_region_write_t memwrite,void * arg)16297d9ef309SJohn Baldwin emulate_push(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
1630d63e02eaSNeel Natu struct vm_guest_paging *paging, mem_region_read_t memread,
1631d63e02eaSNeel Natu mem_region_write_t memwrite, void *arg)
1632d63e02eaSNeel Natu {
1633d63e02eaSNeel Natu int error;
1634d63e02eaSNeel Natu
1635d63e02eaSNeel Natu /*
1636d63e02eaSNeel Natu * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
1637d63e02eaSNeel Natu *
1638d63e02eaSNeel Natu * PUSH is part of the group 5 extended opcodes and is identified
1639d63e02eaSNeel Natu * by ModRM:reg = b110.
1640d63e02eaSNeel Natu */
1641d63e02eaSNeel Natu if ((vie->reg & 7) != 6)
1642d63e02eaSNeel Natu return (EINVAL);
1643d63e02eaSNeel Natu
16447d9ef309SJohn Baldwin error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
1645d63e02eaSNeel Natu memwrite, arg);
1646d63e02eaSNeel Natu return (error);
1647d63e02eaSNeel Natu }
1648d63e02eaSNeel Natu
1649d63e02eaSNeel Natu static int
emulate_pop(struct vcpu * vcpu,uint64_t mmio_gpa,struct vie * vie,struct vm_guest_paging * paging,mem_region_read_t memread,mem_region_write_t memwrite,void * arg)16507d9ef309SJohn Baldwin emulate_pop(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
1651d63e02eaSNeel Natu struct vm_guest_paging *paging, mem_region_read_t memread,
1652d63e02eaSNeel Natu mem_region_write_t memwrite, void *arg)
1653d63e02eaSNeel Natu {
1654d63e02eaSNeel Natu int error;
1655d63e02eaSNeel Natu
1656d63e02eaSNeel Natu /*
1657d63e02eaSNeel Natu * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
1658d63e02eaSNeel Natu *
1659d63e02eaSNeel Natu * POP is part of the group 1A extended opcodes and is identified
1660d63e02eaSNeel Natu * by ModRM:reg = b000.
1661d63e02eaSNeel Natu */
1662d63e02eaSNeel Natu if ((vie->reg & 7) != 0)
1663d63e02eaSNeel Natu return (EINVAL);
1664d63e02eaSNeel Natu
16657d9ef309SJohn Baldwin error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
1666d63e02eaSNeel Natu memwrite, arg);
1667d665d229SNeel Natu return (error);
1668d665d229SNeel Natu }
1669d665d229SNeel Natu
1670fb5e95b4STycho Nightingale static int
emulate_group1(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,struct vm_guest_paging * paging __unused,mem_region_read_t memread,mem_region_write_t memwrite,void * memarg)16717d9ef309SJohn Baldwin emulate_group1(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
167298d920d9SMark Johnston struct vm_guest_paging *paging __unused, mem_region_read_t memread,
1673fb5e95b4STycho Nightingale mem_region_write_t memwrite, void *memarg)
1674fb5e95b4STycho Nightingale {
1675fb5e95b4STycho Nightingale int error;
1676fb5e95b4STycho Nightingale
1677fb5e95b4STycho Nightingale switch (vie->reg & 7) {
1678fb5e95b4STycho Nightingale case 0x1: /* OR */
16797d9ef309SJohn Baldwin error = emulate_or(vcpu, gpa, vie,
1680fb5e95b4STycho Nightingale memread, memwrite, memarg);
1681fb5e95b4STycho Nightingale break;
1682fb5e95b4STycho Nightingale case 0x4: /* AND */
16837d9ef309SJohn Baldwin error = emulate_and(vcpu, gpa, vie,
1684fb5e95b4STycho Nightingale memread, memwrite, memarg);
1685fb5e95b4STycho Nightingale break;
1686fb5e95b4STycho Nightingale case 0x7: /* CMP */
16877d9ef309SJohn Baldwin error = emulate_cmp(vcpu, gpa, vie,
1688fb5e95b4STycho Nightingale memread, memwrite, memarg);
1689fb5e95b4STycho Nightingale break;
1690fb5e95b4STycho Nightingale default:
1691fb5e95b4STycho Nightingale error = EINVAL;
1692fb5e95b4STycho Nightingale break;
1693fb5e95b4STycho Nightingale }
1694fb5e95b4STycho Nightingale
1695fb5e95b4STycho Nightingale return (error);
1696fb5e95b4STycho Nightingale }
1697fb5e95b4STycho Nightingale
1698b8070ef5SNeel Natu static int
emulate_bittest(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * memarg)16997d9ef309SJohn Baldwin emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
170098d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused,
170198d920d9SMark Johnston void *memarg)
1702b8070ef5SNeel Natu {
1703b8070ef5SNeel Natu uint64_t val, rflags;
1704b8070ef5SNeel Natu int error, bitmask, bitoff;
1705b8070ef5SNeel Natu
1706b8070ef5SNeel Natu /*
1707b8070ef5SNeel Natu * 0F BA is a Group 8 extended opcode.
1708b8070ef5SNeel Natu *
1709b8070ef5SNeel Natu * Currently we only emulate the 'Bit Test' instruction which is
1710b8070ef5SNeel Natu * identified by a ModR/M:reg encoding of 100b.
1711b8070ef5SNeel Natu */
1712b8070ef5SNeel Natu if ((vie->reg & 7) != 4)
1713b8070ef5SNeel Natu return (EINVAL);
1714b8070ef5SNeel Natu
17157d9ef309SJohn Baldwin error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1716b8070ef5SNeel Natu KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
1717b8070ef5SNeel Natu
17187d9ef309SJohn Baldwin error = memread(vcpu, gpa, &val, vie->opsize, memarg);
1719b8070ef5SNeel Natu if (error)
1720b8070ef5SNeel Natu return (error);
1721b8070ef5SNeel Natu
1722b8070ef5SNeel Natu /*
1723b8070ef5SNeel Natu * Intel SDM, Vol 2, Table 3-2:
1724b8070ef5SNeel Natu * "Range of Bit Positions Specified by Bit Offset Operands"
1725b8070ef5SNeel Natu */
1726b8070ef5SNeel Natu bitmask = vie->opsize * 8 - 1;
1727b8070ef5SNeel Natu bitoff = vie->immediate & bitmask;
1728b8070ef5SNeel Natu
1729b8070ef5SNeel Natu /* Copy the bit into the Carry flag in %rflags */
1730b8070ef5SNeel Natu if (val & (1UL << bitoff))
1731b8070ef5SNeel Natu rflags |= PSL_C;
1732b8070ef5SNeel Natu else
1733b8070ef5SNeel Natu rflags &= ~PSL_C;
1734b8070ef5SNeel Natu
17357d9ef309SJohn Baldwin error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
1736b8070ef5SNeel Natu KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
1737b8070ef5SNeel Natu
1738b8070ef5SNeel Natu return (0);
1739b8070ef5SNeel Natu }
1740b8070ef5SNeel Natu
174178a36527SKonstantin Belousov static int
emulate_twob_group15(struct vcpu * vcpu,uint64_t gpa,struct vie * vie,mem_region_read_t memread,mem_region_write_t memwrite __unused,void * memarg)17427d9ef309SJohn Baldwin emulate_twob_group15(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
174398d920d9SMark Johnston mem_region_read_t memread, mem_region_write_t memwrite __unused,
174498d920d9SMark Johnston void *memarg)
174578a36527SKonstantin Belousov {
174678a36527SKonstantin Belousov int error;
174778a36527SKonstantin Belousov uint64_t buf;
174878a36527SKonstantin Belousov
174978a36527SKonstantin Belousov switch (vie->reg & 7) {
175078a36527SKonstantin Belousov case 0x7: /* CLFLUSH, CLFLUSHOPT, and SFENCE */
175178a36527SKonstantin Belousov if (vie->mod == 0x3) {
175278a36527SKonstantin Belousov /*
175378a36527SKonstantin Belousov * SFENCE. Ignore it, VM exit provides enough
175478a36527SKonstantin Belousov * barriers on its own.
175578a36527SKonstantin Belousov */
175678a36527SKonstantin Belousov error = 0;
175778a36527SKonstantin Belousov } else {
175878a36527SKonstantin Belousov /*
175978a36527SKonstantin Belousov * CLFLUSH, CLFLUSHOPT. Only check for access
176078a36527SKonstantin Belousov * rights.
176178a36527SKonstantin Belousov */
17627d9ef309SJohn Baldwin error = memread(vcpu, gpa, &buf, 1, memarg);
176378a36527SKonstantin Belousov }
176478a36527SKonstantin Belousov break;
176578a36527SKonstantin Belousov default:
176678a36527SKonstantin Belousov error = EINVAL;
176778a36527SKonstantin Belousov break;
176878a36527SKonstantin Belousov }
176978a36527SKonstantin Belousov
177078a36527SKonstantin Belousov return (error);
177178a36527SKonstantin Belousov }
177278a36527SKonstantin Belousov
/*
 * Top-level entry point for instruction emulation.
 *
 * Dispatches a previously decoded instruction ('vie', which must have
 * vie->decoded set) to the per-operation emulation routine.  'gpa' is the
 * guest-physical address that triggered the exit and memread/memwrite are
 * the callbacks used to access the emulated MMIO region.  Returns 0 on
 * success, EINVAL for an undecoded or unsupported instruction, or the
 * error from the emulation routine.
 */
int
vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_GROUP1:
		error = emulate_group1(vcpu, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_POP:
		error = emulate_pop(vcpu, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vcpu, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVS:
		error = emulate_movs(vcpu, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_STOS:
		error = emulate_stos(vcpu, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_SUB:
		error = emulate_sub(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_BITTEST:
		error = emulate_bittest(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_TWOB_GRP15:
		error = emulate_twob_group15(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_ADD:
		error = emulate_add(vcpu, gpa, vie, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_TEST:
		error = emulate_test(vcpu, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_BEXTR:
		error = emulate_bextr(vcpu, gpa, vie, paging,
		    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1856ba9b7bf7SNeel Natu
1857a7424861SNeel Natu int
vie_alignment_check(int cpl,int size,uint64_t cr0,uint64_t rf,uint64_t gla)1858a7424861SNeel Natu vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
1859a7424861SNeel Natu {
1860a7424861SNeel Natu KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
1861a7424861SNeel Natu ("%s: invalid size %d", __func__, size));
1862a7424861SNeel Natu KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));
1863a7424861SNeel Natu
1864a7424861SNeel Natu if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
1865a7424861SNeel Natu return (0);
1866a7424861SNeel Natu
1867a7424861SNeel Natu return ((gla & (size - 1)) ? 1 : 0);
1868a7424861SNeel Natu }
1869a7424861SNeel Natu
1870e813a873SNeel Natu int
vie_canonical_check(enum vm_cpu_mode cpu_mode,uint64_t gla)1871e813a873SNeel Natu vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
1872e813a873SNeel Natu {
1873e813a873SNeel Natu uint64_t mask;
1874e813a873SNeel Natu
1875e813a873SNeel Natu if (cpu_mode != CPU_MODE_64BIT)
1876e813a873SNeel Natu return (0);
1877e813a873SNeel Natu
1878e813a873SNeel Natu /*
1879e813a873SNeel Natu * The value of the bit 47 in the 'gla' should be replicated in the
1880e813a873SNeel Natu * most significant 16 bits.
1881e813a873SNeel Natu */
1882e813a873SNeel Natu mask = ~((1UL << 48) - 1);
1883e813a873SNeel Natu if (gla & (1UL << 47))
1884e813a873SNeel Natu return ((gla & mask) != mask);
1885e813a873SNeel Natu else
1886e813a873SNeel Natu return ((gla & mask) != 0);
1887e813a873SNeel Natu }
1888e813a873SNeel Natu
/*
 * Map an operand size in bytes to the corresponding value mask from the
 * size2mask table (defined elsewhere in this file).  Only the x86 operand
 * sizes 1, 2, 4 and 8 are valid.
 */
uint64_t
vie_size2mask(int size)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
}
1896a7424861SNeel Natu
18975382c19dSNeel Natu int
vie_calculate_gla(enum vm_cpu_mode cpu_mode,enum vm_reg_name seg,struct seg_desc * desc,uint64_t offset,int length,int addrsize,int prot,uint64_t * gla)189865ffa035SNeel Natu vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
189965ffa035SNeel Natu struct seg_desc *desc, uint64_t offset, int length, int addrsize,
190065ffa035SNeel Natu int prot, uint64_t *gla)
19015382c19dSNeel Natu {
19023ada6e07SNeel Natu uint64_t firstoff, low_limit, high_limit, segbase;
190365ffa035SNeel Natu int glasize, type;
19045382c19dSNeel Natu
19055382c19dSNeel Natu KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
19065382c19dSNeel Natu ("%s: invalid segment %d", __func__, seg));
190765ffa035SNeel Natu KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
190865ffa035SNeel Natu ("%s: invalid operand size %d", __func__, length));
190965ffa035SNeel Natu KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
191065ffa035SNeel Natu ("%s: invalid prot %#x", __func__, prot));
19115382c19dSNeel Natu
19123ada6e07SNeel Natu firstoff = offset;
191365ffa035SNeel Natu if (cpu_mode == CPU_MODE_64BIT) {
191465ffa035SNeel Natu KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
191565ffa035SNeel Natu "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
191665ffa035SNeel Natu glasize = 8;
191765ffa035SNeel Natu } else {
191865ffa035SNeel Natu KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
191965ffa035SNeel Natu "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
192065ffa035SNeel Natu glasize = 4;
192165ffa035SNeel Natu /*
192265ffa035SNeel Natu * If the segment selector is loaded with a NULL selector
192365ffa035SNeel Natu * then the descriptor is unusable and attempting to use
192465ffa035SNeel Natu * it results in a #GP(0).
192565ffa035SNeel Natu */
1926f7a9f178SNeel Natu if (SEG_DESC_UNUSABLE(desc->access))
192765ffa035SNeel Natu return (-1);
192865ffa035SNeel Natu
192965ffa035SNeel Natu /*
193065ffa035SNeel Natu * The processor generates a #NP exception when a segment
193165ffa035SNeel Natu * register is loaded with a selector that points to a
193265ffa035SNeel Natu * descriptor that is not present. If this was the case then
193365ffa035SNeel Natu * it would have been checked before the VM-exit.
193465ffa035SNeel Natu */
1935f7a9f178SNeel Natu KASSERT(SEG_DESC_PRESENT(desc->access),
1936f7a9f178SNeel Natu ("segment %d not present: %#x", seg, desc->access));
193765ffa035SNeel Natu
193865ffa035SNeel Natu /*
193965ffa035SNeel Natu * The descriptor type must indicate a code/data segment.
194065ffa035SNeel Natu */
1941f7a9f178SNeel Natu type = SEG_DESC_TYPE(desc->access);
194265ffa035SNeel Natu KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
194365ffa035SNeel Natu "descriptor type %#x", seg, type));
194465ffa035SNeel Natu
194565ffa035SNeel Natu if (prot & PROT_READ) {
194665ffa035SNeel Natu /* #GP on a read access to a exec-only code segment */
194765ffa035SNeel Natu if ((type & 0xA) == 0x8)
194865ffa035SNeel Natu return (-1);
194965ffa035SNeel Natu }
195065ffa035SNeel Natu
195165ffa035SNeel Natu if (prot & PROT_WRITE) {
195265ffa035SNeel Natu /*
195365ffa035SNeel Natu * #GP on a write access to a code segment or a
195465ffa035SNeel Natu * read-only data segment.
195565ffa035SNeel Natu */
195665ffa035SNeel Natu if (type & 0x8) /* code segment */
195765ffa035SNeel Natu return (-1);
195865ffa035SNeel Natu
195965ffa035SNeel Natu if ((type & 0xA) == 0) /* read-only data seg */
196065ffa035SNeel Natu return (-1);
196165ffa035SNeel Natu }
196265ffa035SNeel Natu
196365ffa035SNeel Natu /*
196465ffa035SNeel Natu * 'desc->limit' is fully expanded taking granularity into
196565ffa035SNeel Natu * account.
196665ffa035SNeel Natu */
196765ffa035SNeel Natu if ((type & 0xC) == 0x4) {
196865ffa035SNeel Natu /* expand-down data segment */
196965ffa035SNeel Natu low_limit = desc->limit + 1;
1970f7a9f178SNeel Natu high_limit = SEG_DESC_DEF32(desc->access) ?
1971f7a9f178SNeel Natu 0xffffffff : 0xffff;
197265ffa035SNeel Natu } else {
197365ffa035SNeel Natu /* code segment or expand-up data segment */
197465ffa035SNeel Natu low_limit = 0;
197565ffa035SNeel Natu high_limit = desc->limit;
197665ffa035SNeel Natu }
197765ffa035SNeel Natu
197865ffa035SNeel Natu while (length > 0) {
197965ffa035SNeel Natu offset &= vie_size2mask(addrsize);
198065ffa035SNeel Natu if (offset < low_limit || offset > high_limit)
198165ffa035SNeel Natu return (-1);
198265ffa035SNeel Natu offset++;
198365ffa035SNeel Natu length--;
198465ffa035SNeel Natu }
198565ffa035SNeel Natu }
19865382c19dSNeel Natu
19875382c19dSNeel Natu /*
19885382c19dSNeel Natu * In 64-bit mode all segments except %fs and %gs have a segment
19895382c19dSNeel Natu * base address of 0.
19905382c19dSNeel Natu */
19915382c19dSNeel Natu if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
19925382c19dSNeel Natu seg != VM_REG_GUEST_GS) {
19935382c19dSNeel Natu segbase = 0;
19945382c19dSNeel Natu } else {
19955382c19dSNeel Natu segbase = desc->base;
19965382c19dSNeel Natu }
19975382c19dSNeel Natu
19985382c19dSNeel Natu /*
19993ada6e07SNeel Natu * Truncate 'firstoff' to the effective address size before adding
20005382c19dSNeel Natu * it to the segment base.
20015382c19dSNeel Natu */
20023ada6e07SNeel Natu firstoff &= vie_size2mask(addrsize);
20033ada6e07SNeel Natu *gla = (segbase + firstoff) & vie_size2mask(glasize);
20045382c19dSNeel Natu return (0);
20055382c19dSNeel Natu }
20065382c19dSNeel Natu
20074daa95f8SConrad Meyer /*
20084daa95f8SConrad Meyer * Prepare a partially decoded vie for a 2nd attempt.
20094daa95f8SConrad Meyer */
20104daa95f8SConrad Meyer void
vie_restart(struct vie * vie)20114daa95f8SConrad Meyer vie_restart(struct vie *vie)
20124daa95f8SConrad Meyer {
20134daa95f8SConrad Meyer _Static_assert(
20144daa95f8SConrad Meyer offsetof(struct vie, inst) < offsetof(struct vie, vie_startzero) &&
20154daa95f8SConrad Meyer offsetof(struct vie, num_valid) < offsetof(struct vie, vie_startzero),
20164daa95f8SConrad Meyer "restart should not erase instruction length or contents");
20174daa95f8SConrad Meyer
20184daa95f8SConrad Meyer memset((char *)vie + offsetof(struct vie, vie_startzero), 0,
20194daa95f8SConrad Meyer sizeof(*vie) - offsetof(struct vie, vie_startzero));
20204daa95f8SConrad Meyer
20214daa95f8SConrad Meyer vie->base_register = VM_REG_LAST;
20224daa95f8SConrad Meyer vie->index_register = VM_REG_LAST;
20234daa95f8SConrad Meyer vie->segment_register = VM_REG_LAST;
20244daa95f8SConrad Meyer }
20254daa95f8SConrad Meyer
2026318224bbSNeel Natu void
vie_init(struct vie * vie,const char * inst_bytes,int inst_length)2027c2a875f9SNeel Natu vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
2028a2da7af6SNeel Natu {
2029c2a875f9SNeel Natu KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
2030c2a875f9SNeel Natu ("%s: invalid instruction length (%d)", __func__, inst_length));
2031a2da7af6SNeel Natu
20324daa95f8SConrad Meyer vie_restart(vie);
20334daa95f8SConrad Meyer memset(vie->inst, 0, sizeof(vie->inst));
20344daa95f8SConrad Meyer if (inst_length != 0)
20354daa95f8SConrad Meyer memcpy(vie->inst, inst_bytes, inst_length);
2036c2a875f9SNeel Natu vie->num_valid = inst_length;
2037c2a875f9SNeel Natu }
2038a2da7af6SNeel Natu
2039b645fd45SConrad Meyer #ifdef _KERNEL
2040a2da7af6SNeel Natu static int
pf_error_code(int usermode,int prot,int rsvd,uint64_t pte)204137a723a5SNeel Natu pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
2042a2da7af6SNeel Natu {
2043fd949af6SNeel Natu int error_code = 0;
2044fd949af6SNeel Natu
2045fd949af6SNeel Natu if (pte & PG_V)
2046fd949af6SNeel Natu error_code |= PGEX_P;
2047fd949af6SNeel Natu if (prot & VM_PROT_WRITE)
2048fd949af6SNeel Natu error_code |= PGEX_W;
2049fd949af6SNeel Natu if (usermode)
2050fd949af6SNeel Natu error_code |= PGEX_U;
205137a723a5SNeel Natu if (rsvd)
205237a723a5SNeel Natu error_code |= PGEX_RSV;
2053fd949af6SNeel Natu if (prot & VM_PROT_EXECUTE)
2054fd949af6SNeel Natu error_code |= PGEX_I;
2055fd949af6SNeel Natu
2056fd949af6SNeel Natu return (error_code);
2057fd949af6SNeel Natu }
2058fd949af6SNeel Natu
2059f888763dSNeel Natu static void
ptp_release(void ** cookie)2060f888763dSNeel Natu ptp_release(void **cookie)
2061f888763dSNeel Natu {
2062f888763dSNeel Natu if (*cookie != NULL) {
2063f888763dSNeel Natu vm_gpa_release(*cookie);
2064f888763dSNeel Natu *cookie = NULL;
2065f888763dSNeel Natu }
2066f888763dSNeel Natu }
2067f888763dSNeel Natu
2068f888763dSNeel Natu static void *
ptp_hold(struct vcpu * vcpu,vm_paddr_t ptpphys,size_t len,void ** cookie)2069d3956e46SJohn Baldwin ptp_hold(struct vcpu *vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
2070f888763dSNeel Natu {
2071f888763dSNeel Natu void *ptr;
2072f888763dSNeel Natu
2073f888763dSNeel Natu ptp_release(cookie);
2074d3956e46SJohn Baldwin ptr = vm_gpa_hold(vcpu, ptpphys, len, VM_PROT_RW, cookie);
2075f888763dSNeel Natu return (ptr);
2076f888763dSNeel Natu }
2077f888763dSNeel Natu
/*
 * Translate guest linear address 'gla' into a guest physical address by
 * walking the guest's page tables in software, honoring the paging mode
 * described by 'paging' (flat, 32-bit, PAE, 4-level or 5-level long mode).
 *
 * Returns 0 on success or on a guest-visible fault (in which case
 * '*guest_fault' is set and, unless 'check_only', a #GP or #PF has been
 * injected into the vcpu), and EFAULT if a page-table page could not be
 * accessed via ptp_hold().  On success '*gpa' holds the translation.
 *
 * When 'check_only' is true no exceptions are injected and the accessed
 * and dirty bits are not updated.
 */
static int
_vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault, bool check_only)
{
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	u_int retries;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;
	void *cookie;

	*guest_fault = 0;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;
	cookie = NULL;
	retval = 0;
	retries = 0;
restart:
	/*
	 * The walk restarts here whenever an atomic A/D-bit update loses a
	 * race; yield occasionally so a spinning walk cannot hog the CPU.
	 */
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);
	if (retries++ > 0)
		maybe_yield();

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		if (!check_only)
			vm_inject_gp(vcpu);
		goto fault;
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {
		/* Paging disabled: linear addresses map 1:1 to physical. */
		*gpa = gla;
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_32) {
		/* Legacy 2-level walk with 32-bit PTEs (10+10+12 split). */
		nlevels = 2;
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vcpu, ptpphys, PAGE_SIZE,
			    &cookie);

			if (ptpbase32 == NULL)
				goto error;

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				if (!check_only) {
					pfcode = pf_error_code(usermode, prot, 0,
					    pte32);
					vm_inject_pf(vcpu, pfcode, gla);
				}
				goto fault;
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if (!check_only && (pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;

			ptpphys = pte32;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (!check_only && writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_PAE) {
		/*
		 * PAE: walk the 4-entry PDPT first, then fall through to
		 * the common 64-bit-PTE loop below for the remaining levels.
		 */
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vcpu, ptpphys, sizeof(*ptpbase) * 4,
		    &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			if (!check_only) {
				pfcode = pf_error_code(usermode, prot, 0, pte);
				vm_inject_pf(vcpu, pfcode, gla);
			}
			goto fault;
		}

		ptpphys = pte;

		nlevels = 2;
	} else if (paging->paging_mode == PAGING_MODE_64_LA57) {
		nlevels = 5;
	} else {
		nlevels = 4;
	}

	/* Common walk for PAE/4-level/5-level modes (9 bits per level). */
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vcpu, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			if (!check_only) {
				pfcode = pf_error_code(usermode, prot, 0, pte);
				vm_inject_pf(vcpu, pfcode, gla);
			}
			goto fault;
		}

		/* Set the accessed bit in the page table entry */
		if (!check_only && (pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			/*
			 * PG_PS in an entry above the 1GB level means a
			 * reserved bit is set: report it as a #PF with the
			 * RSV error code bit.
			 */
			if (pgsize > 1 * GB) {
				if (!check_only) {
					pfcode = pf_error_code(usermode, prot, 1,
					    pte);
					vm_inject_pf(vcpu, pfcode, gla);
				}
				goto fault;
			}
			break;
		}

		ptpphys = pte;
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (!check_only && writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
done:
	ptp_release(&cookie);
	KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d",
	    __func__, retval));
	return (retval);
error:
	retval = EFAULT;
	goto done;
fault:
	*guest_fault = 1;
	goto done;
}
2278a2da7af6SNeel Natu
227970593114SNeel Natu int
vm_gla2gpa(struct vcpu * vcpu,struct vm_guest_paging * paging,uint64_t gla,int prot,uint64_t * gpa,int * guest_fault)2280d3956e46SJohn Baldwin vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
22815f8754c0SJohn Baldwin uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
22825f8754c0SJohn Baldwin {
22835f8754c0SJohn Baldwin
2284d3956e46SJohn Baldwin return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault,
22855f8754c0SJohn Baldwin false));
22865f8754c0SJohn Baldwin }
22875f8754c0SJohn Baldwin
22885f8754c0SJohn Baldwin int
vm_gla2gpa_nofault(struct vcpu * vcpu,struct vm_guest_paging * paging,uint64_t gla,int prot,uint64_t * gpa,int * guest_fault)2289d3956e46SJohn Baldwin vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
22905f8754c0SJohn Baldwin uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
22915f8754c0SJohn Baldwin {
22925f8754c0SJohn Baldwin
2293d3956e46SJohn Baldwin return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault,
22945f8754c0SJohn Baldwin true));
22955f8754c0SJohn Baldwin }
22965f8754c0SJohn Baldwin
22975f8754c0SJohn Baldwin int
vmm_fetch_instruction(struct vcpu * vcpu,struct vm_guest_paging * paging,uint64_t rip,int inst_length,struct vie * vie,int * faultptr)2298d3956e46SJohn Baldwin vmm_fetch_instruction(struct vcpu *vcpu, struct vm_guest_paging *paging,
22999c4d5478SNeel Natu uint64_t rip, int inst_length, struct vie *vie, int *faultptr)
2300a2da7af6SNeel Natu {
2301d665d229SNeel Natu struct vm_copyinfo copyinfo[2];
2302d665d229SNeel Natu int error, prot;
2303a2da7af6SNeel Natu
230470593114SNeel Natu if (inst_length > VIE_INST_SIZE)
230570593114SNeel Natu panic("vmm_fetch_instruction: invalid length %d", inst_length);
230670593114SNeel Natu
2307d665d229SNeel Natu prot = PROT_READ | PROT_EXEC;
2308d3956e46SJohn Baldwin error = vm_copy_setup(vcpu, paging, rip, inst_length, prot,
23099c4d5478SNeel Natu copyinfo, nitems(copyinfo), faultptr);
23109c4d5478SNeel Natu if (error || *faultptr)
23119c4d5478SNeel Natu return (error);
23129c4d5478SNeel Natu
23132b4fe856SJohn Baldwin vm_copyin(copyinfo, vie->inst, inst_length);
23142b4fe856SJohn Baldwin vm_copy_teardown(copyinfo, nitems(copyinfo));
2315d665d229SNeel Natu vie->num_valid = inst_length;
23169c4d5478SNeel Natu return (0);
2317a2da7af6SNeel Natu }
2318b645fd45SConrad Meyer #endif /* _KERNEL */
2319a2da7af6SNeel Natu
2320a2da7af6SNeel Natu static int
vie_peek(struct vie * vie,uint8_t * x)2321a2da7af6SNeel Natu vie_peek(struct vie *vie, uint8_t *x)
2322a2da7af6SNeel Natu {
2323ba9b7bf7SNeel Natu
2324a2da7af6SNeel Natu if (vie->num_processed < vie->num_valid) {
2325a2da7af6SNeel Natu *x = vie->inst[vie->num_processed];
2326a2da7af6SNeel Natu return (0);
2327a2da7af6SNeel Natu } else
2328a2da7af6SNeel Natu return (-1);
2329a2da7af6SNeel Natu }
2330a2da7af6SNeel Natu
2331a2da7af6SNeel Natu static void
vie_advance(struct vie * vie)2332a2da7af6SNeel Natu vie_advance(struct vie *vie)
2333a2da7af6SNeel Natu {
2334a2da7af6SNeel Natu
2335a2da7af6SNeel Natu vie->num_processed++;
2336a2da7af6SNeel Natu }
2337a2da7af6SNeel Natu
233875346353SNeel Natu static bool
segment_override(uint8_t x,int * seg)233975346353SNeel Natu segment_override(uint8_t x, int *seg)
234075346353SNeel Natu {
234175346353SNeel Natu
234275346353SNeel Natu switch (x) {
234375346353SNeel Natu case 0x2E:
234475346353SNeel Natu *seg = VM_REG_GUEST_CS;
234575346353SNeel Natu break;
234675346353SNeel Natu case 0x36:
234775346353SNeel Natu *seg = VM_REG_GUEST_SS;
234875346353SNeel Natu break;
234975346353SNeel Natu case 0x3E:
235075346353SNeel Natu *seg = VM_REG_GUEST_DS;
235175346353SNeel Natu break;
235275346353SNeel Natu case 0x26:
235375346353SNeel Natu *seg = VM_REG_GUEST_ES;
235475346353SNeel Natu break;
235575346353SNeel Natu case 0x64:
235675346353SNeel Natu *seg = VM_REG_GUEST_FS;
235775346353SNeel Natu break;
235875346353SNeel Natu case 0x65:
235975346353SNeel Natu *seg = VM_REG_GUEST_GS;
236075346353SNeel Natu break;
236175346353SNeel Natu default:
236275346353SNeel Natu return (false);
236375346353SNeel Natu }
236475346353SNeel Natu return (true);
236575346353SNeel Natu }
236675346353SNeel Natu
/*
 * Decode the legacy, REX and 3-byte-VEX prefixes of the instruction in
 * 'vie' and derive the effective address and operand sizes for the given
 * CPU mode ('cs_d' is the CS.D default-size bit for protected mode).
 * Returns 0 on success, -1 on a malformed or truncated prefix sequence.
 */
static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
	uint8_t x;

	/* Consume legacy prefixes (any number, any order). */
	while (1) {
		if (vie_peek(vie, &x))
			return (-1);

		if (x == 0x66)
			vie->opsize_override = 1;
		else if (x == 0x67)
			vie->addrsize_override = 1;
		else if (x == 0xF3)
			vie->repz_present = 1;
		else if (x == 0xF2)
			vie->repnz_present = 1;
		else if (segment_override(x, &vie->segment_register))
			vie->segment_override = 1;
		else
			break;

		vie_advance(vie);
	}

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 *
	 * 'x' still holds the first non-legacy-prefix byte here.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
		vie_advance(vie);
	}

	/*
	 * § 2.3.5, "The VEX Prefix", SDM Vol 2.
	 */
	if ((cpu_mode == CPU_MODE_64BIT || cpu_mode == CPU_MODE_COMPATIBILITY)
	    && x == 0xC4) {
		const struct vie_op *optab;

		/* 3-byte VEX prefix. */
		vie->vex_present = 1;

		vie_advance(vie);
		if (vie_peek(vie, &x))
			return (-1);

		/*
		 * 2nd byte: [R', X', B', mmmmm[4:0]].  Bits are inverted
		 * relative to REX encoding.
		 */
		vie->rex_r = x & 0x80 ? 0 : 1;
		vie->rex_x = x & 0x40 ? 0 : 1;
		vie->rex_b = x & 0x20 ? 0 : 1;

		/* 'mmmmm' selects the opcode escape map. */
		switch (x & 0x1F) {
		case 0x2:
			/* 0F 38. */
			optab = three_byte_opcodes_0f38;
			break;
		case 0x1:
			/* 0F class - nothing handled here yet. */
			/* FALLTHROUGH */
		case 0x3:
			/* 0F 3A class - nothing handled here yet. */
			/* FALLTHROUGH */
		default:
			/* Reserved (#UD). */
			return (-1);
		}

		vie_advance(vie);
		if (vie_peek(vie, &x))
			return (-1);

		/* 3rd byte: [W, vvvv[6:3], L, pp[1:0]]. */
		vie->rex_w = x & 0x80 ? 1 : 0;

		/* 'vvvv' is stored inverted in the encoding. */
		vie->vex_reg = ((~(unsigned)x & 0x78u) >> 3);
		vie->vex_l = !!(x & 0x4);
		vie->vex_pp = (x & 0x3);

		/* PP: 1=66 2=F3 3=F2 prefixes. */
		switch (vie->vex_pp) {
		case 0x1:
			vie->opsize_override = 1;
			break;
		case 0x2:
			vie->repz_present = 1;
			break;
		case 0x3:
			vie->repnz_present = 1;
			break;
		}

		vie_advance(vie);

		/* Opcode, sans literal prefix prefix. */
		if (vie_peek(vie, &x))
			return (-1);

		vie->op = optab[x];
		if (vie->op.op_type == VIE_OP_TYPE_NONE)
			return (-1);

		vie_advance(vie);
	}

	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
	return (0);
}
2510a2da7af6SNeel Natu
2511a2da7af6SNeel Natu static int
decode_two_byte_opcode(struct vie * vie)251254e03e07STycho Nightingale decode_two_byte_opcode(struct vie *vie)
251354e03e07STycho Nightingale {
251454e03e07STycho Nightingale uint8_t x;
251554e03e07STycho Nightingale
251654e03e07STycho Nightingale if (vie_peek(vie, &x))
251754e03e07STycho Nightingale return (-1);
251854e03e07STycho Nightingale
251954e03e07STycho Nightingale vie->op = two_byte_opcodes[x];
252054e03e07STycho Nightingale
252154e03e07STycho Nightingale if (vie->op.op_type == VIE_OP_TYPE_NONE)
252254e03e07STycho Nightingale return (-1);
252354e03e07STycho Nightingale
252454e03e07STycho Nightingale vie_advance(vie);
252554e03e07STycho Nightingale return (0);
252654e03e07STycho Nightingale }
252754e03e07STycho Nightingale
252854e03e07STycho Nightingale static int
decode_opcode(struct vie * vie)2529a2da7af6SNeel Natu decode_opcode(struct vie *vie)
2530a2da7af6SNeel Natu {
2531a2da7af6SNeel Natu uint8_t x;
2532a2da7af6SNeel Natu
2533a2da7af6SNeel Natu if (vie_peek(vie, &x))
2534a2da7af6SNeel Natu return (-1);
2535a2da7af6SNeel Natu
2536cfdea69dSConrad Meyer /* Already did this via VEX prefix. */
2537cfdea69dSConrad Meyer if (vie->op.op_type != VIE_OP_TYPE_NONE)
2538cfdea69dSConrad Meyer return (0);
2539cfdea69dSConrad Meyer
2540ba9b7bf7SNeel Natu vie->op = one_byte_opcodes[x];
2541ba9b7bf7SNeel Natu
2542ba9b7bf7SNeel Natu if (vie->op.op_type == VIE_OP_TYPE_NONE)
2543ba9b7bf7SNeel Natu return (-1);
2544a2da7af6SNeel Natu
2545a2da7af6SNeel Natu vie_advance(vie);
254654e03e07STycho Nightingale
254754e03e07STycho Nightingale if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
254854e03e07STycho Nightingale return (decode_two_byte_opcode(vie));
254954e03e07STycho Nightingale
2550a2da7af6SNeel Natu return (0);
2551a2da7af6SNeel Natu }
2552a2da7af6SNeel Natu
2553a2da7af6SNeel Natu static int
decode_modrm(struct vie * vie,enum vm_cpu_mode cpu_mode)2554e813a873SNeel Natu decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
2555a2da7af6SNeel Natu {
2556a2da7af6SNeel Natu uint8_t x;
2557a2da7af6SNeel Natu
2558d665d229SNeel Natu if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
2559d665d229SNeel Natu return (0);
2560d665d229SNeel Natu
2561e4f605eeSTycho Nightingale if (cpu_mode == CPU_MODE_REAL)
2562e4f605eeSTycho Nightingale return (-1);
2563e4f605eeSTycho Nightingale
2564a2da7af6SNeel Natu if (vie_peek(vie, &x))
2565a2da7af6SNeel Natu return (-1);
2566a2da7af6SNeel Natu
2567a2da7af6SNeel Natu vie->mod = (x >> 6) & 0x3;
2568a2da7af6SNeel Natu vie->rm = (x >> 0) & 0x7;
2569a2da7af6SNeel Natu vie->reg = (x >> 3) & 0x7;
2570a2da7af6SNeel Natu
2571ba9b7bf7SNeel Natu /*
2572ba9b7bf7SNeel Natu * A direct addressing mode makes no sense in the context of an EPT
2573ba9b7bf7SNeel Natu * fault. There has to be a memory access involved to cause the
2574ba9b7bf7SNeel Natu * EPT fault.
2575ba9b7bf7SNeel Natu */
2576ba9b7bf7SNeel Natu if (vie->mod == VIE_MOD_DIRECT)
2577ba9b7bf7SNeel Natu return (-1);
2578ba9b7bf7SNeel Natu
2579a2da7af6SNeel Natu if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
2580a2da7af6SNeel Natu (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
2581a2da7af6SNeel Natu /*
2582a2da7af6SNeel Natu * Table 2-5: Special Cases of REX Encodings
2583a2da7af6SNeel Natu *
2584a2da7af6SNeel Natu * mod=0, r/m=5 is used in the compatibility mode to
2585a2da7af6SNeel Natu * indicate a disp32 without a base register.
2586a2da7af6SNeel Natu *
2587a2da7af6SNeel Natu * mod!=3, r/m=4 is used in the compatibility mode to
2588a2da7af6SNeel Natu * indicate that the SIB byte is present.
2589a2da7af6SNeel Natu *
2590a2da7af6SNeel Natu * The 'b' bit in the REX prefix is don't care in
2591a2da7af6SNeel Natu * this case.
2592a2da7af6SNeel Natu */
2593a2da7af6SNeel Natu } else {
2594a2da7af6SNeel Natu vie->rm |= (vie->rex_b << 3);
2595a2da7af6SNeel Natu }
2596a2da7af6SNeel Natu
2597a2da7af6SNeel Natu vie->reg |= (vie->rex_r << 3);
2598a2da7af6SNeel Natu
2599ba9b7bf7SNeel Natu /* SIB */
2600a2da7af6SNeel Natu if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
2601ba9b7bf7SNeel Natu goto done;
2602a2da7af6SNeel Natu
2603a2da7af6SNeel Natu vie->base_register = gpr_map[vie->rm];
2604a2da7af6SNeel Natu
2605a2da7af6SNeel Natu switch (vie->mod) {
2606a2da7af6SNeel Natu case VIE_MOD_INDIRECT_DISP8:
2607a2da7af6SNeel Natu vie->disp_bytes = 1;
2608a2da7af6SNeel Natu break;
2609a2da7af6SNeel Natu case VIE_MOD_INDIRECT_DISP32:
2610a2da7af6SNeel Natu vie->disp_bytes = 4;
2611a2da7af6SNeel Natu break;
2612a2da7af6SNeel Natu case VIE_MOD_INDIRECT:
2613a2da7af6SNeel Natu if (vie->rm == VIE_RM_DISP32) {
2614a2da7af6SNeel Natu vie->disp_bytes = 4;
2615d3c11f40SPeter Grehan /*
2616d3c11f40SPeter Grehan * Table 2-7. RIP-Relative Addressing
2617d3c11f40SPeter Grehan *
2618d3c11f40SPeter Grehan * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
2619d3c11f40SPeter Grehan * whereas in compatibility mode it just implies disp32.
2620d3c11f40SPeter Grehan */
2621d3c11f40SPeter Grehan
2622d3c11f40SPeter Grehan if (cpu_mode == CPU_MODE_64BIT)
2623d3c11f40SPeter Grehan vie->base_register = VM_REG_GUEST_RIP;
2624d3c11f40SPeter Grehan else
2625d3c11f40SPeter Grehan vie->base_register = VM_REG_LAST;
2626a2da7af6SNeel Natu }
2627a2da7af6SNeel Natu break;
2628a2da7af6SNeel Natu }
2629a2da7af6SNeel Natu
2630ba9b7bf7SNeel Natu done:
2631ba9b7bf7SNeel Natu vie_advance(vie);
2632ba9b7bf7SNeel Natu
2633ba9b7bf7SNeel Natu return (0);
2634ba9b7bf7SNeel Natu }
2635ba9b7bf7SNeel Natu
2636ba9b7bf7SNeel Natu static int
decode_sib(struct vie * vie)2637ba9b7bf7SNeel Natu decode_sib(struct vie *vie)
2638ba9b7bf7SNeel Natu {
2639ba9b7bf7SNeel Natu uint8_t x;
2640ba9b7bf7SNeel Natu
2641ba9b7bf7SNeel Natu /* Proceed only if SIB byte is present */
2642ba9b7bf7SNeel Natu if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
2643ba9b7bf7SNeel Natu return (0);
2644ba9b7bf7SNeel Natu
2645ba9b7bf7SNeel Natu if (vie_peek(vie, &x))
2646ba9b7bf7SNeel Natu return (-1);
2647ba9b7bf7SNeel Natu
2648ba9b7bf7SNeel Natu /* De-construct the SIB byte */
2649ba9b7bf7SNeel Natu vie->ss = (x >> 6) & 0x3;
2650ba9b7bf7SNeel Natu vie->index = (x >> 3) & 0x7;
2651ba9b7bf7SNeel Natu vie->base = (x >> 0) & 0x7;
2652ba9b7bf7SNeel Natu
2653ba9b7bf7SNeel Natu /* Apply the REX prefix modifiers */
2654ba9b7bf7SNeel Natu vie->index |= vie->rex_x << 3;
2655ba9b7bf7SNeel Natu vie->base |= vie->rex_b << 3;
2656ba9b7bf7SNeel Natu
2657ba9b7bf7SNeel Natu switch (vie->mod) {
2658ba9b7bf7SNeel Natu case VIE_MOD_INDIRECT_DISP8:
2659ba9b7bf7SNeel Natu vie->disp_bytes = 1;
2660ba9b7bf7SNeel Natu break;
2661ba9b7bf7SNeel Natu case VIE_MOD_INDIRECT_DISP32:
2662ba9b7bf7SNeel Natu vie->disp_bytes = 4;
2663ba9b7bf7SNeel Natu break;
2664ba9b7bf7SNeel Natu }
2665ba9b7bf7SNeel Natu
2666ba9b7bf7SNeel Natu if (vie->mod == VIE_MOD_INDIRECT &&
2667ba9b7bf7SNeel Natu (vie->base == 5 || vie->base == 13)) {
2668ba9b7bf7SNeel Natu /*
2669ba9b7bf7SNeel Natu * Special case when base register is unused if mod = 0
2670ba9b7bf7SNeel Natu * and base = %rbp or %r13.
2671ba9b7bf7SNeel Natu *
2672ba9b7bf7SNeel Natu * Documented in:
2673ba9b7bf7SNeel Natu * Table 2-3: 32-bit Addressing Forms with the SIB Byte
2674ba9b7bf7SNeel Natu * Table 2-5: Special Cases of REX Encodings
2675ba9b7bf7SNeel Natu */
2676ba9b7bf7SNeel Natu vie->disp_bytes = 4;
2677ba9b7bf7SNeel Natu } else {
2678ba9b7bf7SNeel Natu vie->base_register = gpr_map[vie->base];
2679ba9b7bf7SNeel Natu }
2680ba9b7bf7SNeel Natu
2681ba9b7bf7SNeel Natu /*
2682ba9b7bf7SNeel Natu * All encodings of 'index' are valid except for %rsp (4).
2683ba9b7bf7SNeel Natu *
2684ba9b7bf7SNeel Natu * Documented in:
2685ba9b7bf7SNeel Natu * Table 2-3: 32-bit Addressing Forms with the SIB Byte
2686ba9b7bf7SNeel Natu * Table 2-5: Special Cases of REX Encodings
2687ba9b7bf7SNeel Natu */
2688ba9b7bf7SNeel Natu if (vie->index != 4)
2689ba9b7bf7SNeel Natu vie->index_register = gpr_map[vie->index];
2690ba9b7bf7SNeel Natu
2691ba9b7bf7SNeel Natu /* 'scale' makes sense only in the context of an index register */
2692ba9b7bf7SNeel Natu if (vie->index_register < VM_REG_LAST)
2693ba9b7bf7SNeel Natu vie->scale = 1 << vie->ss;
2694a2da7af6SNeel Natu
2695a2da7af6SNeel Natu vie_advance(vie);
2696a2da7af6SNeel Natu
2697a2da7af6SNeel Natu return (0);
2698a2da7af6SNeel Natu }
2699a2da7af6SNeel Natu
2700a2da7af6SNeel Natu static int
decode_displacement(struct vie * vie)2701a2da7af6SNeel Natu decode_displacement(struct vie *vie)
2702a2da7af6SNeel Natu {
2703a2da7af6SNeel Natu int n, i;
2704a2da7af6SNeel Natu uint8_t x;
2705a2da7af6SNeel Natu
2706a2da7af6SNeel Natu union {
2707a2da7af6SNeel Natu char buf[4];
2708a2da7af6SNeel Natu int8_t signed8;
2709a2da7af6SNeel Natu int32_t signed32;
2710a2da7af6SNeel Natu } u;
2711a2da7af6SNeel Natu
2712a2da7af6SNeel Natu if ((n = vie->disp_bytes) == 0)
2713a2da7af6SNeel Natu return (0);
2714a2da7af6SNeel Natu
2715a2da7af6SNeel Natu if (n != 1 && n != 4)
2716a2da7af6SNeel Natu panic("decode_displacement: invalid disp_bytes %d", n);
2717a2da7af6SNeel Natu
2718a2da7af6SNeel Natu for (i = 0; i < n; i++) {
2719a2da7af6SNeel Natu if (vie_peek(vie, &x))
2720a2da7af6SNeel Natu return (-1);
2721a2da7af6SNeel Natu
2722a2da7af6SNeel Natu u.buf[i] = x;
2723a2da7af6SNeel Natu vie_advance(vie);
2724a2da7af6SNeel Natu }
2725a2da7af6SNeel Natu
2726a2da7af6SNeel Natu if (n == 1)
2727a2da7af6SNeel Natu vie->displacement = u.signed8; /* sign-extended */
2728a2da7af6SNeel Natu else
2729a2da7af6SNeel Natu vie->displacement = u.signed32; /* sign-extended */
2730a2da7af6SNeel Natu
2731a2da7af6SNeel Natu return (0);
2732a2da7af6SNeel Natu }
2733a2da7af6SNeel Natu
2734a2da7af6SNeel Natu static int
decode_immediate(struct vie * vie)2735a2da7af6SNeel Natu decode_immediate(struct vie *vie)
2736a2da7af6SNeel Natu {
2737a2da7af6SNeel Natu int i, n;
2738a2da7af6SNeel Natu uint8_t x;
2739a2da7af6SNeel Natu union {
2740a2da7af6SNeel Natu char buf[4];
2741ba9b7bf7SNeel Natu int8_t signed8;
2742f7a9f178SNeel Natu int16_t signed16;
2743a2da7af6SNeel Natu int32_t signed32;
2744a2da7af6SNeel Natu } u;
2745a2da7af6SNeel Natu
27460f9d5dc7SNeel Natu /* Figure out immediate operand size (if any) */
2747d665d229SNeel Natu if (vie->op.op_flags & VIE_OP_F_IMM) {
2748f7a9f178SNeel Natu /*
2749f7a9f178SNeel Natu * Section 2.2.1.5 "Immediates", Intel SDM:
2750f7a9f178SNeel Natu * In 64-bit mode the typical size of immediate operands
2751f7a9f178SNeel Natu * remains 32-bits. When the operand size if 64-bits, the
2752f7a9f178SNeel Natu * processor sign-extends all immediates to 64-bits prior
2753f7a9f178SNeel Natu * to their use.
2754f7a9f178SNeel Natu */
2755f7a9f178SNeel Natu if (vie->opsize == 4 || vie->opsize == 8)
27560f9d5dc7SNeel Natu vie->imm_bytes = 4;
2757f7a9f178SNeel Natu else
2758f7a9f178SNeel Natu vie->imm_bytes = 2;
2759f7a9f178SNeel Natu } else if (vie->op.op_flags & VIE_OP_F_IMM8) {
27600f9d5dc7SNeel Natu vie->imm_bytes = 1;
2761f7a9f178SNeel Natu }
27620f9d5dc7SNeel Natu
2763a2da7af6SNeel Natu if ((n = vie->imm_bytes) == 0)
2764a2da7af6SNeel Natu return (0);
2765a2da7af6SNeel Natu
2766d665d229SNeel Natu KASSERT(n == 1 || n == 2 || n == 4,
2767f7a9f178SNeel Natu ("%s: invalid number of immediate bytes: %d", __func__, n));
2768a2da7af6SNeel Natu
2769a2da7af6SNeel Natu for (i = 0; i < n; i++) {
2770a2da7af6SNeel Natu if (vie_peek(vie, &x))
2771a2da7af6SNeel Natu return (-1);
2772a2da7af6SNeel Natu
2773a2da7af6SNeel Natu u.buf[i] = x;
2774a2da7af6SNeel Natu vie_advance(vie);
2775a2da7af6SNeel Natu }
2776a2da7af6SNeel Natu
2777f7a9f178SNeel Natu /* sign-extend the immediate value before use */
2778ba9b7bf7SNeel Natu if (n == 1)
2779f7a9f178SNeel Natu vie->immediate = u.signed8;
2780f7a9f178SNeel Natu else if (n == 2)
2781f7a9f178SNeel Natu vie->immediate = u.signed16;
2782ba9b7bf7SNeel Natu else
2783d665d229SNeel Natu vie->immediate = u.signed32;
2784a2da7af6SNeel Natu
2785a2da7af6SNeel Natu return (0);
2786a2da7af6SNeel Natu }
2787a2da7af6SNeel Natu
2788d665d229SNeel Natu static int
decode_moffset(struct vie * vie)2789d665d229SNeel Natu decode_moffset(struct vie *vie)
2790d665d229SNeel Natu {
2791d665d229SNeel Natu int i, n;
2792d665d229SNeel Natu uint8_t x;
2793d665d229SNeel Natu union {
2794d665d229SNeel Natu char buf[8];
2795d665d229SNeel Natu uint64_t u64;
2796d665d229SNeel Natu } u;
2797d665d229SNeel Natu
2798d665d229SNeel Natu if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
2799d665d229SNeel Natu return (0);
2800d665d229SNeel Natu
2801d665d229SNeel Natu /*
2802d665d229SNeel Natu * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
2803d665d229SNeel Natu * The memory offset size follows the address-size of the instruction.
2804d665d229SNeel Natu */
2805d665d229SNeel Natu n = vie->addrsize;
2806d665d229SNeel Natu KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));
2807d665d229SNeel Natu
2808d665d229SNeel Natu u.u64 = 0;
2809d665d229SNeel Natu for (i = 0; i < n; i++) {
2810d665d229SNeel Natu if (vie_peek(vie, &x))
2811d665d229SNeel Natu return (-1);
2812d665d229SNeel Natu
2813d665d229SNeel Natu u.buf[i] = x;
2814d665d229SNeel Natu vie_advance(vie);
2815d665d229SNeel Natu }
2816d665d229SNeel Natu vie->displacement = u.u64;
2817a2da7af6SNeel Natu return (0);
2818a2da7af6SNeel Natu }
2819a2da7af6SNeel Natu
2820b645fd45SConrad Meyer #ifdef _KERNEL
2821ba9b7bf7SNeel Natu /*
2822ba9b7bf7SNeel Natu * Verify that the 'guest linear address' provided as collateral of the nested
2823ba9b7bf7SNeel Natu * page table fault matches with our instruction decoding.
2824ba9b7bf7SNeel Natu */
2825ba9b7bf7SNeel Natu static int
verify_gla(struct vcpu * vcpu,uint64_t gla,struct vie * vie,enum vm_cpu_mode cpu_mode)2826d3956e46SJohn Baldwin verify_gla(struct vcpu *vcpu, uint64_t gla, struct vie *vie,
2827ea587cd8STycho Nightingale enum vm_cpu_mode cpu_mode)
2828a2da7af6SNeel Natu {
2829ba9b7bf7SNeel Natu int error;
2830ea587cd8STycho Nightingale uint64_t base, segbase, idx, gla2;
2831ea587cd8STycho Nightingale enum vm_reg_name seg;
2832ea587cd8STycho Nightingale struct seg_desc desc;
2833ba9b7bf7SNeel Natu
283466f71b7dSNeel Natu /* Skip 'gla' verification */
283566f71b7dSNeel Natu if (gla == VIE_INVALID_GLA)
283666f71b7dSNeel Natu return (0);
283766f71b7dSNeel Natu
2838ba9b7bf7SNeel Natu base = 0;
2839ba9b7bf7SNeel Natu if (vie->base_register != VM_REG_LAST) {
2840d3956e46SJohn Baldwin error = vm_get_register(vcpu, vie->base_register, &base);
2841ba9b7bf7SNeel Natu if (error) {
2842ba9b7bf7SNeel Natu printf("verify_gla: error %d getting base reg %d\n",
2843ba9b7bf7SNeel Natu error, vie->base_register);
2844ba9b7bf7SNeel Natu return (-1);
2845ba9b7bf7SNeel Natu }
2846d3c11f40SPeter Grehan
2847d3c11f40SPeter Grehan /*
2848d3c11f40SPeter Grehan * RIP-relative addressing starts from the following
2849d3c11f40SPeter Grehan * instruction
2850d3c11f40SPeter Grehan */
2851d3c11f40SPeter Grehan if (vie->base_register == VM_REG_GUEST_RIP)
2852647c8782SNeel Natu base += vie->num_processed;
2853ba9b7bf7SNeel Natu }
2854ba9b7bf7SNeel Natu
2855ba9b7bf7SNeel Natu idx = 0;
2856ba9b7bf7SNeel Natu if (vie->index_register != VM_REG_LAST) {
2857d3956e46SJohn Baldwin error = vm_get_register(vcpu, vie->index_register, &idx);
2858ba9b7bf7SNeel Natu if (error) {
2859ba9b7bf7SNeel Natu printf("verify_gla: error %d getting index reg %d\n",
2860ba9b7bf7SNeel Natu error, vie->index_register);
2861ba9b7bf7SNeel Natu return (-1);
2862ba9b7bf7SNeel Natu }
2863ba9b7bf7SNeel Natu }
2864ba9b7bf7SNeel Natu
2865ea587cd8STycho Nightingale /*
2866ea587cd8STycho Nightingale * From "Specifying a Segment Selector", Intel SDM, Vol 1
2867ea587cd8STycho Nightingale *
2868ea587cd8STycho Nightingale * In 64-bit mode, segmentation is generally (but not
2869ea587cd8STycho Nightingale * completely) disabled. The exceptions are the FS and GS
2870ea587cd8STycho Nightingale * segments.
2871ea587cd8STycho Nightingale *
2872ea587cd8STycho Nightingale * In legacy IA-32 mode, when the ESP or EBP register is used
2873ea587cd8STycho Nightingale * as the base, the SS segment is the default segment. For
2874ea587cd8STycho Nightingale * other data references, except when relative to stack or
2875ea587cd8STycho Nightingale * string destination the DS segment is the default. These
2876ea587cd8STycho Nightingale * can be overridden to allow other segments to be accessed.
2877ea587cd8STycho Nightingale */
2878ea587cd8STycho Nightingale if (vie->segment_override)
2879ea587cd8STycho Nightingale seg = vie->segment_register;
2880ea587cd8STycho Nightingale else if (vie->base_register == VM_REG_GUEST_RSP ||
2881ea587cd8STycho Nightingale vie->base_register == VM_REG_GUEST_RBP)
2882ea587cd8STycho Nightingale seg = VM_REG_GUEST_SS;
2883ea587cd8STycho Nightingale else
2884ea587cd8STycho Nightingale seg = VM_REG_GUEST_DS;
2885ea587cd8STycho Nightingale if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
2886ea587cd8STycho Nightingale seg != VM_REG_GUEST_GS) {
2887ea587cd8STycho Nightingale segbase = 0;
2888ea587cd8STycho Nightingale } else {
2889d3956e46SJohn Baldwin error = vm_get_seg_desc(vcpu, seg, &desc);
2890ea587cd8STycho Nightingale if (error) {
2891ea587cd8STycho Nightingale printf("verify_gla: error %d getting segment"
2892ea587cd8STycho Nightingale " descriptor %d", error,
2893ea587cd8STycho Nightingale vie->segment_register);
2894ea587cd8STycho Nightingale return (-1);
2895ea587cd8STycho Nightingale }
2896ea587cd8STycho Nightingale segbase = desc.base;
2897ea587cd8STycho Nightingale }
2898ea587cd8STycho Nightingale
2899ea587cd8STycho Nightingale gla2 = segbase + base + vie->scale * idx + vie->displacement;
2900f7a9f178SNeel Natu gla2 &= size2mask[vie->addrsize];
2901f7a9f178SNeel Natu if (gla != gla2) {
2902ea587cd8STycho Nightingale printf("verify_gla mismatch: segbase(0x%0lx)"
2903ba9b7bf7SNeel Natu "base(0x%0lx), scale(%d), index(0x%0lx), "
2904f7a9f178SNeel Natu "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
2905ea587cd8STycho Nightingale segbase, base, vie->scale, idx, vie->displacement,
2906ea587cd8STycho Nightingale gla, gla2);
2907ba9b7bf7SNeel Natu return (-1);
2908ba9b7bf7SNeel Natu }
2909ba9b7bf7SNeel Natu
2910ba9b7bf7SNeel Natu return (0);
2911ba9b7bf7SNeel Natu }
2912b645fd45SConrad Meyer #endif /* _KERNEL */
2913ba9b7bf7SNeel Natu
2914ba9b7bf7SNeel Natu int
2915b645fd45SConrad Meyer #ifdef _KERNEL
vmm_decode_instruction(struct vcpu * vcpu,uint64_t gla,enum vm_cpu_mode cpu_mode,int cs_d,struct vie * vie)2916d3956e46SJohn Baldwin vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
2917f7a9f178SNeel Natu enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
2918b645fd45SConrad Meyer #else
2919b645fd45SConrad Meyer vmm_decode_instruction(enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
2920b645fd45SConrad Meyer #endif
2921ba9b7bf7SNeel Natu {
2922ba9b7bf7SNeel Natu
2923f7a9f178SNeel Natu if (decode_prefixes(vie, cpu_mode, cs_d))
2924a2da7af6SNeel Natu return (-1);
2925a2da7af6SNeel Natu
2926a2da7af6SNeel Natu if (decode_opcode(vie))
2927a2da7af6SNeel Natu return (-1);
2928a2da7af6SNeel Natu
292900f3efe1SJohn Baldwin if (decode_modrm(vie, cpu_mode))
2930a2da7af6SNeel Natu return (-1);
2931a2da7af6SNeel Natu
2932ba9b7bf7SNeel Natu if (decode_sib(vie))
2933ba9b7bf7SNeel Natu return (-1);
2934ba9b7bf7SNeel Natu
2935a2da7af6SNeel Natu if (decode_displacement(vie))
2936a2da7af6SNeel Natu return (-1);
2937a2da7af6SNeel Natu
2938a2da7af6SNeel Natu if (decode_immediate(vie))
2939a2da7af6SNeel Natu return (-1);
2940a2da7af6SNeel Natu
2941d665d229SNeel Natu if (decode_moffset(vie))
2942d665d229SNeel Natu return (-1);
2943d665d229SNeel Natu
2944b645fd45SConrad Meyer #ifdef _KERNEL
294575346353SNeel Natu if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
2946d3956e46SJohn Baldwin if (verify_gla(vcpu, gla, vie, cpu_mode))
2947ba9b7bf7SNeel Natu return (-1);
294875346353SNeel Natu }
2949b645fd45SConrad Meyer #endif
2950ba9b7bf7SNeel Natu
2951ba9b7bf7SNeel Natu vie->decoded = 1; /* success */
2952ba9b7bf7SNeel Natu
2953a2da7af6SNeel Natu return (0);
2954a2da7af6SNeel Natu }
2955