// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>
#include <asm/text-patching.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
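
/*
 * Each operand slot in an opcode's 64-bit flags word holds one of the
 * OpXxx values above, OpBits (5) bits wide: the destination at DstShift,
 * the source at SrcShift and the second source at Src2Shift (see below).
 * For example, the destination type can be recovered with
 * "(ctxt->d >> DstShift) & OpMask".
 */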

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
#define DstShift    1           /* Destination operand type at bits 1-5 */
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
#define SrcShift    6           /* Source operand type at bits 6-10 */
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Group mechanisms, at bits 15-17 */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
#define ModRM       (1<<19)     /* Generic ModRM decode. */
#define Mov         (1<<20)     /* Destination is only written; never read. */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)     /* Instruction generates #UD in 64-bit mode */
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
#define Avx         ((u64)1 << 31)  /* Instruction uses VEX prefix */
#define Src2Shift   (32)        /* Source 2 operand type at bits 32-36 */
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
/* free: 37-39 */
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)3 << 41)  /* Memory alignment requirement at bits 41-42 */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Aligned16   ((u64)3 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
/* free: 43-44 */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */
#define ShadowStack ((u64)1 << 57)  /* Instruction affects Shadow Stacks. */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
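
/*
 * The X<n> macros above simply repeat their argument n times; the opcode
 * tables later in this file use them to fill runs of identical entries
 * (e.g. X16(x) emits sixteen copies of x).
 */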

struct opcode {
        u64 flags;
        u8 intercept;
        u8 pad[7];
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                const struct mode_dual *mdual;
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

struct mode_dual {
        struct opcode mode32;
        struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};

enum rex_bits {
        REX_B = 1,
        REX_X = 2,
        REX_R = 4,
        REX_W = 8,
};
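
/*
 * The four REX payload bits mirror the hardware encoding: B extends the
 * ModRM r/m (or SIB base) field, X the SIB index, R the ModRM reg field,
 * and W selects a 64-bit operand size.
 */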

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned long dirty = ctxt->regs_dirty;
        unsigned reg;

        for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
                     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x...) x
#else
#define ON64(x...)
#endif

#define EM_ASM_START(op) \
static int em_##op(struct x86_emulate_ctxt *ctxt) \
{ \
        unsigned long flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; \
        int bytes = 1, ok = 1; \
        if (!(ctxt->d & ByteOp)) \
                bytes = ctxt->dst.bytes; \
        switch (bytes) {

#define __EM_ASM(str) \
        asm("push %[flags]; popf \n\t" \
            "10: " str \
            "pushf; pop %[flags] \n\t" \
            "11: \n\t" \
            : "+a" (ctxt->dst.val), \
              "+d" (ctxt->src.val), \
              [flags] "+D" (flags), \
              "+S" (ok) \
            : "c" (ctxt->src2.val))

#define __EM_ASM_1(op, dst) \
        __EM_ASM(#op " %%" #dst " \n\t")

#define __EM_ASM_1_EX(op, dst) \
        __EM_ASM(#op " %%" #dst " \n\t" \
                 _ASM_EXTABLE_TYPE_REG(10b, 11f, EX_TYPE_ZERO_REG, %%esi))

#define __EM_ASM_2(op, dst, src) \
        __EM_ASM(#op " %%" #src ", %%" #dst " \n\t")

#define __EM_ASM_3(op, dst, src, src2) \
        __EM_ASM(#op " %%" #src2 ", %%" #src ", %%" #dst " \n\t")

#define EM_ASM_END \
        } \
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); \
        return !ok ? emulate_de(ctxt) : X86EMUL_CONTINUE; \
}
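
/*
 * Taken together, the macros above generate em_<op>() helpers that execute
 * the real host instruction on the guest's operands: the guest's arithmetic
 * flags are installed via push/popf, the size-suffixed instruction (e.g.
 * addb/addw/addl/addq for EM_ASM_2(add)) runs on dst.val/src.val/src2.val,
 * and the resulting flags are captured with pushf and merged back into
 * ctxt->eflags by EM_ASM_END. The _EX variants additionally attach an
 * exception-table entry that zeroes "ok" (kept in %esi) if the instruction
 * faults, e.g. a #DE raised by div, so EM_ASM_END can report #DE instead.
 */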

/* 1-operand, using "a" (dst) */
#define EM_ASM_1(op) \
        EM_ASM_START(op) \
        case 1: __EM_ASM_1(op##b, al); break; \
        case 2: __EM_ASM_1(op##w, ax); break; \
        case 4: __EM_ASM_1(op##l, eax); break; \
        ON64(case 8: __EM_ASM_1(op##q, rax); break;) \
        EM_ASM_END

/* 1-operand, using "c" (src2) */
#define EM_ASM_1SRC2(op, name) \
        EM_ASM_START(name) \
        case 1: __EM_ASM_1(op##b, cl); break; \
        case 2: __EM_ASM_1(op##w, cx); break; \
        case 4: __EM_ASM_1(op##l, ecx); break; \
        ON64(case 8: __EM_ASM_1(op##q, rcx); break;) \
        EM_ASM_END

/* 1-operand, using "c" (src2) with exception */
#define EM_ASM_1SRC2EX(op, name) \
        EM_ASM_START(name) \
        case 1: __EM_ASM_1_EX(op##b, cl); break; \
        case 2: __EM_ASM_1_EX(op##w, cx); break; \
        case 4: __EM_ASM_1_EX(op##l, ecx); break; \
        ON64(case 8: __EM_ASM_1_EX(op##q, rcx); break;) \
        EM_ASM_END

/* 2-operand, using "a" (dst), "d" (src) */
#define EM_ASM_2(op) \
        EM_ASM_START(op) \
        case 1: __EM_ASM_2(op##b, al, dl); break; \
        case 2: __EM_ASM_2(op##w, ax, dx); break; \
        case 4: __EM_ASM_2(op##l, eax, edx); break; \
        ON64(case 8: __EM_ASM_2(op##q, rax, rdx); break;) \
        EM_ASM_END

/* 2-operand, reversed */
#define EM_ASM_2R(op, name) \
        EM_ASM_START(name) \
        case 1: __EM_ASM_2(op##b, dl, al); break; \
        case 2: __EM_ASM_2(op##w, dx, ax); break; \
        case 4: __EM_ASM_2(op##l, edx, eax); break; \
        ON64(case 8: __EM_ASM_2(op##q, rdx, rax); break;) \
        EM_ASM_END

/* 2-operand, word only (no byte op) */
#define EM_ASM_2W(op) \
        EM_ASM_START(op) \
        case 1: break; \
        case 2: __EM_ASM_2(op##w, ax, dx); break; \
        case 4: __EM_ASM_2(op##l, eax, edx); break; \
        ON64(case 8: __EM_ASM_2(op##q, rax, rdx); break;) \
        EM_ASM_END

/* 2-operand, using "a" (dst) and CL (src2) */
#define EM_ASM_2CL(op) \
        EM_ASM_START(op) \
        case 1: __EM_ASM_2(op##b, al, cl); break; \
        case 2: __EM_ASM_2(op##w, ax, cl); break; \
        case 4: __EM_ASM_2(op##l, eax, cl); break; \
        ON64(case 8: __EM_ASM_2(op##q, rax, cl); break;) \
        EM_ASM_END

/* 3-operand, using "a" (dst), "d" (src) and CL (src2) */
#define EM_ASM_3WCL(op) \
        EM_ASM_START(op) \
        case 1: break; \
        case 2: __EM_ASM_3(op##w, ax, dx, cl); break; \
        case 4: __EM_ASM_3(op##l, eax, edx, cl); break; \
        ON64(case 8: __EM_ASM_3(op##q, rax, rdx, cl); break;) \
        EM_ASM_END

static int em_salc(struct x86_emulate_ctxt *ctxt)
{
        /*
         * Set AL to 0xFF if CF is set, or to 0x00 when it is clear.
         */
        ctxt->dst.val = 0xFF * !!(ctxt->eflags & X86_EFLAGS_CF);
        return X86EMUL_CONTINUE;
}

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Using asm goto would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
        int _fault = 0; \
 \
        asm volatile("1:" insn "\n" \
                     "2:\n" \
                     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
                     : [_fault] "+r"(_fault) inoutclob ); \
 \
        _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
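
/*
 * asm_safe() wraps a single instruction in an exception-table entry so a
 * fault surfaces as X86EMUL_UNHANDLEABLE instead of an oops; a caller
 * could, for example, do "rc = asm_safe("fwait");" (illustrative usage).
 */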

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .src_type   = ctxt->src.type,
                .dst_type   = ctxt->dst.type,
                .ad_bytes   = ctxt->ad_bytes,
                .rip        = ctxt->eip,
                .next_rip   = ctxt->_eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
        switch (bytes) {
        case 1:
                *(u8 *)reg = (u8)val;
                break;
        case 2:
                *(u16 *)reg = (u16)val;
                break;
        case 4:
                *reg = (u32)val;
                break;  /* 64b: zero-extend */
        case 8:
                *reg = val;
                break;
        }
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong *preg = reg_rmw(ctxt, reg);

        assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
                return X86EMUL_UNHANDLEABLE;

        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
                                                struct x86_emulate_ctxt *ctxt,
                                                unsigned int flags)
{
        return !ctxt->ops->is_canonical_addr(ctxt, la, flags);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        u64 alignment = ctxt->d & AlignMask;

        if (likely(size < 16))
                return 1;

        switch (alignment) {
        case Unaligned:
                return 1;
        case Aligned16:
                return 16;
        case Aligned:
        default:
                return size;
        }
}

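/*
 * __linearize() turns a seg:ea pair into a linear address and performs the
 * architectural access checks: in 64-bit mode the address must be canonical;
 * otherwise the segment must be usable, writable/readable as the access
 * demands, and the offset must honour the (possibly expand-down) limit.
 * *max_size reports how many bytes past the address remain valid.
 */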
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       enum x86emul_mode mode, ulong *linear,
                                       unsigned int flags)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        u8  va_bits;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
                va_bits = ctxt_virt_addr_bits(ctxt);
                if (!__is_canonical_address(la, va_bits))
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
                    (flags & X86EMUL_F_WRITE))
                        goto bad;
                /* unreadable code segment */
                if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                if (lim == 0xffffffff)
                        *max_size = ~0u;
                else {
                        *max_size = (u64)lim + 1 - addr.ea;
                        if (size > *max_size)
                                goto bad;
                }
                break;
        }
        if (la & (insn_alignment(ctxt, size) - 1))
                return emulate_gp(ctxt, 0);
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;
        return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
                           write ? X86EMUL_F_WRITE : 0);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
                         X86EMUL_F_FETCH);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
        u64 efer;
        struct desc_struct cs;
        u16 selector;
        u32 base3;

        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

        if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
                /* Real mode. cpu must not have long mode active */
                if (efer & EFER_LMA)
                        return X86EMUL_UNHANDLEABLE;
                ctxt->mode = X86EMUL_MODE_REAL;
                return X86EMUL_CONTINUE;
        }

        if (ctxt->eflags & X86_EFLAGS_VM) {
                /* Protected/VM86 mode. cpu must not have long mode active */
                if (efer & EFER_LMA)
                        return X86EMUL_UNHANDLEABLE;
                ctxt->mode = X86EMUL_MODE_VM86;
                return X86EMUL_CONTINUE;
        }

        if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
                return X86EMUL_UNHANDLEABLE;

        if (efer & EFER_LMA) {
                if (cs.l) {
                        /* Proper long mode */
                        ctxt->mode = X86EMUL_MODE_PROT64;
                } else if (cs.d) {
                        /* 32 bit compatibility mode */
                        ctxt->mode = X86EMUL_MODE_PROT32;
                } else {
                        ctxt->mode = X86EMUL_MODE_PROT16;
                }
        } else {
                /* Legacy 32 bit / 16 bit mode */
                ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        }

        return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        int rc = emulator_recalc_and_set_mode(ctxt);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        return assign_eip(ctxt, dst);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                              void *data, unsigned size)
{
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
                               ulong linear, void *data,
                               unsigned int size)
{
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
                               struct segmented_address addr,
                               void *data,
                               unsigned int size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
                         X86EMUL_F_FETCH);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

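        /* cur_size is at most 15, so "15UL ^ cur_size" equals 15 - cur_size. */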
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages, and one page's worth
         * has already been loaded at the beginning of x86_decode_insn.  So,
         * if we still cannot fetch enough bytes here, we must have hit the
         * 15-byte instruction-length limit.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({      _type _x; \
 \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += sizeof(_type); \
        memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
        ctxt->fetch.ptr += sizeof(_type); \
        _x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
        rc = do_insn_fetch_bytes(_ctxt, _size); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += (_size); \
        memcpy(_arr, ctxt->fetch.ptr, _size); \
        ctxt->fetch.ptr += (_size); \
})
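
/*
 * Note that both fetch macros rely on their caller declaring "int rc" and
 * providing a "done:" label: on a failed fetch they jump there rather than
 * returning, which is why the decode helpers below end with "done: return rc".
 */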

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * If byteop is set and there is no REX prefix, registers 4-7 decode to the
 * legacy high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == REX_NONE) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

EM_ASM_2(add);
EM_ASM_2(or);
EM_ASM_2(adc);
EM_ASM_2(sbb);
EM_ASM_2(and);
EM_ASM_2(sub);
EM_ASM_2(xor);
EM_ASM_2(cmp);
EM_ASM_2(test);
EM_ASM_2(xadd);

EM_ASM_1SRC2(mul, mul_ex);
EM_ASM_1SRC2(imul, imul_ex);
EM_ASM_1SRC2EX(div, div_ex);
EM_ASM_1SRC2EX(idiv, idiv_ex);

EM_ASM_3WCL(shld);
EM_ASM_3WCL(shrd);

EM_ASM_2W(imul);

EM_ASM_1(not);
EM_ASM_1(neg);
EM_ASM_1(inc);
EM_ASM_1(dec);

EM_ASM_2CL(rol);
EM_ASM_2CL(ror);
EM_ASM_2CL(rcl);
EM_ASM_2CL(rcr);
EM_ASM_2CL(shl);
EM_ASM_2CL(shr);
EM_ASM_2CL(sar);

EM_ASM_2W(bsf);
EM_ASM_2W(bsr);
EM_ASM_2W(bt);
EM_ASM_2W(bts);
EM_ASM_2W(btr);
EM_ASM_2W(btc);

EM_ASM_2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return em_bsf(ctxt);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
        /* If src is zero, do not writeback, but update flags */
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return em_bsr(ctxt);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
        return __emulate_cc(flags, condition & 0xf);
}

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
        op->orig_val = op->val;
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fninit");
        kvm_fpu_put();
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstcw %0": "+m"(fcw));
        kvm_fpu_put();

        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstsw %0": "+m"(fsw));
        kvm_fpu_put();

        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void __decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                      struct operand *op, int reg)
{
        if ((ctxt->d & Avx) && ctxt->op_bytes == 32) {
                op->type = OP_YMM;
                op->bytes = 32;
                op->addr.xmm = reg;
                kvm_read_avx_reg(reg, &op->vec_val2);
                return;
        }
        if (ctxt->d & (Avx|Sse)) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                kvm_read_sse_reg(reg, &op->vec_val);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
        fetch_register_operand(op);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned int reg;

        if (ctxt->d & ModRM)
                reg = ctxt->modrm_reg;
        else
                reg = (ctxt->b & 7) | (ctxt->rex_bits & REX_B ? 8 : 0);

        __decode_register_operand(ctxt, op, reg);
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

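/*
 * ModRM layout reminder for the decode below: bits 7-6 are "mod", bits 5-3
 * "reg" and bits 2-0 "rm"; the REX R/X/B bits supply a fourth, high bit for
 * reg, the SIB index and rm/base respectively.
 */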
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = (ctxt->rex_bits & REX_R ? 8 : 0);
        index_reg = (ctxt->rex_bits & REX_X ? 8 : 0);
        base_reg = (ctxt->rex_bits & REX_B ? 8 : 0);

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                __decode_register_operand(ctxt, op, ctxt->modrm_rm);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                                /* Increment ESP on POP [ESP] */
                                if ((ctxt->d & IncSP) &&
                                    base_reg == VCPU_REGS_RSP)
                                        modrm_ea += ctxt->op_bytes;
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}

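/*
 * For BT/BTS/BTR/BTC with a register bit index and a memory destination,
 * the bit offset may address memory well outside the operand: the effective
 * address is displaced by the signed bit offset divided by 8 (sv >> 3), and
 * only the residual in-word offset is kept in src.val.
 */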
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea = address_mask(ctxt,
                                        ctxt->dst.addr.mem.ea + (sv >> 3));
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        /*
         * If the read gets a cache hit, simply copy the value from the cache.
         * A "hit" here means that there is unused data in the cache, i.e. when
         * re-emulating an instruction to complete a userspace exit, KVM relies
         * on "no decode" to ensure the instruction is re-emulated in the same
         * sequence, so that multiple reads are fulfilled in the correct order.
         */
        if (mc->pos < mc->end)
                goto read_cached;

        if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
                return X86EMUL_UNHANDLEABLE;

        /*
         * Route all reads to the cache.  This allows @dest to be an on-stack
         * variable without triggering use-after-free if KVM needs to exit to
         * userspace to handle an MMIO read (the MMIO fragment will point at
         * the current location in the cache).
         */
        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

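/*
 * For REP IN/INS the helper below batches the port reads: it fills io_read
 * with as many elements as fit in the cache, the current page and the
 * remaining RCX count, then hands them out one iteration at a time (or, for
 * forward string moves, points dst straight at the cached data).
 */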
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        if (ctxt->rep_prefix && (ctxt->d & String) &&
            !(ctxt->eflags & X86_EFLAGS_DF)) {
                ctxt->dst.data = rc->data + rc->pos;
                ctxt->dst.type = OP_MEM_STR;
                ctxt->dst.count = (rc->end - rc->pos) / size;
                rc->pos = rc->end;
        } else {
                memcpy(dest, rc->data + rc->pos, size);
                rc->pos += size;
        }
        return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

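/*
 * Bit 2 of a selector is the table indicator (TI): set means the LDT,
 * clear means the GDT, which is why the lookup below branches on
 * "selector & 1 << 2".
 */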
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 base3 = 0;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof(*dt));
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
                              u16 selector, ulong *desc_addr_p)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
        if (addr >> 32 != 0) {
                u64 efer = 0;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (!(efer & EFER_LMA))
                        addr &= (u32)-1;
        }
#endif

        *desc_addr_p = addr;
        return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc,
                                   ulong *desc_addr_p)
{
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        int rc;
        ulong addr;

        rc = get_descriptor_ptr(ctxt, selector, &addr);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static bool emulator_is_ssp_invalid(struct x86_emulate_ctxt *ctxt, u8 cpl)
{
        const u32 MSR_IA32_X_CET = cpl == 3 ? MSR_IA32_U_CET : MSR_IA32_S_CET;
        u64 efer = 0, cet = 0, ssp = 0;

        if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_CET))
                return false;

        if (ctxt->ops->get_msr(ctxt, MSR_EFER, &efer))
                return true;

        /* SSP is guaranteed to be valid if the vCPU was already in 32-bit mode. */
        if (!(efer & EFER_LMA))
                return false;

        if (ctxt->ops->get_msr(ctxt, MSR_IA32_X_CET, &cet))
                return true;

        if (!(cet & CET_SHSTK_EN))
                return false;

        if (ctxt->ops->get_msr(ctxt, MSR_KVM_INTERNAL_GUEST_SSP, &ssp))
                return true;

        /*
         * On transfer from 64-bit mode to compatibility mode, SSP[63:32] must
         * be 0, i.e. SSP must be a 32-bit value outside of 64-bit mode.
         */
        return ssp >> 32;
}

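/*
 * __load_segment_descriptor() implements the architectural segment-load
 * checks: real mode and VM86 just rewrite the cached base, NULL selectors
 * are accepted only where the SDM allows them, and protected-mode loads
 * validate type/DPL/RPL per register, set the accessed bit, and mark TR
 * busy via cmpxchg before committing the new selector and descriptor.
 */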
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
                                     struct desc_struct *desc)
{
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        ulong desc_addr;
        int ret;
        u16 dummy;
        u32 base3 = 0;

        memset(&seg_desc, 0, sizeof(seg_desc));

        if (ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor (keep limit etc. for
                 * unreal mode) */
                ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
                set_desc_base(&seg_desc, selector << 4);
                goto load;
        } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
                /* VM86 needs a clean new segment descriptor */
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                seg_desc.dpl = 3;
                goto load;
        }

        rpl = selector & 3;

        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;

        /* NULL selector is not valid for TR, CS and (except for long mode) SS */
        if (null_selector) {
                if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
                        goto exception;

                if (seg == VCPU_SREG_SS) {
                        if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
                                goto exception;

                        /*
                         * ctxt->ops->set_segment expects the CPL to be in
                         * SS.DPL, so fake an expand-up 32-bit data segment.
                         */
                        seg_desc.type = 3;
                        seg_desc.p = 1;
                        seg_desc.s = 1;
                        seg_desc.dpl = cpl;
                        seg_desc.d = 1;
                        seg_desc.g = 1;
                }

                /* Skip all following checks */
                goto load;
        }

        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
                                                           GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return X86EMUL_UNHANDLEABLE;
                goto exception;
        }

        dpl = seg_desc.dpl;

        switch (seg) {
        case VCPU_SREG_SS:
                /*
                 * segment is not a writable data segment or segment
                 * selector's RPL != CPL or DPL != CPL
                 */
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                /*
                 * KVM uses "none" when loading CS as part of emulating Real
                 * Mode exceptions and IRET (handled above).  In all other
                 * cases, loading CS without a control transfer is a KVM bug.
                 */
                if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
                        goto exception;

                if (!(seg_desc.type & 8))
                        goto exception;

                if (transfer == X86_TRANSFER_RET) {
                        /* RET can never return to an inner privilege level. */
                        if (rpl < cpl)
                                goto exception;
                        /* Outer-privilege level return is not implemented */
                        if (rpl > cpl)
                                return X86EMUL_UNHANDLEABLE;
                }
                if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
                        if (seg_desc.type & 4) {
                                /* conforming */
                                if (dpl > rpl)
                                        goto exception;
                        } else {
                                /* nonconforming */
                                if (dpl != rpl)
                                        goto exception;
                        }
                } else { /* X86_TRANSFER_CALL_JMP */
                        if (seg_desc.type & 4) {
                                /* conforming */
                                if (dpl > cpl)
                                        goto exception;
                        } else {
                                /* nonconforming */
                                if (rpl > cpl || dpl != cpl)
                                        goto exception;
                        }
                }
                /* in long-mode d/b must be clear if l is set */
                if (seg_desc.d && seg_desc.l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                goto exception;
                }
                if (!seg_desc.l && emulator_is_ssp_invalid(ctxt, cpl)) {
                        err_code = 0;
                        goto exception;
                }

                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and ((RPL > DPL) or (CPL > DPL)))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl || cpl > dpl)))
                        goto exception;
                break;
        }

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                if (!(seg_desc.type & 1)) {
                        seg_desc.type |= 1;
                        ret = write_segment_descriptor(ctxt, selector,
                                                       &seg_desc);
                        if (ret != X86EMUL_CONTINUE)
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
                ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
                                                 ((u64)base3 << 32), ctxt,
                                                 X86EMUL_F_DT_LOAD))
                        return emulate_gp(ctxt, err_code);
        }

        if (seg == VCPU_SREG_TR) {
                old_desc = seg_desc;
                seg_desc.type |= 2; /* busy */
                ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
                                                  sizeof(seg_desc), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        if (desc)
                *desc = seg_desc;
        return X86EMUL_CONTINUE;
exception:
        return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
{
        u8 cpl = ctxt->ops->cpl(ctxt);

        /*
         * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
         * they can load it at CPL<3 (Intel's manual says only LSS can,
         * but it's wrong).
         *
         * However, the Intel manual says that putting IST=1/DPL=3 in
         * an interrupt gate will result in SS=3 (the AMD manual instead
         * says it doesn't), so allow SS=3 in __load_segment_descriptor
         * and only forbid it here.
         */
        if (seg == VCPU_SREG_SS && selector == 3 &&
            ctxt->mode == X86EMUL_MODE_PROT64)
                return emulate_exception(ctxt, GP_VECTOR, 0, true);

        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
}
1766
1767 static void write_register_operand(struct operand *op)
1768 {
1769 return assign_register(op->addr.reg, op->val, op->bytes);
1770 }
1771
1772 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1773 {
1774 switch (op->type) {
1775 case OP_REG:
1776 write_register_operand(op);
1777 break;
1778 case OP_MEM:
1779 if (ctxt->lock_prefix)
1780 return segmented_cmpxchg(ctxt,
1781 op->addr.mem,
1782 &op->orig_val,
1783 &op->val,
1784 op->bytes);
1785 else
1786 return segmented_write(ctxt,
1787 op->addr.mem,
1788 &op->val,
1789 op->bytes);
1790 case OP_MEM_STR:
1791 return segmented_write(ctxt,
1792 op->addr.mem,
1793 op->data,
1794 op->bytes * op->count);
1795 case OP_XMM:
1796 if (!(ctxt->d & Avx)) {
1797 kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1798 break;
1799 }
1800 /* full YMM write but with high bytes cleared */
1801 memset(op->valptr + 16, 0, 16);
1802 fallthrough;
1803 case OP_YMM:
1804 kvm_write_avx_reg(op->addr.xmm, &op->vec_val2);
1805 break;
1806 case OP_MM:
1807 kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1808 break;
1809 case OP_NONE:
1810 /* no writeback */
1811 break;
1812 default:
1813 break;
1814 }
1815 return X86EMUL_CONTINUE;
1816 }
1817
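/*
 * Push 'len' bytes onto the guest stack: RSP is decremented first and
 * masked to the current stack-address width, then the data is written
 * through the segmented-write path at SS:RSP.
 */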
1818 static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
1819 {
1820 struct segmented_address addr;
1821
1822 rsp_increment(ctxt, -len);
1823 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1824 addr.seg = VCPU_SREG_SS;
1825
1826 return segmented_write(ctxt, addr, data, len);
1827 }
1828
1829 static int em_push(struct x86_emulate_ctxt *ctxt)
1830 {
1831 /* Disable writeback. */
1832 ctxt->dst.type = OP_NONE;
1833 return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1834 }
1835
1836 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1837 void *dest, int len)
1838 {
1839 int rc;
1840 struct segmented_address addr;
1841
1842 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1843 addr.seg = VCPU_SREG_SS;
1844 rc = segmented_read(ctxt, addr, dest, len);
1845 if (rc != X86EMUL_CONTINUE)
1846 return rc;
1847
1848 rsp_increment(ctxt, len);
1849 return rc;
1850 }
1851
1852 static int em_pop(struct x86_emulate_ctxt *ctxt)
1853 {
1854 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1855 }
1856
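/*
 * POPF may only update a mode- and privilege-dependent subset of EFLAGS:
 * IOPL is writable only at CPL 0, IF only when CPL <= IOPL, and in VM86
 * mode a POPF with IOPL < 3 faults with #GP. Bits outside change_mask
 * are preserved from the current EFLAGS.
 */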
1857 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1858 void *dest, int len)
1859 {
1860 int rc;
1861 unsigned long val = 0;
1862 unsigned long change_mask;
1863 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1864 int cpl = ctxt->ops->cpl(ctxt);
1865
1866 rc = emulate_pop(ctxt, &val, len);
1867 if (rc != X86EMUL_CONTINUE)
1868 return rc;
1869
1870 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1871 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1872 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1873 X86_EFLAGS_AC | X86_EFLAGS_ID;
1874
1875 switch(ctxt->mode) {
1876 case X86EMUL_MODE_PROT64:
1877 case X86EMUL_MODE_PROT32:
1878 case X86EMUL_MODE_PROT16:
1879 if (cpl == 0)
1880 change_mask |= X86_EFLAGS_IOPL;
1881 if (cpl <= iopl)
1882 change_mask |= X86_EFLAGS_IF;
1883 break;
1884 case X86EMUL_MODE_VM86:
1885 if (iopl < 3)
1886 return emulate_gp(ctxt, 0);
1887 change_mask |= X86_EFLAGS_IF;
1888 break;
1889 default: /* real mode */
1890 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1891 break;
1892 }
1893
1894 *(unsigned long *)dest =
1895 (ctxt->eflags & ~change_mask) | (val & change_mask);
1896
1897 return rc;
1898 }
1899
1900 static int em_popf(struct x86_emulate_ctxt *ctxt)
1901 {
1902 ctxt->dst.type = OP_REG;
1903 ctxt->dst.addr.reg = &ctxt->eflags;
1904 ctxt->dst.bytes = ctxt->op_bytes;
1905 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1906 }
1907
1908 static int em_enter(struct x86_emulate_ctxt *ctxt)
1909 {
1910 int rc;
1911 unsigned frame_size = ctxt->src.val;
1912 unsigned nesting_level = ctxt->src2.val & 31;
1913 ulong rbp;
1914
1915 if (nesting_level)
1916 return X86EMUL_UNHANDLEABLE;
1917
1918 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1919 rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
1920 if (rc != X86EMUL_CONTINUE)
1921 return rc;
1922 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1923 stack_mask(ctxt));
1924 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1925 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1926 stack_mask(ctxt));
1927 return X86EMUL_CONTINUE;
1928 }
1929
1930 static int em_leave(struct x86_emulate_ctxt *ctxt)
1931 {
1932 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1933 stack_mask(ctxt));
1934 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1935 }
1936
1937 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1938 {
1939 int seg = ctxt->src2.val;
1940
1941 ctxt->src.val = get_segment_selector(ctxt, seg);
1942 if (ctxt->op_bytes == 4) {
1943 rsp_increment(ctxt, -2);
1944 ctxt->op_bytes = 2;
1945 }
1946
1947 return em_push(ctxt);
1948 }
1949
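/*
 * A segment-register pop reads only a 16-bit selector; with a 32- or
 * 64-bit operand size the remainder of the stack slot is skipped by
 * adjusting RSP, not read.
 */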
1950 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1951 {
1952 int seg = ctxt->src2.val;
1953 unsigned long selector = 0;
1954 int rc;
1955
1956 rc = emulate_pop(ctxt, &selector, 2);
1957 if (rc != X86EMUL_CONTINUE)
1958 return rc;
1959
1960 if (seg == VCPU_SREG_SS)
1961 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1962 if (ctxt->op_bytes > 2)
1963 rsp_increment(ctxt, ctxt->op_bytes - 2);
1964
1965 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1966 return rc;
1967 }
1968
1969 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1970 {
1971 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1972 int rc = X86EMUL_CONTINUE;
1973 int reg = VCPU_REGS_RAX;
1974
1975 while (reg <= VCPU_REGS_RDI) {
1976 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1977 						 reg_read(ctxt, reg);
1978
1979 rc = em_push(ctxt);
1980 if (rc != X86EMUL_CONTINUE)
1981 return rc;
1982
1983 ++reg;
1984 }
1985
1986 return rc;
1987 }
1988
1989 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1990 {
1991 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1992 return em_push(ctxt);
1993 }
1994
1995 static int em_popa(struct x86_emulate_ctxt *ctxt)
1996 {
1997 int rc = X86EMUL_CONTINUE;
1998 int reg = VCPU_REGS_RDI;
1999 u32 val = 0;
2000
2001 while (reg >= VCPU_REGS_RAX) {
2002 if (reg == VCPU_REGS_RSP) {
2003 rsp_increment(ctxt, ctxt->op_bytes);
2004 --reg;
2005 }
2006
2007 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2008 if (rc != X86EMUL_CONTINUE)
2009 break;
2010 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2011 --reg;
2012 }
2013 return rc;
2014 }
2015
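/*
 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then load
 * CS:IP from the IVT, where each vector entry is four bytes (IP at
 * offset 0, CS at offset 2 within the entry at idt.address + irq * 4).
 */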
2016 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2017 {
2018 const struct x86_emulate_ops *ops = ctxt->ops;
2019 int rc;
2020 struct desc_ptr dt;
2021 gva_t cs_addr;
2022 gva_t eip_addr;
2023 u16 cs, eip;
2024
2025 /* TODO: Add limit checks */
2026 ctxt->src.val = ctxt->eflags;
2027 rc = em_push(ctxt);
2028 if (rc != X86EMUL_CONTINUE)
2029 return rc;
2030
2031 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2032
2033 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2034 rc = em_push(ctxt);
2035 if (rc != X86EMUL_CONTINUE)
2036 return rc;
2037
2038 ctxt->src.val = ctxt->_eip;
2039 rc = em_push(ctxt);
2040 if (rc != X86EMUL_CONTINUE)
2041 return rc;
2042
2043 ops->get_idt(ctxt, &dt);
2044
2045 eip_addr = dt.address + (irq << 2);
2046 cs_addr = dt.address + (irq << 2) + 2;
2047
2048 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2049 if (rc != X86EMUL_CONTINUE)
2050 return rc;
2051
2052 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2053 if (rc != X86EMUL_CONTINUE)
2054 return rc;
2055
2056 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2057 if (rc != X86EMUL_CONTINUE)
2058 return rc;
2059
2060 ctxt->_eip = eip;
2061
2062 return rc;
2063 }
2064
2065 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2066 {
2067 int rc;
2068
2069 invalidate_registers(ctxt);
2070 rc = __emulate_int_real(ctxt, irq);
2071 if (rc == X86EMUL_CONTINUE)
2072 writeback_registers(ctxt);
2073 return rc;
2074 }
2075
2076 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2077 {
2078 switch(ctxt->mode) {
2079 case X86EMUL_MODE_REAL:
2080 return __emulate_int_real(ctxt, irq);
2081 case X86EMUL_MODE_VM86:
2082 case X86EMUL_MODE_PROT16:
2083 case X86EMUL_MODE_PROT32:
2084 case X86EMUL_MODE_PROT64:
2085 default:
2086 		/* Protected mode interrupts are not implemented yet */
2087 return X86EMUL_UNHANDLEABLE;
2088 }
2089 }
2090
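/*
 * Real-mode IRET pops IP, CS and FLAGS in that order; with a 16-bit
 * operand size only the low word of EFLAGS is replaced, and reserved
 * bits are forced back to their fixed values afterwards.
 */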
2091 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2092 {
2093 int rc = X86EMUL_CONTINUE;
2094 unsigned long temp_eip = 0;
2095 unsigned long temp_eflags = 0;
2096 unsigned long cs = 0;
2097 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2098 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2099 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2100 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2101 X86_EFLAGS_AC | X86_EFLAGS_ID |
2102 X86_EFLAGS_FIXED;
2103 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2104 X86_EFLAGS_VIP;
2105
2106 /* TODO: Add stack limit check */
2107
2108 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2109
2110 if (rc != X86EMUL_CONTINUE)
2111 return rc;
2112
2113 if (temp_eip & ~0xffff)
2114 return emulate_gp(ctxt, 0);
2115
2116 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2117
2118 if (rc != X86EMUL_CONTINUE)
2119 return rc;
2120
2121 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2122
2123 if (rc != X86EMUL_CONTINUE)
2124 return rc;
2125
2126 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2127
2128 if (rc != X86EMUL_CONTINUE)
2129 return rc;
2130
2131 ctxt->_eip = temp_eip;
2132
2133 if (ctxt->op_bytes == 4)
2134 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2135 else if (ctxt->op_bytes == 2) {
2136 ctxt->eflags &= ~0xffff;
2137 ctxt->eflags |= temp_eflags;
2138 }
2139
2140 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2141 ctxt->eflags |= X86_EFLAGS_FIXED;
2142 ctxt->ops->set_nmi_mask(ctxt, false);
2143
2144 return rc;
2145 }
2146
2147 static int em_iret(struct x86_emulate_ctxt *ctxt)
2148 {
2149 switch(ctxt->mode) {
2150 case X86EMUL_MODE_REAL:
2151 return emulate_iret_real(ctxt);
2152 case X86EMUL_MODE_VM86:
2153 case X86EMUL_MODE_PROT16:
2154 case X86EMUL_MODE_PROT32:
2155 case X86EMUL_MODE_PROT64:
2156 default:
2157 		/* IRET from protected mode is not implemented yet */
2158 return X86EMUL_UNHANDLEABLE;
2159 }
2160 }
2161
2162 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2163 {
2164 int rc;
2165 unsigned short sel;
2166 struct desc_struct new_desc;
2167 u8 cpl = ctxt->ops->cpl(ctxt);
2168
2169 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2170
2171 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2172 X86_TRANSFER_CALL_JMP,
2173 &new_desc);
2174 if (rc != X86EMUL_CONTINUE)
2175 return rc;
2176
2177 rc = assign_eip_far(ctxt, ctxt->src.val);
2178 /* Error handling is not implemented. */
2179 if (rc != X86EMUL_CONTINUE)
2180 return X86EMUL_UNHANDLEABLE;
2181
2182 return rc;
2183 }
2184
2185 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2186 {
2187 return assign_eip_near(ctxt, ctxt->src.val);
2188 }
2189
2190 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2191 {
2192 int rc;
2193 long int old_eip;
2194
2195 old_eip = ctxt->_eip;
2196 rc = assign_eip_near(ctxt, ctxt->src.val);
2197 if (rc != X86EMUL_CONTINUE)
2198 return rc;
2199 ctxt->src.val = old_eip;
2200 rc = em_push(ctxt);
2201 return rc;
2202 }
2203
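/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match,
 * set ZF and store ECX:EBX, otherwise clear ZF and load the old value
 * into EDX:EAX. The 16-byte CMPXCHG16B form is not handled here.
 */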
2204 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2205 {
2206 u64 old = ctxt->dst.orig_val64;
2207
2208 if (ctxt->dst.bytes == 16)
2209 return X86EMUL_UNHANDLEABLE;
2210
2211 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2212 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2213 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2214 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2215 ctxt->eflags &= ~X86_EFLAGS_ZF;
2216 } else {
2217 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2218 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2219
2220 ctxt->eflags |= X86_EFLAGS_ZF;
2221 }
2222 return X86EMUL_CONTINUE;
2223 }
2224
2225 static int em_ret(struct x86_emulate_ctxt *ctxt)
2226 {
2227 int rc;
2228 unsigned long eip = 0;
2229
2230 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2231 if (rc != X86EMUL_CONTINUE)
2232 return rc;
2233
2234 return assign_eip_near(ctxt, eip);
2235 }
2236
2237 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2238 {
2239 int rc;
2240 unsigned long eip = 0;
2241 unsigned long cs = 0;
2242 int cpl = ctxt->ops->cpl(ctxt);
2243 struct desc_struct new_desc;
2244
2245 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2246 if (rc != X86EMUL_CONTINUE)
2247 return rc;
2248 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2249 if (rc != X86EMUL_CONTINUE)
2250 return rc;
2251 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2252 X86_TRANSFER_RET,
2253 &new_desc);
2254 if (rc != X86EMUL_CONTINUE)
2255 return rc;
2256 rc = assign_eip_far(ctxt, eip);
2257 /* Error handling is not implemented. */
2258 if (rc != X86EMUL_CONTINUE)
2259 return X86EMUL_UNHANDLEABLE;
2260
2261 return rc;
2262 }
2263
2264 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2265 {
2266 int rc;
2267
2268 rc = em_ret_far(ctxt);
2269 if (rc != X86EMUL_CONTINUE)
2270 return rc;
2271 rsp_increment(ctxt, ctxt->src.val);
2272 return X86EMUL_CONTINUE;
2273 }
2274
2275 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2276 {
2277 /* Save real source value, then compare EAX against destination. */
2278 ctxt->dst.orig_val = ctxt->dst.val;
2279 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2280 ctxt->src.orig_val = ctxt->src.val;
2281 ctxt->src.val = ctxt->dst.orig_val;
2282 em_cmp(ctxt);
2283
2284 if (ctxt->eflags & X86_EFLAGS_ZF) {
2285 /* Success: write back to memory; no update of EAX */
2286 ctxt->src.type = OP_NONE;
2287 ctxt->dst.val = ctxt->src.orig_val;
2288 } else {
2289 /* Failure: write the value we saw to EAX. */
2290 ctxt->src.type = OP_REG;
2291 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2292 ctxt->src.val = ctxt->dst.orig_val;
2293 /* Create write-cycle to dest by writing the same value */
2294 ctxt->dst.val = ctxt->dst.orig_val;
2295 }
2296 return X86EMUL_CONTINUE;
2297 }
2298
2299 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2300 {
2301 int seg = ctxt->src2.val;
2302 unsigned short sel;
2303 int rc;
2304
2305 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2306
2307 rc = load_segment_descriptor(ctxt, sel, seg);
2308 if (rc != X86EMUL_CONTINUE)
2309 return rc;
2310
2311 ctxt->dst.val = ctxt->src.val;
2312 return rc;
2313 }
2314
2315 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2316 {
2317 if (!ctxt->ops->is_smm(ctxt))
2318 return emulate_ud(ctxt);
2319
2320 if (ctxt->ops->leave_smm(ctxt))
2321 ctxt->ops->triple_fault(ctxt);
2322
2323 return emulator_recalc_and_set_mode(ctxt);
2324 }
2325
2326 static void
2327 setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2328 {
2329 cs->l = 0; /* will be adjusted later */
2330 set_desc_base(cs, 0); /* flat segment */
2331 cs->g = 1; /* 4kb granularity */
2332 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2333 cs->type = 0x0b; /* Read, Execute, Accessed */
2334 cs->s = 1;
2335 cs->dpl = 0; /* will be adjusted later */
2336 cs->p = 1;
2337 cs->d = 1;
2338 cs->avl = 0;
2339
2340 set_desc_base(ss, 0); /* flat segment */
2341 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2342 ss->g = 1; /* 4kb granularity */
2343 ss->s = 1;
2344 ss->type = 0x03; /* Read/Write, Accessed */
2345 	ss->d = 1;		/* 32-bit stack segment */
2346 ss->dpl = 0;
2347 ss->p = 1;
2348 ss->l = 0;
2349 ss->avl = 0;
2350 }
2351
2352 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2353 {
2354 const struct x86_emulate_ops *ops = ctxt->ops;
2355 struct desc_struct cs, ss;
2356 u64 msr_data;
2357 u16 cs_sel, ss_sel;
2358 u64 efer = 0;
2359
2360 /* syscall is not available in real mode */
2361 if (ctxt->mode == X86EMUL_MODE_REAL ||
2362 ctxt->mode == X86EMUL_MODE_VM86)
2363 return emulate_ud(ctxt);
2364
2365 /*
2366 * Intel compatible CPUs only support SYSCALL in 64-bit mode, whereas
2367 * AMD allows SYSCALL in any flavor of protected mode. Note, it's
2368 * infeasible to emulate Intel behavior when running on AMD hardware,
2369 * as SYSCALL won't fault in the "wrong" mode, i.e. there is no #UD
2370 * for KVM to trap-and-emulate, unlike emulating AMD on Intel.
2371 */
2372 if (ctxt->mode != X86EMUL_MODE_PROT64 &&
2373 ctxt->ops->guest_cpuid_is_intel_compatible(ctxt))
2374 return emulate_ud(ctxt);
2375
2376 ops->get_msr(ctxt, MSR_EFER, &efer);
2377 if (!(efer & EFER_SCE))
2378 return emulate_ud(ctxt);
2379
2380 setup_syscalls_segments(&cs, &ss);
2381 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2382 msr_data >>= 32;
2383 cs_sel = (u16)(msr_data & 0xfffc);
2384 ss_sel = (u16)(msr_data + 8);
2385
2386 if (efer & EFER_LMA) {
2387 cs.d = 0;
2388 cs.l = 1;
2389 }
2390 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2391 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2392
2393 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2394 if (efer & EFER_LMA) {
2395 #ifdef CONFIG_X86_64
2396 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2397
2398 ops->get_msr(ctxt,
2399 ctxt->mode == X86EMUL_MODE_PROT64 ?
2400 MSR_LSTAR : MSR_CSTAR, &msr_data);
2401 ctxt->_eip = msr_data;
2402
2403 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2404 ctxt->eflags &= ~msr_data;
2405 ctxt->eflags |= X86_EFLAGS_FIXED;
2406 #endif
2407 } else {
2408 /* legacy mode */
2409 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2410 ctxt->_eip = (u32)msr_data;
2411
2412 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2413 }
2414
2415 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2416 return X86EMUL_CONTINUE;
2417 }
2418
2419 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2420 {
2421 const struct x86_emulate_ops *ops = ctxt->ops;
2422 struct desc_struct cs, ss;
2423 u64 msr_data;
2424 u16 cs_sel, ss_sel;
2425 u64 efer = 0;
2426
2427 ops->get_msr(ctxt, MSR_EFER, &efer);
2428 /* inject #GP if in real mode */
2429 if (ctxt->mode == X86EMUL_MODE_REAL)
2430 return emulate_gp(ctxt, 0);
2431
2432 /*
2433 * Intel's architecture allows SYSENTER in compatibility mode, but AMD
2434 * does not. Note, AMD does allow SYSENTER in legacy protected mode.
2435 */
2436 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) &&
2437 !ctxt->ops->guest_cpuid_is_intel_compatible(ctxt))
2438 return emulate_ud(ctxt);
2439
2440 	/* sysenter/sysexit have not been tested in 64-bit mode. */
2441 if (ctxt->mode == X86EMUL_MODE_PROT64)
2442 return X86EMUL_UNHANDLEABLE;
2443
2444 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2445 if ((msr_data & 0xfffc) == 0x0)
2446 return emulate_gp(ctxt, 0);
2447
2448 setup_syscalls_segments(&cs, &ss);
2449 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2450 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2451 ss_sel = cs_sel + 8;
2452 if (efer & EFER_LMA) {
2453 cs.d = 0;
2454 cs.l = 1;
2455 }
2456
2457 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2458 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2459
2460 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2461 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2462
2463 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2464 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2465 (u32)msr_data;
2466 if (efer & EFER_LMA)
2467 ctxt->mode = X86EMUL_MODE_PROT64;
2468
2469 return X86EMUL_CONTINUE;
2470 }
2471
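/*
 * SYSEXIT derives the user CS/SS selectors from MSR_IA32_SYSENTER_CS:
 * +16/+24 for a 32-bit return and +32 (with SS = CS + 8) for a 64-bit
 * return, with RPL forced to 3; RIP and RSP come from RDX and RCX.
 */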
2472 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2473 {
2474 const struct x86_emulate_ops *ops = ctxt->ops;
2475 struct desc_struct cs, ss;
2476 u64 msr_data, rcx, rdx;
2477 int usermode;
2478 u16 cs_sel = 0, ss_sel = 0;
2479
2480 /* inject #GP if in real mode or Virtual 8086 mode */
2481 if (ctxt->mode == X86EMUL_MODE_REAL ||
2482 ctxt->mode == X86EMUL_MODE_VM86)
2483 return emulate_gp(ctxt, 0);
2484
2485 setup_syscalls_segments(&cs, &ss);
2486
2487 if (ctxt->rex_bits & REX_W)
2488 usermode = X86EMUL_MODE_PROT64;
2489 else
2490 usermode = X86EMUL_MODE_PROT32;
2491
2492 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2493 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2494
2495 cs.dpl = 3;
2496 ss.dpl = 3;
2497 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2498 switch (usermode) {
2499 case X86EMUL_MODE_PROT32:
2500 cs_sel = (u16)(msr_data + 16);
2501 if ((msr_data & 0xfffc) == 0x0)
2502 return emulate_gp(ctxt, 0);
2503 ss_sel = (u16)(msr_data + 24);
2504 rcx = (u32)rcx;
2505 rdx = (u32)rdx;
2506 break;
2507 case X86EMUL_MODE_PROT64:
2508 cs_sel = (u16)(msr_data + 32);
2509 if (msr_data == 0x0)
2510 return emulate_gp(ctxt, 0);
2511 ss_sel = cs_sel + 8;
2512 cs.d = 0;
2513 cs.l = 1;
2514 if (emul_is_noncanonical_address(rcx, ctxt, 0) ||
2515 emul_is_noncanonical_address(rdx, ctxt, 0))
2516 return emulate_gp(ctxt, 0);
2517 break;
2518 }
2519 cs_sel |= SEGMENT_RPL_MASK;
2520 ss_sel |= SEGMENT_RPL_MASK;
2521
2522 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2523 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2524
2525 ctxt->_eip = rdx;
2526 ctxt->mode = usermode;
2527 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2528
2529 return X86EMUL_CONTINUE;
2530 }
2531
2532 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2533 {
2534 int iopl;
2535 if (ctxt->mode == X86EMUL_MODE_REAL)
2536 return false;
2537 if (ctxt->mode == X86EMUL_MODE_VM86)
2538 return true;
2539 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2540 return ctxt->ops->cpl(ctxt) > iopl;
2541 }
2542
2543 #define VMWARE_PORT_VMPORT (0x5658)
2544 #define VMWARE_PORT_VMRPC (0x5659)
2545
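/*
 * Check the TSS I/O permission bitmap: the bitmap's offset is read from
 * the 16-bit field at byte 102 of the TSS, and each port is one bit;
 * the access is allowed only if every bit covering [port, port+len) is
 * clear and the bitmap bytes lie within the TSS limit.
 */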
2546 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2547 u16 port, u16 len)
2548 {
2549 const struct x86_emulate_ops *ops = ctxt->ops;
2550 struct desc_struct tr_seg;
2551 u32 base3;
2552 int r;
2553 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2554 unsigned mask = (1 << len) - 1;
2555 unsigned long base;
2556
2557 /*
2558 	 * VMware allows access to these ports even if denied by the
2559 	 * TSS I/O permission bitmap. Mimic that behavior.
2560 */
2561 if (enable_vmware_backdoor &&
2562 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2563 return true;
2564
2565 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2566 if (!tr_seg.p)
2567 return false;
2568 if (desc_limit_scaled(&tr_seg) < 103)
2569 return false;
2570 base = get_desc_base(&tr_seg);
2571 #ifdef CONFIG_X86_64
2572 base |= ((u64)base3) << 32;
2573 #endif
2574 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2575 if (r != X86EMUL_CONTINUE)
2576 return false;
2577 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2578 return false;
2579 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2580 if (r != X86EMUL_CONTINUE)
2581 return false;
2582 if ((perm >> bit_idx) & mask)
2583 return false;
2584 return true;
2585 }
2586
2587 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2588 u16 port, u16 len)
2589 {
2590 if (ctxt->perm_ok)
2591 return true;
2592
2593 if (emulator_bad_iopl(ctxt))
2594 if (!emulator_io_port_access_allowed(ctxt, port, len))
2595 return false;
2596
2597 ctxt->perm_ok = true;
2598
2599 return true;
2600 }
2601
2602 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2603 {
2604 /*
2605 	 * Intel CPUs mask the counter and pointers in a rather strange
2606 	 * manner when ECX is zero, due to REP-string optimizations.
2607 */
2608 #ifdef CONFIG_X86_64
2609 u32 eax, ebx, ecx, edx;
2610
2611 if (ctxt->ad_bytes != 4)
2612 return;
2613
2614 eax = ecx = 0;
2615 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2616 if (!is_guest_vendor_intel(ebx, ecx, edx))
2617 return;
2618
2619 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2620
2621 switch (ctxt->b) {
2622 case 0xa4: /* movsb */
2623 case 0xa5: /* movsd/w */
2624 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2625 fallthrough;
2626 case 0xaa: /* stosb */
2627 case 0xab: /* stosd/w */
2628 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2629 }
2630 #endif
2631 }
2632
2633 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2634 struct tss_segment_16 *tss)
2635 {
2636 tss->ip = ctxt->_eip;
2637 tss->flag = ctxt->eflags;
2638 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2639 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2640 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2641 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2642 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2643 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2644 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2645 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2646
2647 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2648 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2649 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2650 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2651 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2652 }
2653
2654 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2655 struct tss_segment_16 *tss)
2656 {
2657 int ret;
2658 u8 cpl;
2659
2660 ctxt->_eip = tss->ip;
2661 	ctxt->eflags = tss->flag | 2;	/* bit 1 of EFLAGS is always 1 */
2662 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2663 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2664 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2665 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2666 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2667 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2668 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2669 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2670
2671 /*
2672 * SDM says that segment selectors are loaded before segment
2673 * descriptors
2674 */
2675 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2676 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2677 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2678 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2679 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2680
2681 cpl = tss->cs & 3;
2682
2683 /*
2684 	 * Now load the segment descriptors. If a fault happens at this
2685 	 * stage, it is handled in the context of the new task.
2686 */
2687 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2688 X86_TRANSFER_TASK_SWITCH, NULL);
2689 if (ret != X86EMUL_CONTINUE)
2690 return ret;
2691 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2692 X86_TRANSFER_TASK_SWITCH, NULL);
2693 if (ret != X86EMUL_CONTINUE)
2694 return ret;
2695 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2696 X86_TRANSFER_TASK_SWITCH, NULL);
2697 if (ret != X86EMUL_CONTINUE)
2698 return ret;
2699 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2700 X86_TRANSFER_TASK_SWITCH, NULL);
2701 if (ret != X86EMUL_CONTINUE)
2702 return ret;
2703 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2704 X86_TRANSFER_TASK_SWITCH, NULL);
2705 if (ret != X86EMUL_CONTINUE)
2706 return ret;
2707
2708 return X86EMUL_CONTINUE;
2709 }
2710
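/*
 * 16-bit task switch: save the outgoing state into the old TSS, read
 * the new TSS, optionally record the old TSS selector as the back link
 * (for a nested switch), then load the incoming state.
 */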
2711 static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2712 ulong old_tss_base, struct desc_struct *new_desc)
2713 {
2714 struct tss_segment_16 tss_seg;
2715 int ret;
2716 u32 new_tss_base = get_desc_base(new_desc);
2717
2718 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2719 if (ret != X86EMUL_CONTINUE)
2720 return ret;
2721
2722 save_state_to_tss16(ctxt, &tss_seg);
2723
2724 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2725 if (ret != X86EMUL_CONTINUE)
2726 return ret;
2727
2728 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2729 if (ret != X86EMUL_CONTINUE)
2730 return ret;
2731
2732 if (old_tss_sel != 0xffff) {
2733 tss_seg.prev_task_link = old_tss_sel;
2734
2735 ret = linear_write_system(ctxt, new_tss_base,
2736 &tss_seg.prev_task_link,
2737 sizeof(tss_seg.prev_task_link));
2738 if (ret != X86EMUL_CONTINUE)
2739 return ret;
2740 }
2741
2742 return load_state_from_tss16(ctxt, &tss_seg);
2743 }
2744
2745 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2746 struct tss_segment_32 *tss)
2747 {
2748 	/* CR3 and the LDT selector are intentionally not saved */
2749 tss->eip = ctxt->_eip;
2750 tss->eflags = ctxt->eflags;
2751 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2752 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2753 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2754 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2755 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2756 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2757 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2758 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2759
2760 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2761 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2762 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2763 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2764 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2765 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2766 }
2767
2768 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2769 struct tss_segment_32 *tss)
2770 {
2771 int ret;
2772 u8 cpl;
2773
2774 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2775 return emulate_gp(ctxt, 0);
2776 ctxt->_eip = tss->eip;
2777 	ctxt->eflags = tss->eflags | 2;	/* bit 1 of EFLAGS is always 1 */
2778
2779 /* General purpose registers */
2780 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2781 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2782 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2783 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2784 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2785 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2786 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2787 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2788
2789 /*
2790 * SDM says that segment selectors are loaded before segment
2791 * descriptors. This is important because CPL checks will
2792 * use CS.RPL.
2793 */
2794 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2795 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2796 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2797 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2798 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2799 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2800 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2801
2802 /*
2803 * If we're switching between Protected Mode and VM86, we need to make
2804 * sure to update the mode before loading the segment descriptors so
2805 * that the selectors are interpreted correctly.
2806 */
2807 if (ctxt->eflags & X86_EFLAGS_VM) {
2808 ctxt->mode = X86EMUL_MODE_VM86;
2809 cpl = 3;
2810 } else {
2811 ctxt->mode = X86EMUL_MODE_PROT32;
2812 cpl = tss->cs & 3;
2813 }
2814
2815 /*
2816 	 * Now load the segment descriptors. If a fault happens at this
2817 	 * stage, it is handled in the context of the new task.
2818 */
2819 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2820 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2821 if (ret != X86EMUL_CONTINUE)
2822 return ret;
2823 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2824 X86_TRANSFER_TASK_SWITCH, NULL);
2825 if (ret != X86EMUL_CONTINUE)
2826 return ret;
2827 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2828 X86_TRANSFER_TASK_SWITCH, NULL);
2829 if (ret != X86EMUL_CONTINUE)
2830 return ret;
2831 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2832 X86_TRANSFER_TASK_SWITCH, NULL);
2833 if (ret != X86EMUL_CONTINUE)
2834 return ret;
2835 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2836 X86_TRANSFER_TASK_SWITCH, NULL);
2837 if (ret != X86EMUL_CONTINUE)
2838 return ret;
2839 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2840 X86_TRANSFER_TASK_SWITCH, NULL);
2841 if (ret != X86EMUL_CONTINUE)
2842 return ret;
2843 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2844 X86_TRANSFER_TASK_SWITCH, NULL);
2845
2846 return ret;
2847 }
2848
2849 static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2850 ulong old_tss_base, struct desc_struct *new_desc)
2851 {
2852 struct tss_segment_32 tss_seg;
2853 int ret;
2854 u32 new_tss_base = get_desc_base(new_desc);
2855 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2856 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2857
2858 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2859 if (ret != X86EMUL_CONTINUE)
2860 return ret;
2861
2862 save_state_to_tss32(ctxt, &tss_seg);
2863
2864 /* Only GP registers and segment selectors are saved */
2865 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2866 ldt_sel_offset - eip_offset);
2867 if (ret != X86EMUL_CONTINUE)
2868 return ret;
2869
2870 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2871 if (ret != X86EMUL_CONTINUE)
2872 return ret;
2873
2874 if (old_tss_sel != 0xffff) {
2875 tss_seg.prev_task_link = old_tss_sel;
2876
2877 ret = linear_write_system(ctxt, new_tss_base,
2878 &tss_seg.prev_task_link,
2879 sizeof(tss_seg.prev_task_link));
2880 if (ret != X86EMUL_CONTINUE)
2881 return ret;
2882 }
2883
2884 return load_state_from_tss32(ctxt, &tss_seg);
2885 }
2886
2887 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2888 u16 tss_selector, int idt_index, int reason,
2889 bool has_error_code, u32 error_code)
2890 {
2891 const struct x86_emulate_ops *ops = ctxt->ops;
2892 struct desc_struct curr_tss_desc, next_tss_desc;
2893 int ret;
2894 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2895 ulong old_tss_base =
2896 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2897 u32 desc_limit;
2898 ulong desc_addr, dr7;
2899
2900 /* FIXME: old_tss_base == ~0 ? */
2901
2902 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2903 if (ret != X86EMUL_CONTINUE)
2904 return ret;
2905 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2906 if (ret != X86EMUL_CONTINUE)
2907 return ret;
2908
2909 /* FIXME: check that next_tss_desc is tss */
2910
2911 /*
2912 * Check privileges. The three cases are task switch caused by...
2913 *
2914 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2915 * 2. Exception/IRQ/iret: No check is performed
2916 * 3. jmp/call to TSS/task-gate: No check is performed since the
2917 * hardware checks it before exiting.
2918 */
2919 if (reason == TASK_SWITCH_GATE) {
2920 if (idt_index != -1) {
2921 /* Software interrupts */
2922 struct desc_struct task_gate_desc;
2923 int dpl;
2924
2925 ret = read_interrupt_descriptor(ctxt, idt_index,
2926 &task_gate_desc);
2927 if (ret != X86EMUL_CONTINUE)
2928 return ret;
2929
2930 dpl = task_gate_desc.dpl;
2931 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2932 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2933 }
2934 }
2935
2936 desc_limit = desc_limit_scaled(&next_tss_desc);
2937 if (!next_tss_desc.p ||
2938 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2939 desc_limit < 0x2b)) {
2940 return emulate_ts(ctxt, tss_selector & 0xfffc);
2941 }
2942
2943 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2944 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2945 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2946 }
2947
2948 if (reason == TASK_SWITCH_IRET)
2949 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2950
2951 	/* Set the back link to the previous task only if the NT bit is set
2952 	   in EFLAGS; note that old_tss_sel is not used after this point */
2953 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2954 old_tss_sel = 0xffff;
2955
2956 if (next_tss_desc.type & 8)
2957 ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2958 else
2959 ret = task_switch_16(ctxt, old_tss_sel,
2960 old_tss_base, &next_tss_desc);
2961 if (ret != X86EMUL_CONTINUE)
2962 return ret;
2963
2964 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2965 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2966
2967 if (reason != TASK_SWITCH_IRET) {
2968 next_tss_desc.type |= (1 << 1); /* set busy flag */
2969 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2970 }
2971
2972 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2973 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2974
2975 if (has_error_code) {
2976 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2977 ctxt->lock_prefix = 0;
2978 ctxt->src.val = (unsigned long) error_code;
2979 ret = em_push(ctxt);
2980 }
2981
2982 dr7 = ops->get_dr(ctxt, 7);
2983 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
2984
2985 return ret;
2986 }
2987
2988 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2989 u16 tss_selector, int idt_index, int reason,
2990 bool has_error_code, u32 error_code)
2991 {
2992 int rc;
2993
2994 invalidate_registers(ctxt);
2995 ctxt->_eip = ctxt->eip;
2996 ctxt->dst.type = OP_NONE;
2997
2998 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2999 has_error_code, error_code);
3000
3001 if (rc == X86EMUL_CONTINUE) {
3002 ctxt->eip = ctxt->_eip;
3003 writeback_registers(ctxt);
3004 }
3005
3006 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3007 }
3008
3009 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3010 struct operand *op)
3011 {
3012 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3013
3014 register_address_increment(ctxt, reg, df * op->bytes);
3015 op->addr.mem.ea = register_address(ctxt, reg);
3016 }
3017
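/*
 * DAS (decimal adjust AL after subtraction): subtract 6 from AL if the
 * low nibble is above 9 or AF is set, then 0x60 more if AL was above
 * 0x99 or CF was set; PF/ZF/SF are recomputed by OR-ing AL with zero.
 */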
3018 static int em_das(struct x86_emulate_ctxt *ctxt)
3019 {
3020 u8 al, old_al;
3021 bool af, cf, old_cf;
3022
3023 cf = ctxt->eflags & X86_EFLAGS_CF;
3024 al = ctxt->dst.val;
3025
3026 old_al = al;
3027 old_cf = cf;
3028 cf = false;
3029 af = ctxt->eflags & X86_EFLAGS_AF;
3030 if ((al & 0x0f) > 9 || af) {
3031 al -= 6;
3032 cf = old_cf | (al >= 250);
3033 af = true;
3034 } else {
3035 af = false;
3036 }
3037 if (old_al > 0x99 || old_cf) {
3038 al -= 0x60;
3039 cf = true;
3040 }
3041
3042 ctxt->dst.val = al;
3043 /* Set PF, ZF, SF */
3044 ctxt->src.type = OP_IMM;
3045 ctxt->src.val = 0;
3046 ctxt->src.bytes = 1;
3047 em_or(ctxt);
3048 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3049 if (cf)
3050 ctxt->eflags |= X86_EFLAGS_CF;
3051 if (af)
3052 ctxt->eflags |= X86_EFLAGS_AF;
3053 return X86EMUL_CONTINUE;
3054 }
3055
3056 static int em_aam(struct x86_emulate_ctxt *ctxt)
3057 {
3058 u8 al, ah;
3059
3060 if (ctxt->src.val == 0)
3061 return emulate_de(ctxt);
3062
3063 al = ctxt->dst.val & 0xff;
3064 ah = al / ctxt->src.val;
3065 al %= ctxt->src.val;
3066
3067 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3068
3069 /* Set PF, ZF, SF */
3070 ctxt->src.type = OP_IMM;
3071 ctxt->src.val = 0;
3072 ctxt->src.bytes = 1;
3073 em_or(ctxt);
3074
3075 return X86EMUL_CONTINUE;
3076 }
3077
3078 static int em_aad(struct x86_emulate_ctxt *ctxt)
3079 {
3080 u8 al = ctxt->dst.val & 0xff;
3081 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3082
3083 al = (al + (ah * ctxt->src.val)) & 0xff;
3084
3085 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3086
3087 /* Set PF, ZF, SF */
3088 ctxt->src.type = OP_IMM;
3089 ctxt->src.val = 0;
3090 ctxt->src.bytes = 1;
3091 em_or(ctxt);
3092
3093 return X86EMUL_CONTINUE;
3094 }
3095
3096 static int em_call(struct x86_emulate_ctxt *ctxt)
3097 {
3098 int rc;
3099 long rel = ctxt->src.val;
3100
3101 ctxt->src.val = (unsigned long)ctxt->_eip;
3102 rc = jmp_rel(ctxt, rel);
3103 if (rc != X86EMUL_CONTINUE)
3104 return rc;
3105 return em_push(ctxt);
3106 }
3107
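/*
 * Far call: load the new CS descriptor and RIP first, then push the old
 * CS and RIP. If a push faults after the new CS is live, unwind by
 * restoring the old CS and emulation mode at the 'fail' label.
 */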
3108 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3109 {
3110 u16 sel, old_cs;
3111 ulong old_eip;
3112 int rc;
3113 struct desc_struct old_desc, new_desc;
3114 const struct x86_emulate_ops *ops = ctxt->ops;
3115 int cpl = ctxt->ops->cpl(ctxt);
3116 enum x86emul_mode prev_mode = ctxt->mode;
3117
3118 old_eip = ctxt->_eip;
3119 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3120
3121 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3122 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3123 X86_TRANSFER_CALL_JMP, &new_desc);
3124 if (rc != X86EMUL_CONTINUE)
3125 return rc;
3126
3127 rc = assign_eip_far(ctxt, ctxt->src.val);
3128 if (rc != X86EMUL_CONTINUE)
3129 goto fail;
3130
3131 ctxt->src.val = old_cs;
3132 rc = em_push(ctxt);
3133 if (rc != X86EMUL_CONTINUE)
3134 goto fail;
3135
3136 ctxt->src.val = old_eip;
3137 rc = em_push(ctxt);
3138 	/* If we failed, we tainted the memory, but at the very least we
3139 	   should restore cs */
3140 if (rc != X86EMUL_CONTINUE) {
3141 pr_warn_once("faulting far call emulation tainted memory\n");
3142 goto fail;
3143 }
3144 return rc;
3145 fail:
3146 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3147 ctxt->mode = prev_mode;
3148 return rc;
3149
3150 }
3151
3152 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3153 {
3154 int rc;
3155 unsigned long eip = 0;
3156
3157 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3158 if (rc != X86EMUL_CONTINUE)
3159 return rc;
3160 rc = assign_eip_near(ctxt, eip);
3161 if (rc != X86EMUL_CONTINUE)
3162 return rc;
3163 rsp_increment(ctxt, ctxt->src.val);
3164 return X86EMUL_CONTINUE;
3165 }
3166
3167 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3168 {
3169 /* Write back the register source. */
3170 ctxt->src.val = ctxt->dst.val;
3171 write_register_operand(&ctxt->src);
3172
3173 /* Write back the memory destination with implicit LOCK prefix. */
3174 ctxt->dst.val = ctxt->src.orig_val;
3175 ctxt->lock_prefix = 1;
3176 return X86EMUL_CONTINUE;
3177 }
3178
3179 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3180 {
3181 ctxt->dst.val = ctxt->src2.val;
3182 return em_imul(ctxt);
3183 }
3184
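/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX.
 * The expression below yields all ones when the source's sign bit is
 * set, and zero otherwise.
 */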
3185 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3186 {
3187 ctxt->dst.type = OP_REG;
3188 ctxt->dst.bytes = ctxt->src.bytes;
3189 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3190 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3191
3192 return X86EMUL_CONTINUE;
3193 }
3194
3195 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3196 {
3197 u64 tsc_aux = 0;
3198
3199 if (!ctxt->ops->guest_has_rdpid(ctxt))
3200 return emulate_ud(ctxt);
3201
3202 ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3203 ctxt->dst.val = tsc_aux;
3204 return X86EMUL_CONTINUE;
3205 }
3206
3207 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3208 {
3209 u64 tsc = 0;
3210
3211 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3212 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3213 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3214 return X86EMUL_CONTINUE;
3215 }
3216
3217 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3218 {
3219 u64 pmc;
3220
3221 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3222 return emulate_gp(ctxt, 0);
3223 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3224 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3225 return X86EMUL_CONTINUE;
3226 }
3227
3228 static int em_mov(struct x86_emulate_ctxt *ctxt)
3229 {
3230 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3231 return X86EMUL_CONTINUE;
3232 }
3233
3234 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3235 {
3236 u16 tmp;
3237
3238 if (!ctxt->ops->guest_has_movbe(ctxt))
3239 return emulate_ud(ctxt);
3240
3241 switch (ctxt->op_bytes) {
3242 case 2:
3243 /*
3244 * From MOVBE definition: "...When the operand size is 16 bits,
3245 * the upper word of the destination register remains unchanged
3246 * ..."
3247 *
3248 		 * Casting either ->valptr or ->val to u16 breaks strict
3249 		 * aliasing rules, so we have to do the operation almost by hand.
3250 */
3251 tmp = (u16)ctxt->src.val;
3252 ctxt->dst.val &= ~0xffffUL;
3253 ctxt->dst.val |= (unsigned long)swab16(tmp);
3254 break;
3255 case 4:
3256 ctxt->dst.val = swab32((u32)ctxt->src.val);
3257 break;
3258 case 8:
3259 ctxt->dst.val = swab64(ctxt->src.val);
3260 break;
3261 default:
3262 BUG();
3263 }
3264 return X86EMUL_CONTINUE;
3265 }
3266
3267 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3268 {
3269 int cr_num = ctxt->modrm_reg;
3270 int r;
3271
3272 if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3273 return emulate_gp(ctxt, 0);
3274
3275 /* Disable writeback. */
3276 ctxt->dst.type = OP_NONE;
3277
3278 if (cr_num == 0) {
3279 /*
3280 * CR0 write might have updated CR0.PE and/or CR0.PG
3281 * which can affect the cpu's execution mode.
3282 */
3283 r = emulator_recalc_and_set_mode(ctxt);
3284 if (r != X86EMUL_CONTINUE)
3285 return r;
3286 }
3287
3288 return X86EMUL_CONTINUE;
3289 }
3290
3291 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3292 {
3293 unsigned long val;
3294
3295 if (ctxt->mode == X86EMUL_MODE_PROT64)
3296 val = ctxt->src.val & ~0ULL;
3297 else
3298 val = ctxt->src.val & ~0U;
3299
3300 /* #UD condition is already handled. */
3301 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3302 return emulate_gp(ctxt, 0);
3303
3304 /* Disable writeback. */
3305 ctxt->dst.type = OP_NONE;
3306 return X86EMUL_CONTINUE;
3307 }
3308
3309 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3310 {
3311 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3312 u64 msr_data;
3313 int r;
3314
3315 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3316 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3317 r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3318
3319 if (r == X86EMUL_PROPAGATE_FAULT)
3320 return emulate_gp(ctxt, 0);
3321
3322 return r;
3323 }
3324
3325 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3326 {
3327 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3328 u64 msr_data;
3329 int r;
3330
3331 r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3332
3333 if (r == X86EMUL_PROPAGATE_FAULT)
3334 return emulate_gp(ctxt, 0);
3335
3336 if (r == X86EMUL_CONTINUE) {
3337 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3338 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3339 }
3340 return r;
3341 }
3342
3343 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3344 {
3345 if (segment > VCPU_SREG_GS &&
3346 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3347 ctxt->ops->cpl(ctxt) > 0)
3348 return emulate_gp(ctxt, 0);
3349
3350 ctxt->dst.val = get_segment_selector(ctxt, segment);
3351 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3352 ctxt->dst.bytes = 2;
3353 return X86EMUL_CONTINUE;
3354 }
3355
3356 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3357 {
3358 if (ctxt->modrm_reg > VCPU_SREG_GS)
3359 return emulate_ud(ctxt);
3360
3361 return em_store_sreg(ctxt, ctxt->modrm_reg);
3362 }
3363
3364 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3365 {
3366 u16 sel = ctxt->src.val;
3367
3368 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3369 return emulate_ud(ctxt);
3370
3371 if (ctxt->modrm_reg == VCPU_SREG_SS)
3372 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3373
3374 /* Disable writeback. */
3375 ctxt->dst.type = OP_NONE;
3376 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3377 }
3378
3379 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3380 {
3381 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3382 }
3383
3384 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3385 {
3386 u16 sel = ctxt->src.val;
3387
3388 /* Disable writeback. */
3389 ctxt->dst.type = OP_NONE;
3390 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3391 }
3392
3393 static int em_str(struct x86_emulate_ctxt *ctxt)
3394 {
3395 return em_store_sreg(ctxt, VCPU_SREG_TR);
3396 }
3397
3398 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3399 {
3400 u16 sel = ctxt->src.val;
3401
3402 /* Disable writeback. */
3403 ctxt->dst.type = OP_NONE;
3404 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3405 }
3406
3407 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3408 {
3409 int rc;
3410 ulong linear;
3411 unsigned int max_size;
3412
3413 rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
3414 &linear, X86EMUL_F_INVLPG);
3415 if (rc == X86EMUL_CONTINUE)
3416 ctxt->ops->invlpg(ctxt, linear);
3417 /* Disable writeback. */
3418 ctxt->dst.type = OP_NONE;
3419 return X86EMUL_CONTINUE;
3420 }
3421
3422 static int em_clts(struct x86_emulate_ctxt *ctxt)
3423 {
3424 ulong cr0;
3425
3426 cr0 = ctxt->ops->get_cr(ctxt, 0);
3427 cr0 &= ~X86_CR0_TS;
3428 ctxt->ops->set_cr(ctxt, 0, cr0);
3429 return X86EMUL_CONTINUE;
3430 }
3431
3432 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3433 {
3434 int rc = ctxt->ops->fix_hypercall(ctxt);
3435
3436 if (rc != X86EMUL_CONTINUE)
3437 return rc;
3438
3439 /* Let the processor re-execute the fixed hypercall */
3440 ctxt->_eip = ctxt->eip;
3441 /* Disable writeback. */
3442 ctxt->dst.type = OP_NONE;
3443 return X86EMUL_CONTINUE;
3444 }
3445
3446 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3447 void (*get)(struct x86_emulate_ctxt *ctxt,
3448 struct desc_ptr *ptr))
3449 {
3450 struct desc_ptr desc_ptr;
3451
3452 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3453 ctxt->ops->cpl(ctxt) > 0)
3454 return emulate_gp(ctxt, 0);
3455
3456 if (ctxt->mode == X86EMUL_MODE_PROT64)
3457 ctxt->op_bytes = 8;
3458 get(ctxt, &desc_ptr);
3459 if (ctxt->op_bytes == 2) {
3460 ctxt->op_bytes = 4;
3461 desc_ptr.address &= 0x00ffffff;
3462 }
3463 /* Disable writeback. */
3464 ctxt->dst.type = OP_NONE;
3465 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3466 &desc_ptr, 2 + ctxt->op_bytes);
3467 }
3468
3469 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3470 {
3471 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3472 }
3473
3474 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3475 {
3476 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3477 }
3478
3479 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3480 {
3481 struct desc_ptr desc_ptr;
3482 int rc;
3483
3484 if (ctxt->mode == X86EMUL_MODE_PROT64)
3485 ctxt->op_bytes = 8;
3486 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3487 &desc_ptr.size, &desc_ptr.address,
3488 ctxt->op_bytes);
3489 if (rc != X86EMUL_CONTINUE)
3490 return rc;
3491 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3492 emul_is_noncanonical_address(desc_ptr.address, ctxt,
3493 X86EMUL_F_DT_LOAD))
3494 return emulate_gp(ctxt, 0);
3495 if (lgdt)
3496 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3497 else
3498 ctxt->ops->set_idt(ctxt, &desc_ptr);
3499 /* Disable writeback. */
3500 ctxt->dst.type = OP_NONE;
3501 return X86EMUL_CONTINUE;
3502 }
3503
3504 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3505 {
3506 return em_lgdt_lidt(ctxt, true);
3507 }
3508
3509 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3510 {
3511 return em_lgdt_lidt(ctxt, false);
3512 }
3513
3514 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3515 {
3516 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3517 ctxt->ops->cpl(ctxt) > 0)
3518 return emulate_gp(ctxt, 0);
3519
3520 if (ctxt->dst.type == OP_MEM)
3521 ctxt->dst.bytes = 2;
3522 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3523 return X86EMUL_CONTINUE;
3524 }
3525
3526 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3527 {
3528 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3529 | (ctxt->src.val & 0x0f));
3530 ctxt->dst.type = OP_NONE;
3531 return X86EMUL_CONTINUE;
3532 }
3533
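/*
 * LOOP/LOOPE/LOOPNE: decrement (r/e)CX and branch while it is non-zero;
 * for opcodes 0xe0/0xe1 the ZF condition is also tested via test_cc
 * (0xe2 is the unconditional LOOP).
 */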
3534 static int em_loop(struct x86_emulate_ctxt *ctxt)
3535 {
3536 int rc = X86EMUL_CONTINUE;
3537
3538 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3539 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3540 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3541 rc = jmp_rel(ctxt, ctxt->src.val);
3542
3543 return rc;
3544 }
3545
3546 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3547 {
3548 int rc = X86EMUL_CONTINUE;
3549
3550 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3551 rc = jmp_rel(ctxt, ctxt->src.val);
3552
3553 return rc;
3554 }
3555
3556 static int em_in(struct x86_emulate_ctxt *ctxt)
3557 {
3558 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3559 &ctxt->dst.val))
3560 return X86EMUL_IO_NEEDED;
3561
3562 return X86EMUL_CONTINUE;
3563 }
3564
3565 static int em_out(struct x86_emulate_ctxt *ctxt)
3566 {
3567 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3568 &ctxt->src.val, 1);
3569 /* Disable writeback. */
3570 ctxt->dst.type = OP_NONE;
3571 return X86EMUL_CONTINUE;
3572 }
3573
3574 static int em_cli(struct x86_emulate_ctxt *ctxt)
3575 {
3576 if (emulator_bad_iopl(ctxt))
3577 return emulate_gp(ctxt, 0);
3578
3579 ctxt->eflags &= ~X86_EFLAGS_IF;
3580 return X86EMUL_CONTINUE;
3581 }
3582
3583 static int em_sti(struct x86_emulate_ctxt *ctxt)
3584 {
3585 if (emulator_bad_iopl(ctxt))
3586 return emulate_gp(ctxt, 0);
3587
3588 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3589 ctxt->eflags |= X86_EFLAGS_IF;
3590 return X86EMUL_CONTINUE;
3591 }
3592
3593 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3594 {
3595 u32 eax, ebx, ecx, edx;
3596 u64 msr = 0;
3597
3598 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3599 if (!ctxt->ops->is_smm(ctxt) &&
3600 (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) &&
3601 ctxt->ops->cpl(ctxt))
3602 return emulate_gp(ctxt, 0);
3603
3604 eax = reg_read(ctxt, VCPU_REGS_RAX);
3605 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3606 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3607 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3608 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3609 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3610 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3611 return X86EMUL_CONTINUE;
3612 }
3613
3614 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3615 {
3616 u32 flags;
3617
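/*
 * SAHF loads SF, ZF, AF, PF and CF from AH; EFLAGS bit 1 is always 1 and
 * the remaining low-byte bits are cleared.
 */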
3618 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3619 X86_EFLAGS_SF;
3620 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3621
3622 ctxt->eflags &= ~0xffUL;
3623 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3624 return X86EMUL_CONTINUE;
3625 }
3626
3627 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3628 {
3629 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3630 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3631 return X86EMUL_CONTINUE;
3632 }
3633
3634 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3635 {
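/*
 * BSWAP with a 16-bit operand is architecturally undefined; it is simply
 * handled as the 32-bit case here.
 */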
3636 switch (ctxt->op_bytes) {
3637 #ifdef CONFIG_X86_64
3638 case 8:
3639 asm("bswap %0" : "+r"(ctxt->dst.val));
3640 break;
3641 #endif
3642 default:
3643 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3644 break;
3645 }
3646 return X86EMUL_CONTINUE;
3647 }
3648
3649 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3650 {
3651 /* emulating clflush regardless of cpuid */
3652 return X86EMUL_CONTINUE;
3653 }
3654
3655 static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3656 {
3657 /* emulating clflushopt regardless of cpuid */
3658 return X86EMUL_CONTINUE;
3659 }
3660
3661 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3662 {
3663 ctxt->dst.val = (s32) ctxt->src.val;
3664 return X86EMUL_CONTINUE;
3665 }
3666
3667 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3668 {
3669 if (!ctxt->ops->guest_has_fxsr(ctxt))
3670 return emulate_ud(ctxt);
3671
3672 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3673 return emulate_nm(ctxt);
3674
3675 /*
3676 * Rather than work around a lack of fxsave64/fxrstor64 on old
3677 * compilers, simply don't emulate a case that should never be hit.
3678 */
3679 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3680 return X86EMUL_UNHANDLEABLE;
3681
3682 return X86EMUL_CONTINUE;
3683 }
3684
3685 /*
3686 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3687 * and restore MXCSR.
3688 */
3689 static size_t __fxstate_size(int nregs)
3690 {
3691 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3692 }
3693
3694 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3695 {
3696 bool cr4_osfxsr;
3697 if (ctxt->mode == X86EMUL_MODE_PROT64)
3698 return __fxstate_size(16);
3699
3700 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3701 return __fxstate_size(cr4_osfxsr ? 8 : 0);
3702 }
3703
3704 /*
3705 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3706 * 1) 16 bit mode
3707 * 2) 32 bit mode
3708 * - like (1), but FIP and FDP (the FPU instruction and data
3709 * pointers) are only 16 bit. At least Intel CPUs preserve whole
3710 * 32 bit values, though, so (1) and (2) are the same wrt. save and restore
3711 * 3) 64-bit mode without REX.W prefix
3712 * - like (2), but XMM 8-15 are also saved and restored
3713 * 4) 64-bit mode with REX.W prefix (FXSAVE64/FXRSTOR64)
3714 * - like (3), but FIP and FDP are 64 bit
3715 *
3716 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3717 * desired result. (4) is not emulated.
3718 *
3719 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3720 * and FPU DS) should match.
3721 */
3722 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3723 {
3724 struct fxregs_state fx_state = {};
3725 int rc;
3726
3727 rc = check_fxsr(ctxt);
3728 if (rc != X86EMUL_CONTINUE)
3729 return rc;
3730
3731 kvm_fpu_get();
3732
3733 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3734
3735 kvm_fpu_put();
3736
3737 if (rc != X86EMUL_CONTINUE)
3738 return rc;
3739
3740 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3741 fxstate_size(ctxt));
3742 }
3743
3744 /*
3745 * FXRSTOR might restore XMM registers not provided by the guest. Fill
3746 * in the host registers (via FXSAVE) instead, so they won't be modified.
3747 * (preemption has to stay disabled until FXRSTOR).
3748 *
3749 * Use noinline to keep the stack for other functions called by callers small.
3750 */
3751 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3752 const size_t used_size)
3753 {
3754 struct fxregs_state fx_tmp = {};
3755 int rc;
3756
3757 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3758 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3759 __fxstate_size(16) - used_size);
3760
3761 return rc;
3762 }
3763
3764 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3765 {
3766 struct fxregs_state fx_state;
3767 int rc;
3768 size_t size;
3769
3770 rc = check_fxsr(ctxt);
3771 if (rc != X86EMUL_CONTINUE)
3772 return rc;
3773
3774 size = fxstate_size(ctxt);
3775 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3776 if (rc != X86EMUL_CONTINUE)
3777 return rc;
3778
3779 kvm_fpu_get();
3780
3781 if (size < __fxstate_size(16)) {
3782 rc = fxregs_fixup(&fx_state, size);
3783 if (rc != X86EMUL_CONTINUE)
3784 goto out;
3785 }
3786
3787 if (fx_state.mxcsr >> 16) {
3788 rc = emulate_gp(ctxt, 0);
3789 goto out;
3790 }
3791
3792 if (rc == X86EMUL_CONTINUE)
3793 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3794
3795 out:
3796 kvm_fpu_put();
3797
3798 return rc;
3799 }
3800
3801 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3802 {
3803 u32 eax, ecx, edx;
3804
3805 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3806 return emulate_ud(ctxt);
3807
3808 eax = reg_read(ctxt, VCPU_REGS_RAX);
3809 edx = reg_read(ctxt, VCPU_REGS_RDX);
3810 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3811
3812 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3813 return emulate_gp(ctxt, 0);
3814
3815 return X86EMUL_CONTINUE;
3816 }
3817
3818 static bool valid_cr(int nr)
3819 {
3820 switch (nr) {
3821 case 0:
3822 case 2 ... 4:
3823 case 8:
3824 return true;
3825 default:
3826 return false;
3827 }
3828 }
3829
3830 static int check_cr_access(struct x86_emulate_ctxt *ctxt)
3831 {
3832 if (!valid_cr(ctxt->modrm_reg))
3833 return emulate_ud(ctxt);
3834
3835 return X86EMUL_CONTINUE;
3836 }
3837
3838 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3839 {
3840 int dr = ctxt->modrm_reg;
3841 u64 cr4;
3842
3843 if (dr > 7)
3844 return emulate_ud(ctxt);
3845
3846 cr4 = ctxt->ops->get_cr(ctxt, 4);
3847 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3848 return emulate_ud(ctxt);
3849
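/*
 * DR7.GD (general detect) arms a debug exception on any MOV-DR access;
 * report it via DR6.BD before the access is performed.
 */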
3850 if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
3851 ulong dr6;
3852
3853 dr6 = ctxt->ops->get_dr(ctxt, 6);
3854 dr6 &= ~DR_TRAP_BITS;
3855 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
3856 ctxt->ops->set_dr(ctxt, 6, dr6);
3857 return emulate_db(ctxt);
3858 }
3859
3860 return X86EMUL_CONTINUE;
3861 }
3862
3863 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3864 {
3865 u64 new_val = ctxt->src.val64;
3866 int dr = ctxt->modrm_reg;
3867
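/* Writing a non-zero value to bits 63:32 of DR6 or DR7 raises #GP. */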
3868 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3869 return emulate_gp(ctxt, 0);
3870
3871 return check_dr_read(ctxt);
3872 }
3873
3874 static int check_svme(struct x86_emulate_ctxt *ctxt)
3875 {
3876 u64 efer = 0;
3877
3878 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3879
3880 if (!(efer & EFER_SVME))
3881 return emulate_ud(ctxt);
3882
3883 return X86EMUL_CONTINUE;
3884 }
3885
3886 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3887 {
3888 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3889
3890 if (!ctxt->ops->page_address_valid(ctxt, rax))
3891 return emulate_gp(ctxt, 0);
3892
3893 return check_svme(ctxt);
3894 }
3895
3896 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3897 {
3898 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3899
3900 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3901 return emulate_gp(ctxt, 0);
3902
3903 return X86EMUL_CONTINUE;
3904 }
3905
3906 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3907 {
3908 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3909 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3910
3911 /*
3912 * VMware allows access to these pseudo-PMCs even when read via RDPMC
3913 * in Ring3 when CR4.PCE=0.
3914 */
3915 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
3916 return X86EMUL_CONTINUE;
3917
3918 /*
3919 * If CR4.PCE is clear, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
3920 * check however is unnecessary because CPL is always 0 outside
3921 * protected mode.
3922 */
3923 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3924 ctxt->ops->check_rdpmc_early(ctxt, rcx))
3925 return emulate_gp(ctxt, 0);
3926
3927 return X86EMUL_CONTINUE;
3928 }
3929
3930 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3931 {
3932 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3933 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3934 return emulate_gp(ctxt, 0);
3935
3936 return X86EMUL_CONTINUE;
3937 }
3938
3939 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3940 {
3941 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3942 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3943 return emulate_gp(ctxt, 0);
3944
3945 return X86EMUL_CONTINUE;
3946 }
3947
3948 #define D(_y) { .flags = (_y) }
3949 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3950 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3951 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3952 #define N D(NotImpl)
3953 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3954 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3955 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3956 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3957 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3958 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3959 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3960 #define II(_f, _e, _i) \
3961 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3962 #define IIP(_f, _e, _i, _p) \
3963 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3964 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3965 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
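/*
 * For example, I(DstMem | SrcReg | ModRM, em_add) expands to
 * { .flags = DstMem | SrcReg | ModRM, .u.execute = em_add }, i.e. a table
 * entry that decodes a ModRM memory destination plus register source and
 * dispatches to em_add().
 */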
3966
3967 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3968 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3969 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3970 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3971 #define I2bvIP(_f, _e, _i, _p) \
3972 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3973
3974 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3975 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3976 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3977
3978 static const struct opcode ud = I(SrcNone, emulate_ud);
3979
3980 static const struct opcode group7_rm0[] = {
3981 N,
3982 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
3983 N, N, N, N, N, N,
3984 };
3985
3986 static const struct opcode group7_rm1[] = {
3987 DI(SrcNone | Priv, monitor),
3988 DI(SrcNone | Priv, mwait),
3989 N, N, N, N, N, N,
3990 };
3991
3992 static const struct opcode group7_rm2[] = {
3993 N,
3994 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
3995 N, N, N, N, N, N,
3996 };
3997
3998 static const struct opcode group7_rm3[] = {
3999 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4000 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4001 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4002 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4003 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4004 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4005 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4006 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4007 };
4008
4009 static const struct opcode group7_rm7[] = {
4010 N,
4011 DIP(SrcNone, rdtscp, check_rdtsc),
4012 N, N, N, N, N, N,
4013 };
4014
4015 static const struct opcode group1[] = {
4016 I(Lock, em_add),
4017 I(Lock | PageTable, em_or),
4018 I(Lock, em_adc),
4019 I(Lock, em_sbb),
4020 I(Lock | PageTable, em_and),
4021 I(Lock, em_sub),
4022 I(Lock, em_xor),
4023 I(NoWrite, em_cmp),
4024 };
4025
4026 static const struct opcode group1A[] = {
4027 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4028 };
4029
4030 static const struct opcode group2[] = {
4031 I(DstMem | ModRM, em_rol),
4032 I(DstMem | ModRM, em_ror),
4033 I(DstMem | ModRM, em_rcl),
4034 I(DstMem | ModRM, em_rcr),
4035 I(DstMem | ModRM, em_shl),
4036 I(DstMem | ModRM, em_shr),
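/* ModRM /6 is an undocumented alias of SHL (/4), hence em_shl twice. */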
4037 I(DstMem | ModRM, em_shl),
4038 I(DstMem | ModRM, em_sar),
4039 };
4040
4041 static const struct opcode group3[] = {
4042 I(DstMem | SrcImm | NoWrite, em_test),
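/* ModRM /1 is an undocumented alias of TEST (/0). */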
4043 I(DstMem | SrcImm | NoWrite, em_test),
4044 I(DstMem | SrcNone | Lock, em_not),
4045 I(DstMem | SrcNone | Lock, em_neg),
4046 I(DstXacc | Src2Mem, em_mul_ex),
4047 I(DstXacc | Src2Mem, em_imul_ex),
4048 I(DstXacc | Src2Mem, em_div_ex),
4049 I(DstXacc | Src2Mem, em_idiv_ex),
4050 };
4051
4052 static const struct opcode group4[] = {
4053 I(ByteOp | DstMem | SrcNone | Lock, em_inc),
4054 I(ByteOp | DstMem | SrcNone | Lock, em_dec),
4055 N, N, N, N, N, N,
4056 };
4057
4058 static const struct opcode group5[] = {
4059 I(DstMem | SrcNone | Lock, em_inc),
4060 I(DstMem | SrcNone | Lock, em_dec),
4061 I(SrcMem | NearBranch | IsBranch | ShadowStack, em_call_near_abs),
4062 I(SrcMemFAddr | ImplicitOps | IsBranch | ShadowStack, em_call_far),
4063 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4064 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4065 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4066 };
4067
4068 static const struct opcode group6[] = {
4069 II(Prot | DstMem, em_sldt, sldt),
4070 II(Prot | DstMem, em_str, str),
4071 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4072 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4073 N, N, N, N,
4074 };
4075
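/*
 * In a group_dual table the first eight entries are used when
 * ModRM.mod != 3 (memory forms) and the second eight when
 * ModRM.mod == 3 (register forms).
 */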
4076 static const struct group_dual group7 = { {
4077 II(Mov | DstMem, em_sgdt, sgdt),
4078 II(Mov | DstMem, em_sidt, sidt),
4079 II(SrcMem | Priv, em_lgdt, lgdt),
4080 II(SrcMem | Priv, em_lidt, lidt),
4081 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4082 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4083 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4084 }, {
4085 EXT(0, group7_rm0),
4086 EXT(0, group7_rm1),
4087 EXT(0, group7_rm2),
4088 EXT(0, group7_rm3),
4089 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4090 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4091 EXT(0, group7_rm7),
4092 } };
4093
4094 static const struct opcode group8[] = {
4095 N, N, N, N,
4096 I(DstMem | SrcImmByte | NoWrite, em_bt),
4097 I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4098 I(DstMem | SrcImmByte | Lock, em_btr),
4099 I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4100 };
4101
4102 /*
4103 * The "memory" destination is actually always a register, since we come
4104 * from the register case of group9.
4105 */
4106 static const struct gprefix pfx_0f_c7_7 = {
4107 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4108 };
4109
4110
4111 static const struct group_dual group9 = { {
4112 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4113 }, {
4114 N, N, N, N, N, N, N,
4115 GP(0, &pfx_0f_c7_7),
4116 } };
4117
4118 static const struct opcode group11[] = {
4119 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4120 X7(D(Undefined)),
4121 };
4122
4123 static const struct gprefix pfx_0f_ae_7 = {
4124 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4125 };
4126
4127 static const struct group_dual group15 = { {
4128 I(ModRM | Aligned16, em_fxsave),
4129 I(ModRM | Aligned16, em_fxrstor),
4130 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4131 }, {
4132 N, N, N, N, N, N, N, N,
4133 } };
4134
4135 static const struct gprefix pfx_0f_6f_0f_7f = {
4136 I(Mmx, em_mov), I(Sse | Avx | Aligned, em_mov), N, I(Sse | Avx | Unaligned, em_mov),
4137 };
4138
4139 static const struct instr_dual instr_dual_0f_2b = {
4140 I(0, em_mov), N
4141 };
4142
4143 static const struct gprefix pfx_0f_2b = {
4144 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4145 };
4146
4147 static const struct gprefix pfx_0f_10_0f_11 = {
4148 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4149 };
4150
4151 static const struct gprefix pfx_0f_28_0f_29 = {
4152 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4153 };
4154
4155 static const struct gprefix pfx_0f_e7_0f_38_2a = {
4156 N, I(Sse | Avx, em_mov), N, N,
4157 };
4158
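/*
 * x87 escape tables: the first eight entries are indexed by ModRM.reg for
 * memory forms (mod != 3); the following 64 entries cover ModRM bytes
 * 0xc0-0xff (register forms) directly.
 */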
4159 static const struct escape escape_d9 = { {
4160 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4161 }, {
4162 /* 0xC0 - 0xC7 */
4163 N, N, N, N, N, N, N, N,
4164 /* 0xC8 - 0xCF */
4165 N, N, N, N, N, N, N, N,
4166 /* 0xD0 - 0xD7 */
4167 N, N, N, N, N, N, N, N,
4168 /* 0xD8 - 0xDF */
4169 N, N, N, N, N, N, N, N,
4170 /* 0xE0 - 0xE7 */
4171 N, N, N, N, N, N, N, N,
4172 /* 0xE8 - 0xEF */
4173 N, N, N, N, N, N, N, N,
4174 /* 0xF0 - 0xF7 */
4175 N, N, N, N, N, N, N, N,
4176 /* 0xF8 - 0xFF */
4177 N, N, N, N, N, N, N, N,
4178 } };
4179
4180 static const struct escape escape_db = { {
4181 N, N, N, N, N, N, N, N,
4182 }, {
4183 /* 0xC0 - 0xC7 */
4184 N, N, N, N, N, N, N, N,
4185 /* 0xC8 - 0xCF */
4186 N, N, N, N, N, N, N, N,
4187 /* 0xD0 - 0xD7 */
4188 N, N, N, N, N, N, N, N,
4189 /* 0xD8 - 0xDF */
4190 N, N, N, N, N, N, N, N,
4191 /* 0xE0 - 0xE7 */
4192 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4193 /* 0xE8 - 0xEF */
4194 N, N, N, N, N, N, N, N,
4195 /* 0xF0 - 0xF7 */
4196 N, N, N, N, N, N, N, N,
4197 /* 0xF8 - 0xFF */
4198 N, N, N, N, N, N, N, N,
4199 } };
4200
4201 static const struct escape escape_dd = { {
4202 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4203 }, {
4204 /* 0xC0 - 0xC7 */
4205 N, N, N, N, N, N, N, N,
4206 /* 0xC8 - 0xCF */
4207 N, N, N, N, N, N, N, N,
4208 /* 0xD0 - 0xD7 */
4209 N, N, N, N, N, N, N, N,
4210 /* 0xD8 - 0xDF */
4211 N, N, N, N, N, N, N, N,
4212 /* 0xE0 - 0xE7 */
4213 N, N, N, N, N, N, N, N,
4214 /* 0xE8 - 0xEF */
4215 N, N, N, N, N, N, N, N,
4216 /* 0xF0 - 0xF7 */
4217 N, N, N, N, N, N, N, N,
4218 /* 0xF8 - 0xFF */
4219 N, N, N, N, N, N, N, N,
4220 } };
4221
4222 static const struct instr_dual instr_dual_0f_c3 = {
4223 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4224 };
4225
4226 static const struct mode_dual mode_dual_63 = {
4227 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4228 };
4229
4230 static const struct instr_dual instr_dual_8d = {
4231 D(DstReg | SrcMem | ModRM | NoAccess), N
4232 };
4233
4234 static const struct opcode opcode_table[256] = {
4235 /* 0x00 - 0x07 */
4236 I6ALU(Lock, em_add),
4237 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4238 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4239 /* 0x08 - 0x0F */
4240 I6ALU(Lock | PageTable, em_or),
4241 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4242 N,
4243 /* 0x10 - 0x17 */
4244 I6ALU(Lock, em_adc),
4245 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4246 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4247 /* 0x18 - 0x1F */
4248 I6ALU(Lock, em_sbb),
4249 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4250 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4251 /* 0x20 - 0x27 */
4252 I6ALU(Lock | PageTable, em_and), N, N,
4253 /* 0x28 - 0x2F */
4254 I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4255 /* 0x30 - 0x37 */
4256 I6ALU(Lock, em_xor), N, N,
4257 /* 0x38 - 0x3F */
4258 I6ALU(NoWrite, em_cmp), N, N,
4259 /* 0x40 - 0x4F */
4260 X8(I(DstReg, em_inc)), X8(I(DstReg, em_dec)),
4261 /* 0x50 - 0x57 */
4262 X8(I(SrcReg | Stack, em_push)),
4263 /* 0x58 - 0x5F */
4264 X8(I(DstReg | Stack, em_pop)),
4265 /* 0x60 - 0x67 */
4266 I(ImplicitOps | Stack | No64, em_pusha),
4267 I(ImplicitOps | Stack | No64, em_popa),
4268 N, MD(ModRM, &mode_dual_63),
4269 N, N, N, N,
4270 /* 0x68 - 0x6F */
4271 I(SrcImm | Mov | Stack, em_push),
4272 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4273 I(SrcImmByte | Mov | Stack, em_push),
4274 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4275 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4276 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4277 /* 0x70 - 0x7F */
4278 X16(D(SrcImmByte | NearBranch | IsBranch)),
4279 /* 0x80 - 0x87 */
4280 G(ByteOp | DstMem | SrcImm, group1),
4281 G(DstMem | SrcImm, group1),
4282 G(ByteOp | DstMem | SrcImm | No64, group1),
4283 G(DstMem | SrcImmByte, group1),
4284 I2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4285 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4286 /* 0x88 - 0x8F */
4287 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4288 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4289 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4290 ID(0, &instr_dual_8d),
4291 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4292 G(0, group1A),
4293 /* 0x90 - 0x97 */
4294 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4295 /* 0x98 - 0x9F */
4296 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4297 I(SrcImmFAddr | No64 | IsBranch | ShadowStack, em_call_far), N,
4298 II(ImplicitOps | Stack, em_pushf, pushf),
4299 II(ImplicitOps | Stack, em_popf, popf),
4300 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4301 /* 0xA0 - 0xA7 */
4302 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4303 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4304 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4305 I2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4306 /* 0xA8 - 0xAF */
4307 I2bv(DstAcc | SrcImm | NoWrite, em_test),
4308 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4309 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4310 I2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4311 /* 0xB0 - 0xB7 */
4312 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4313 /* 0xB8 - 0xBF */
4314 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4315 /* 0xC0 - 0xC7 */
4316 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4317 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch | ShadowStack, em_ret_near_imm),
4318 I(ImplicitOps | NearBranch | IsBranch | ShadowStack, em_ret),
4319 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4320 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4321 G(ByteOp, group11), G(0, group11),
4322 /* 0xC8 - 0xCF */
4323 I(Stack | SrcImmU16 | Src2ImmByte, em_enter),
4324 I(Stack, em_leave),
4325 I(ImplicitOps | SrcImmU16 | IsBranch | ShadowStack, em_ret_far_imm),
4326 I(ImplicitOps | IsBranch | ShadowStack, em_ret_far),
4327 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch | ShadowStack, intn),
4328 D(ImplicitOps | No64 | IsBranch),
4329 II(ImplicitOps | IsBranch | ShadowStack, em_iret, iret),
4330 /* 0xD0 - 0xD7 */
4331 G(Src2One | ByteOp, group2), G(Src2One, group2),
4332 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4333 I(DstAcc | SrcImmUByte | No64, em_aam),
4334 I(DstAcc | SrcImmUByte | No64, em_aad),
4335 I(DstAcc | ByteOp | No64, em_salc),
4336 I(DstAcc | SrcXLat | ByteOp, em_mov),
4337 /* 0xD8 - 0xDF */
4338 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4339 /* 0xE0 - 0xE7 */
4340 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4341 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4342 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4343 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4344 /* 0xE8 - 0xEF */
4345 I(SrcImm | NearBranch | IsBranch | ShadowStack, em_call),
4346 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4347 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4348 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4349 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4350 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4351 /* 0xF0 - 0xF7 */
4352 N, DI(ImplicitOps, icebp), N, N,
4353 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4354 G(ByteOp, group3), G(0, group3),
4355 /* 0xF8 - 0xFF */
4356 D(ImplicitOps), D(ImplicitOps),
4357 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4358 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4359 };
4360
4361 static const struct opcode twobyte_table[256] = {
4362 /* 0x00 - 0x0F */
4363 G(0, group6), GD(0, &group7), N, N,
4364 N, I(ImplicitOps | EmulateOnUD | IsBranch | ShadowStack, em_syscall),
4365 II(ImplicitOps | Priv, em_clts, clts), N,
4366 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4367 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4368 /* 0x10 - 0x1F */
4369 GP(ModRM | DstReg | SrcMem | Mov | Sse | Avx, &pfx_0f_10_0f_11),
4370 GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_10_0f_11),
4371 N, N, N, N, N, N,
4372 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4373 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4374 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4375 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4376 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4377 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4378 /* 0x20 - 0x2F */
4379 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4380 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4381 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4382 check_cr_access),
4383 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4384 check_dr_write),
4385 N, N, N, N,
4386 GP(ModRM | DstReg | SrcMem | Mov | Sse | Avx, &pfx_0f_28_0f_29),
4387 GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_28_0f_29),
4388 N, GP(ModRM | DstMem | SrcReg | Mov | Sse | Avx, &pfx_0f_2b),
4389 N, N, N, N,
4390 /* 0x30 - 0x3F */
4391 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4392 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4393 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4394 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4395 I(ImplicitOps | EmulateOnUD | IsBranch | ShadowStack, em_sysenter),
4396 I(ImplicitOps | Priv | EmulateOnUD | IsBranch | ShadowStack, em_sysexit),
4397 N, N,
4398 N, N, N, N, N, N, N, N,
4399 /* 0x40 - 0x4F */
4400 X16(D(DstReg | SrcMem | ModRM)),
4401 /* 0x50 - 0x5F */
4402 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4403 /* 0x60 - 0x6F */
4404 N, N, N, N,
4405 N, N, N, N,
4406 N, N, N, N,
4407 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4408 /* 0x70 - 0x7F */
4409 N, N, N, N,
4410 N, N, N, N,
4411 N, N, N, N,
4412 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4413 /* 0x80 - 0x8F */
4414 X16(D(SrcImm | NearBranch | IsBranch)),
4415 /* 0x90 - 0x9F */
4416 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4417 /* 0xA0 - 0xA7 */
4418 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4419 II(ImplicitOps, em_cpuid, cpuid),
4420 I(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4421 I(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4422 I(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4423 /* 0xA8 - 0xAF */
4424 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4425 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4426 I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4427 I(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4428 I(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4429 GD(0, &group15), I(DstReg | SrcMem | ModRM, em_imul),
4430 /* 0xB0 - 0xB7 */
4431 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4432 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4433 I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4434 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4435 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4436 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4437 /* 0xB8 - 0xBF */
4438 N, N,
4439 G(BitOp, group8),
4440 I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4441 I(DstReg | SrcMem | ModRM, em_bsf_c),
4442 I(DstReg | SrcMem | ModRM, em_bsr_c),
4443 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4444 /* 0xC0 - 0xC7 */
4445 I2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4446 N, ID(0, &instr_dual_0f_c3),
4447 N, N, N, GD(0, &group9),
4448 /* 0xC8 - 0xCF */
4449 X8(I(DstReg, em_bswap)),
4450 /* 0xD0 - 0xDF */
4451 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4452 /* 0xE0 - 0xEF */
4453 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7_0f_38_2a),
4454 N, N, N, N, N, N, N, N,
4455 /* 0xF0 - 0xFF */
4456 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4457 };
4458
4459 static const struct instr_dual instr_dual_0f_38_f0 = {
4460 I(DstReg | SrcMem | Mov, em_movbe), N
4461 };
4462
4463 static const struct instr_dual instr_dual_0f_38_f1 = {
4464 I(DstMem | SrcReg | Mov, em_movbe), N
4465 };
4466
4467 static const struct gprefix three_byte_0f_38_f0 = {
4468 ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
4469 };
4470
4471 static const struct gprefix three_byte_0f_38_f1 = {
4472 ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
4473 };
4474
4475 /*
4476 * Insns below are selected by the mandatory (SIMD) prefix; the table
4477 * is indexed by the third opcode byte.
4478 */
4479 static const struct opcode opcode_map_0f_38[256] = {
4480 /* 0x00 - 0x1f */
4481 X16(N), X16(N),
4482 /* 0x20 - 0x2f */
4483 X8(N),
4484 X2(N), GP(SrcMem | DstReg | ModRM | Mov | Aligned, &pfx_0f_e7_0f_38_2a), N, N, N, N, N,
4485 /* 0x30 - 0x7f */
4486 X16(N), X16(N), X16(N), X16(N), X16(N),
4487 /* 0x80 - 0xef */
4488 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4489 /* 0xf0 - 0xf1 */
4490 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4491 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4492 /* 0xf2 - 0xff */
4493 N, N, X4(N), X8(N)
4494 };
4495
4496 #undef D
4497 #undef N
4498 #undef G
4499 #undef GD
4500 #undef I
4501 #undef GP
4502 #undef EXT
4503 #undef MD
4504 #undef ID
4505
4506 #undef D2bv
4507 #undef D2bvIP
4508 #undef I2bv
4509 #undef I2bvIP
4510 #undef I6ALU
4511
4512 static bool is_shstk_instruction(struct x86_emulate_ctxt *ctxt)
4513 {
4514 return ctxt->d & ShadowStack;
4515 }
4516
4517 static bool is_ibt_instruction(struct x86_emulate_ctxt *ctxt)
4518 {
4519 u64 flags = ctxt->d;
4520
4521 if (!(flags & IsBranch))
4522 return false;
4523
4524 /*
4525 * All far JMPs and CALLs (including SYSCALL, SYSENTER, and INTn) are
4526 * indirect and thus affect IBT state. All far RETs (including SYSEXIT
4527 * and IRET) are protected via Shadow Stacks and thus don't affect IBT
4528 * state. IRET #GPs when returning to virtual-8086 and IBT or SHSTK is
4529 * enabled, but that should be handled by IRET emulation (in the very
4530 * unlikely scenario that KVM adds support for fully emulating IRET).
4531 */
4532 if (!(flags & NearBranch))
4533 return ctxt->execute != em_iret &&
4534 ctxt->execute != em_ret_far &&
4535 ctxt->execute != em_ret_far_imm &&
4536 ctxt->execute != em_sysexit;
4537
4538 switch (flags & SrcMask) {
4539 case SrcReg:
4540 case SrcMem:
4541 case SrcMem16:
4542 case SrcMem32:
4543 return true;
4544 case SrcMemFAddr:
4545 case SrcImmFAddr:
4546 /* Far branches should be handled above. */
4547 WARN_ON_ONCE(1);
4548 return true;
4549 case SrcNone:
4550 case SrcImm:
4551 case SrcImmByte:
4552 /*
4553 * Note, ImmU16 is used only for the stack adjustment operand on ENTER
4554 * and RET instructions. ENTER isn't a branch and RET FAR is handled
4555 * by the NearBranch check above. RET itself isn't an indirect branch.
4556 */
4557 case SrcImmU16:
4558 return false;
4559 default:
4560 WARN_ONCE(1, "Unexpected Src operand '%llx' on branch",
4561 flags & SrcMask);
4562 return false;
4563 }
4564 }
4565
4566 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4567 {
4568 unsigned size;
4569
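/*
 * Immediates are at most 4 bytes even with a 64-bit operand size (they
 * are sign-extended on use); only OpImm64 fetches a full 8 bytes.
 */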
4570 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4571 if (size == 8)
4572 size = 4;
4573 return size;
4574 }
4575
4576 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4577 unsigned size, bool sign_extension)
4578 {
4579 int rc = X86EMUL_CONTINUE;
4580
4581 op->type = OP_IMM;
4582 op->bytes = size;
4583 op->addr.mem.ea = ctxt->_eip;
4584 /* NB. Immediates are sign-extended as necessary. */
4585 switch (op->bytes) {
4586 case 1:
4587 op->val = insn_fetch(s8, ctxt);
4588 break;
4589 case 2:
4590 op->val = insn_fetch(s16, ctxt);
4591 break;
4592 case 4:
4593 op->val = insn_fetch(s32, ctxt);
4594 break;
4595 case 8:
4596 op->val = insn_fetch(s64, ctxt);
4597 break;
4598 }
4599 if (!sign_extension) {
4600 switch (op->bytes) {
4601 case 1:
4602 op->val &= 0xff;
4603 break;
4604 case 2:
4605 op->val &= 0xffff;
4606 break;
4607 case 4:
4608 op->val &= 0xffffffff;
4609 break;
4610 }
4611 }
4612 done:
4613 return rc;
4614 }
4615
4616 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4617 unsigned d)
4618 {
4619 int rc = X86EMUL_CONTINUE;
4620
4621 switch (d) {
4622 case OpReg:
4623 decode_register_operand(ctxt, op);
4624 break;
4625 case OpImmUByte:
4626 rc = decode_imm(ctxt, op, 1, false);
4627 break;
4628 case OpMem:
4629 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4630 mem_common:
4631 *op = ctxt->memop;
4632 ctxt->memopp = op;
4633 if (ctxt->d & BitOp)
4634 fetch_bit_operand(ctxt);
4635 op->orig_val = op->val;
4636 break;
4637 case OpMem64:
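/* 8 bytes for CMPXCHG8B, 16 bytes for CMPXCHG16B (REX.W). */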
4638 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4639 goto mem_common;
4640 case OpAcc:
4641 op->type = OP_REG;
4642 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4643 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4644 fetch_register_operand(op);
4645 break;
4646 case OpAccLo:
4647 op->type = OP_REG;
4648 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4649 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4650 fetch_register_operand(op);
4651 break;
4652 case OpAccHi:
4653 if (ctxt->d & ByteOp) {
4654 op->type = OP_NONE;
4655 break;
4656 }
4657 op->type = OP_REG;
4658 op->bytes = ctxt->op_bytes;
4659 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4660 fetch_register_operand(op);
4661 break;
4662 case OpDI:
4663 op->type = OP_MEM;
4664 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4665 op->addr.mem.ea =
4666 register_address(ctxt, VCPU_REGS_RDI);
4667 op->addr.mem.seg = VCPU_SREG_ES;
4668 op->val = 0;
4669 op->count = 1;
4670 break;
4671 case OpDX:
4672 op->type = OP_REG;
4673 op->bytes = 2;
4674 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4675 fetch_register_operand(op);
4676 break;
4677 case OpCL:
4678 op->type = OP_IMM;
4679 op->bytes = 1;
4680 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4681 break;
4682 case OpImmByte:
4683 rc = decode_imm(ctxt, op, 1, true);
4684 break;
4685 case OpOne:
4686 op->type = OP_IMM;
4687 op->bytes = 1;
4688 op->val = 1;
4689 break;
4690 case OpImm:
4691 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4692 break;
4693 case OpImm64:
4694 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4695 break;
4696 case OpMem8:
4697 ctxt->memop.bytes = 1;
4698 if (ctxt->memop.type == OP_REG) {
4699 ctxt->memop.addr.reg = decode_register(ctxt,
4700 ctxt->modrm_rm, true);
4701 fetch_register_operand(&ctxt->memop);
4702 }
4703 goto mem_common;
4704 case OpMem16:
4705 ctxt->memop.bytes = 2;
4706 goto mem_common;
4707 case OpMem32:
4708 ctxt->memop.bytes = 4;
4709 goto mem_common;
4710 case OpImmU16:
4711 rc = decode_imm(ctxt, op, 2, false);
4712 break;
4713 case OpImmU:
4714 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4715 break;
4716 case OpSI:
4717 op->type = OP_MEM;
4718 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4719 op->addr.mem.ea =
4720 register_address(ctxt, VCPU_REGS_RSI);
4721 op->addr.mem.seg = ctxt->seg_override;
4722 op->val = 0;
4723 op->count = 1;
4724 break;
4725 case OpXLat:
4726 op->type = OP_MEM;
4727 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4728 op->addr.mem.ea =
4729 address_mask(ctxt,
4730 reg_read(ctxt, VCPU_REGS_RBX) +
4731 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4732 op->addr.mem.seg = ctxt->seg_override;
4733 op->val = 0;
4734 break;
4735 case OpImmFAddr:
4736 op->type = OP_IMM;
4737 op->addr.mem.ea = ctxt->_eip;
4738 op->bytes = ctxt->op_bytes + 2;
4739 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4740 break;
4741 case OpMemFAddr:
4742 ctxt->memop.bytes = ctxt->op_bytes + 2;
4743 goto mem_common;
4744 case OpES:
4745 op->type = OP_IMM;
4746 op->val = VCPU_SREG_ES;
4747 break;
4748 case OpCS:
4749 op->type = OP_IMM;
4750 op->val = VCPU_SREG_CS;
4751 break;
4752 case OpSS:
4753 op->type = OP_IMM;
4754 op->val = VCPU_SREG_SS;
4755 break;
4756 case OpDS:
4757 op->type = OP_IMM;
4758 op->val = VCPU_SREG_DS;
4759 break;
4760 case OpFS:
4761 op->type = OP_IMM;
4762 op->val = VCPU_SREG_FS;
4763 break;
4764 case OpGS:
4765 op->type = OP_IMM;
4766 op->val = VCPU_SREG_GS;
4767 break;
4768 case OpImplicit:
4769 /* Special instructions do their own operand decoding. */
4770 default:
4771 op->type = OP_NONE; /* Disable writeback. */
4772 break;
4773 }
4774
4775 done:
4776 return rc;
4777 }
4778
4779 static int x86_decode_avx(struct x86_emulate_ctxt *ctxt,
4780 u8 vex_1st, u8 vex_2nd, struct opcode *opcode)
4781 {
4782 u8 vex_3rd, map, pp, l, v;
4783 int rc = X86EMUL_CONTINUE;
4784
4785 if (ctxt->rep_prefix || ctxt->op_prefix || ctxt->rex_prefix)
4786 goto ud;
4787
4788 if (vex_1st == 0xc5) {
4789 /* Expand RVVVVlpp to VEX3 format */
4790 vex_3rd = vex_2nd & ~0x80; /* VVVVlpp from VEX2, w=0 */
4791 vex_2nd = (vex_2nd & 0x80) | 0x61; /* R from VEX2, X=1 B=1 mmmmm=00001 */
4792 } else {
4793 vex_3rd = insn_fetch(u8, ctxt);
4794 }
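/*
 * E.g. the two-byte prefix "c5 f8" is equivalent to the three-byte form
 * "c4 e1 78" (map 1, W=0), which is why VEX2 can only reach the 0f
 * opcode map.
 */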
4795
4796 /* vex_2nd = RXBmmmmm, vex_3rd = wVVVVlpp. Fix polarity */
4797 vex_2nd ^= 0xE0; /* binary 11100000 */
4798 vex_3rd ^= 0x78; /* binary 01111000 */
4799
4800 ctxt->rex_prefix = REX_PREFIX;
4801 ctxt->rex_bits = (vex_2nd & 0xE0) >> 5; /* RXB */
4802 ctxt->rex_bits |= (vex_3rd & 0x80) >> 4; /* w */
4803 if (ctxt->rex_bits && ctxt->mode != X86EMUL_MODE_PROT64)
4804 goto ud;
4805
4806 map = vex_2nd & 0x1f;
4807 v = (vex_3rd >> 3) & 0xf;
4808 l = vex_3rd & 0x4;
4809 pp = vex_3rd & 0x3;
4810
4811 ctxt->b = insn_fetch(u8, ctxt);
4812 switch (map) {
4813 case 1:
4814 ctxt->opcode_len = 2;
4815 *opcode = twobyte_table[ctxt->b];
4816 break;
4817 case 2:
4818 ctxt->opcode_len = 3;
4819 *opcode = opcode_map_0f_38[ctxt->b];
4820 break;
4821 case 3:
4822 /* no 0f 3a instructions are supported yet */
4823 return X86EMUL_UNHANDLEABLE;
4824 default:
4825 goto ud;
4826 }
4827
4828 /*
4829 * No three-operand instructions are supported yet; those that
4830 * *are* marked with the Avx flag treat the VVVV field as reserved.
4831 */
4832 if (v)
4833 goto ud;
4834
4835 if (l)
4836 ctxt->op_bytes = 32;
4837 else
4838 ctxt->op_bytes = 16;
4839
4840 switch (pp) {
4841 case 0: break;
4842 case 1: ctxt->op_prefix = true; break;
4843 case 2: ctxt->rep_prefix = 0xf3; break;
4844 case 3: ctxt->rep_prefix = 0xf2; break;
4845 }
4846
4847 done:
4848 return rc;
4849 ud:
4850 *opcode = ud;
4851 return rc;
4852 }
4853
4854 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4855 {
4856 int rc = X86EMUL_CONTINUE;
4857 int mode = ctxt->mode;
4858 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4859 bool vex_prefix = false;
4860 bool has_seg_override = false;
4861 struct opcode opcode;
4862 u16 dummy;
4863 struct desc_struct desc;
4864
4865 ctxt->memop.type = OP_NONE;
4866 ctxt->memopp = NULL;
4867 ctxt->_eip = ctxt->eip;
4868 ctxt->fetch.ptr = ctxt->fetch.data;
4869 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4870 ctxt->opcode_len = 1;
4871 ctxt->intercept = x86_intercept_none;
4872 if (insn_len > 0)
4873 memcpy(ctxt->fetch.data, insn, insn_len);
4874 else {
4875 rc = __do_insn_fetch_bytes(ctxt, 1);
4876 if (rc != X86EMUL_CONTINUE)
4877 goto done;
4878 }
4879
4880 switch (mode) {
4881 case X86EMUL_MODE_REAL:
4882 case X86EMUL_MODE_VM86:
4883 def_op_bytes = def_ad_bytes = 2;
4884 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4885 if (desc.d)
4886 def_op_bytes = def_ad_bytes = 4;
4887 break;
4888 case X86EMUL_MODE_PROT16:
4889 def_op_bytes = def_ad_bytes = 2;
4890 break;
4891 case X86EMUL_MODE_PROT32:
4892 def_op_bytes = def_ad_bytes = 4;
4893 break;
4894 #ifdef CONFIG_X86_64
4895 case X86EMUL_MODE_PROT64:
4896 def_op_bytes = 4;
4897 def_ad_bytes = 8;
4898 break;
4899 #endif
4900 default:
4901 return EMULATION_FAILED;
4902 }
4903
4904 ctxt->op_bytes = def_op_bytes;
4905 ctxt->ad_bytes = def_ad_bytes;
4906
4907 /* Legacy prefixes. */
4908 for (;;) {
4909 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4910 case 0x66: /* operand-size override */
4911 ctxt->op_prefix = true;
4912 /* switch between 2/4 bytes */
4913 ctxt->op_bytes = def_op_bytes ^ 6;
4914 break;
4915 case 0x67: /* address-size override */
4916 if (mode == X86EMUL_MODE_PROT64)
4917 /* switch between 4/8 bytes */
4918 ctxt->ad_bytes = def_ad_bytes ^ 12;
4919 else
4920 /* switch between 2/4 bytes */
4921 ctxt->ad_bytes = def_ad_bytes ^ 6;
4922 break;
4923 case 0x26: /* ES override */
4924 has_seg_override = true;
4925 ctxt->seg_override = VCPU_SREG_ES;
4926 break;
4927 case 0x2e: /* CS override */
4928 has_seg_override = true;
4929 ctxt->seg_override = VCPU_SREG_CS;
4930 break;
4931 case 0x36: /* SS override */
4932 has_seg_override = true;
4933 ctxt->seg_override = VCPU_SREG_SS;
4934 break;
4935 case 0x3e: /* DS override */
4936 has_seg_override = true;
4937 ctxt->seg_override = VCPU_SREG_DS;
4938 break;
4939 case 0x64: /* FS override */
4940 has_seg_override = true;
4941 ctxt->seg_override = VCPU_SREG_FS;
4942 break;
4943 case 0x65: /* GS override */
4944 has_seg_override = true;
4945 ctxt->seg_override = VCPU_SREG_GS;
4946 break;
4947 case 0x40 ... 0x4f: /* REX */
4948 if (mode != X86EMUL_MODE_PROT64)
4949 goto done_prefixes;
4950 ctxt->rex_prefix = REX_PREFIX;
4951 ctxt->rex_bits = ctxt->b & 0xf;
4952 continue;
4953 case 0xf0: /* LOCK */
4954 ctxt->lock_prefix = 1;
4955 break;
4956 case 0xf2: /* REPNE/REPNZ */
4957 case 0xf3: /* REP/REPE/REPZ */
4958 ctxt->rep_prefix = ctxt->b;
4959 break;
4960 default:
4961 goto done_prefixes;
4962 }
4963
4964 /* Any legacy prefix after a REX prefix nullifies its effect. */
4965 ctxt->rex_prefix = REX_NONE;
4966 ctxt->rex_bits = 0;
4967 }
4968
4969 done_prefixes:
4970
4971 /* REX prefix. */
4972 if (ctxt->rex_bits & REX_W)
4973 ctxt->op_bytes = 8;
4974
4975 /* Opcode byte(s). */
4976 if (ctxt->b == 0xc4 || ctxt->b == 0xc5) {
4977 /* VEX or LDS/LES */
4978 u8 vex_2nd = insn_fetch(u8, ctxt);
4979 if (mode != X86EMUL_MODE_PROT64 && (vex_2nd & 0xc0) != 0xc0) {
4980 opcode = opcode_table[ctxt->b];
4981 ctxt->modrm = vex_2nd;
4982 /* the Mod/RM byte has been fetched already! */
4983 goto done_modrm;
4984 }
4985
4986 vex_prefix = true;
4987 rc = x86_decode_avx(ctxt, ctxt->b, vex_2nd, &opcode);
4988 if (rc != X86EMUL_CONTINUE)
4989 goto done;
4990 } else if (ctxt->b == 0x0f) {
4991 /* Two- or three-byte opcode */
4992 ctxt->opcode_len = 2;
4993 ctxt->b = insn_fetch(u8, ctxt);
4994 opcode = twobyte_table[ctxt->b];
4995
4996 /* 0F_38 opcode map */
4997 if (ctxt->b == 0x38) {
4998 ctxt->opcode_len = 3;
4999 ctxt->b = insn_fetch(u8, ctxt);
5000 opcode = opcode_map_0f_38[ctxt->b];
5001 }
5002 } else {
5003 /* Opcode byte(s). */
5004 opcode = opcode_table[ctxt->b];
5005 }
5006
5007 if (opcode.flags & ModRM)
5008 ctxt->modrm = insn_fetch(u8, ctxt);
5009
5010 done_modrm:
5011 ctxt->d = opcode.flags;
5012 while (ctxt->d & GroupMask) {
5013 switch (ctxt->d & GroupMask) {
5014 case Group:
5015 goffset = (ctxt->modrm >> 3) & 7;
5016 opcode = opcode.u.group[goffset];
5017 break;
5018 case GroupDual:
5019 goffset = (ctxt->modrm >> 3) & 7;
5020 if ((ctxt->modrm >> 6) == 3)
5021 opcode = opcode.u.gdual->mod3[goffset];
5022 else
5023 opcode = opcode.u.gdual->mod012[goffset];
5024 break;
5025 case RMExt:
5026 goffset = ctxt->modrm & 7;
5027 opcode = opcode.u.group[goffset];
5028 break;
5029 case Prefix:
5030 if (ctxt->rep_prefix && ctxt->op_prefix)
5031 return EMULATION_FAILED;
5032 simd_prefix = ctxt->op_prefix ? 0x66 : ctxt->rep_prefix;
5033 switch (simd_prefix) {
5034 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5035 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5036 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5037 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5038 }
5039 break;
5040 case Escape:
5041 if (ctxt->modrm > 0xbf) {
5042 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5043 u32 index = array_index_nospec(
5044 ctxt->modrm - 0xc0, size);
5045
5046 opcode = opcode.u.esc->high[index];
5047 } else {
5048 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5049 }
5050 break;
5051 case InstrDual:
5052 if ((ctxt->modrm >> 6) == 3)
5053 opcode = opcode.u.idual->mod3;
5054 else
5055 opcode = opcode.u.idual->mod012;
5056 break;
5057 case ModeDual:
5058 if (ctxt->mode == X86EMUL_MODE_PROT64)
5059 opcode = opcode.u.mdual->mode64;
5060 else
5061 opcode = opcode.u.mdual->mode32;
5062 break;
5063 default:
5064 return EMULATION_FAILED;
5065 }
5066
5067 ctxt->d &= ~(u64)GroupMask;
5068 ctxt->d |= opcode.flags;
5069 }
5070
5071 ctxt->is_branch = opcode.flags & IsBranch;
5072
5073 /* Unrecognised? */
5074 if (ctxt->d == 0)
5075 return EMULATION_FAILED;
5076
5077 if (unlikely(vex_prefix)) {
5078 /*
5079 * Only instructions specifically marked Avx support VEX. Many
5080 * others accept VEX on real hardware but are not annotated here,
5081 * so fail emulation rather than incorrectly injecting #UD.
5082 */
5083 if (!(ctxt->d & Avx))
5084 return EMULATION_FAILED;
5085
5086 if (!(ctxt->d & AlignMask))
5087 ctxt->d |= Unaligned;
5088 }
5089
5090 ctxt->execute = opcode.u.execute;
5091
5092 /*
5093 * Reject emulation if KVM might need to emulate shadow stack updates
5094 * and/or indirect branch tracking enforcement, which the emulator
5095 * doesn't support.
5096 */
5097 if ((is_ibt_instruction(ctxt) || is_shstk_instruction(ctxt)) &&
5098 ctxt->ops->get_cr(ctxt, 4) & X86_CR4_CET) {
5099 u64 u_cet = 0, s_cet = 0;
5100
5101 /*
5102 * Check both User and Supervisor on far transfers as inter-
5103 * privilege level transfers are impacted by CET at the target
5104 * privilege level, and that is not known at this time. The
5105 * expectation is that the guest will not require emulation of
5106 * any CET-affected instructions at any privilege level.
5107 */
5108 if (!(ctxt->d & NearBranch))
5109 u_cet = s_cet = CET_SHSTK_EN | CET_ENDBR_EN;
5110 else if (ctxt->ops->cpl(ctxt) == 3)
5111 u_cet = CET_SHSTK_EN | CET_ENDBR_EN;
5112 else
5113 s_cet = CET_SHSTK_EN | CET_ENDBR_EN;
5114
5115 if ((u_cet && ctxt->ops->get_msr(ctxt, MSR_IA32_U_CET, &u_cet)) ||
5116 (s_cet && ctxt->ops->get_msr(ctxt, MSR_IA32_S_CET, &s_cet)))
5117 return EMULATION_FAILED;
5118
5119 if ((u_cet | s_cet) & CET_SHSTK_EN && is_shstk_instruction(ctxt))
5120 return EMULATION_FAILED;
5121
5122 if ((u_cet | s_cet) & CET_ENDBR_EN && is_ibt_instruction(ctxt))
5123 return EMULATION_FAILED;
5124 }
5125
5126 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
5127 likely(!(ctxt->d & EmulateOnUD)))
5128 return EMULATION_FAILED;
5129
5130 if (unlikely(ctxt->d &
5131 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5132 No16))) {
5133 /*
5134 * These are copied unconditionally here, and checked unconditionally
5135 * in x86_emulate_insn.
5136 */
5137 ctxt->check_perm = opcode.check_perm;
5138 ctxt->intercept = opcode.intercept;
5139
5140 if (ctxt->d & NotImpl)
5141 return EMULATION_FAILED;
5142
5143 if (mode == X86EMUL_MODE_PROT64) {
5144 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5145 ctxt->op_bytes = 8;
5146 else if (ctxt->d & NearBranch)
5147 ctxt->op_bytes = 8;
5148 }
5149
5150 if (ctxt->d & Op3264) {
5151 if (mode == X86EMUL_MODE_PROT64)
5152 ctxt->op_bytes = 8;
5153 else
5154 ctxt->op_bytes = 4;
5155 }
5156
5157 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5158 ctxt->op_bytes = 4;
5159
5160 if (vex_prefix)
5161 ;
5162 else if (ctxt->d & Sse)
5163 ctxt->op_bytes = 16, ctxt->d &= ~Avx;
5164 else if (ctxt->d & Mmx)
5165 ctxt->op_bytes = 8;
5166 }
5167
5168 /* ModRM and SIB bytes. */
5169 if (ctxt->d & ModRM) {
5170 rc = decode_modrm(ctxt, &ctxt->memop);
5171 if (!has_seg_override) {
5172 has_seg_override = true;
5173 ctxt->seg_override = ctxt->modrm_seg;
5174 }
5175 } else if (ctxt->d & MemAbs)
5176 rc = decode_abs(ctxt, &ctxt->memop);
5177 if (rc != X86EMUL_CONTINUE)
5178 goto done;
5179
5180 if (!has_seg_override)
5181 ctxt->seg_override = VCPU_SREG_DS;
5182
5183 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5184
5185 /*
5186 * Decode and fetch the source operand: register, memory
5187 * or immediate.
5188 */
5189 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5190 if (rc != X86EMUL_CONTINUE)
5191 goto done;
5192
5193 /*
5194 * Decode and fetch the second source operand: register, memory
5195 * or immediate.
5196 */
5197 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5198 if (rc != X86EMUL_CONTINUE)
5199 goto done;
5200
5201 /* Decode and fetch the destination operand: register or memory. */
5202 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5203
5204 if (ctxt->rip_relative && likely(ctxt->memopp))
5205 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5206 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5207
5208 done:
5209 if (rc == X86EMUL_PROPAGATE_FAULT)
5210 ctxt->have_exception = true;
5211 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5212 }
5213
5214 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5215 {
5216 return ctxt->d & PageTable;
5217 }
5218
5219 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5220 {
5221 /* The second termination condition applies only to REPE
5222 * and REPNE. If the repeat string operation prefix is
5223 * REPE/REPZ or REPNE/REPNZ, test the corresponding
5224 * termination condition:
5225 * - if REPE/REPZ and ZF = 0 then done
5226 * - if REPNE/REPNZ and ZF = 1 then done
5227 */
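/*
 * 0xa6/0xa7 are CMPS and 0xae/0xaf are SCAS: the only string insns that
 * set ZF and thus honor the REPE/REPNE condition.
 */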
5228 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5229 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5230 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5231 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5232 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5233 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5234 return true;
5235
5236 return false;
5237 }
5238
5239 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5240 {
5241 int rc;
5242
5243 kvm_fpu_get();
5244 rc = asm_safe("fwait");
5245 kvm_fpu_put();
5246
5247 if (unlikely(rc != X86EMUL_CONTINUE))
5248 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5249
5250 return X86EMUL_CONTINUE;
5251 }
5252
5253 static void fetch_possible_mmx_operand(struct operand *op)
5254 {
5255 if (op->type == OP_MM)
5256 kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5257 }
5258
5259 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5260 {
5261 /* Clear fields that are set conditionally but read without a guard. */
5262 ctxt->rip_relative = false;
5263 ctxt->rex_prefix = REX_NONE;
5264 ctxt->rex_bits = 0;
5265 ctxt->lock_prefix = 0;
5266 ctxt->op_prefix = false;
5267 ctxt->rep_prefix = 0;
5268 ctxt->regs_valid = 0;
5269 ctxt->regs_dirty = 0;
5270
5271 ctxt->io_read.pos = 0;
5272 ctxt->io_read.end = 0;
5273 ctxt->mem_read.end = 0;
5274 }
5275
5276 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts)
5277 {
5278 const struct x86_emulate_ops *ops = ctxt->ops;
5279 int rc = X86EMUL_CONTINUE;
5280 int saved_dst_type = ctxt->dst.type;
5281
5282 ctxt->mem_read.pos = 0;
5283
5284 /* LOCK prefix is allowed only with some instructions */
5285 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5286 rc = emulate_ud(ctxt);
5287 goto done;
5288 }
5289
5290 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5291 rc = emulate_ud(ctxt);
5292 goto done;
5293 }
5294
5295 if (unlikely(ctxt->d &
5296 (No64|Undefined|Avx|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5297 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5298 (ctxt->d & Undefined)) {
5299 rc = emulate_ud(ctxt);
5300 goto done;
5301 }
5302
5303 if ((ctxt->d & (Avx|Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) {
5304 rc = emulate_ud(ctxt);
5305 goto done;
5306 }
5307
5308 if (ctxt->d & Avx) {
5309 u64 xcr = 0;
5310 if (!(ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE)
5311 || ops->get_xcr(ctxt, 0, &xcr)
5312 || !(xcr & XFEATURE_MASK_YMM)) {
5313 rc = emulate_ud(ctxt);
5314 goto done;
5315 }
5316 } else if (ctxt->d & Sse) {
5317 if (!(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)) {
5318 rc = emulate_ud(ctxt);
5319 goto done;
5320 }
5321 }
5322
5323 if ((ctxt->d & (Avx|Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5324 rc = emulate_nm(ctxt);
5325 goto done;
5326 }
5327
5328 if (ctxt->d & Mmx) {
5329 rc = flush_pending_x87_faults(ctxt);
5330 if (rc != X86EMUL_CONTINUE)
5331 goto done;
5332 /*
5333 * Now that we know the fpu is exception safe, we can fetch
5334 * operands from it.
5335 */
5336 fetch_possible_mmx_operand(&ctxt->src);
5337 fetch_possible_mmx_operand(&ctxt->src2);
5338 if (!(ctxt->d & Mov))
5339 fetch_possible_mmx_operand(&ctxt->dst);
5340 }
5341
5342 if (unlikely(check_intercepts) && ctxt->intercept) {
5343 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5344 X86_ICPT_PRE_EXCEPT);
5345 if (rc != X86EMUL_CONTINUE)
5346 goto done;
5347 }
5348
5349 /* Instruction can only be executed in protected mode */
5350 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5351 rc = emulate_ud(ctxt);
5352 goto done;
5353 }
5354
5355 /* Privileged instruction can be executed only in CPL=0 */
5356 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5357 if (ctxt->d & PrivUD)
5358 rc = emulate_ud(ctxt);
5359 else
5360 rc = emulate_gp(ctxt, 0);
5361 goto done;
5362 }
5363
5364 /* Do instruction specific permission checks */
5365 if (ctxt->d & CheckPerm) {
5366 rc = ctxt->check_perm(ctxt);
5367 if (rc != X86EMUL_CONTINUE)
5368 goto done;
5369 }
5370
5371 if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
5372 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5373 X86_ICPT_POST_EXCEPT);
5374 if (rc != X86EMUL_CONTINUE)
5375 goto done;
5376 }
5377
5378 if (ctxt->rep_prefix && (ctxt->d & String)) {
5379 /* All REP prefixes have the same first termination condition */
5380 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5381 string_registers_quirk(ctxt);
5382 ctxt->eip = ctxt->_eip;
5383 ctxt->eflags &= ~X86_EFLAGS_RF;
5384 goto done;
5385 }
5386 }
5387 }
5388
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

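	/*
	 * Keep RF set while a REP string instruction is still iterating so
	 * that an instruction breakpoint isn't re-triggered when the
	 * iteration is restarted; it is cleared again once the instruction
	 * completes.
	 */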
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
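	/*
	 * cbw/cwde/cdqe sign-extend the lower half of the accumulator into
	 * the full operand size; e.g. with op_bytes == 2 (cbw), AL = 0x80
	 * becomes AX = 0xff80.
	 */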
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

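	/*
	 * Writeback: a writable source operand (SrcWrite) is written first
	 * and is asserted never to be a memory operand; the destination
	 * follows unless the instruction is marked NoWrite.
	 */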
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

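	/*
	 * For REP string instructions, RCX is decremented by the number of
	 * iterations actually performed in this pass (string I/O may batch
	 * more than one), and emulation restarts until the instruction
	 * completes.
	 */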
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;

		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

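	/*
	 * Common exit: map the internal X86EMUL_* return code onto an
	 * EMULATION_* result for the caller, committing cached registers
	 * on success.
	 */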
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
			return EMULATION_FAILED;
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

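	/*
	 * Two-byte (0x0f-prefixed) opcodes without a dedicated ->execute
	 * handler are dispatched here.
	 */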
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
		break;
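	/*
	 * For cmov, a 32-bit destination is written back even when the
	 * condition is false: in 64-bit mode a 32-bit register write
	 * always zero-extends into the full 64-bit register.
	 */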
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

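	/*
	 * Three-byte opcodes are table-driven and appear to always come
	 * with an ->execute callback, so this label only funnels into the
	 * common writeback path.
	 */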
threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

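/*
 * A cached guest physical address is only reusable if the instruction
 * accesses a single, fixed memory location: REP string instructions
 * advance their address each iteration, and TwoMemOp instructions touch
 * two different locations.
 */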
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}