/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
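/*
 * The operand types above are packed into the 56-bit flags word of
 * struct opcode in 5-bit fields (see DstShift, SrcShift and Src2Shift
 * below).  Illustrative decode of a flags word built as DstReg | SrcMem:
 *
 *	(flags >> DstShift) & OpMask == OpReg	destination is a register
 *	(flags >> SrcShift) & OpMask == OpMem	source is a memory operand
 */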

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2Shift   (29)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
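/*
 * The X* macros simply repeat their argument list and are used to build
 * the opcode tables compactly; e.g. X4(0) expands to "0, 0, 0, 0" and
 * X16(x) to sixteen copies of x.
 */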

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" ((ctxt)->eflags),			\
			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
			  "=&r" (_tmp)					\
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
	} while (0)


/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
									\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
			break;						\
		case 4:							\
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
			break;						\
		case 8:							\
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)		     \
	do {								     \
		unsigned long _tmp;					     \
		switch ((ctxt)->dst.bytes) {				     \
		case 1:							     \
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	     \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(ctxt, _op,			     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op)				\
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
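/*
 * Illustrative expansion: emulate_2op_SrcV(ctxt, "add") with 4-byte
 * operands becomes roughly
 *
 *	asm(<restore guest flags> "addl %k3,%1; " <collect result flags>
 *	    : "=m" ((ctxt)->eflags), "+q" (*(u32 *)&(ctxt)->dst.val),
 *	      "=&r" (_tmp)
 *	    : "r" ((ctxt)->src.val), "i" (EFLAGS_MASK));
 *
 * i.e. the guest's arithmetic flags are obtained by executing the same
 * instruction on the host and capturing the host EFLAGS.
 */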

/* Instruction has three operands and one operand is stored in the ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)		\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (ctxt)->src2.val;				\
		_type _srcv = (ctxt)->src.val;				\
		_type _dstv = (ctxt)->dst.val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv), "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(ctxt)->src2.val = (unsigned long) _clv;		\
		(ctxt)->src.val = (unsigned long) _srcv;		\
		(ctxt)->dst.val = (unsigned long) _dstv;		\
	} while (0)

#define emulate_2op_cl(ctxt, _op)					\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			__emulate_2op_cl(ctxt, _op, "w", u16);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(ctxt, _op, "l", u32);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
			break;						\
		}							\
	} while (0)
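/*
 * emulate_2op_cl() serves the double-shift instructions, whose third
 * operand lives in CL.  A handler can then be as small as this sketch
 * (assuming the em_*() handler convention used throughout this file):
 *
 *	static int em_shld(struct x86_emulate_ctxt *ctxt)
 *	{
 *		emulate_2op_cl(ctxt, "shld");
 *		return X86EMUL_CONTINUE;
 *	}
 */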

#define __emulate_1op(ctxt, _op, _suffix)				\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)						\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
	do {								\
		unsigned long _tmp;					\
		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];		\
		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];		\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
			  "a" (*rax), "d" (*rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(ctxt, _op, _ex)	\
	do {								\
		switch ((ctxt)->src.bytes) {				\
		case 1:							\
			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex));	\
			break;						\
		}							\
	} while (0)

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
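/*
 * ad_mask() yields an all-ones mask as wide as the effective address
 * size: ad_bytes == 2 gives 0xffff and ad_bytes == 4 gives 0xffffffff.
 * The callers below short-circuit when ad_bytes equals
 * sizeof(unsigned long), so the undefined 1UL << 64 shift cannot occur.
 */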

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
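/*
 * Example: with the granularity bit set, a 20-bit limit of 0xfffff
 * scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4GB segment;
 * with g == 0 the byte-granular limit is returned unchanged.
 */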

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}
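/*
 * Illustration of the fetch cache: with an empty cache, the first byte
 * fetched for an instruction pulls in up to min(15, bytes to the end of
 * the page) bytes in one ctxt->ops->fetch() call, so the remaining
 * bytes of that instruction are served from fc->data without further
 * callbacks.
 */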

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
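/*
 * Example: modrm_reg == 5 normally selects RBP/EBP/BP, but with
 * highbyte_regs set (a byte operation without a REX prefix) it selects
 * CH instead, i.e. byte 1 of the RCX register slot, matching the legacy
 * AH/CH/DH/BH encoding.
 */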

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
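/*
 * Example: the jne/jnz condition code is 5; (5 & 15) >> 1 == 2 selects
 * the ZF test above, and the set low bit inverts the sense, so
 * test_cc(5, flags) is true exactly when ZF is clear.
 */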

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    int inhibit_bytereg)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
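/*
 * 16-bit decode example: modrm == 0x42 has mod == 1, reg == 0 and
 * rm == 2, so the effective address is bp + si + disp8, and because
 * BP-based addressing defaults to the stack segment, modrm_seg is set
 * to VCPU_SREG_SS above.
 */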

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
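/*
 * Example: a 16-bit "bt" with a register bit offset of 20 against a
 * memory operand has mask == ~15 and sv == 16, so the effective address
 * is advanced by sv >> 3 == 2 bytes and the remaining in-word offset
 * left in src.val is 20 & 15 == 4.
 */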

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load a system descriptor into a segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ctxt->ops->cpl(ctxt);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : ctxt->regs[reg];

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
							ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
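/*
 * The real-mode IVT layout assumed above: each vector occupies four
 * bytes at address irq * 4, with IP in the low word and CS in the high
 * word, hence eip_addr == irq << 2 and cs_addr == (irq << 2) + 2
 * relative to the IDT base.
 */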

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented */
1657 		return X86EMUL_UNHANDLEABLE;
1658 	}
1659 }
1660 
1661 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1662 {
1663 	int rc;
1664 	unsigned short sel;
1665 
1666 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1667 
1668 	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1669 	if (rc != X86EMUL_CONTINUE)
1670 		return rc;
1671 
1672 	ctxt->_eip = 0;
1673 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1674 	return X86EMUL_CONTINUE;
1675 }
1676 
1677 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1678 {
1679 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1680 }
1681 
1682 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1683 {
1684 	switch (ctxt->modrm_reg) {
1685 	case 0:	/* rol */
1686 		emulate_2op_SrcB(ctxt, "rol");
1687 		break;
1688 	case 1:	/* ror */
1689 		emulate_2op_SrcB(ctxt, "ror");
1690 		break;
1691 	case 2:	/* rcl */
1692 		emulate_2op_SrcB(ctxt, "rcl");
1693 		break;
1694 	case 3:	/* rcr */
1695 		emulate_2op_SrcB(ctxt, "rcr");
1696 		break;
1697 	case 4:	/* sal/shl */
1698 	case 6:	/* sal/shl */
1699 		emulate_2op_SrcB(ctxt, "sal");
1700 		break;
1701 	case 5:	/* shr */
1702 		emulate_2op_SrcB(ctxt, "shr");
1703 		break;
1704 	case 7:	/* sar */
1705 		emulate_2op_SrcB(ctxt, "sar");
1706 		break;
1707 	}
1708 	return X86EMUL_CONTINUE;
1709 }
1710 
1711 static int em_not(struct x86_emulate_ctxt *ctxt)
1712 {
1713 	ctxt->dst.val = ~ctxt->dst.val;
1714 	return X86EMUL_CONTINUE;
1715 }
1716 
1717 static int em_neg(struct x86_emulate_ctxt *ctxt)
1718 {
1719 	emulate_1op(ctxt, "neg");
1720 	return X86EMUL_CONTINUE;
1721 }
1722 
1723 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1724 {
1725 	u8 ex = 0;
1726 
1727 	emulate_1op_rax_rdx(ctxt, "mul", ex);
1728 	return X86EMUL_CONTINUE;
1729 }
1730 
1731 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1732 {
1733 	u8 ex = 0;
1734 
1735 	emulate_1op_rax_rdx(ctxt, "imul", ex);
1736 	return X86EMUL_CONTINUE;
1737 }
1738 
1739 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1740 {
1741 	u8 de = 0;
1742 
1743 	emulate_1op_rax_rdx(ctxt, "div", de);
1744 	if (de)
1745 		return emulate_de(ctxt);
1746 	return X86EMUL_CONTINUE;
1747 }
1748 
1749 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1750 {
1751 	u8 de = 0;
1752 
1753 	emulate_1op_rax_rdx(ctxt, "idiv", de);
1754 	if (de)
1755 		return emulate_de(ctxt);
1756 	return X86EMUL_CONTINUE;
1757 }
1758 
1759 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1760 {
1761 	int rc = X86EMUL_CONTINUE;
1762 
1763 	switch (ctxt->modrm_reg) {
1764 	case 0:	/* inc */
1765 		emulate_1op(ctxt, "inc");
1766 		break;
1767 	case 1:	/* dec */
1768 		emulate_1op(ctxt, "dec");
1769 		break;
1770 	case 2: /* call near abs */ {
1771 		long int old_eip;
1772 		old_eip = ctxt->_eip;
1773 		ctxt->_eip = ctxt->src.val;
1774 		ctxt->src.val = old_eip;
1775 		rc = em_push(ctxt);
1776 		break;
1777 	}
1778 	case 4: /* jmp abs */
1779 		ctxt->_eip = ctxt->src.val;
1780 		break;
1781 	case 5: /* jmp far */
1782 		rc = em_jmp_far(ctxt);
1783 		break;
1784 	case 6:	/* push */
1785 		rc = em_push(ctxt);
1786 		break;
1787 	}
1788 	return rc;
1789 }
1790 
1791 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1792 {
1793 	u64 old = ctxt->dst.orig_val64;
1794 
1795 	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1796 	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1797 		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1798 		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1799 		ctxt->eflags &= ~EFLG_ZF;
1800 	} else {
1801 		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1802 			(u32) ctxt->regs[VCPU_REGS_RBX];
1803 
1804 		ctxt->eflags |= EFLG_ZF;
1805 	}
1806 	return X86EMUL_CONTINUE;
1807 }
1808 
1809 static int em_ret(struct x86_emulate_ctxt *ctxt)
1810 {
1811 	ctxt->dst.type = OP_REG;
1812 	ctxt->dst.addr.reg = &ctxt->_eip;
1813 	ctxt->dst.bytes = ctxt->op_bytes;
1814 	return em_pop(ctxt);
1815 }
1816 
1817 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1818 {
1819 	int rc;
1820 	unsigned long cs;
1821 
1822 	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1823 	if (rc != X86EMUL_CONTINUE)
1824 		return rc;
1825 	if (ctxt->op_bytes == 4)
1826 		ctxt->_eip = (u32)ctxt->_eip;
1827 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1828 	if (rc != X86EMUL_CONTINUE)
1829 		return rc;
1830 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1831 	return rc;
1832 }
1833 
1834 static int em_lseg(struct x86_emulate_ctxt *ctxt)
1835 {
1836 	int seg = ctxt->src2.val;
1837 	unsigned short sel;
1838 	int rc;
1839 
1840 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1841 
1842 	rc = load_segment_descriptor(ctxt, sel, seg);
1843 	if (rc != X86EMUL_CONTINUE)
1844 		return rc;
1845 
1846 	ctxt->dst.val = ctxt->src.val;
1847 	return rc;
1848 }
1849 
1850 static void
1851 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1852 			struct desc_struct *cs, struct desc_struct *ss)
1853 {
1854 	u16 selector;
1855 
1856 	memset(cs, 0, sizeof(struct desc_struct));
1857 	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1858 	memset(ss, 0, sizeof(struct desc_struct));
1859 
1860 	cs->l = 0;		/* will be adjusted later */
1861 	set_desc_base(cs, 0);	/* flat segment */
1862 	cs->g = 1;		/* 4kb granularity */
1863 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
1864 	cs->type = 0x0b;	/* Read, Execute, Accessed */
1865 	cs->s = 1;
1866 	cs->dpl = 0;		/* will be adjusted later */
1867 	cs->p = 1;
1868 	cs->d = 1;
1869 
1870 	set_desc_base(ss, 0);	/* flat segment */
1871 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
1872 	ss->g = 1;		/* 4kb granularity */
1873 	ss->s = 1;
1874 	ss->type = 0x03;	/* Read/Write, Accessed */
1875 	ss->d = 1;		/* 32bit stack segment */
1876 	ss->dpl = 0;
1877 	ss->p = 1;
1878 }
1879 
1880 static int em_syscall(struct x86_emulate_ctxt *ctxt)
1881 {
1882 	struct x86_emulate_ops *ops = ctxt->ops;
1883 	struct desc_struct cs, ss;
1884 	u64 msr_data;
1885 	u16 cs_sel, ss_sel;
1886 	u64 efer = 0;
1887 
1888 	/* syscall is not available in real mode or vm86 mode */
1889 	if (ctxt->mode == X86EMUL_MODE_REAL ||
1890 	    ctxt->mode == X86EMUL_MODE_VM86)
1891 		return emulate_ud(ctxt);
1892 
1893 	ops->get_msr(ctxt, MSR_EFER, &efer);
1894 	setup_syscalls_segments(ctxt, &cs, &ss);
1895 
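	/*
	 * MSR_STAR bits 47:32 hold the SYSCALL CS selector; SS is
	 * architecturally implied at CS + 8.  (Bits 63:48, the SYSRET
	 * selectors, are not needed here.)
	 */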
1896 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
1897 	msr_data >>= 32;
1898 	cs_sel = (u16)(msr_data & 0xfffc);
1899 	ss_sel = (u16)(msr_data + 8);
1900 
1901 	if (efer & EFER_LMA) {
1902 		cs.d = 0;
1903 		cs.l = 1;
1904 	}
1905 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1906 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1907 
1908 	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
1909 	if (efer & EFER_LMA) {
1910 #ifdef CONFIG_X86_64
1911 		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1912 
1913 		ops->get_msr(ctxt,
1914 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
1915 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
1916 		ctxt->_eip = msr_data;
1917 
1918 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1919 		ctxt->eflags &= ~(msr_data | EFLG_RF);
1920 #endif
1921 	} else {
1922 		/* legacy mode */
1923 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
1924 		ctxt->_eip = (u32)msr_data;
1925 
1926 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1927 	}
1928 
1929 	return X86EMUL_CONTINUE;
1930 }
1931 
1932 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1933 {
1934 	struct x86_emulate_ops *ops = ctxt->ops;
1935 	struct desc_struct cs, ss;
1936 	u64 msr_data;
1937 	u16 cs_sel, ss_sel;
1938 	u64 efer = 0;
1939 
1940 	ops->get_msr(ctxt, MSR_EFER, &efer);
1941 	/* inject #GP if in real mode */
1942 	if (ctxt->mode == X86EMUL_MODE_REAL)
1943 		return emulate_gp(ctxt, 0);
1944 
1945 	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
1946 	 * Therefore, we inject an #UD.
1947 	 */
1948 	if (ctxt->mode == X86EMUL_MODE_PROT64)
1949 		return emulate_ud(ctxt);
1950 
1951 	setup_syscalls_segments(ctxt, &cs, &ss);
1952 
1953 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1954 	switch (ctxt->mode) {
1955 	case X86EMUL_MODE_PROT32:
1956 		if ((msr_data & 0xfffc) == 0x0)
1957 			return emulate_gp(ctxt, 0);
1958 		break;
1959 	case X86EMUL_MODE_PROT64:
1960 		if (msr_data == 0x0)
1961 			return emulate_gp(ctxt, 0);
1962 		break;
1963 	}
1964 
1965 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1966 	cs_sel = (u16)msr_data;
1967 	cs_sel &= ~SELECTOR_RPL_MASK;
1968 	ss_sel = cs_sel + 8;
1969 	ss_sel &= ~SELECTOR_RPL_MASK;
1970 	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1971 		cs.d = 0;
1972 		cs.l = 1;
1973 	}
1974 
1975 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1976 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1977 
1978 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1979 	ctxt->_eip = msr_data;
1980 
1981 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1982 	ctxt->regs[VCPU_REGS_RSP] = msr_data;
1983 
1984 	return X86EMUL_CONTINUE;
1985 }
1986 
1987 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1988 {
1989 	struct x86_emulate_ops *ops = ctxt->ops;
1990 	struct desc_struct cs, ss;
1991 	u64 msr_data;
1992 	int usermode;
1993 	u16 cs_sel = 0, ss_sel = 0;
1994 
1995 	/* inject #GP if in real mode or Virtual 8086 mode */
1996 	if (ctxt->mode == X86EMUL_MODE_REAL ||
1997 	    ctxt->mode == X86EMUL_MODE_VM86)
1998 		return emulate_gp(ctxt, 0);
1999 
2000 	setup_syscalls_segments(ctxt, &cs, &ss);
2001 
2002 	if ((ctxt->rex_prefix & 0x8) != 0x0)
2003 		usermode = X86EMUL_MODE_PROT64;
2004 	else
2005 		usermode = X86EMUL_MODE_PROT32;
2006 
2007 	cs.dpl = 3;
2008 	ss.dpl = 3;
2009 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
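	/*
	 * The sysexit target selectors sit at fixed offsets from
	 * SYSENTER_CS: +16 (CS) and +24 (SS) for a 32-bit return,
	 * +32 and +40 for a 64-bit one.
	 */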
2010 	switch (usermode) {
2011 	case X86EMUL_MODE_PROT32:
2012 		cs_sel = (u16)(msr_data + 16);
2013 		if ((msr_data & 0xfffc) == 0x0)
2014 			return emulate_gp(ctxt, 0);
2015 		ss_sel = (u16)(msr_data + 24);
2016 		break;
2017 	case X86EMUL_MODE_PROT64:
2018 		cs_sel = (u16)(msr_data + 32);
2019 		if (msr_data == 0x0)
2020 			return emulate_gp(ctxt, 0);
2021 		ss_sel = cs_sel + 8;
2022 		cs.d = 0;
2023 		cs.l = 1;
2024 		break;
2025 	}
2026 	cs_sel |= SELECTOR_RPL_MASK;
2027 	ss_sel |= SELECTOR_RPL_MASK;
2028 
2029 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2030 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2031 
2032 	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2033 	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2034 
2035 	return X86EMUL_CONTINUE;
2036 }
2037 
2038 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2039 {
2040 	int iopl;
2041 	if (ctxt->mode == X86EMUL_MODE_REAL)
2042 		return false;
2043 	if (ctxt->mode == X86EMUL_MODE_VM86)
2044 		return true;
2045 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2046 	return ctxt->ops->cpl(ctxt) > iopl;
2047 }
2048 
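/*
 * Consult the I/O permission bitmap in the TSS: the 16-bit offset of
 * the bitmap is stored at byte 102 of the TSS (hence the minimum
 * limit of 103), each port maps to one bit, and the access is allowed
 * only if every bit it covers is clear.
 */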
2049 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2050 					    u16 port, u16 len)
2051 {
2052 	struct x86_emulate_ops *ops = ctxt->ops;
2053 	struct desc_struct tr_seg;
2054 	u32 base3;
2055 	int r;
2056 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2057 	unsigned mask = (1 << len) - 1;
2058 	unsigned long base;
2059 
2060 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2061 	if (!tr_seg.p)
2062 		return false;
2063 	if (desc_limit_scaled(&tr_seg) < 103)
2064 		return false;
2065 	base = get_desc_base(&tr_seg);
2066 #ifdef CONFIG_X86_64
2067 	base |= ((u64)base3) << 32;
2068 #endif
2069 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2070 	if (r != X86EMUL_CONTINUE)
2071 		return false;
2072 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2073 		return false;
2074 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2075 	if (r != X86EMUL_CONTINUE)
2076 		return false;
2077 	if ((perm >> bit_idx) & mask)
2078 		return false;
2079 	return true;
2080 }
2081 
2082 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2083 				 u16 port, u16 len)
2084 {
2085 	if (ctxt->perm_ok)
2086 		return true;
2087 
2088 	if (emulator_bad_iopl(ctxt))
2089 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2090 			return false;
2091 
2092 	ctxt->perm_ok = true;
2093 
2094 	return true;
2095 }
2096 
2097 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2098 				struct tss_segment_16 *tss)
2099 {
2100 	tss->ip = ctxt->_eip;
2101 	tss->flag = ctxt->eflags;
2102 	tss->ax = ctxt->regs[VCPU_REGS_RAX];
2103 	tss->cx = ctxt->regs[VCPU_REGS_RCX];
2104 	tss->dx = ctxt->regs[VCPU_REGS_RDX];
2105 	tss->bx = ctxt->regs[VCPU_REGS_RBX];
2106 	tss->sp = ctxt->regs[VCPU_REGS_RSP];
2107 	tss->bp = ctxt->regs[VCPU_REGS_RBP];
2108 	tss->si = ctxt->regs[VCPU_REGS_RSI];
2109 	tss->di = ctxt->regs[VCPU_REGS_RDI];
2110 
2111 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2112 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2113 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2114 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2115 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2116 }
2117 
2118 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2119 				 struct tss_segment_16 *tss)
2120 {
2121 	int ret;
2122 
2123 	ctxt->_eip = tss->ip;
2124 	ctxt->eflags = tss->flag | 2;
2125 	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2126 	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2127 	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2128 	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2129 	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2130 	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2131 	ctxt->regs[VCPU_REGS_RSI] = tss->si;
2132 	ctxt->regs[VCPU_REGS_RDI] = tss->di;
2133 
2134 	/*
2135 	 * SDM says that segment selectors are loaded before segment
2136 	 * descriptors
2137 	 */
2138 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2139 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2140 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2141 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2142 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2143 
2144 	/*
2145 	 * Now load segment descriptors. If a fault happens at this stage,
2146 	 * it is handled in the context of the new task.
2147 	 */
2148 	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2149 	if (ret != X86EMUL_CONTINUE)
2150 		return ret;
2151 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2152 	if (ret != X86EMUL_CONTINUE)
2153 		return ret;
2154 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2155 	if (ret != X86EMUL_CONTINUE)
2156 		return ret;
2157 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2158 	if (ret != X86EMUL_CONTINUE)
2159 		return ret;
2160 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2161 	if (ret != X86EMUL_CONTINUE)
2162 		return ret;
2163 
2164 	return X86EMUL_CONTINUE;
2165 }
2166 
2167 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2168 			  u16 tss_selector, u16 old_tss_sel,
2169 			  ulong old_tss_base, struct desc_struct *new_desc)
2170 {
2171 	struct x86_emulate_ops *ops = ctxt->ops;
2172 	struct tss_segment_16 tss_seg;
2173 	int ret;
2174 	u32 new_tss_base = get_desc_base(new_desc);
2175 
2176 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2177 			    &ctxt->exception);
2178 	if (ret != X86EMUL_CONTINUE)
2179 		/* FIXME: need to provide precise fault address */
2180 		return ret;
2181 
2182 	save_state_to_tss16(ctxt, &tss_seg);
2183 
2184 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2185 			     &ctxt->exception);
2186 	if (ret != X86EMUL_CONTINUE)
2187 		/* FIXME: need to provide precise fault address */
2188 		return ret;
2189 
2190 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2191 			    &ctxt->exception);
2192 	if (ret != X86EMUL_CONTINUE)
2193 		/* FIXME: need to provide precise fault address */
2194 		return ret;
2195 
2196 	if (old_tss_sel != 0xffff) {
2197 		tss_seg.prev_task_link = old_tss_sel;
2198 
2199 		ret = ops->write_std(ctxt, new_tss_base,
2200 				     &tss_seg.prev_task_link,
2201 				     sizeof tss_seg.prev_task_link,
2202 				     &ctxt->exception);
2203 		if (ret != X86EMUL_CONTINUE)
2204 			/* FIXME: need to provide precise fault address */
2205 			return ret;
2206 	}
2207 
2208 	return load_state_from_tss16(ctxt, &tss_seg);
2209 }
2210 
2211 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2212 				struct tss_segment_32 *tss)
2213 {
2214 	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2215 	tss->eip = ctxt->_eip;
2216 	tss->eflags = ctxt->eflags;
2217 	tss->eax = ctxt->regs[VCPU_REGS_RAX];
2218 	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2219 	tss->edx = ctxt->regs[VCPU_REGS_RDX];
2220 	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2221 	tss->esp = ctxt->regs[VCPU_REGS_RSP];
2222 	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2223 	tss->esi = ctxt->regs[VCPU_REGS_RSI];
2224 	tss->edi = ctxt->regs[VCPU_REGS_RDI];
2225 
2226 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2227 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2228 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2229 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2230 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2231 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2232 	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2233 }
2234 
2235 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2236 				 struct tss_segment_32 *tss)
2237 {
2238 	int ret;
2239 
2240 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2241 		return emulate_gp(ctxt, 0);
2242 	ctxt->_eip = tss->eip;
2243 	ctxt->eflags = tss->eflags | 2;
2244 	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2245 	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2246 	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2247 	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2248 	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2249 	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2250 	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2251 	ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2252 
2253 	/*
2254 	 * SDM says that segment selectors are loaded before segment
2255 	 * descriptors
2256 	 */
2257 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2258 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2259 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2260 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2261 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2262 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2263 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2264 
2265 	/*
2266 	 * Now load segment descriptors. If a fault happens at this stage,
2267 	 * it is handled in the context of the new task.
2268 	 */
2269 	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2270 	if (ret != X86EMUL_CONTINUE)
2271 		return ret;
2272 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2273 	if (ret != X86EMUL_CONTINUE)
2274 		return ret;
2275 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2276 	if (ret != X86EMUL_CONTINUE)
2277 		return ret;
2278 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2279 	if (ret != X86EMUL_CONTINUE)
2280 		return ret;
2281 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2282 	if (ret != X86EMUL_CONTINUE)
2283 		return ret;
2284 	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2285 	if (ret != X86EMUL_CONTINUE)
2286 		return ret;
2287 	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2288 	if (ret != X86EMUL_CONTINUE)
2289 		return ret;
2290 
2291 	return X86EMUL_CONTINUE;
2292 }
2293 
2294 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2295 			  u16 tss_selector, u16 old_tss_sel,
2296 			  ulong old_tss_base, struct desc_struct *new_desc)
2297 {
2298 	struct x86_emulate_ops *ops = ctxt->ops;
2299 	struct tss_segment_32 tss_seg;
2300 	int ret;
2301 	u32 new_tss_base = get_desc_base(new_desc);
2302 
2303 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2304 			    &ctxt->exception);
2305 	if (ret != X86EMUL_CONTINUE)
2306 		/* FIXME: need to provide precise fault address */
2307 		return ret;
2308 
2309 	save_state_to_tss32(ctxt, &tss_seg);
2310 
2311 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2312 			     &ctxt->exception);
2313 	if (ret != X86EMUL_CONTINUE)
2314 		/* FIXME: need to provide precise fault address */
2315 		return ret;
2316 
2317 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2318 			    &ctxt->exception);
2319 	if (ret != X86EMUL_CONTINUE)
2320 		/* FIXME: need to provide precise fault address */
2321 		return ret;
2322 
2323 	if (old_tss_sel != 0xffff) {
2324 		tss_seg.prev_task_link = old_tss_sel;
2325 
2326 		ret = ops->write_std(ctxt, new_tss_base,
2327 				     &tss_seg.prev_task_link,
2328 				     sizeof tss_seg.prev_task_link,
2329 				     &ctxt->exception);
2330 		if (ret != X86EMUL_CONTINUE)
2331 			/* FIXME: need to provide precise fault address */
2332 			return ret;
2333 	}
2334 
2335 	return load_state_from_tss32(ctxt, &tss_seg);
2336 }
2337 
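/*
 * Hardware task switch: validate the new TSS descriptor, save the
 * outgoing state into the old TSS, load the incoming state from the
 * new one, maintain the busy bits, NT flag and back link, set CR0.TS,
 * and finally push the error code, if any, on the new stack.
 */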
2338 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2339 				   u16 tss_selector, int reason,
2340 				   bool has_error_code, u32 error_code)
2341 {
2342 	struct x86_emulate_ops *ops = ctxt->ops;
2343 	struct desc_struct curr_tss_desc, next_tss_desc;
2344 	int ret;
2345 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2346 	ulong old_tss_base =
2347 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2348 	u32 desc_limit;
2349 
2350 	/* FIXME: old_tss_base == ~0 ? */
2351 
2352 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2353 	if (ret != X86EMUL_CONTINUE)
2354 		return ret;
2355 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2356 	if (ret != X86EMUL_CONTINUE)
2357 		return ret;
2358 
2359 	/* FIXME: check that next_tss_desc is tss */
2360 
2361 	if (reason != TASK_SWITCH_IRET) {
2362 		if ((tss_selector & 3) > next_tss_desc.dpl ||
2363 		    ops->cpl(ctxt) > next_tss_desc.dpl)
2364 			return emulate_gp(ctxt, 0);
2365 	}
2366 
2367 	desc_limit = desc_limit_scaled(&next_tss_desc);
2368 	if (!next_tss_desc.p ||
2369 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2370 	     desc_limit < 0x2b)) {
2371 		emulate_ts(ctxt, tss_selector & 0xfffc);
2372 		return X86EMUL_PROPAGATE_FAULT;
2373 	}
2374 
2375 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2376 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2377 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2378 	}
2379 
2380 	if (reason == TASK_SWITCH_IRET)
2381 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2382 
2383 	/* Set back link to prev task only if NT bit is set in eflags;
2384 	   note that old_tss_sel is not used after this point */
2385 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2386 		old_tss_sel = 0xffff;
2387 
2388 	if (next_tss_desc.type & 8)
2389 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2390 				     old_tss_base, &next_tss_desc);
2391 	else
2392 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2393 				     old_tss_base, &next_tss_desc);
2394 	if (ret != X86EMUL_CONTINUE)
2395 		return ret;
2396 
2397 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2398 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2399 
2400 	if (reason != TASK_SWITCH_IRET) {
2401 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2402 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2403 	}
2404 
2405 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2406 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2407 
2408 	if (has_error_code) {
2409 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2410 		ctxt->lock_prefix = 0;
2411 		ctxt->src.val = (unsigned long) error_code;
2412 		ret = em_push(ctxt);
2413 	}
2414 
2415 	return ret;
2416 }
2417 
2418 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2419 			 u16 tss_selector, int reason,
2420 			 bool has_error_code, u32 error_code)
2421 {
2422 	int rc;
2423 
2424 	ctxt->_eip = ctxt->eip;
2425 	ctxt->dst.type = OP_NONE;
2426 
2427 	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2428 				     has_error_code, error_code);
2429 
2430 	if (rc == X86EMUL_CONTINUE)
2431 		ctxt->eip = ctxt->_eip;
2432 
2433 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2434 }
2435 
2436 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2437 			    int reg, struct operand *op)
2438 {
2439 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2440 
2441 	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2442 	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2443 	op->addr.mem.seg = seg;
2444 }
2445 
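/*
 * das: decimal-adjust AL after a packed-BCD subtraction.  Subtract 6
 * if the low nibble is greater than 9 or AF is set, and another 0x60
 * if AL exceeded 0x99 or CF was set, updating AF/CF to match.
 */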
2446 static int em_das(struct x86_emulate_ctxt *ctxt)
2447 {
2448 	u8 al, old_al;
2449 	bool af, cf, old_cf;
2450 
2451 	cf = ctxt->eflags & X86_EFLAGS_CF;
2452 	al = ctxt->dst.val;
2453 
2454 	old_al = al;
2455 	old_cf = cf;
2456 	cf = false;
2457 	af = ctxt->eflags & X86_EFLAGS_AF;
2458 	if ((al & 0x0f) > 9 || af) {
2459 		al -= 6;
2460 		cf = old_cf | (al >= 250);
2461 		af = true;
2462 	} else {
2463 		af = false;
2464 	}
2465 	if (old_al > 0x99 || old_cf) {
2466 		al -= 0x60;
2467 		cf = true;
2468 	}
2469 
2470 	ctxt->dst.val = al;
2471 	/* Set PF, ZF, SF */
2472 	ctxt->src.type = OP_IMM;
2473 	ctxt->src.val = 0;
2474 	ctxt->src.bytes = 1;
2475 	emulate_2op_SrcV(ctxt, "or");
2476 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2477 	if (cf)
2478 		ctxt->eflags |= X86_EFLAGS_CF;
2479 	if (af)
2480 		ctxt->eflags |= X86_EFLAGS_AF;
2481 	return X86EMUL_CONTINUE;
2482 }
2483 
2484 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2485 {
2486 	u16 sel, old_cs;
2487 	ulong old_eip;
2488 	int rc;
2489 
2490 	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2491 	old_eip = ctxt->_eip;
2492 
2493 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2494 	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
2495 	if (rc != X86EMUL_CONTINUE)
		return rc;
2496 
2497 	ctxt->_eip = 0;
2498 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2499 
2500 	ctxt->src.val = old_cs;
2501 	rc = em_push(ctxt);
2502 	if (rc != X86EMUL_CONTINUE)
2503 		return rc;
2504 
2505 	ctxt->src.val = old_eip;
2506 	return em_push(ctxt);
2507 }
2508 
2509 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2510 {
2511 	int rc;
2512 
2513 	ctxt->dst.type = OP_REG;
2514 	ctxt->dst.addr.reg = &ctxt->_eip;
2515 	ctxt->dst.bytes = ctxt->op_bytes;
2516 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2517 	if (rc != X86EMUL_CONTINUE)
2518 		return rc;
2519 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2520 	return X86EMUL_CONTINUE;
2521 }
2522 
2523 static int em_add(struct x86_emulate_ctxt *ctxt)
2524 {
2525 	emulate_2op_SrcV(ctxt, "add");
2526 	return X86EMUL_CONTINUE;
2527 }
2528 
2529 static int em_or(struct x86_emulate_ctxt *ctxt)
2530 {
2531 	emulate_2op_SrcV(ctxt, "or");
2532 	return X86EMUL_CONTINUE;
2533 }
2534 
2535 static int em_adc(struct x86_emulate_ctxt *ctxt)
2536 {
2537 	emulate_2op_SrcV(ctxt, "adc");
2538 	return X86EMUL_CONTINUE;
2539 }
2540 
2541 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2542 {
2543 	emulate_2op_SrcV(ctxt, "sbb");
2544 	return X86EMUL_CONTINUE;
2545 }
2546 
2547 static int em_and(struct x86_emulate_ctxt *ctxt)
2548 {
2549 	emulate_2op_SrcV(ctxt, "and");
2550 	return X86EMUL_CONTINUE;
2551 }
2552 
2553 static int em_sub(struct x86_emulate_ctxt *ctxt)
2554 {
2555 	emulate_2op_SrcV(ctxt, "sub");
2556 	return X86EMUL_CONTINUE;
2557 }
2558 
2559 static int em_xor(struct x86_emulate_ctxt *ctxt)
2560 {
2561 	emulate_2op_SrcV(ctxt, "xor");
2562 	return X86EMUL_CONTINUE;
2563 }
2564 
2565 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2566 {
2567 	emulate_2op_SrcV(ctxt, "cmp");
2568 	/* Disable writeback. */
2569 	ctxt->dst.type = OP_NONE;
2570 	return X86EMUL_CONTINUE;
2571 }
2572 
2573 static int em_test(struct x86_emulate_ctxt *ctxt)
2574 {
2575 	emulate_2op_SrcV(ctxt, "test");
2576 	/* Disable writeback. */
2577 	ctxt->dst.type = OP_NONE;
2578 	return X86EMUL_CONTINUE;
2579 }
2580 
2581 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2582 {
2583 	/* Write back the register source. */
2584 	ctxt->src.val = ctxt->dst.val;
2585 	write_register_operand(&ctxt->src);
2586 
2587 	/* Write back the memory destination with implicit LOCK prefix. */
2588 	ctxt->dst.val = ctxt->src.orig_val;
2589 	ctxt->lock_prefix = 1;
2590 	return X86EMUL_CONTINUE;
2591 }
2592 
2593 static int em_imul(struct x86_emulate_ctxt *ctxt)
2594 {
2595 	emulate_2op_SrcV_nobyte(ctxt, "imul");
2596 	return X86EMUL_CONTINUE;
2597 }
2598 
2599 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2600 {
2601 	ctxt->dst.val = ctxt->src2.val;
2602 	return em_imul(ctxt);
2603 }
2604 
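/*
 * cwd/cdq/cqo: replicate the accumulator's sign bit into rDX.
 * (x >> (bits - 1)) yields 0 or 1; subtracting one and inverting
 * turns that into all-zeroes or all-ones.
 */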
2605 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2606 {
2607 	ctxt->dst.type = OP_REG;
2608 	ctxt->dst.bytes = ctxt->src.bytes;
2609 	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2610 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2611 
2612 	return X86EMUL_CONTINUE;
2613 }
2614 
2615 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2616 {
2617 	u64 tsc = 0;
2618 
2619 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2620 	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2621 	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2622 	return X86EMUL_CONTINUE;
2623 }
2624 
2625 static int em_mov(struct x86_emulate_ctxt *ctxt)
2626 {
2627 	ctxt->dst.val = ctxt->src.val;
2628 	return X86EMUL_CONTINUE;
2629 }
2630 
2631 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2632 {
2633 	if (ctxt->modrm_reg > VCPU_SREG_GS)
2634 		return emulate_ud(ctxt);
2635 
2636 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2637 	return X86EMUL_CONTINUE;
2638 }
2639 
2640 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2641 {
2642 	u16 sel = ctxt->src.val;
2643 
2644 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2645 		return emulate_ud(ctxt);
2646 
2647 	if (ctxt->modrm_reg == VCPU_SREG_SS)
2648 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2649 
2650 	/* Disable writeback. */
2651 	ctxt->dst.type = OP_NONE;
2652 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2653 }
2654 
2655 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2656 {
2657 	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2658 	return X86EMUL_CONTINUE;
2659 }
2660 
2661 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2662 {
2663 	int rc;
2664 	ulong linear;
2665 
2666 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2667 	if (rc == X86EMUL_CONTINUE)
2668 		ctxt->ops->invlpg(ctxt, linear);
2669 	/* Disable writeback. */
2670 	ctxt->dst.type = OP_NONE;
2671 	return X86EMUL_CONTINUE;
2672 }
2673 
2674 static int em_clts(struct x86_emulate_ctxt *ctxt)
2675 {
2676 	ulong cr0;
2677 
2678 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2679 	cr0 &= ~X86_CR0_TS;
2680 	ctxt->ops->set_cr(ctxt, 0, cr0);
2681 	return X86EMUL_CONTINUE;
2682 }
2683 
2684 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2685 {
2686 	int rc;
2687 
2688 	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2689 		return X86EMUL_UNHANDLEABLE;
2690 
2691 	rc = ctxt->ops->fix_hypercall(ctxt);
2692 	if (rc != X86EMUL_CONTINUE)
2693 		return rc;
2694 
2695 	/* Let the processor re-execute the fixed hypercall */
2696 	ctxt->_eip = ctxt->eip;
2697 	/* Disable writeback. */
2698 	ctxt->dst.type = OP_NONE;
2699 	return X86EMUL_CONTINUE;
2700 }
2701 
2702 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2703 {
2704 	struct desc_ptr desc_ptr;
2705 	int rc;
2706 
2707 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2708 			     &desc_ptr.size, &desc_ptr.address,
2709 			     ctxt->op_bytes);
2710 	if (rc != X86EMUL_CONTINUE)
2711 		return rc;
2712 	ctxt->ops->set_gdt(ctxt, &desc_ptr);
2713 	/* Disable writeback. */
2714 	ctxt->dst.type = OP_NONE;
2715 	return X86EMUL_CONTINUE;
2716 }
2717 
2718 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2719 {
2720 	int rc;
2721 
2722 	rc = ctxt->ops->fix_hypercall(ctxt);
2723 
2724 	/* Disable writeback. */
2725 	ctxt->dst.type = OP_NONE;
2726 	return rc;
2727 }
2728 
2729 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2730 {
2731 	struct desc_ptr desc_ptr;
2732 	int rc;
2733 
2734 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2735 			     &desc_ptr.size, &desc_ptr.address,
2736 			     ctxt->op_bytes);
2737 	if (rc != X86EMUL_CONTINUE)
2738 		return rc;
2739 	ctxt->ops->set_idt(ctxt, &desc_ptr);
2740 	/* Disable writeback. */
2741 	ctxt->dst.type = OP_NONE;
2742 	return X86EMUL_CONTINUE;
2743 }
2744 
2745 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2746 {
2747 	ctxt->dst.bytes = 2;
2748 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2749 	return X86EMUL_CONTINUE;
2750 }
2751 
2752 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2753 {
2754 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2755 			  | (ctxt->src.val & 0x0f));
2756 	ctxt->dst.type = OP_NONE;
2757 	return X86EMUL_CONTINUE;
2758 }
2759 
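/*
 * loopne (0xe0), loope (0xe1) and loop (0xe2): decrement rCX and
 * branch while it is non-zero.  XORing the opcode with 5 maps
 * 0xe0/0xe1 onto the jnz/jz condition codes that test_cc() expects.
 */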
2760 static int em_loop(struct x86_emulate_ctxt *ctxt)
2761 {
2762 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2763 	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2764 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2765 		jmp_rel(ctxt, ctxt->src.val);
2766 
2767 	return X86EMUL_CONTINUE;
2768 }
2769 
2770 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2771 {
2772 	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2773 		jmp_rel(ctxt, ctxt->src.val);
2774 
2775 	return X86EMUL_CONTINUE;
2776 }
2777 
2778 static int em_cli(struct x86_emulate_ctxt *ctxt)
2779 {
2780 	if (emulator_bad_iopl(ctxt))
2781 		return emulate_gp(ctxt, 0);
2782 
2783 	ctxt->eflags &= ~X86_EFLAGS_IF;
2784 	return X86EMUL_CONTINUE;
2785 }
2786 
2787 static int em_sti(struct x86_emulate_ctxt *ctxt)
2788 {
2789 	if (emulator_bad_iopl(ctxt))
2790 		return emulate_gp(ctxt, 0);
2791 
2792 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2793 	ctxt->eflags |= X86_EFLAGS_IF;
2794 	return X86EMUL_CONTINUE;
2795 }
2796 
2797 static bool valid_cr(int nr)
2798 {
2799 	switch (nr) {
2800 	case 0:
2801 	case 2 ... 4:
2802 	case 8:
2803 		return true;
2804 	default:
2805 		return false;
2806 	}
2807 }
2808 
2809 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2810 {
2811 	if (!valid_cr(ctxt->modrm_reg))
2812 		return emulate_ud(ctxt);
2813 
2814 	return X86EMUL_CONTINUE;
2815 }
2816 
2817 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2818 {
2819 	u64 new_val = ctxt->src.val64;
2820 	int cr = ctxt->modrm_reg;
2821 	u64 efer = 0;
2822 
2823 	static u64 cr_reserved_bits[] = {
2824 		0xffffffff00000000ULL,
2825 		0, 0, 0, /* CR3 checked later */
2826 		CR4_RESERVED_BITS,
2827 		0, 0, 0,
2828 		CR8_RESERVED_BITS,
2829 	};
2830 
2831 	if (!valid_cr(cr))
2832 		return emulate_ud(ctxt);
2833 
2834 	if (new_val & cr_reserved_bits[cr])
2835 		return emulate_gp(ctxt, 0);
2836 
2837 	switch (cr) {
2838 	case 0: {
2839 		u64 cr4;
2840 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2841 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2842 			return emulate_gp(ctxt, 0);
2843 
2844 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2845 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2846 
2847 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2848 		    !(cr4 & X86_CR4_PAE))
2849 			return emulate_gp(ctxt, 0);
2850 
2851 		break;
2852 		}
2853 	case 3: {
2854 		u64 rsvd = 0;
2855 
2856 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2857 		if (efer & EFER_LMA)
2858 			rsvd = CR3_L_MODE_RESERVED_BITS;
2859 		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
2860 			rsvd = CR3_PAE_RESERVED_BITS;
2861 		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
2862 			rsvd = CR3_NONPAE_RESERVED_BITS;
2863 
2864 		if (new_val & rsvd)
2865 			return emulate_gp(ctxt, 0);
2866 
2867 		break;
2868 		}
2869 	case 4: {
2870 		u64 cr4;
2871 
2872 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2873 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2874 
2875 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2876 			return emulate_gp(ctxt, 0);
2877 
2878 		break;
2879 		}
2880 	}
2881 
2882 	return X86EMUL_CONTINUE;
2883 }
2884 
2885 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2886 {
2887 	unsigned long dr7;
2888 
2889 	ctxt->ops->get_dr(ctxt, 7, &dr7);
2890 
2891 	/* Check if DR7.GD (general detect enable, bit 13) is set */
2892 	return dr7 & (1 << 13);
2893 }
2894 
2895 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2896 {
2897 	int dr = ctxt->modrm_reg;
2898 	u64 cr4;
2899 
2900 	if (dr > 7)
2901 		return emulate_ud(ctxt);
2902 
2903 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2904 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2905 		return emulate_ud(ctxt);
2906 
2907 	if (check_dr7_gd(ctxt))
2908 		return emulate_db(ctxt);
2909 
2910 	return X86EMUL_CONTINUE;
2911 }
2912 
2913 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2914 {
2915 	u64 new_val = ctxt->src.val64;
2916 	int dr = ctxt->modrm_reg;
2917 
2918 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2919 		return emulate_gp(ctxt, 0);
2920 
2921 	return check_dr_read(ctxt);
2922 }
2923 
2924 static int check_svme(struct x86_emulate_ctxt *ctxt)
2925 {
2926 	u64 efer;
2927 
2928 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2929 
2930 	if (!(efer & EFER_SVME))
2931 		return emulate_ud(ctxt);
2932 
2933 	return X86EMUL_CONTINUE;
2934 }
2935 
2936 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2937 {
2938 	u64 rax = ctxt->regs[VCPU_REGS_RAX];
2939 
2940 	/* Valid physical address? */
2941 	if (rax & 0xffff000000000000ULL)
2942 		return emulate_gp(ctxt, 0);
2943 
2944 	return check_svme(ctxt);
2945 }
2946 
2947 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
2948 {
2949 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2950 
2951 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
2952 		return emulate_ud(ctxt);
2953 
2954 	return X86EMUL_CONTINUE;
2955 }
2956 
2957 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
2958 {
2959 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2960 	u64 rcx = ctxt->regs[VCPU_REGS_RCX];
2961 
2962 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
2963 	    (rcx > 3))
2964 		return emulate_gp(ctxt, 0);
2965 
2966 	return X86EMUL_CONTINUE;
2967 }
2968 
2969 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
2970 {
2971 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
2972 	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
2973 		return emulate_gp(ctxt, 0);
2974 
2975 	return X86EMUL_CONTINUE;
2976 }
2977 
2978 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
2979 {
2980 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
2981 	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
2982 		return emulate_gp(ctxt, 0);
2983 
2984 	return X86EMUL_CONTINUE;
2985 }
2986 
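/*
 * Opcode-table constructors: D() declares an entry by decode flags
 * alone, I() attaches an ->execute handler, DI()/II() additionally
 * record an intercept, the *P variants add a ->check_perm hook, and
 * G/GD/GP/EXT chain into group, group-dual, mandatory-prefix and
 * ModRM-extension subtables.
 */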
2987 #define D(_y) { .flags = (_y) }
2988 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2989 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2990 		      .check_perm = (_p) }
2991 #define N    D(0)
2992 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2993 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2994 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
2995 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2996 #define II(_f, _e, _i) \
2997 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2998 #define IIP(_f, _e, _i, _p) \
2999 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
3000 	  .check_perm = (_p) }
3001 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3002 
3003 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
3004 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3005 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
3006 
3007 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
3008 		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
3009 		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
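/*
 * I6ALU() emits the six classic encodings of one ALU operation:
 * byte and word/long forms of "r/m,reg", "reg,r/m" and "acc,imm"
 * (the latter two never honor a LOCK prefix).
 */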
3010 
3011 static struct opcode group7_rm1[] = {
3012 	DI(SrcNone | ModRM | Priv, monitor),
3013 	DI(SrcNone | ModRM | Priv, mwait),
3014 	N, N, N, N, N, N,
3015 };
3016 
3017 static struct opcode group7_rm3[] = {
3018 	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
3019 	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
3020 	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
3021 	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
3022 	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
3023 	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
3024 	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
3025 	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3026 };
3027 
3028 static struct opcode group7_rm7[] = {
3029 	N,
3030 	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3031 	N, N, N, N, N, N,
3032 };
3033 
3034 static struct opcode group1[] = {
3035 	I(Lock, em_add),
3036 	I(Lock, em_or),
3037 	I(Lock, em_adc),
3038 	I(Lock, em_sbb),
3039 	I(Lock, em_and),
3040 	I(Lock, em_sub),
3041 	I(Lock, em_xor),
3042 	I(0, em_cmp),
3043 };
3044 
3045 static struct opcode group1A[] = {
3046 	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3047 };
3048 
3049 static struct opcode group3[] = {
3050 	I(DstMem | SrcImm | ModRM, em_test),
3051 	I(DstMem | SrcImm | ModRM, em_test),
3052 	I(DstMem | SrcNone | ModRM | Lock, em_not),
3053 	I(DstMem | SrcNone | ModRM | Lock, em_neg),
3054 	I(SrcMem | ModRM, em_mul_ex),
3055 	I(SrcMem | ModRM, em_imul_ex),
3056 	I(SrcMem | ModRM, em_div_ex),
3057 	I(SrcMem | ModRM, em_idiv_ex),
3058 };
3059 
3060 static struct opcode group4[] = {
3061 	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3062 	N, N, N, N, N, N,
3063 };
3064 
3065 static struct opcode group5[] = {
3066 	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3067 	D(SrcMem | ModRM | Stack),
3068 	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3069 	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3070 	D(SrcMem | ModRM | Stack), N,
3071 };
3072 
3073 static struct opcode group6[] = {
3074 	DI(ModRM | Prot,        sldt),
3075 	DI(ModRM | Prot,        str),
3076 	DI(ModRM | Prot | Priv, lldt),
3077 	DI(ModRM | Prot | Priv, ltr),
3078 	N, N, N, N,
3079 };
3080 
3081 static struct group_dual group7 = { {
3082 	DI(ModRM | Mov | DstMem | Priv, sgdt),
3083 	DI(ModRM | Mov | DstMem | Priv, sidt),
3084 	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3085 	II(ModRM | SrcMem | Priv, em_lidt, lidt),
3086 	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3087 	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3088 	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3089 }, {
3090 	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3091 	EXT(0, group7_rm1),
3092 	N, EXT(0, group7_rm3),
3093 	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3094 	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3095 } };
3096 
3097 static struct opcode group8[] = {
3098 	N, N, N, N,
3099 	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3100 	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3101 };
3102 
3103 static struct group_dual group9 = { {
3104 	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3105 }, {
3106 	N, N, N, N, N, N, N, N,
3107 } };
3108 
3109 static struct opcode group11[] = {
3110 	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3111 };
3112 
3113 static struct gprefix pfx_0f_6f_0f_7f = {
3114 	N, N, N, I(Sse, em_movdqu),
3115 };
3116 
3117 static struct opcode opcode_table[256] = {
3118 	/* 0x00 - 0x07 */
3119 	I6ALU(Lock, em_add),
3120 	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3121 	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3122 	/* 0x08 - 0x0F */
3123 	I6ALU(Lock, em_or),
3124 	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3125 	N,
3126 	/* 0x10 - 0x17 */
3127 	I6ALU(Lock, em_adc),
3128 	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3129 	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3130 	/* 0x18 - 0x1F */
3131 	I6ALU(Lock, em_sbb),
3132 	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3133 	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3134 	/* 0x20 - 0x27 */
3135 	I6ALU(Lock, em_and), N, N,
3136 	/* 0x28 - 0x2F */
3137 	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3138 	/* 0x30 - 0x37 */
3139 	I6ALU(Lock, em_xor), N, N,
3140 	/* 0x38 - 0x3F */
3141 	I6ALU(0, em_cmp), N, N,
3142 	/* 0x40 - 0x4F */
3143 	X16(D(DstReg)),
3144 	/* 0x50 - 0x57 */
3145 	X8(I(SrcReg | Stack, em_push)),
3146 	/* 0x58 - 0x5F */
3147 	X8(I(DstReg | Stack, em_pop)),
3148 	/* 0x60 - 0x67 */
3149 	I(ImplicitOps | Stack | No64, em_pusha),
3150 	I(ImplicitOps | Stack | No64, em_popa),
3151 	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86-64) */ ,
3152 	N, N, N, N,
3153 	/* 0x68 - 0x6F */
3154 	I(SrcImm | Mov | Stack, em_push),
3155 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3156 	I(SrcImmByte | Mov | Stack, em_push),
3157 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3158 	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3159 	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3160 	/* 0x70 - 0x7F */
3161 	X16(D(SrcImmByte)),
3162 	/* 0x80 - 0x87 */
3163 	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3164 	G(DstMem | SrcImm | ModRM | Group, group1),
3165 	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3166 	G(DstMem | SrcImmByte | ModRM | Group, group1),
3167 	I2bv(DstMem | SrcReg | ModRM, em_test),
3168 	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3169 	/* 0x88 - 0x8F */
3170 	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3171 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3172 	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3173 	D(ModRM | SrcMem | NoAccess | DstReg),
3174 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3175 	G(0, group1A),
3176 	/* 0x90 - 0x97 */
3177 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3178 	/* 0x98 - 0x9F */
3179 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3180 	I(SrcImmFAddr | No64, em_call_far), N,
3181 	II(ImplicitOps | Stack, em_pushf, pushf),
3182 	II(ImplicitOps | Stack, em_popf, popf), N, N,
3183 	/* 0xA0 - 0xA7 */
3184 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3185 	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3186 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
3187 	I2bv(SrcSI | DstDI | String, em_cmp),
3188 	/* 0xA8 - 0xAF */
3189 	I2bv(DstAcc | SrcImm, em_test),
3190 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3191 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3192 	I2bv(SrcAcc | DstDI | String, em_cmp),
3193 	/* 0xB0 - 0xB7 */
3194 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3195 	/* 0xB8 - 0xBF */
3196 	X8(I(DstReg | SrcImm | Mov, em_mov)),
3197 	/* 0xC0 - 0xC7 */
3198 	D2bv(DstMem | SrcImmByte | ModRM),
3199 	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3200 	I(ImplicitOps | Stack, em_ret),
3201 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3202 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3203 	G(ByteOp, group11), G(0, group11),
3204 	/* 0xC8 - 0xCF */
3205 	N, N, N, I(ImplicitOps | Stack, em_ret_far),
3206 	D(ImplicitOps), DI(SrcImmByte, intn),
3207 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3208 	/* 0xD0 - 0xD7 */
3209 	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3210 	N, N, N, N,
3211 	/* 0xD8 - 0xDF */
3212 	N, N, N, N, N, N, N, N,
3213 	/* 0xE0 - 0xE7 */
3214 	X3(I(SrcImmByte, em_loop)),
3215 	I(SrcImmByte, em_jcxz),
3216 	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
3217 	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3218 	/* 0xE8 - 0xEF */
3219 	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3220 	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3221 	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
3222 	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3223 	/* 0xF0 - 0xF7 */
3224 	N, DI(ImplicitOps, icebp), N, N,
3225 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3226 	G(ByteOp, group3), G(0, group3),
3227 	/* 0xF8 - 0xFF */
3228 	D(ImplicitOps), D(ImplicitOps),
3229 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3230 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3231 };
3232 
3233 static struct opcode twobyte_table[256] = {
3234 	/* 0x00 - 0x0F */
3235 	G(0, group6), GD(0, &group7), N, N,
3236 	N, I(ImplicitOps | VendorSpecific, em_syscall),
3237 	II(ImplicitOps | Priv, em_clts, clts), N,
3238 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3239 	N, D(ImplicitOps | ModRM), N, N,
3240 	/* 0x10 - 0x1F */
3241 	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3242 	/* 0x20 - 0x2F */
3243 	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3244 	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3245 	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3246 	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3247 	N, N, N, N,
3248 	N, N, N, N, N, N, N, N,
3249 	/* 0x30 - 0x3F */
3250 	DI(ImplicitOps | Priv, wrmsr),
3251 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3252 	DI(ImplicitOps | Priv, rdmsr),
3253 	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3254 	I(ImplicitOps | VendorSpecific, em_sysenter),
3255 	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3256 	N, N,
3257 	N, N, N, N, N, N, N, N,
3258 	/* 0x40 - 0x4F */
3259 	X16(D(DstReg | SrcMem | ModRM | Mov)),
3260 	/* 0x50 - 0x5F */
3261 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3262 	/* 0x60 - 0x6F */
3263 	N, N, N, N,
3264 	N, N, N, N,
3265 	N, N, N, N,
3266 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3267 	/* 0x70 - 0x7F */
3268 	N, N, N, N,
3269 	N, N, N, N,
3270 	N, N, N, N,
3271 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3272 	/* 0x80 - 0x8F */
3273 	X16(D(SrcImm)),
3274 	/* 0x90 - 0x9F */
3275 	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3276 	/* 0xA0 - 0xA7 */
3277 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
3278 	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3279 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3280 	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3281 	/* 0xA8 - 0xAF */
3282 	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
3283 	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3284 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3285 	D(DstMem | SrcReg | Src2CL | ModRM),
3286 	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3287 	/* 0xB0 - 0xB7 */
3288 	D2bv(DstMem | SrcReg | ModRM | Lock),
3289 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
3290 	D(DstMem | SrcReg | ModRM | BitOp | Lock),
3291 	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
3292 	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
3293 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3294 	/* 0xB8 - 0xBF */
3295 	N, N,
3296 	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3297 	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3298 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3299 	/* 0xC0 - 0xCF */
3300 	D2bv(DstMem | SrcReg | ModRM | Lock),
3301 	N, D(DstMem | SrcReg | ModRM | Mov),
3302 	N, N, N, GD(0, &group9),
3303 	N, N, N, N, N, N, N, N,
3304 	/* 0xD0 - 0xDF */
3305 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3306 	/* 0xE0 - 0xEF */
3307 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3308 	/* 0xF0 - 0xFF */
3309 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3310 };
3311 
3312 #undef D
3313 #undef N
3314 #undef G
3315 #undef GD
3316 #undef I
3317 #undef GP
3318 #undef EXT
3319 
3320 #undef D2bv
3321 #undef D2bvIP
3322 #undef I2bv
3323 #undef I6ALU
3324 
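/*
 * Immediates are at most four bytes here: with a 64-bit operand size
 * the immediate is fetched as 32 bits and sign- or zero-extended by
 * decode_imm().
 */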
3325 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3326 {
3327 	unsigned size;
3328 
3329 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3330 	if (size == 8)
3331 		size = 4;
3332 	return size;
3333 }
3334 
3335 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3336 		      unsigned size, bool sign_extension)
3337 {
3338 	int rc = X86EMUL_CONTINUE;
3339 
3340 	op->type = OP_IMM;
3341 	op->bytes = size;
3342 	op->addr.mem.ea = ctxt->_eip;
3343 	/* NB. Immediates are sign-extended as necessary. */
3344 	switch (op->bytes) {
3345 	case 1:
3346 		op->val = insn_fetch(s8, ctxt);
3347 		break;
3348 	case 2:
3349 		op->val = insn_fetch(s16, ctxt);
3350 		break;
3351 	case 4:
3352 		op->val = insn_fetch(s32, ctxt);
3353 		break;
3354 	}
3355 	if (!sign_extension) {
3356 		switch (op->bytes) {
3357 		case 1:
3358 			op->val &= 0xff;
3359 			break;
3360 		case 2:
3361 			op->val &= 0xffff;
3362 			break;
3363 		case 4:
3364 			op->val &= 0xffffffff;
3365 			break;
3366 		}
3367 	}
3368 done:
3369 	return rc;
3370 }
3371 
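/*
 * Materialize a single operand from its Op* selector in the decode
 * flags: registers and immediates are fetched immediately, while
 * memory operands are staged through ctxt->memop (mem_common) so the
 * ModRM/SIB computation is done only once.
 */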
3372 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
3373 			  unsigned d)
3374 {
3375 	int rc = X86EMUL_CONTINUE;
3376 
3377 	switch (d) {
3378 	case OpReg:
3379 		decode_register_operand(ctxt, op,
3380 			 op == &ctxt->dst &&
3381 			 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3382 		break;
3383 	case OpImmUByte:
3384 		rc = decode_imm(ctxt, op, 1, false);
3385 		break;
3386 	case OpMem:
3387 		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3388 	mem_common:
3389 		*op = ctxt->memop;
3390 		ctxt->memopp = op;
3391 		if ((ctxt->d & BitOp) && op == &ctxt->dst)
3392 			fetch_bit_operand(ctxt);
3393 		op->orig_val = op->val;
3394 		break;
3395 	case OpMem64:
3396 		ctxt->memop.bytes = 8;
3397 		goto mem_common;
3398 	case OpAcc:
3399 		op->type = OP_REG;
3400 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3401 		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3402 		fetch_register_operand(op);
3403 		op->orig_val = op->val;
3404 		break;
3405 	case OpDI:
3406 		op->type = OP_MEM;
3407 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3408 		op->addr.mem.ea =
3409 			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3410 		op->addr.mem.seg = VCPU_SREG_ES;
3411 		op->val = 0;
3412 		break;
3413 	case OpDX:
3414 		op->type = OP_REG;
3415 		op->bytes = 2;
3416 		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3417 		fetch_register_operand(op);
3418 		break;
3419 	case OpCL:
3420 		op->bytes = 1;
3421 		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3422 		break;
3423 	case OpImmByte:
3424 		rc = decode_imm(ctxt, op, 1, true);
3425 		break;
3426 	case OpOne:
3427 		op->bytes = 1;
3428 		op->val = 1;
3429 		break;
3430 	case OpImm:
3431 		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
3432 		break;
3433 	case OpMem16:
3434 		ctxt->memop.bytes = 2;
3435 		goto mem_common;
3436 	case OpMem32:
3437 		ctxt->memop.bytes = 4;
3438 		goto mem_common;
3439 	case OpImmU16:
3440 		rc = decode_imm(ctxt, op, 2, false);
3441 		break;
3442 	case OpImmU:
3443 		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
3444 		break;
3445 	case OpSI:
3446 		op->type = OP_MEM;
3447 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3448 		op->addr.mem.ea =
3449 			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3450 		op->addr.mem.seg = seg_override(ctxt);
3451 		op->val = 0;
3452 		break;
3453 	case OpImmFAddr:
3454 		op->type = OP_IMM;
3455 		op->addr.mem.ea = ctxt->_eip;
3456 		op->bytes = ctxt->op_bytes + 2;
3457 		insn_fetch_arr(op->valptr, op->bytes, ctxt);
3458 		break;
3459 	case OpMemFAddr:
3460 		ctxt->memop.bytes = ctxt->op_bytes + 2;
3461 		goto mem_common;
3462 	case OpES:
3463 		op->val = VCPU_SREG_ES;
3464 		break;
3465 	case OpCS:
3466 		op->val = VCPU_SREG_CS;
3467 		break;
3468 	case OpSS:
3469 		op->val = VCPU_SREG_SS;
3470 		break;
3471 	case OpDS:
3472 		op->val = VCPU_SREG_DS;
3473 		break;
3474 	case OpFS:
3475 		op->val = VCPU_SREG_FS;
3476 		break;
3477 	case OpGS:
3478 		op->val = VCPU_SREG_GS;
3479 		break;
3480 	case OpImplicit:
3481 		/* Special instructions do their own operand decoding. */
3482 	default:
3483 		op->type = OP_NONE; /* Disable writeback. */
3484 		break;
3485 	}
3486 
3487 done:
3488 	return rc;
3489 }
3490 
3491 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3492 {
3493 	int rc = X86EMUL_CONTINUE;
3494 	int mode = ctxt->mode;
3495 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3496 	bool op_prefix = false;
3497 	struct opcode opcode;
3498 
3499 	ctxt->memop.type = OP_NONE;
3500 	ctxt->memopp = NULL;
3501 	ctxt->_eip = ctxt->eip;
3502 	ctxt->fetch.start = ctxt->_eip;
3503 	ctxt->fetch.end = ctxt->fetch.start + insn_len;
3504 	if (insn_len > 0)
3505 		memcpy(ctxt->fetch.data, insn, insn_len);
3506 
3507 	switch (mode) {
3508 	case X86EMUL_MODE_REAL:
3509 	case X86EMUL_MODE_VM86:
3510 	case X86EMUL_MODE_PROT16:
3511 		def_op_bytes = def_ad_bytes = 2;
3512 		break;
3513 	case X86EMUL_MODE_PROT32:
3514 		def_op_bytes = def_ad_bytes = 4;
3515 		break;
3516 #ifdef CONFIG_X86_64
3517 	case X86EMUL_MODE_PROT64:
3518 		def_op_bytes = 4;
3519 		def_ad_bytes = 8;
3520 		break;
3521 #endif
3522 	default:
3523 		return EMULATION_FAILED;
3524 	}
3525 
3526 	ctxt->op_bytes = def_op_bytes;
3527 	ctxt->ad_bytes = def_ad_bytes;
3528 
3529 	/* Legacy prefixes. */
3530 	for (;;) {
3531 		switch (ctxt->b = insn_fetch(u8, ctxt)) {
3532 		case 0x66:	/* operand-size override */
3533 			op_prefix = true;
3534 			/* switch between 2/4 bytes */
3535 			ctxt->op_bytes = def_op_bytes ^ 6;
3536 			break;
3537 		case 0x67:	/* address-size override */
3538 			if (mode == X86EMUL_MODE_PROT64)
3539 				/* switch between 4/8 bytes */
3540 				ctxt->ad_bytes = def_ad_bytes ^ 12;
3541 			else
3542 				/* switch between 2/4 bytes */
3543 				ctxt->ad_bytes = def_ad_bytes ^ 6;
3544 			break;
3545 		case 0x26:	/* ES override */
3546 		case 0x2e:	/* CS override */
3547 		case 0x36:	/* SS override */
3548 		case 0x3e:	/* DS override */
3549 			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3550 			break;
3551 		case 0x64:	/* FS override */
3552 		case 0x65:	/* GS override */
3553 			set_seg_override(ctxt, ctxt->b & 7);
3554 			break;
3555 		case 0x40 ... 0x4f: /* REX */
3556 			if (mode != X86EMUL_MODE_PROT64)
3557 				goto done_prefixes;
3558 			ctxt->rex_prefix = ctxt->b;
3559 			continue;
3560 		case 0xf0:	/* LOCK */
3561 			ctxt->lock_prefix = 1;
3562 			break;
3563 		case 0xf2:	/* REPNE/REPNZ */
3564 		case 0xf3:	/* REP/REPE/REPZ */
3565 			ctxt->rep_prefix = ctxt->b;
3566 			break;
3567 		default:
3568 			goto done_prefixes;
3569 		}
3570 
3571 		/* Any legacy prefix after a REX prefix nullifies its effect. */
3572 
3573 		ctxt->rex_prefix = 0;
3574 	}
3575 
3576 done_prefixes:
3577 
3578 	/* REX prefix. */
3579 	if (ctxt->rex_prefix & 8)
3580 		ctxt->op_bytes = 8;	/* REX.W */
3581 
3582 	/* Opcode byte(s). */
3583 	opcode = opcode_table[ctxt->b];
3584 	/* Two-byte opcode? */
3585 	if (ctxt->b == 0x0f) {
3586 		ctxt->twobyte = 1;
3587 		ctxt->b = insn_fetch(u8, ctxt);
3588 		opcode = twobyte_table[ctxt->b];
3589 	}
3590 	ctxt->d = opcode.flags;
3591 
3592 	while (ctxt->d & GroupMask) {
3593 		switch (ctxt->d & GroupMask) {
3594 		case Group:
3595 			ctxt->modrm = insn_fetch(u8, ctxt);
3596 			--ctxt->_eip;
3597 			goffset = (ctxt->modrm >> 3) & 7;
3598 			opcode = opcode.u.group[goffset];
3599 			break;
3600 		case GroupDual:
3601 			ctxt->modrm = insn_fetch(u8, ctxt);
3602 			--ctxt->_eip;
3603 			goffset = (ctxt->modrm >> 3) & 7;
3604 			if ((ctxt->modrm >> 6) == 3)
3605 				opcode = opcode.u.gdual->mod3[goffset];
3606 			else
3607 				opcode = opcode.u.gdual->mod012[goffset];
3608 			break;
3609 		case RMExt:
3610 			goffset = ctxt->modrm & 7;
3611 			opcode = opcode.u.group[goffset];
3612 			break;
3613 		case Prefix:
3614 			if (ctxt->rep_prefix && op_prefix)
3615 				return EMULATION_FAILED;
3616 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3617 			switch (simd_prefix) {
3618 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3619 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3620 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3621 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3622 			}
3623 			break;
3624 		default:
3625 			return EMULATION_FAILED;
3626 		}
3627 
3628 		ctxt->d &= ~(u64)GroupMask;
3629 		ctxt->d |= opcode.flags;
3630 	}
3631 
3632 	ctxt->execute = opcode.u.execute;
3633 	ctxt->check_perm = opcode.check_perm;
3634 	ctxt->intercept = opcode.intercept;
3635 
3636 	/* Unrecognised? */
3637 	if (ctxt->d == 0 || (ctxt->d & Undefined))
3638 		return EMULATION_FAILED;
3639 
3640 	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3641 		return EMULATION_FAILED;
3642 
3643 	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3644 		ctxt->op_bytes = 8;
3645 
3646 	if (ctxt->d & Op3264) {
3647 		if (mode == X86EMUL_MODE_PROT64)
3648 			ctxt->op_bytes = 8;
3649 		else
3650 			ctxt->op_bytes = 4;
3651 	}
3652 
3653 	if (ctxt->d & Sse)
3654 		ctxt->op_bytes = 16;
3655 
3656 	/* ModRM and SIB bytes. */
3657 	if (ctxt->d & ModRM) {
3658 		rc = decode_modrm(ctxt, &ctxt->memop);
3659 		if (!ctxt->has_seg_override)
3660 			set_seg_override(ctxt, ctxt->modrm_seg);
3661 	} else if (ctxt->d & MemAbs)
3662 		rc = decode_abs(ctxt, &ctxt->memop);
3663 	if (rc != X86EMUL_CONTINUE)
3664 		goto done;
3665 
3666 	if (!ctxt->has_seg_override)
3667 		set_seg_override(ctxt, VCPU_SREG_DS);
3668 
3669 	ctxt->memop.addr.mem.seg = seg_override(ctxt);
3670 
3671 	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
3672 		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
3673 
3674 	/*
3675 	 * Decode and fetch the source operand: register, memory
3676 	 * or immediate.
3677 	 */
3678 	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
3679 	if (rc != X86EMUL_CONTINUE)
3680 		goto done;
3681 
3682 	/*
3683 	 * Decode and fetch the second source operand: register, memory
3684 	 * or immediate.
3685 	 */
3686 	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
3687 	if (rc != X86EMUL_CONTINUE)
3688 		goto done;
3689 
3690 	/* Decode and fetch the destination operand: register or memory. */
3691 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
3692 
3693 done:
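	/*
	 * RIP-relative operands are relative to the end of the instruction,
	 * which is only known once every byte has been fetched; apply the
	 * fixup here rather than during operand decode.
	 */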
3694 	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
3695 		ctxt->memopp->addr.mem.ea += ctxt->_eip;
3696 
3697 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
3698 }
3699 
3700 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3701 {
3702 	/* The second termination condition only applies to REPE
3703 	 * and REPNE. Test if the repeat string operation prefix is
3704 	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
3705 	 * corresponding termination condition:
3706 	 * 	- if REPE/REPZ and ZF = 0 then done
3707 	 * 	- if REPNE/REPNZ and ZF = 1 then done
3708 	 */
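	/* 0xa6/0xa7 are cmps, 0xae/0xaf are scas: the only string ops
	 * that update ZF */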
3709 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3710 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3711 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
3712 		 ((ctxt->eflags & EFLG_ZF) == 0))
3713 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
3714 		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3715 		return true;
3716 
3717 	return false;
3718 }
3719 
3720 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3721 {
3722 	struct x86_emulate_ops *ops = ctxt->ops;
3723 	u64 msr_data;
3724 	int rc = X86EMUL_CONTINUE;
3725 	int saved_dst_type = ctxt->dst.type;
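	/* string instructions may restart without being re-decoded, so the
	 * original dst type must survive across writeback */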
3726 
3727 	ctxt->mem_read.pos = 0;
3728 
3729 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3730 		rc = emulate_ud(ctxt);
3731 		goto done;
3732 	}
3733 
3734 	/* The LOCK prefix is allowed only with certain instructions, and only on memory operands */
3735 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3736 		rc = emulate_ud(ctxt);
3737 		goto done;
3738 	}
3739 
3740 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3741 		rc = emulate_ud(ctxt);
3742 		goto done;
3743 	}
3744 
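	/* SSE instructions #UD when CR0.EM is set or CR4.OSFXSR is clear */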
3745 	if ((ctxt->d & Sse)
3746 	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3747 		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3748 		rc = emulate_ud(ctxt);
3749 		goto done;
3750 	}
3751 
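	/* with CR0.TS set, SSE instructions raise #NM (device not available) */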
3752 	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3753 		rc = emulate_nm(ctxt);
3754 		goto done;
3755 	}
3756 
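	/* first of three intercept checkpoints for nested guests; the
	 * others run after the permission checks and after operand fetch */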
3757 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3758 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3759 					      X86_ICPT_PRE_EXCEPT);
3760 		if (rc != X86EMUL_CONTINUE)
3761 			goto done;
3762 	}
3763 
3764 	/* Privileged instructions can be executed only at CPL 0 */
3765 	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3766 		rc = emulate_gp(ctxt, 0);
3767 		goto done;
3768 	}
3769 
3770 	/* Instruction can only be executed in protected mode */
3771 	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3772 		rc = emulate_ud(ctxt);
3773 		goto done;
3774 	}
3775 
3776 	/* Do instruction specific permission checks */
3777 	if (ctxt->check_perm) {
3778 		rc = ctxt->check_perm(ctxt);
3779 		if (rc != X86EMUL_CONTINUE)
3780 			goto done;
3781 	}
3782 
3783 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3784 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3785 					      X86_ICPT_POST_EXCEPT);
3786 		if (rc != X86EMUL_CONTINUE)
3787 			goto done;
3788 	}
3789 
3790 	if (ctxt->rep_prefix && (ctxt->d & String)) {
3791 		/* All REP prefixes share the same first termination condition: rCX == 0 */
3792 		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3793 			ctxt->eip = ctxt->_eip;
3794 			goto done;
3795 		}
3796 	}
3797 
3798 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3799 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
3800 				    ctxt->src.valptr, ctxt->src.bytes);
3801 		if (rc != X86EMUL_CONTINUE)
3802 			goto done;
3803 		ctxt->src.orig_val64 = ctxt->src.val64;
3804 	}
3805 
3806 	if (ctxt->src2.type == OP_MEM) {
3807 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3808 				    &ctxt->src2.val, ctxt->src2.bytes);
3809 		if (rc != X86EMUL_CONTINUE)
3810 			goto done;
3811 	}
3812 
3813 	if ((ctxt->d & DstMask) == ImplicitOps)
3814 		goto special_insn;
3815 
3817 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3818 		/* optimisation - avoid slow emulated read if Mov */
3819 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3820 				   &ctxt->dst.val, ctxt->dst.bytes);
3821 		if (rc != X86EMUL_CONTINUE)
3822 			goto done;
3823 	}
3824 	ctxt->dst.orig_val = ctxt->dst.val;
3825 
3826 special_insn:
3827 
3828 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3829 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3830 					      X86_ICPT_POST_MEMACCESS);
3831 		if (rc != X86EMUL_CONTINUE)
3832 			goto done;
3833 	}
3834 
3835 	if (ctxt->execute) {
3836 		rc = ctxt->execute(ctxt);
3837 		if (rc != X86EMUL_CONTINUE)
3838 			goto done;
3839 		goto writeback;
3840 	}
3841 
3842 	if (ctxt->twobyte)
3843 		goto twobyte_insn;
3844 
3845 	switch (ctxt->b) {
3846 	case 0x40 ... 0x47: /* inc r16/r32 */
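		/* only reached outside 64-bit mode, where 0x40-0x4f
		 * are REX prefixes instead */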
3847 		emulate_1op(ctxt, "inc");
3848 		break;
3849 	case 0x48 ... 0x4f: /* dec r16/r32 */
3850 		emulate_1op(ctxt, "dec");
3851 		break;
3852 	case 0x63:		/* movsxd */
3853 		if (ctxt->mode != X86EMUL_MODE_PROT64)
3854 			goto cannot_emulate;
3855 		ctxt->dst.val = (s32) ctxt->src.val;
3856 		break;
3857 	case 0x6c:		/* insb */
3858 	case 0x6d:		/* insw/insd */
3859 		ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
3860 		goto do_io_in;
3861 	case 0x6e:		/* outsb */
3862 	case 0x6f:		/* outsw/outsd */
3863 		ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
3864 		goto do_io_out;
3866 	case 0x70 ... 0x7f: /* jcc (short) */
3867 		if (test_cc(ctxt->b, ctxt->eflags))
3868 			jmp_rel(ctxt, ctxt->src.val);
3869 		break;
3870 	case 0x8d: /* lea r16/r32, m */
3871 		ctxt->dst.val = ctxt->src.addr.mem.ea;
3872 		break;
3873 	case 0x8f:		/* pop (sole member of Grp1a) */
3874 		rc = em_grp1a(ctxt);
3875 		break;
3876 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
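		/* 0x90 with rAX as the destination is NOP: skip the xchg */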
3877 		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
3878 			break;
3879 		rc = em_xchg(ctxt);
3880 		break;
3881 	case 0x98: /* cbw/cwde/cdqe */
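		/* sign-extend AL->AX, AX->EAX or EAX->RAX per operand size */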
3882 		switch (ctxt->op_bytes) {
3883 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
3884 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
3885 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
3886 		}
3887 		break;
3888 	case 0xc0 ... 0xc1:
3889 		rc = em_grp2(ctxt);
3890 		break;
3891 	case 0xcc:		/* int3 */
3892 		rc = emulate_int(ctxt, 3);
3893 		break;
3894 	case 0xcd:		/* int n */
3895 		rc = emulate_int(ctxt, ctxt->src.val);
3896 		break;
3897 	case 0xce:		/* into */
3898 		if (ctxt->eflags & EFLG_OF)
3899 			rc = emulate_int(ctxt, 4);
3900 		break;
3901 	case 0xd0 ... 0xd1:	/* Grp2 */
3902 		rc = em_grp2(ctxt);
3903 		break;
3904 	case 0xd2 ... 0xd3:	/* Grp2 */
3905 		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
3906 		rc = em_grp2(ctxt);
3907 		break;
3908 	case 0xe4: 	/* inb */
3909 	case 0xe5: 	/* in */
3910 		goto do_io_in;
3911 	case 0xe6: /* outb */
3912 	case 0xe7: /* out */
3913 		goto do_io_out;
3914 	case 0xe8: /* call (near) */ {
3915 		long int rel = ctxt->src.val;
3916 		ctxt->src.val = (unsigned long) ctxt->_eip;
3917 		jmp_rel(ctxt, rel);
3918 		rc = em_push(ctxt);
3919 		break;
3920 	}
3921 	case 0xe9: /* jmp rel */
3922 	case 0xeb: /* jmp rel short */
3923 		jmp_rel(ctxt, ctxt->src.val);
3924 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3925 		break;
3926 	case 0xec: /* in al,dx */
3927 	case 0xed: /* in (e/r)ax,dx */
3928 	do_io_in:
3929 		if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3930 				     &ctxt->dst.val))
3931 			goto done; /* IO is needed */
3932 		break;
3933 	case 0xee: /* out dx,al */
3934 	case 0xef: /* out dx,(e/r)ax */
3935 	do_io_out:
3936 		ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3937 				      &ctxt->src.val, 1);
3938 		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
3939 		break;
3940 	case 0xf4:              /* hlt */
3941 		ctxt->ops->halt(ctxt);
3942 		break;
3943 	case 0xf5:	/* cmc */
3944 		/* complement the carry flag in eflags */
3945 		ctxt->eflags ^= EFLG_CF;
3946 		break;
3947 	case 0xf8: /* clc */
3948 		ctxt->eflags &= ~EFLG_CF;
3949 		break;
3950 	case 0xf9: /* stc */
3951 		ctxt->eflags |= EFLG_CF;
3952 		break;
3953 	case 0xfc: /* cld */
3954 		ctxt->eflags &= ~EFLG_DF;
3955 		break;
3956 	case 0xfd: /* std */
3957 		ctxt->eflags |= EFLG_DF;
3958 		break;
3959 	case 0xfe: /* Grp4 */
3960 		rc = em_grp45(ctxt);
3961 		break;
3962 	case 0xff: /* Grp5 */
3963 		rc = em_grp45(ctxt);
3964 		break;
3965 	default:
3966 		goto cannot_emulate;
3967 	}
3968 
3969 	if (rc != X86EMUL_CONTINUE)
3970 		goto done;
3971 
3972 writeback:
3973 	rc = writeback(ctxt);
3974 	if (rc != X86EMUL_CONTINUE)
3975 		goto done;
3976 
3977 	/*
3978 	 * restore dst type in case the decode is reused
3979 	 * (happens for string instructions)
3980 	 */
3981 	ctxt->dst.type = saved_dst_type;
3982 
3983 	if ((ctxt->d & SrcMask) == SrcSI)
3984 		string_addr_inc(ctxt, seg_override(ctxt),
3985 				VCPU_REGS_RSI, &ctxt->src);
3986 
3987 	if ((ctxt->d & DstMask) == DstDI)
3988 		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
3989 				&ctxt->dst);
3990 
3991 	if (ctxt->rep_prefix && (ctxt->d & String)) {
3992 		struct read_cache *r = &ctxt->io_read;
3993 		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
3994 
3995 		if (!string_insn_completed(ctxt)) {
3996 			/*
3997 			 * Re-enter the guest when the PIO read-ahead buffer is
3998 			 * empty or, if it is not used, after every 1024 iterations.
3999 			 */
4000 			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4001 			    (r->end == 0 || r->end != r->pos)) {
4002 				/*
4003 				 * Reset the read cache. This usually happens
4004 				 * before decode, but since the instruction is
4005 				 * restarted we have to do it here.
4006 				 */
4007 				ctxt->mem_read.end = 0;
4008 				return EMULATION_RESTART;
4009 			}
4010 			goto done; /* skip rip writeback */
4011 		}
4012 	}
4013 
4014 	ctxt->eip = ctxt->_eip;
4015 
4016 done:
4017 	if (rc == X86EMUL_PROPAGATE_FAULT)
4018 		ctxt->have_exception = true;
4019 	if (rc == X86EMUL_INTERCEPTED)
4020 		return EMULATION_INTERCEPTED;
4021 
4022 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4023 
4024 twobyte_insn:
4025 	switch (ctxt->b) {
4026 	case 0x09:		/* wbinvd */
4027 		(ctxt->ops->wbinvd)(ctxt);
4028 		break;
4029 	case 0x08:		/* invd */
4030 	case 0x0d:		/* GrpP (prefetch) */
4031 	case 0x18:		/* Grp16 (prefetch/nop) */
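		/* treated as no-ops by the emulator */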
4032 		break;
4033 	case 0x20: /* mov cr, reg */
4034 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4035 		break;
4036 	case 0x21: /* mov from dr to reg */
4037 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4038 		break;
4039 	case 0x22: /* mov reg, cr */
4040 		if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4041 			emulate_gp(ctxt, 0);
4042 			rc = X86EMUL_PROPAGATE_FAULT;
4043 			goto done;
4044 		}
4045 		ctxt->dst.type = OP_NONE;
4046 		break;
4047 	case 0x23: /* mov from reg to dr */
4048 		if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4049 				((ctxt->mode == X86EMUL_MODE_PROT64) ?
4050 				 ~0ULL : ~0U)) < 0) {
4051 			/* #UD condition is already handled by the code above */
4052 			emulate_gp(ctxt, 0);
4053 			rc = X86EMUL_PROPAGATE_FAULT;
4054 			goto done;
4055 		}
4056 
4057 		ctxt->dst.type = OP_NONE;	/* no writeback */
4058 		break;
4059 	case 0x30:
4060 		/* wrmsr */
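		/* the 64-bit MSR value is assembled from EDX:EAX */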
4061 		msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4062 			| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4063 		if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4064 			emulate_gp(ctxt, 0);
4065 			rc = X86EMUL_PROPAGATE_FAULT;
4066 			goto done;
4067 		}
4068 		rc = X86EMUL_CONTINUE;
4069 		break;
4070 	case 0x32:
4071 		/* rdmsr */
4072 		if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4073 			emulate_gp(ctxt, 0);
4074 			rc = X86EMUL_PROPAGATE_FAULT;
4075 			goto done;
4076 		} else {
4077 			ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4078 			ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4079 		}
4080 		rc = X86EMUL_CONTINUE;
4081 		break;
4082 	case 0x40 ... 0x4f:	/* cmov */
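		/* assume the move happens; cancel the writeback below if
		 * the condition turns out to be false */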
4083 		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4084 		if (!test_cc(ctxt->b, ctxt->eflags))
4085 			ctxt->dst.type = OP_NONE; /* no writeback */
4086 		break;
4087 	case 0x80 ... 0x8f: /* jcc (near) */
4088 		if (test_cc(ctxt->b, ctxt->eflags))
4089 			jmp_rel(ctxt, ctxt->src.val);
4090 		break;
4091 	case 0x90 ... 0x9f:     /* setcc r/m8 */
4092 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4093 		break;
4094 	case 0xa3:
4095 	      bt:		/* bt */
4096 		ctxt->dst.type = OP_NONE;
4097 		/* only subword offset */
4098 		ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4099 		emulate_2op_SrcV_nobyte(ctxt, "bt");
4100 		break;
4101 	case 0xa4: /* shld imm8, r, r/m */
4102 	case 0xa5: /* shld cl, r, r/m */
4103 		emulate_2op_cl(ctxt, "shld");
4104 		break;
4105 	case 0xab:
4106 	      bts:		/* bts */
4107 		emulate_2op_SrcV_nobyte(ctxt, "bts");
4108 		break;
4109 	case 0xac: /* shrd imm8, r, r/m */
4110 	case 0xad: /* shrd cl, r, r/m */
4111 		emulate_2op_cl(ctxt, "shrd");
4112 		break;
4113 	case 0xae:              /* clflush */
4114 		break;
4115 	case 0xb0 ... 0xb1:	/* cmpxchg */
4116 		/*
4117 		 * Save real source value, then compare EAX against
4118 		 * destination.
4119 		 */
4120 		ctxt->src.orig_val = ctxt->src.val;
4121 		ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4122 		emulate_2op_SrcV(ctxt, "cmp");
4123 		if (ctxt->eflags & EFLG_ZF) {
4124 			/* Success: write back to memory. */
4125 			ctxt->dst.val = ctxt->src.orig_val;
4126 		} else {
4127 			/* Failure: write the value we saw to EAX. */
4128 			ctxt->dst.type = OP_REG;
4129 			ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4130 		}
4131 		break;
4132 	case 0xb3:
4133 	      btr:		/* btr */
4134 		emulate_2op_SrcV_nobyte(ctxt, "btr");
4135 		break;
4136 	case 0xb6 ... 0xb7:	/* movzx */
4137 		ctxt->dst.bytes = ctxt->op_bytes;
4138 		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4139 						       : (u16) ctxt->src.val;
4140 		break;
4141 	case 0xba:		/* Grp8 */
4142 		switch (ctxt->modrm_reg & 3) {
4143 		case 0:
4144 			goto bt;
4145 		case 1:
4146 			goto bts;
4147 		case 2:
4148 			goto btr;
4149 		case 3:
4150 			goto btc;
4151 		}
4152 		break;
4153 	case 0xbb:
4154 	      btc:		/* btc */
4155 		emulate_2op_SrcV_nobyte(ctxt, "btc");
4156 		break;
4157 	case 0xbc: {		/* bsf */
4158 		u8 zf;
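		/* run bsf on the host and capture ZF with setz; if ZF is
		 * set the source was zero and dst must stay unmodified */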
4159 		__asm__ ("bsf %2, %0; setz %1"
4160 			 : "=r"(ctxt->dst.val), "=q"(zf)
4161 			 : "r"(ctxt->src.val));
4162 		ctxt->eflags &= ~X86_EFLAGS_ZF;
4163 		if (zf) {
4164 			ctxt->eflags |= X86_EFLAGS_ZF;
4165 			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4166 		}
4167 		break;
4168 	}
4169 	case 0xbd: {		/* bsr */
4170 		u8 zf;
4171 		__asm__ ("bsr %2, %0; setz %1"
4172 			 : "=r"(ctxt->dst.val), "=q"(zf)
4173 			 : "r"(ctxt->src.val));
4174 		ctxt->eflags &= ~X86_EFLAGS_ZF;
4175 		if (zf) {
4176 			ctxt->eflags |= X86_EFLAGS_ZF;
4177 			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4178 		}
4179 		break;
4180 	}
4181 	case 0xbe ... 0xbf:	/* movsx */
4182 		ctxt->dst.bytes = ctxt->op_bytes;
4183 		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4184 							(s16) ctxt->src.val;
4185 		break;
4186 	case 0xc0 ... 0xc1:	/* xadd */
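		/* xadd: dst += src, and the old dst value is written back
		 * to the source register */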
4187 		emulate_2op_SrcV(ctxt, "add");
4188 		/* Write back the register source. */
4189 		ctxt->src.val = ctxt->dst.orig_val;
4190 		write_register_operand(&ctxt->src);
4191 		break;
4192 	case 0xc3:		/* movnti */
4193 		ctxt->dst.bytes = ctxt->op_bytes;
4194 		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4195 							(u64) ctxt->src.val;
4196 		break;
4197 	case 0xc7:		/* Grp9 (cmpxchg8b) */
4198 		rc = em_grp9(ctxt);
4199 		break;
4200 	default:
4201 		goto cannot_emulate;
4202 	}
4203 
4204 	if (rc != X86EMUL_CONTINUE)
4205 		goto done;
4206 
4207 	goto writeback;
4208 
4209 cannot_emulate:
4210 	return EMULATION_FAILED;
4211 }
4212