/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
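
/*
 * Illustrative sketch (not part of the original source): each operand
 * slot in the u64 decode flags is OpBits wide, so a slot is recovered
 * by shifting and masking with the shift constants used in this file
 * (DstShift, SrcShift, Src2Shift):
 *
 *	u64 flags = DstReg | SrcMem | ModRM;
 *	unsigned dst = (flags >> DstShift) & OpMask;	-> OpReg
 *	unsigned src = (flags >> SrcShift) & OpMask;	-> OpMem
 */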
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
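
/*
 * Illustrative sketch (an assumption based on the layout described
 * above, not a quote of the dispatcher): since every size variant is
 * exactly FASTOP_SIZE bytes, the entry point for a given operand size
 * can be computed instead of looked up:
 *
 *	void (*fop)(struct fastop *) = em_add;		byte variant
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	word/long/quad variant
 */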
	int (*execute)(struct x86_emulate_ctxt *ctxt);
	const struct opcode *group;
	const struct group_dual *gdual;
	const struct gprefix *gprefix;
	const struct escape *esc;
	void (*fastop)(struct fastop *fake);
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);

	struct opcode mod012[8];
	struct opcode mod3[8];

	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;

	struct opcode high[64];
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	return ctxt->_regs[nr];

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
	return reg_write(ctxt, nr);

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
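
/*
 * Sketch of the intended use (assumed; the actual writeback lives in
 * the fastop dispatcher): only the six status flags above are taken
 * from the emulated result, everything else keeps the saved value:
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */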
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	ON64(FOP1E(op##q, rax)) \

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	ON64(FOP1E(op, rcx)) \

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	ON64(FOP1EEX(op, rcx)) \

#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \

#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
	struct x86_instruction_info info = {
		.intercept = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod = ctxt->modrm_mod,
		.modrm_reg = ctxt->modrm_reg,
		.modrm_rm = ctxt->modrm_rm,
		.src_val = ctxt->src.val64,
		.dst_val = ctxt->dst.val64,
		.src_bytes = ctxt->src.bytes,
		.dst_bytes = ctxt->dst.bytes,
		.ad_bytes = ctxt->ad_bytes,
		.next_rip = ctxt->eip,

	return ctxt->ops->intercept(ctxt, &info, stage);

static void assign_masked(ulong *dest, ulong src, ulong mask)
	*dest = (*dest & ~mask) | (src & mask);
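
/*
 * Usage example (illustrative; new_rsp is a hypothetical local):
 * update only the low 16 bits of RSP, as a push through a 16-bit
 * stack segment would:
 *
 *	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), new_rsp, 0xffffu);
 */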
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
	return (1UL << (ctxt->ad_bytes << 3)) - 1;

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */

static int stack_size(struct x86_emulate_ctxt *ctxt)
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
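
/*
 * Worked example (added): a 16-bit stack segment yields
 * stack_mask() == 0xffff, so __fls(0xffff) == 15 and
 * stack_size() == (15 + 1) >> 3 == 2 bytes.
 */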
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	if (ctxt->ad_bytes == sizeof(unsigned long))
	return reg & ad_mask(ctxt);

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	return address_mask(ctxt, reg);

static void masked_increment(ulong *reg, ulong mask, int inc)
	assign_masked(reg, *reg + inc, mask);

register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
	if (ctxt->ad_bytes == sizeof(unsigned long))
	mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);

static u32 desc_limit_scaled(struct desc_struct *desc)
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
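
/*
 * Worked example (added): with the granularity bit set, a raw limit of
 * 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. 4GB.
 */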
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
	return ctxt->ops->get_cached_segment_base(ctxt, seg);

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;

static int emulate_db(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DB_VECTOR, 0, false);

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, GP_VECTOR, err, true);

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, SS_VECTOR, err, true);

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, UD_VECTOR, 0, false);

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, TS_VECTOR, err, true);

static int emulate_de(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DE_VECTOR, 0, false);

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, NM_VECTOR, 0, false);

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
	switch (ctxt->op_bytes) {
		ctxt->_eip = (u16)dst;
		ctxt->_eip = (u32)dst;
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		WARN(1, "unsupported eip assignment size\n");
	return X86EMUL_CONTINUE;

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
	return assign_eip_near(ctxt, ctxt->_eip + rel);

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
	if (likely(size < 16))

	if (ctxt->d & Aligned)
	else if (ctxt->d & Unaligned)
	else if (ctxt->d & Avx)

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
	struct desc_struct desc;

	la = seg_base(ctxt, addr.seg) +
	    (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)

		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,

		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)

		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))

		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
			/* expand-down segment */
			lim = desc.d ? 0xffffffff : 0xffff;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);

		if (size > *max_size)

		cpl = ctxt->ops->cpl(ctxt);

			/* data segment or readable code segment */
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */

	if (ctxt->mode != X86EMUL_MODE_PROT64)
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;

	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);

	return emulate_gp(ctxt, 0);

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
	return __linearize(ctxt, addr, &max_size, size, write, false, linear);

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)

	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
/*
 * Prefetch the remaining bytes of the instruction without crossing a
 * page boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };
	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15-byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * afterwards.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
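	/*
	 * Note (added): cur_size never exceeds 15 here, so the XOR above
	 * acts as subtraction (x ^ 15 == 15 - x for 0 <= x <= 15).
	 */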
	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn.  So,
	 * if we still do not have enough bytes, we must have hit the
	 * 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))

	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);

	return X86EMUL_CONTINUE;
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \

#define insn_fetch_arr(_arr, _size, _ctxt) \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
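
/*
 * Usage sketch (illustrative): fetching a ModRM or SIB byte advances
 * _eip and the fetch pointer as a side effect:
 *
 *	u8 sib = insn_fetch(u8, ctxt);
 */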
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
	rc = segmented_read_std(ctxt, addr, address, op_bytes);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
static void fetch_register_operand(struct operand *op)
		op->val = *(u8 *)op->addr.reg;
		op->val = *(u16 *)op->addr.reg;
		op->val = *(u32 *)op->addr.reg;
		op->val = *(u64 *)op->addr.reg;

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;

	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;

	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
static int em_fninit(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		read_sse_reg(ctxt, &op->vec_val, reg);
	if (ctxt->d & Mmx) {

	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
		if (ctxt->d & Sse) {
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
		if (ctxt->d & Mmx) {
			op->addr.mm = ctxt->modrm_rm & 7;
		fetch_register_operand(op);

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(u16, ctxt);
		switch (ctxt->modrm_rm) {
			modrm_ea += bx + si;
			modrm_ea += bx + di;
			modrm_ea += bp + si;
			modrm_ea += bp + di;
			if (ctxt->modrm_mod != 0)
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		switch (ctxt->modrm_mod) {
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(s32, ctxt);

	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
static int decode_abs(struct x86_emulate_ctxt *ctxt,
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->ad_bytes) {
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		op->addr.mem.ea = insn_fetch(u64, ctxt);

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
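
/*
 * Worked example (added): for a bit test of memory with a 4-byte
 * operand and a register bit offset of 35, mask is ~31 and sv is 32,
 * so the effective address advances by 32 >> 3 == 4 bytes and bit
 * 35 & 31 == 3 is addressed within the new dword.
 */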
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
	if (rc != X86EMUL_CONTINUE)

	memcpy(dest, mc->data + mc->pos, size);

	return X86EMUL_CONTINUE;

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
	return read_emulated(ctxt, linear, data, size);

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->write_emulated(ctxt, linear, data, size,

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;

	memcpy(dest, rc->data + rc->pos, size);

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
	ctxt->ops->get_idt(ctxt, &dt);
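
	/*
	 * Note (added): the error code below has bit 1 set, marking the
	 * reported index as referring to the IDT (index << 3 | IDT flag).
	 */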
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
		ops->get_gdt(ctxt, dt);
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
	u16 index = selector >> 3;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
	u16 index = selector >> 3;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
	struct desc_struct seg_desc, old_desc;
	unsigned err_vec = GP_VECTOR;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/*
		 * set real mode segment descriptor (keep limit etc. for
		 * unreal mode)
		 */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))

	if (null_selector) /* for NULL selector skip all following checks */

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)

	err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
		if (!(seg_desc.type & 8))

		if (seg_desc.type & 4) {

		if (rpl > cpl || dpl != cpl)

		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))

		/* mark segment as accessed */
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)

	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);

	return X86EMUL_CONTINUE;

	return emulate_exception(ctxt, err_vec, err_code, true);
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);

static void write_register_operand(struct operand *op)
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
		*(u8 *)op->addr.reg = (u8)op->val;
		*(u16 *)op->addr.reg = (u16)op->val;
		*op->addr.reg = (u32)op->val;
		break; /* 64b: zero-extend */
		*op->addr.reg = op->val;

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
		write_register_operand(op);
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
		return segmented_write(ctxt,
		return segmented_write(ctxt,
				       op->bytes * op->count);
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
	return X86EMUL_CONTINUE;

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);

static int em_push(struct x86_emulate_ctxt *ctxt)
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)

	rsp_increment(ctxt, len);

static int em_pop(struct x86_emulate_ctxt *ctxt)
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
			change_mask |= EFLG_IOPL;
			change_mask |= EFLG_IF;
	case X86EMUL_MODE_VM86:
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

static int em_popf(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int em_enter(struct x86_emulate_ctxt *ctxt)
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;

		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
	return X86EMUL_CONTINUE;

static int em_leave(struct x86_emulate_ctxt *ctxt)
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;
	unsigned long selector;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);

static int em_pusha(struct x86_emulate_ctxt *ctxt)
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		if (rc != X86EMUL_CONTINUE)

static int em_pushf(struct x86_emulate_ctxt *ctxt)
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
static int em_popa(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	const struct x86_emulate_ops *ops = ctxt->ops;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	if (rc != X86EMUL_CONTINUE)

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)

	ctxt->src.val = ctxt->_eip;
	if (rc != X86EMUL_CONTINUE)

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

static int em_iret(struct x86_emulate_ctxt *ctxt)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
	if (rc != X86EMUL_CONTINUE)

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
	return assign_eip_near(ctxt, ctxt->src.val);

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
	ctxt->src.val = old_eip;

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);
		ctxt->eflags |= EFLG_ZF;
	return X86EMUL_CONTINUE;

static int em_ret(struct x86_emulate_ctxt *ctxt)
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	return assign_eip_near(ctxt, eip);

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
	if (rc != X86EMUL_CONTINUE)
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	return X86EMUL_CONTINUE;

static int em_lseg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)

	ctxt->dst.val = ctxt->src.val;
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
	cs->l = 0; /* will be adjusted later */
	set_desc_base(cs, 0); /* flat segment */
	cs->g = 1; /* 4kb granularity */
	set_desc_limit(cs, 0xfffff); /* 4GB limit */
	cs->type = 0x0b; /* Read, Execute, Accessed */
	cs->dpl = 0; /* will be adjusted later */

	set_desc_base(ss, 0); /* flat segment */
	set_desc_limit(ss, 0xfffff); /* 4GB limit */
	ss->g = 1; /* 4kb granularity */
	ss->type = 0x03; /* Read/Write, Accessed */
	ss->d = 1; /* 32bit stack segment */

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;
	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor-specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)

	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat app will #UD!  While
	 * this behaviour can be fixed (by emulating) into an AMD-style
	 * response, AMD CPUs can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
static int em_syscall(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);
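
	/*
	 * Note (added): architecturally, SYSCALL takes its CS selector
	 * from STAR[47:32] and loads SS with that selector + 8, which is
	 * what the two assignments above implement.
	 */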
	if (efer & EFER_LMA) {
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ctxt->mode == X86EMUL_MODE_PROT64 ?
			MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);

	return X86EMUL_CONTINUE;

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);
	/*
	 * Not recognized on AMD in compat mode (but is recognized in
	 * legacy mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);
	/*
	 * XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject a #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);
2360 setup_syscalls_segments(ctxt, &cs, &ss);
2362 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2363 switch (ctxt->mode) {
2364 case X86EMUL_MODE_PROT32:
2365 if ((msr_data & 0xfffc) == 0x0)
2366 return emulate_gp(ctxt, 0);
2368 case X86EMUL_MODE_PROT64:
2369 if (msr_data == 0x0)
2370 return emulate_gp(ctxt, 0);
2376 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2377 cs_sel = (u16)msr_data;
2378 cs_sel &= ~SELECTOR_RPL_MASK;
2379 ss_sel = cs_sel + 8;
2380 ss_sel &= ~SELECTOR_RPL_MASK;
2381 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2386 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2387 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2389 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2390 ctxt->_eip = msr_data;
2392 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2393 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2395 return X86EMUL_CONTINUE;
2398 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2400 const struct x86_emulate_ops *ops = ctxt->ops;
2401 struct desc_struct cs, ss;
2402 u64 msr_data, rcx, rdx;
2404 u16 cs_sel = 0, ss_sel = 0;
2406 /* inject #GP if in real mode or Virtual 8086 mode */
2407 if (ctxt->mode == X86EMUL_MODE_REAL ||
2408 ctxt->mode == X86EMUL_MODE_VM86)
2409 return emulate_gp(ctxt, 0);
2411 setup_syscalls_segments(ctxt, &cs, &ss);
2413 if ((ctxt->rex_prefix & 0x8) != 0x0)
2414 usermode = X86EMUL_MODE_PROT64;
2416 usermode = X86EMUL_MODE_PROT32;
2418 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2419 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2423 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2425 case X86EMUL_MODE_PROT32:
2426 cs_sel = (u16)(msr_data + 16);
2427 if ((msr_data & 0xfffc) == 0x0)
2428 return emulate_gp(ctxt, 0);
2429 ss_sel = (u16)(msr_data + 24);
2433 case X86EMUL_MODE_PROT64:
2434 cs_sel = (u16)(msr_data + 32);
2435 if (msr_data == 0x0)
2436 return emulate_gp(ctxt, 0);
2437 ss_sel = cs_sel + 8;
2440 if (is_noncanonical_address(rcx) ||
2441 is_noncanonical_address(rdx))
2442 return emulate_gp(ctxt, 0);
2445 cs_sel |= SELECTOR_RPL_MASK;
2446 ss_sel |= SELECTOR_RPL_MASK;
2448 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2449 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2452 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2454 return X86EMUL_CONTINUE;
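/*
 * Rough contract of emulator_bad_iopl(): it returns true when an
 * I/O-sensitive instruction must not bypass the permission bitmap,
 * i.e. when CPL > EFLAGS.IOPL; in the full function real mode is
 * always allowed and VM86 mode always falls through to the TSS bitmap
 * check performed by emulator_io_permitted() below.
 */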
2457 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2460 if (ctxt->mode == X86EMUL_MODE_REAL)
2462 if (ctxt->mode == X86EMUL_MODE_VM86)
2464 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2465 return ctxt->ops->cpl(ctxt) > iopl;
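/*
 * TSS I/O permission bitmap walk: the 16-bit bitmap base is read from
 * offset 102 of the TSS, then the two bytes covering the port are
 * fetched and one bit per I/O byte is tested (a clear bit permits the
 * access). Worked example for a 1-byte access to port 0x3f8:
 *
 *	bit_idx = 0x3f8 & 0x7;			= 0
 *	byte	= io_bitmap_ptr + 0x3f8/8;	(bitmap byte offset)
 *	mask	= (1 << 1) - 1;			= 0x1
 *
 * The access is rejected if any masked bit is set.
 */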
2468 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2471 const struct x86_emulate_ops *ops = ctxt->ops;
2472 struct desc_struct tr_seg;
2475 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2476 unsigned mask = (1 << len) - 1;
2479 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2482 if (desc_limit_scaled(&tr_seg) < 103)
2484 base = get_desc_base(&tr_seg);
2485 #ifdef CONFIG_X86_64
2486 base |= ((u64)base3) << 32;
2488 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2489 if (r != X86EMUL_CONTINUE)
2491 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2493 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2494 if (r != X86EMUL_CONTINUE)
2496 if ((perm >> bit_idx) & mask)
2501 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2507 if (emulator_bad_iopl(ctxt))
2508 if (!emulator_io_port_access_allowed(ctxt, port, len))
2511 ctxt->perm_ok = true;
2516 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2517 struct tss_segment_16 *tss)
2519 tss->ip = ctxt->_eip;
2520 tss->flag = ctxt->eflags;
2521 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2522 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2523 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2524 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2525 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2526 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2527 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2528 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2530 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2531 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2532 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2533 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2534 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2537 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2538 struct tss_segment_16 *tss)
2543 ctxt->_eip = tss->ip;
2544 ctxt->eflags = tss->flag | 2;
2545 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2546 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2547 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2548 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2549 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2550 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2551 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2552 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2555 * SDM says that segment selectors are loaded before segment descriptors.
2558 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2559 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2560 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2561 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2562 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2567 * Now load segment descriptors. If a fault happens at this stage,
2568 * it is handled in the context of the new task.
2570 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2578 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2580 if (ret != X86EMUL_CONTINUE)
2582 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2584 if (ret != X86EMUL_CONTINUE)
2586 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2588 if (ret != X86EMUL_CONTINUE)
2591 return X86EMUL_CONTINUE;
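/*
 * task_switch_16() below implements the 16-bit hardware task switch:
 * save the outgoing state into the old TSS, read the new TSS,
 * optionally chain the old TSS selector into prev_task_link (so a
 * later IRET can return), and finally load CPU state from the new
 * TSS. As the FIXMEs note, faults taken during these read_std/
 * write_std accesses do not yet report a precise fault address.
 */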
2594 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2595 u16 tss_selector, u16 old_tss_sel,
2596 ulong old_tss_base, struct desc_struct *new_desc)
2598 const struct x86_emulate_ops *ops = ctxt->ops;
2599 struct tss_segment_16 tss_seg;
2601 u32 new_tss_base = get_desc_base(new_desc);
2603 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2605 if (ret != X86EMUL_CONTINUE)
2606 /* FIXME: need to provide precise fault address */
2609 save_state_to_tss16(ctxt, &tss_seg);
2611 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2613 if (ret != X86EMUL_CONTINUE)
2614 /* FIXME: need to provide precise fault address */
2617 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2619 if (ret != X86EMUL_CONTINUE)
2620 /* FIXME: need to provide precise fault address */
2623 if (old_tss_sel != 0xffff) {
2624 tss_seg.prev_task_link = old_tss_sel;
2626 ret = ops->write_std(ctxt, new_tss_base,
2627 &tss_seg.prev_task_link,
2628 sizeof tss_seg.prev_task_link,
2630 if (ret != X86EMUL_CONTINUE)
2631 /* FIXME: need to provide precise fault address */
2635 return load_state_from_tss16(ctxt, &tss_seg);
2638 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2639 struct tss_segment_32 *tss)
2641 /* CR3 and the LDT selector are intentionally not saved */
2642 tss->eip = ctxt->_eip;
2643 tss->eflags = ctxt->eflags;
2644 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2645 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2646 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2647 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2648 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2649 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2650 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2651 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2653 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2654 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2655 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2656 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2657 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2658 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2661 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2662 struct tss_segment_32 *tss)
2667 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2668 return emulate_gp(ctxt, 0);
2669 ctxt->_eip = tss->eip;
2670 ctxt->eflags = tss->eflags | 2;
2672 /* General purpose registers */
2673 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2674 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2675 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2676 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2677 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2678 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2679 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2680 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2683 * SDM says that segment selectors are loaded before segment
2684 * descriptors. This is important because CPL checks will use CS.RPL.
2687 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2688 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2689 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2690 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2691 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2692 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2693 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2696 * If we're switching between Protected Mode and VM86, we need to make
2697 * sure to update the mode before loading the segment descriptors so
2698 * that the selectors are interpreted correctly.
2700 if (ctxt->eflags & X86_EFLAGS_VM) {
2701 ctxt->mode = X86EMUL_MODE_VM86;
2704 ctxt->mode = X86EMUL_MODE_PROT32;
2709 * Now load segment descriptors. If a fault happens at this stage,
2710 * it is handled in the context of the new task.
2712 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2726 if (ret != X86EMUL_CONTINUE)
2728 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2730 if (ret != X86EMUL_CONTINUE)
2732 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2734 if (ret != X86EMUL_CONTINUE)
2736 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2738 if (ret != X86EMUL_CONTINUE)
2741 return X86EMUL_CONTINUE;
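/*
 * task_switch_32() below is the 32-bit counterpart of task_switch_16.
 * Note the partial write-back: only the byte range from 'eip' up to
 * (but excluding) 'ldt_selector' is written to the old TSS, i.e.
 * exactly the general purpose registers, eflags, eip and segment
 * selectors that save_state_to_tss32() fills in; CR3 and the other
 * static fields are left untouched.
 */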
2744 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2745 u16 tss_selector, u16 old_tss_sel,
2746 ulong old_tss_base, struct desc_struct *new_desc)
2748 const struct x86_emulate_ops *ops = ctxt->ops;
2749 struct tss_segment_32 tss_seg;
2751 u32 new_tss_base = get_desc_base(new_desc);
2752 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2753 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2755 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2757 if (ret != X86EMUL_CONTINUE)
2758 /* FIXME: need to provide precise fault address */
2761 save_state_to_tss32(ctxt, &tss_seg);
2763 /* Only GP registers and segment selectors are saved */
2764 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2765 ldt_sel_offset - eip_offset, &ctxt->exception);
2766 if (ret != X86EMUL_CONTINUE)
2767 /* FIXME: need to provide precise fault address */
2770 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2772 if (ret != X86EMUL_CONTINUE)
2773 /* FIXME: need to provide precise fault address */
2776 if (old_tss_sel != 0xffff) {
2777 tss_seg.prev_task_link = old_tss_sel;
2779 ret = ops->write_std(ctxt, new_tss_base,
2780 &tss_seg.prev_task_link,
2781 sizeof tss_seg.prev_task_link,
2783 if (ret != X86EMUL_CONTINUE)
2784 /* FIXME: need to provide precise fault address */
2788 return load_state_from_tss32(ctxt, &tss_seg);
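/*
 * emulator_do_task_switch() is the top-level task switch emulation.
 * Rough flow:
 *  1. read the new and current TSS descriptors;
 *  2. privilege checks (see the three-case comment below);
 *  3. limit/present checks on the new TSS (at least 0x67 bytes for a
 *     32-bit TSS, 0x2b for a 16-bit one);
 *  4. busy-flag and EFLAGS.NT bookkeeping: busy is cleared in the old
 *     descriptor on IRET/JMP and set in the new one except on IRET;
 *  5. dispatch to task_switch_32/16 based on descriptor type bit 3,
 *     then push the error code if the switch was caused by a fault.
 */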
2791 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2792 u16 tss_selector, int idt_index, int reason,
2793 bool has_error_code, u32 error_code)
2795 const struct x86_emulate_ops *ops = ctxt->ops;
2796 struct desc_struct curr_tss_desc, next_tss_desc;
2798 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2799 ulong old_tss_base =
2800 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2804 /* FIXME: old_tss_base == ~0 ? */
2806 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2807 if (ret != X86EMUL_CONTINUE)
2809 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2810 if (ret != X86EMUL_CONTINUE)
2813 /* FIXME: check that next_tss_desc is tss */
2816 * Check privileges. The three cases are task switch caused by...
2818 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2819 * 2. Exception/IRQ/iret: No check is performed
2820 * 3. jmp/call to TSS: Check against DPL of the TSS
2822 if (reason == TASK_SWITCH_GATE) {
2823 if (idt_index != -1) {
2824 /* Software interrupts */
2825 struct desc_struct task_gate_desc;
2828 ret = read_interrupt_descriptor(ctxt, idt_index,
2830 if (ret != X86EMUL_CONTINUE)
2833 dpl = task_gate_desc.dpl;
2834 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2835 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2837 } else if (reason != TASK_SWITCH_IRET) {
2838 int dpl = next_tss_desc.dpl;
2839 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2840 return emulate_gp(ctxt, tss_selector);
2844 desc_limit = desc_limit_scaled(&next_tss_desc);
2845 if (!next_tss_desc.p ||
2846 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2847 desc_limit < 0x2b)) {
2848 return emulate_ts(ctxt, tss_selector & 0xfffc);
2851 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2852 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2853 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2856 if (reason == TASK_SWITCH_IRET)
2857 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2859 /* Set the back link to the previous task only if the NT bit is set
2860 in eflags; note that old_tss_sel is not used after this point. */
2861 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2862 old_tss_sel = 0xffff;
2864 if (next_tss_desc.type & 8)
2865 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2866 old_tss_base, &next_tss_desc);
2868 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2869 old_tss_base, &next_tss_desc);
2870 if (ret != X86EMUL_CONTINUE)
2873 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2874 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2876 if (reason != TASK_SWITCH_IRET) {
2877 next_tss_desc.type |= (1 << 1); /* set busy flag */
2878 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2881 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2882 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2884 if (has_error_code) {
2885 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2886 ctxt->lock_prefix = 0;
2887 ctxt->src.val = (unsigned long) error_code;
2888 ret = em_push(ctxt);
2894 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2895 u16 tss_selector, int idt_index, int reason,
2896 bool has_error_code, u32 error_code)
2900 invalidate_registers(ctxt);
2901 ctxt->_eip = ctxt->eip;
2902 ctxt->dst.type = OP_NONE;
2904 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2905 has_error_code, error_code);
2907 if (rc == X86EMUL_CONTINUE) {
2908 ctxt->eip = ctxt->_eip;
2909 writeback_registers(ctxt);
2912 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
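/*
 * string_addr_inc() advances a string-op index register by
 * op->count * op->bytes, negated when EFLAGS.DF is set. E.g. a REP
 * MOVSD iteration that moved 3 dwords with DF clear adds 12 to
 * RSI/RDI.
 */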
2915 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2918 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2920 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2921 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
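/*
 * em_das() follows the SDM recipe for DAS (decimal adjust AL after
 * subtraction): if the low nibble of AL is greater than 9 or AF is
 * set, 6 is subtracted from AL and AF is set; if the original AL was
 * above 0x99 or CF was set, 0x60 is subtracted and CF is set. PF, ZF
 * and SF are then recomputed by running a flag-only OR through
 * fastop(em_or).
 */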
2924 static int em_das(struct x86_emulate_ctxt *ctxt)
2927 bool af, cf, old_cf;
2929 cf = ctxt->eflags & X86_EFLAGS_CF;
2935 af = ctxt->eflags & X86_EFLAGS_AF;
2936 if ((al & 0x0f) > 9 || af) {
2938 cf = old_cf | (al >= 250);
2943 if (old_al > 0x99 || old_cf) {
2949 /* Set PF, ZF, SF */
2950 ctxt->src.type = OP_IMM;
2952 ctxt->src.bytes = 1;
2953 fastop(ctxt, em_or);
2954 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2956 ctxt->eflags |= X86_EFLAGS_CF;
2958 ctxt->eflags |= X86_EFLAGS_AF;
2959 return X86EMUL_CONTINUE;
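/*
 * em_aam() implements AAM imm8: AH = AL / imm8, AL = AL % imm8, with
 * #DE injected for imm8 == 0 just like hardware. em_aad() below is
 * the inverse, folding AH back in: AL = (AL + AH * imm8) & 0xff with
 * AH cleared. Both recompute PF/ZF/SF the same way em_das() does.
 */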
2962 static int em_aam(struct x86_emulate_ctxt *ctxt)
2966 if (ctxt->src.val == 0)
2967 return emulate_de(ctxt);
2969 al = ctxt->dst.val & 0xff;
2970 ah = al / ctxt->src.val;
2971 al %= ctxt->src.val;
2973 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2975 /* Set PF, ZF, SF */
2976 ctxt->src.type = OP_IMM;
2978 ctxt->src.bytes = 1;
2979 fastop(ctxt, em_or);
2981 return X86EMUL_CONTINUE;
2984 static int em_aad(struct x86_emulate_ctxt *ctxt)
2986 u8 al = ctxt->dst.val & 0xff;
2987 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2989 al = (al + (ah * ctxt->src.val)) & 0xff;
2991 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2993 /* Set PF, ZF, SF */
2994 ctxt->src.type = OP_IMM;
2996 ctxt->src.bytes = 1;
2997 fastop(ctxt, em_or);
2999 return X86EMUL_CONTINUE;
3002 static int em_call(struct x86_emulate_ctxt *ctxt)
3005 long rel = ctxt->src.val;
3007 ctxt->src.val = (unsigned long)ctxt->_eip;
3008 rc = jmp_rel(ctxt, rel);
3009 if (rc != X86EMUL_CONTINUE)
3011 return em_push(ctxt);
3014 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3019 struct desc_struct old_desc, new_desc;
3020 const struct x86_emulate_ops *ops = ctxt->ops;
3021 int cpl = ctxt->ops->cpl(ctxt);
3023 old_eip = ctxt->_eip;
3024 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3026 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3027 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3029 if (rc != X86EMUL_CONTINUE)
3030 return X86EMUL_CONTINUE;
3032 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3033 if (rc != X86EMUL_CONTINUE)
3036 ctxt->src.val = old_cs;
3038 if (rc != X86EMUL_CONTINUE)
3041 ctxt->src.val = old_eip;
3043 /* If we failed, we tainted the memory, but at the very least we should restore cs. */
3045 if (rc != X86EMUL_CONTINUE)
3049 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3054 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3059 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3060 if (rc != X86EMUL_CONTINUE)
3062 rc = assign_eip_near(ctxt, eip);
3063 if (rc != X86EMUL_CONTINUE)
3065 rsp_increment(ctxt, ctxt->src.val);
3066 return X86EMUL_CONTINUE;
3069 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3071 /* Write back the register source. */
3072 ctxt->src.val = ctxt->dst.val;
3073 write_register_operand(&ctxt->src);
3075 /* Write back the memory destination with implicit LOCK prefix. */
3076 ctxt->dst.val = ctxt->src.orig_val;
3077 ctxt->lock_prefix = 1;
3078 return X86EMUL_CONTINUE;
3081 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3083 ctxt->dst.val = ctxt->src2.val;
3084 return fastop(ctxt, em_imul);
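/*
 * em_cwd() (CWD/CDQ/CQO) fills RDX with the sign of the accumulator.
 * The branch-free expression below works because shifting right by
 * (bytes * 8 - 1) leaves just the sign bit s:
 *
 *	~(s - 1) == ~0UL	when s == 1	(negative source)
 *	~(s - 1) == 0		when s == 0	(non-negative source)
 */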
3087 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3089 ctxt->dst.type = OP_REG;
3090 ctxt->dst.bytes = ctxt->src.bytes;
3091 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3092 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3094 return X86EMUL_CONTINUE;
3097 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3101 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3102 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3104 return X86EMUL_CONTINUE;
3107 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3111 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3112 return emulate_gp(ctxt, 0);
3113 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3114 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3115 return X86EMUL_CONTINUE;
3118 static int em_mov(struct x86_emulate_ctxt *ctxt)
3120 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3121 return X86EMUL_CONTINUE;
3124 #define FFL(x) bit(X86_FEATURE_##x)
3126 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3128 u32 ebx, ecx, edx, eax = 1;
3132 * Check that MOVBE is set in the guest-visible CPUID leaf.
3134 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3135 if (!(ecx & FFL(MOVBE)))
3136 return emulate_ud(ctxt);
3138 switch (ctxt->op_bytes) {
3141 * From MOVBE definition: "...When the operand size is 16 bits,
3142 * the upper word of the destination register remains unchanged
3145 * Casting either ->valptr or ->val to u16 breaks strict aliasing
3146 * rules, so we have to do the operation almost by hand.
3148 tmp = (u16)ctxt->src.val;
3149 ctxt->dst.val &= ~0xffffUL;
3150 ctxt->dst.val |= (unsigned long)swab16(tmp);
3153 ctxt->dst.val = swab32((u32)ctxt->src.val);
3156 ctxt->dst.val = swab64(ctxt->src.val);
3161 return X86EMUL_CONTINUE;
3164 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3166 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3167 return emulate_gp(ctxt, 0);
3169 /* Disable writeback. */
3170 ctxt->dst.type = OP_NONE;
3171 return X86EMUL_CONTINUE;
3174 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3178 if (ctxt->mode == X86EMUL_MODE_PROT64)
3179 val = ctxt->src.val & ~0ULL;
3181 val = ctxt->src.val & ~0U;
3183 /* #UD condition is already handled. */
3184 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3185 return emulate_gp(ctxt, 0);
3187 /* Disable writeback. */
3188 ctxt->dst.type = OP_NONE;
3189 return X86EMUL_CONTINUE;
3192 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3196 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3197 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3198 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3199 return emulate_gp(ctxt, 0);
3201 return X86EMUL_CONTINUE;
3204 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3208 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3209 return emulate_gp(ctxt, 0);
3211 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3212 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3213 return X86EMUL_CONTINUE;
3216 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3218 if (ctxt->modrm_reg > VCPU_SREG_GS)
3219 return emulate_ud(ctxt);
3221 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3222 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3223 ctxt->dst.bytes = 2;
3224 return X86EMUL_CONTINUE;
3227 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3229 u16 sel = ctxt->src.val;
3231 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3232 return emulate_ud(ctxt);
3234 if (ctxt->modrm_reg == VCPU_SREG_SS)
3235 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3237 /* Disable writeback. */
3238 ctxt->dst.type = OP_NONE;
3239 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3242 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3244 u16 sel = ctxt->src.val;
3246 /* Disable writeback. */
3247 ctxt->dst.type = OP_NONE;
3248 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3251 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3253 u16 sel = ctxt->src.val;
3255 /* Disable writeback. */
3256 ctxt->dst.type = OP_NONE;
3257 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3260 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3265 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3266 if (rc == X86EMUL_CONTINUE)
3267 ctxt->ops->invlpg(ctxt, linear);
3268 /* Disable writeback. */
3269 ctxt->dst.type = OP_NONE;
3270 return X86EMUL_CONTINUE;
3273 static int em_clts(struct x86_emulate_ctxt *ctxt)
3277 cr0 = ctxt->ops->get_cr(ctxt, 0);
3279 ctxt->ops->set_cr(ctxt, 0, cr0);
3280 return X86EMUL_CONTINUE;
3283 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3285 int rc = ctxt->ops->fix_hypercall(ctxt);
3287 if (rc != X86EMUL_CONTINUE)
3290 /* Let the processor re-execute the fixed hypercall */
3291 ctxt->_eip = ctxt->eip;
3292 /* Disable writeback. */
3293 ctxt->dst.type = OP_NONE;
3294 return X86EMUL_CONTINUE;
3297 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3298 void (*get)(struct x86_emulate_ctxt *ctxt,
3299 struct desc_ptr *ptr))
3301 struct desc_ptr desc_ptr;
3303 if (ctxt->mode == X86EMUL_MODE_PROT64)
3305 get(ctxt, &desc_ptr);
3306 if (ctxt->op_bytes == 2) {
3308 desc_ptr.address &= 0x00ffffff;
3310 /* Disable writeback. */
3311 ctxt->dst.type = OP_NONE;
3312 return segmented_write(ctxt, ctxt->dst.addr.mem,
3313 &desc_ptr, 2 + ctxt->op_bytes);
3316 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3318 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3321 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3323 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3326 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3328 struct desc_ptr desc_ptr;
3331 if (ctxt->mode == X86EMUL_MODE_PROT64)
3333 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3334 &desc_ptr.size, &desc_ptr.address,
3336 if (rc != X86EMUL_CONTINUE)
3338 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3339 /* Disable writeback. */
3340 ctxt->dst.type = OP_NONE;
3341 return X86EMUL_CONTINUE;
3344 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3348 rc = ctxt->ops->fix_hypercall(ctxt);
3350 /* Disable writeback. */
3351 ctxt->dst.type = OP_NONE;
3355 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3357 struct desc_ptr desc_ptr;
3360 if (ctxt->mode == X86EMUL_MODE_PROT64)
3362 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3363 &desc_ptr.size, &desc_ptr.address,
3365 if (rc != X86EMUL_CONTINUE)
3367 ctxt->ops->set_idt(ctxt, &desc_ptr);
3368 /* Disable writeback. */
3369 ctxt->dst.type = OP_NONE;
3370 return X86EMUL_CONTINUE;
3373 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3375 if (ctxt->dst.type == OP_MEM)
3376 ctxt->dst.bytes = 2;
3377 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3378 return X86EMUL_CONTINUE;
3381 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3383 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3384 | (ctxt->src.val & 0x0f));
3385 ctxt->dst.type = OP_NONE;
3386 return X86EMUL_CONTINUE;
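/*
 * em_loop() handles LOOPNE/LOOPE/LOOP (opcodes 0xe0-0xe2): RCX is
 * decremented first, and the branch is taken while RCX != 0 and, for
 * 0xe0/0xe1, the ZF condition holds. The 'ctxt->b ^ 0x5' trick maps
 * the opcode onto the Jcc condition codes: 0xe0 ^ 0x5 = 0xe5 (low
 * nibble 5 = NZ, matching LOOPNE) and 0xe1 ^ 0x5 = 0xe4 (low nibble
 * 4 = Z, matching LOOPE), so test_cc() can be reused unchanged.
 */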
3389 static int em_loop(struct x86_emulate_ctxt *ctxt)
3391 int rc = X86EMUL_CONTINUE;
3393 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3394 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3395 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3396 rc = jmp_rel(ctxt, ctxt->src.val);
3401 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3403 int rc = X86EMUL_CONTINUE;
3405 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3406 rc = jmp_rel(ctxt, ctxt->src.val);
3411 static int em_in(struct x86_emulate_ctxt *ctxt)
3413 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3415 return X86EMUL_IO_NEEDED;
3417 return X86EMUL_CONTINUE;
3420 static int em_out(struct x86_emulate_ctxt *ctxt)
3422 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3424 /* Disable writeback. */
3425 ctxt->dst.type = OP_NONE;
3426 return X86EMUL_CONTINUE;
3429 static int em_cli(struct x86_emulate_ctxt *ctxt)
3431 if (emulator_bad_iopl(ctxt))
3432 return emulate_gp(ctxt, 0);
3434 ctxt->eflags &= ~X86_EFLAGS_IF;
3435 return X86EMUL_CONTINUE;
3438 static int em_sti(struct x86_emulate_ctxt *ctxt)
3440 if (emulator_bad_iopl(ctxt))
3441 return emulate_gp(ctxt, 0);
3443 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3444 ctxt->eflags |= X86_EFLAGS_IF;
3445 return X86EMUL_CONTINUE;
3448 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3450 u32 eax, ebx, ecx, edx;
3452 eax = reg_read(ctxt, VCPU_REGS_RAX);
3453 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3454 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3455 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3456 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3457 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3458 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3459 return X86EMUL_CONTINUE;
3462 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3466 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3467 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3469 ctxt->eflags &= ~0xffUL;
3470 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3471 return X86EMUL_CONTINUE;
3474 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3476 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3477 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3478 return X86EMUL_CONTINUE;
3481 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3483 switch (ctxt->op_bytes) {
3484 #ifdef CONFIG_X86_64
3486 asm("bswap %0" : "+r"(ctxt->dst.val));
3490 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3493 return X86EMUL_CONTINUE;
3496 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3498 /* emulating clflush regardless of cpuid */
3499 return X86EMUL_CONTINUE;
3502 static bool valid_cr(int nr)
3514 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3516 if (!valid_cr(ctxt->modrm_reg))
3517 return emulate_ud(ctxt);
3519 return X86EMUL_CONTINUE;
3522 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3524 u64 new_val = ctxt->src.val64;
3525 int cr = ctxt->modrm_reg;
3528 static u64 cr_reserved_bits[] = {
3529 0xffffffff00000000ULL,
3530 0, 0, 0, /* CR3 checked later */
3537 return emulate_ud(ctxt);
3539 if (new_val & cr_reserved_bits[cr])
3540 return emulate_gp(ctxt, 0);
3545 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3546 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3547 return emulate_gp(ctxt, 0);
3549 cr4 = ctxt->ops->get_cr(ctxt, 4);
3550 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3552 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3553 !(cr4 & X86_CR4_PAE))
3554 return emulate_gp(ctxt, 0);
3561 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3562 if (efer & EFER_LMA)
3563 rsvd = CR3_L_MODE_RESERVED_BITS;
3566 return emulate_gp(ctxt, 0);
3571 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3573 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3574 return emulate_gp(ctxt, 0);
3580 return X86EMUL_CONTINUE;
3583 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3587 ctxt->ops->get_dr(ctxt, 7, &dr7);
3589 /* Check if DR7.Global_Enable is set */
3590 return dr7 & (1 << 13);
3593 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3595 int dr = ctxt->modrm_reg;
3599 return emulate_ud(ctxt);
3601 cr4 = ctxt->ops->get_cr(ctxt, 4);
3602 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3603 return emulate_ud(ctxt);
3605 if (check_dr7_gd(ctxt)) {
3608 ctxt->ops->get_dr(ctxt, 6, &dr6);
3610 dr6 |= DR6_BD | DR6_RTM;
3611 ctxt->ops->set_dr(ctxt, 6, dr6);
3612 return emulate_db(ctxt);
3615 return X86EMUL_CONTINUE;
3618 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3620 u64 new_val = ctxt->src.val64;
3621 int dr = ctxt->modrm_reg;
3623 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3624 return emulate_gp(ctxt, 0);
3626 return check_dr_read(ctxt);
3629 static int check_svme(struct x86_emulate_ctxt *ctxt)
3633 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3635 if (!(efer & EFER_SVME))
3636 return emulate_ud(ctxt);
3638 return X86EMUL_CONTINUE;
3641 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3643 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3645 /* Valid physical address? */
3646 if (rax & 0xffff000000000000ULL)
3647 return emulate_gp(ctxt, 0);
3649 return check_svme(ctxt);
3652 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3654 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3656 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3657 return emulate_ud(ctxt);
3659 return X86EMUL_CONTINUE;
3662 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3664 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3665 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3667 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3668 ctxt->ops->check_pmc(ctxt, rcx))
3669 return emulate_gp(ctxt, 0);
3671 return X86EMUL_CONTINUE;
3674 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3676 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3677 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3678 return emulate_gp(ctxt, 0);
3680 return X86EMUL_CONTINUE;
3683 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3685 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3686 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3687 return emulate_gp(ctxt, 0);
3689 return X86EMUL_CONTINUE;
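/*
 * Shorthand constructors for opcode table entries, roughly: D() is a
 * decode-only entry, I()/F() attach an emulation callback (F for the
 * flag-computing fastop stubs), DI()/II() additionally record an
 * intercept id, and the *P variants add a ->check_perm hook.
 * EXT/G/GD/E/GP redirect to a sub-table selected by the ModRM rm
 * bits, reg bits, mod field, an escape opcode or the mandatory SIMD
 * prefix. The 2bv helpers below expand one flag set into byte +
 * word/long encodings, and F6ALU emits the six classic ALU forms
 * (r/m,reg; reg,r/m; acc,imm).
 */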
3692 #define D(_y) { .flags = (_y) }
3693 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3694 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3695 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3696 #define N D(NotImpl)
3697 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3698 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3699 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3700 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3701 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3702 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3703 #define II(_f, _e, _i) \
3704 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3705 #define IIP(_f, _e, _i, _p) \
3706 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3707 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3708 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3710 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3711 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3712 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3713 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3714 #define I2bvIP(_f, _e, _i, _p) \
3715 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3717 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3718 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3719 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3721 static const struct opcode group7_rm0[] = {
3723 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3727 static const struct opcode group7_rm1[] = {
3728 DI(SrcNone | Priv, monitor),
3729 DI(SrcNone | Priv, mwait),
3733 static const struct opcode group7_rm3[] = {
3734 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3735 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3736 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3737 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3738 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3739 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3740 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3741 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3744 static const struct opcode group7_rm7[] = {
3746 DIP(SrcNone, rdtscp, check_rdtsc),
3750 static const struct opcode group1[] = {
3752 F(Lock | PageTable, em_or),
3755 F(Lock | PageTable, em_and),
3761 static const struct opcode group1A[] = {
3762 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3765 static const struct opcode group2[] = {
3766 F(DstMem | ModRM, em_rol),
3767 F(DstMem | ModRM, em_ror),
3768 F(DstMem | ModRM, em_rcl),
3769 F(DstMem | ModRM, em_rcr),
3770 F(DstMem | ModRM, em_shl),
3771 F(DstMem | ModRM, em_shr),
3772 F(DstMem | ModRM, em_shl), /* SAL is an alias of SHL */
3773 F(DstMem | ModRM, em_sar),
3776 static const struct opcode group3[] = {
3777 F(DstMem | SrcImm | NoWrite, em_test),
3778 F(DstMem | SrcImm | NoWrite, em_test),
3779 F(DstMem | SrcNone | Lock, em_not),
3780 F(DstMem | SrcNone | Lock, em_neg),
3781 F(DstXacc | Src2Mem, em_mul_ex),
3782 F(DstXacc | Src2Mem, em_imul_ex),
3783 F(DstXacc | Src2Mem, em_div_ex),
3784 F(DstXacc | Src2Mem, em_idiv_ex),
3787 static const struct opcode group4[] = {
3788 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3789 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3793 static const struct opcode group5[] = {
3794 F(DstMem | SrcNone | Lock, em_inc),
3795 F(DstMem | SrcNone | Lock, em_dec),
3796 I(SrcMem | NearBranch, em_call_near_abs),
3797 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3798 I(SrcMem | NearBranch, em_jmp_abs),
3799 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3800 I(SrcMem | Stack, em_push), D(Undefined),
3803 static const struct opcode group6[] = {
3806 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3807 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3811 static const struct group_dual group7 = { {
3812 II(Mov | DstMem, em_sgdt, sgdt),
3813 II(Mov | DstMem, em_sidt, sidt),
3814 II(SrcMem | Priv, em_lgdt, lgdt),
3815 II(SrcMem | Priv, em_lidt, lidt),
3816 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3817 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3818 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3822 N, EXT(0, group7_rm3),
3823 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3824 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3828 static const struct opcode group8[] = {
3830 F(DstMem | SrcImmByte | NoWrite, em_bt),
3831 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3832 F(DstMem | SrcImmByte | Lock, em_btr),
3833 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3836 static const struct group_dual group9 = { {
3837 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3839 N, N, N, N, N, N, N, N,
3842 static const struct opcode group11[] = {
3843 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3847 static const struct gprefix pfx_0f_ae_7 = {
3848 I(SrcMem | ByteOp, em_clflush), N, N, N,
3851 static const struct group_dual group15 = { {
3852 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3854 N, N, N, N, N, N, N, N,
3857 static const struct gprefix pfx_0f_6f_0f_7f = {
3858 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3861 static const struct gprefix pfx_0f_2b = {
3862 I(0, em_mov), I(0, em_mov), N, N,
3865 static const struct gprefix pfx_0f_28_0f_29 = {
3866 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3869 static const struct gprefix pfx_0f_e7 = {
3870 N, I(Sse, em_mov), N, N,
3873 static const struct escape escape_d9 = { {
3874 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3877 N, N, N, N, N, N, N, N,
3879 N, N, N, N, N, N, N, N,
3881 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3885 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3894 static const struct escape escape_db = { {
3895 N, N, N, N, N, N, N, N,
3898 N, N, N, N, N, N, N, N,
3900 N, N, N, N, N, N, N, N,
3902 N, N, N, N, N, N, N, N,
3904 N, N, N, N, N, N, N, N,
3906 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3915 static const struct escape escape_dd = { {
3916 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3919 N, N, N, N, N, N, N, N,
3921 N, N, N, N, N, N, N, N,
3923 N, N, N, N, N, N, N, N,
3925 N, N, N, N, N, N, N, N,
3927 N, N, N, N, N, N, N, N,
3929 N, N, N, N, N, N, N, N,
3931 N, N, N, N, N, N, N, N,
3933 N, N, N, N, N, N, N, N,
3936 static const struct opcode opcode_table[256] = {
3938 F6ALU(Lock, em_add),
3939 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3940 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3942 F6ALU(Lock | PageTable, em_or),
3943 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3946 F6ALU(Lock, em_adc),
3947 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3948 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3950 F6ALU(Lock, em_sbb),
3951 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3952 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3954 F6ALU(Lock | PageTable, em_and), N, N,
3956 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3958 F6ALU(Lock, em_xor), N, N,
3960 F6ALU(NoWrite, em_cmp), N, N,
3962 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3964 X8(I(SrcReg | Stack, em_push)),
3966 X8(I(DstReg | Stack, em_pop)),
3968 I(ImplicitOps | Stack | No64, em_pusha),
3969 I(ImplicitOps | Stack | No64, em_popa),
3970 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3973 I(SrcImm | Mov | Stack, em_push),
3974 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3975 I(SrcImmByte | Mov | Stack, em_push),
3976 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3977 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3978 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3980 X16(D(SrcImmByte | NearBranch)),
3982 G(ByteOp | DstMem | SrcImm, group1),
3983 G(DstMem | SrcImm, group1),
3984 G(ByteOp | DstMem | SrcImm | No64, group1),
3985 G(DstMem | SrcImmByte, group1),
3986 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3987 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3989 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3990 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3991 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3992 D(ModRM | SrcMem | NoAccess | DstReg),
3993 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3996 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3998 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3999 I(SrcImmFAddr | No64, em_call_far), N,
4000 II(ImplicitOps | Stack, em_pushf, pushf),
4001 II(ImplicitOps | Stack, em_popf, popf),
4002 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4004 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4005 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4006 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4007 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4009 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4010 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4011 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4012 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4014 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4016 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4018 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4019 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4020 I(ImplicitOps | NearBranch, em_ret),
4021 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4022 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4023 G(ByteOp, group11), G(0, group11),
4025 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4026 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4027 I(ImplicitOps | Stack, em_ret_far),
4028 D(ImplicitOps), DI(SrcImmByte, intn),
4029 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4031 G(Src2One | ByteOp, group2), G(Src2One, group2),
4032 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4033 I(DstAcc | SrcImmUByte | No64, em_aam),
4034 I(DstAcc | SrcImmUByte | No64, em_aad),
4035 F(DstAcc | ByteOp | No64, em_salc),
4036 I(DstAcc | SrcXLat | ByteOp, em_mov),
4038 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4040 X3(I(SrcImmByte | NearBranch, em_loop)),
4041 I(SrcImmByte | NearBranch, em_jcxz),
4042 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4043 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4045 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4046 I(SrcImmFAddr | No64, em_jmp_far),
4047 D(SrcImmByte | ImplicitOps | NearBranch),
4048 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4049 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4051 N, DI(ImplicitOps, icebp), N, N,
4052 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4053 G(ByteOp, group3), G(0, group3),
4055 D(ImplicitOps), D(ImplicitOps),
4056 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4057 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4060 static const struct opcode twobyte_table[256] = {
4062 G(0, group6), GD(0, &group7), N, N,
4063 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4064 II(ImplicitOps | Priv, em_clts, clts), N,
4065 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4066 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4068 N, N, N, N, N, N, N, N,
4069 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4070 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4072 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4073 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4074 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4076 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4079 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4080 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4081 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4084 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4085 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4086 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4087 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4088 I(ImplicitOps | EmulateOnUD, em_sysenter),
4089 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4091 N, N, N, N, N, N, N, N,
4093 X16(D(DstReg | SrcMem | ModRM)),
4095 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4100 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4105 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4107 X16(D(SrcImm | NearBranch)),
4109 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4111 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4112 II(ImplicitOps, em_cpuid, cpuid),
4113 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4114 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4115 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4117 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4118 DI(ImplicitOps, rsm),
4119 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4120 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4121 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4122 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4124 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4125 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4126 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4127 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4128 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4129 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4133 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4134 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4135 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4137 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4138 N, D(DstMem | SrcReg | ModRM | Mov),
4139 N, N, N, GD(0, &group9),
4141 X8(I(DstReg, em_bswap)),
4143 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4145 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4146 N, N, N, N, N, N, N, N,
4148 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4151 static const struct gprefix three_byte_0f_38_f0 = {
4152 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4155 static const struct gprefix three_byte_0f_38_f1 = {
4156 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4160 * Insns below are selected by the prefix which is indexed by the third opcode byte.
4163 static const struct opcode opcode_map_0f_38[256] = {
4165 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4167 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4169 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4170 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4189 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4193 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4199 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4200 unsigned size, bool sign_extension)
4202 int rc = X86EMUL_CONTINUE;
4206 op->addr.mem.ea = ctxt->_eip;
4207 /* NB. Immediates are sign-extended as necessary. */
4208 switch (op->bytes) {
4210 op->val = insn_fetch(s8, ctxt);
4213 op->val = insn_fetch(s16, ctxt);
4216 op->val = insn_fetch(s32, ctxt);
4219 op->val = insn_fetch(s64, ctxt);
4222 if (!sign_extension) {
4223 switch (op->bytes) {
4231 op->val &= 0xffffffff;
4239 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4242 int rc = X86EMUL_CONTINUE;
4246 decode_register_operand(ctxt, op);
4249 rc = decode_imm(ctxt, op, 1, false);
4252 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4256 if (ctxt->d & BitOp)
4257 fetch_bit_operand(ctxt);
4258 op->orig_val = op->val;
4261 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4265 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4266 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4267 fetch_register_operand(op);
4268 op->orig_val = op->val;
4272 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4273 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4274 fetch_register_operand(op);
4275 op->orig_val = op->val;
4278 if (ctxt->d & ByteOp) {
4283 op->bytes = ctxt->op_bytes;
4284 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4285 fetch_register_operand(op);
4286 op->orig_val = op->val;
4290 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4292 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4293 op->addr.mem.seg = VCPU_SREG_ES;
4300 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4301 fetch_register_operand(op);
4305 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4308 rc = decode_imm(ctxt, op, 1, true);
4315 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4318 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4321 ctxt->memop.bytes = 1;
4322 if (ctxt->memop.type == OP_REG) {
4323 ctxt->memop.addr.reg = decode_register(ctxt,
4324 ctxt->modrm_rm, true);
4325 fetch_register_operand(&ctxt->memop);
4329 ctxt->memop.bytes = 2;
4332 ctxt->memop.bytes = 4;
4335 rc = decode_imm(ctxt, op, 2, false);
4338 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4342 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4344 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4345 op->addr.mem.seg = ctxt->seg_override;
4351 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4353 register_address(ctxt,
4354 reg_read(ctxt, VCPU_REGS_RBX) +
4355 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4356 op->addr.mem.seg = ctxt->seg_override;
4361 op->addr.mem.ea = ctxt->_eip;
4362 op->bytes = ctxt->op_bytes + 2;
4363 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4366 ctxt->memop.bytes = ctxt->op_bytes + 2;
4369 op->val = VCPU_SREG_ES;
4372 op->val = VCPU_SREG_CS;
4375 op->val = VCPU_SREG_SS;
4378 op->val = VCPU_SREG_DS;
4381 op->val = VCPU_SREG_FS;
4384 op->val = VCPU_SREG_GS;
4387 /* Special instructions do their own operand decoding. */
4389 op->type = OP_NONE; /* Disable writeback. */
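/*
 * x86_decode_insn() below is the main decode entry point. Its flow,
 * roughly: prime the fetch cache with the bytes handed in, pick
 * default operand/address sizes from the CPU mode, consume legacy and
 * REX prefixes, look the opcode up in the one-, two- or three-byte
 * tables, resolve Group/GroupDual/Prefix/Escape indirection via the
 * ModRM byte, and decode the source, second source and destination
 * operands according to the flag fields. RIP-relative operands only
 * get _eip added at the very end, once the total length is known.
 */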
4397 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4399 int rc = X86EMUL_CONTINUE;
4400 int mode = ctxt->mode;
4401 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4402 bool op_prefix = false;
4403 bool has_seg_override = false;
4404 struct opcode opcode;
4406 ctxt->memop.type = OP_NONE;
4407 ctxt->memopp = NULL;
4408 ctxt->_eip = ctxt->eip;
4409 ctxt->fetch.ptr = ctxt->fetch.data;
4410 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4411 ctxt->opcode_len = 1;
4413 memcpy(ctxt->fetch.data, insn, insn_len);
4415 rc = __do_insn_fetch_bytes(ctxt, 1);
4416 if (rc != X86EMUL_CONTINUE)
4421 case X86EMUL_MODE_REAL:
4422 case X86EMUL_MODE_VM86:
4423 case X86EMUL_MODE_PROT16:
4424 def_op_bytes = def_ad_bytes = 2;
4426 case X86EMUL_MODE_PROT32:
4427 def_op_bytes = def_ad_bytes = 4;
4429 #ifdef CONFIG_X86_64
4430 case X86EMUL_MODE_PROT64:
4436 return EMULATION_FAILED;
4439 ctxt->op_bytes = def_op_bytes;
4440 ctxt->ad_bytes = def_ad_bytes;
4442 /* Legacy prefixes. */
4444 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4445 case 0x66: /* operand-size override */
4447 /* switch between 2/4 bytes */
4448 ctxt->op_bytes = def_op_bytes ^ 6;
4450 case 0x67: /* address-size override */
4451 if (mode == X86EMUL_MODE_PROT64)
4452 /* switch between 4/8 bytes */
4453 ctxt->ad_bytes = def_ad_bytes ^ 12;
4455 /* switch between 2/4 bytes */
4456 ctxt->ad_bytes = def_ad_bytes ^ 6;
4458 case 0x26: /* ES override */
4459 case 0x2e: /* CS override */
4460 case 0x36: /* SS override */
4461 case 0x3e: /* DS override */
4462 has_seg_override = true;
4463 ctxt->seg_override = (ctxt->b >> 3) & 3;
4465 case 0x64: /* FS override */
4466 case 0x65: /* GS override */
4467 has_seg_override = true;
4468 ctxt->seg_override = ctxt->b & 7;
4470 case 0x40 ... 0x4f: /* REX */
4471 if (mode != X86EMUL_MODE_PROT64)
4473 ctxt->rex_prefix = ctxt->b;
4475 case 0xf0: /* LOCK */
4476 ctxt->lock_prefix = 1;
4478 case 0xf2: /* REPNE/REPNZ */
4479 case 0xf3: /* REP/REPE/REPZ */
4480 ctxt->rep_prefix = ctxt->b;
4486 /* Any legacy prefix after a REX prefix nullifies its effect. */
4488 ctxt->rex_prefix = 0;
4494 if (ctxt->rex_prefix & 8)
4495 ctxt->op_bytes = 8; /* REX.W */
4497 /* Opcode byte(s). */
4498 opcode = opcode_table[ctxt->b];
4499 /* Two-byte opcode? */
4500 if (ctxt->b == 0x0f) {
4501 ctxt->opcode_len = 2;
4502 ctxt->b = insn_fetch(u8, ctxt);
4503 opcode = twobyte_table[ctxt->b];
4505 /* 0F_38 opcode map */
4506 if (ctxt->b == 0x38) {
4507 ctxt->opcode_len = 3;
4508 ctxt->b = insn_fetch(u8, ctxt);
4509 opcode = opcode_map_0f_38[ctxt->b];
4512 ctxt->d = opcode.flags;
4514 if (ctxt->d & ModRM)
4515 ctxt->modrm = insn_fetch(u8, ctxt);
4517 /* vex-prefix instructions are not implemented */
4518 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4519 (mode == X86EMUL_MODE_PROT64 ||
4520 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4524 while (ctxt->d & GroupMask) {
4525 switch (ctxt->d & GroupMask) {
4527 goffset = (ctxt->modrm >> 3) & 7;
4528 opcode = opcode.u.group[goffset];
4531 goffset = (ctxt->modrm >> 3) & 7;
4532 if ((ctxt->modrm >> 6) == 3)
4533 opcode = opcode.u.gdual->mod3[goffset];
4535 opcode = opcode.u.gdual->mod012[goffset];
4538 goffset = ctxt->modrm & 7;
4539 opcode = opcode.u.group[goffset];
4542 if (ctxt->rep_prefix && op_prefix)
4543 return EMULATION_FAILED;
4544 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4545 switch (simd_prefix) {
4546 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4547 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4548 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4549 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4553 if (ctxt->modrm > 0xbf)
4554 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4556 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4559 return EMULATION_FAILED;
4562 ctxt->d &= ~(u64)GroupMask;
4563 ctxt->d |= opcode.flags;
4568 return EMULATION_FAILED;
4570 ctxt->execute = opcode.u.execute;
4572 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4573 return EMULATION_FAILED;
4575 if (unlikely(ctxt->d &
4576 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4578 * These are copied unconditionally here, and checked unconditionally
4579 * in x86_emulate_insn.
4581 ctxt->check_perm = opcode.check_perm;
4582 ctxt->intercept = opcode.intercept;
4584 if (ctxt->d & NotImpl)
4585 return EMULATION_FAILED;
4587 if (mode == X86EMUL_MODE_PROT64) {
4588 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4590 else if (ctxt->d & NearBranch)
4594 if (ctxt->d & Op3264) {
4595 if (mode == X86EMUL_MODE_PROT64)
4602 ctxt->op_bytes = 16;
4603 else if (ctxt->d & Mmx)
4607 /* ModRM and SIB bytes. */
4608 if (ctxt->d & ModRM) {
4609 rc = decode_modrm(ctxt, &ctxt->memop);
4610 if (!has_seg_override) {
4611 has_seg_override = true;
4612 ctxt->seg_override = ctxt->modrm_seg;
4614 } else if (ctxt->d & MemAbs)
4615 rc = decode_abs(ctxt, &ctxt->memop);
4616 if (rc != X86EMUL_CONTINUE)
4619 if (!has_seg_override)
4620 ctxt->seg_override = VCPU_SREG_DS;
4622 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4625 * Decode and fetch the source operand: register, memory
4628 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4629 if (rc != X86EMUL_CONTINUE)
4633 * Decode and fetch the second source operand: register, memory
4636 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4637 if (rc != X86EMUL_CONTINUE)
4640 /* Decode and fetch the destination operand: register or memory. */
4641 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4643 if (ctxt->rip_relative)
4644 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4647 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4650 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4652 return ctxt->d & PageTable;
4655 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4657 /* The second termination condition applies only to REPE
4658 * and REPNE. If the repeat string operation prefix is
4659 * REPE/REPZ or REPNE/REPNZ, test the corresponding
4660 * termination condition:
4661 * - if REPE/REPZ and ZF = 0 then done
4662 * - if REPNE/REPNZ and ZF = 1 then done
4664 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4665 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4666 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4667 ((ctxt->eflags & EFLG_ZF) == 0))
4668 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4669 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4675 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4679 ctxt->ops->get_fpu(ctxt);
4680 asm volatile("1: fwait \n\t"
4682 ".pushsection .fixup,\"ax\" \n\t"
4684 "movb $1, %[fault] \n\t"
4687 _ASM_EXTABLE(1b, 3b)
4688 : [fault]"+qm"(fault));
4689 ctxt->ops->put_fpu(ctxt);
4691 if (unlikely(fault))
4692 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4694 return X86EMUL_CONTINUE;
4697 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4700 if (op->type == OP_MM)
4701 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
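/*
 * fastop() dispatches into a table of small asm flag-op stubs laid
 * out FASTOP_SIZE bytes apart per operand size, so
 * 'fop += __ffs(dst.bytes) * FASTOP_SIZE' picks the 1/2/4/8-byte
 * variant (__ffs yields 0, 1, 2, 3). The calling convention is fixed
 * by the asm below: dst in RAX, src in RDX, src2 in RCX, with the
 * guest's arithmetic flags shuttled through RDI around the call; a
 * NULL fop on return signals a divide-error exception.
 */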
4704 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4706 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4707 if (!(ctxt->d & ByteOp))
4708 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4709 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4710 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4712 : "c"(ctxt->src2.val));
4713 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4714 if (!fop) /* exception is returned in fop variable */
4715 return emulate_de(ctxt);
4716 return X86EMUL_CONTINUE;
4719 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4721 memset(&ctxt->rip_relative, 0,
4722 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4724 ctxt->io_read.pos = 0;
4725 ctxt->io_read.end = 0;
4726 ctxt->mem_read.end = 0;
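/*
 * x86_emulate_insn() below executes an already-decoded instruction,
 * roughly in this order: validate the LOCK prefix, apply the
 * exception-class checks (No64/Undefined, SSE/MMX against CR0/CR4
 * state, Priv/Prot), run the three guest-mode intercept checkpoints
 * (pre-exception, post-exception, post-memaccess), terminate REP
 * iterations when RCX is zero, read the memory operands, then invoke
 * either the fastop stub or the ->execute callback and write the
 * result back.
 */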
4729 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4731 const struct x86_emulate_ops *ops = ctxt->ops;
4732 int rc = X86EMUL_CONTINUE;
4733 int saved_dst_type = ctxt->dst.type;
4735 ctxt->mem_read.pos = 0;
4737 /* LOCK prefix is allowed only with some instructions */
4738 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4739 rc = emulate_ud(ctxt);
4743 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4744 rc = emulate_ud(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instructions can be executed only at CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes share the same first termination
			 * condition: RCX == 0 means the instruction is done. */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:
	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Per the SDM, RF stays set while a string instruction has not
	 * completed, so that resuming it does not re-trigger an
	 * instruction breakpoint.
	 */
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
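	/*
	 * Dispatch.  Most opcodes carry an ->execute callback from the
	 * decode tables; Fastop entries instead point at an asm stub table
	 * that is invoked through fastop() above.
	 */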
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
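	/*
	 * Opcodes without an ->execute callback are handled inline:
	 * one-byte opcodes here, 0x0f-prefixed ones under twobyte_insn.
	 */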
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;  /* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
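	/*
	 * For a REP-prefixed string instruction, account for the iterations
	 * performed in this pass (the emulator may batch several) and decide
	 * whether to restart the instruction or fall through as completed.
	 */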
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
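/*
 * A rough sketch of how the return codes are consumed; the real caller is
 * x86_emulate_instruction() in x86.c and the details differ (pending
 * pio/mmio, exception injection):
 *
 *	restart:
 *		r = x86_emulate_insn(ctxt);
 *		if (r == EMULATION_INTERCEPTED)
 *			return EMULATE_DONE;	// nested guest handles it
 *		if (r == EMULATION_FAILED)
 *			return handle_emulation_failure(vcpu);
 *		if (r == EMULATION_RESTART)
 *			goto restart;		// unless I/O is pending
 */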
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc rel */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
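/*
 * Thin wrappers so that x86.c can discard or flush the emulator's cached
 * copy of the guest GPRs when they change behind the emulator's back.
 */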
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}