/******************************************************************************
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)	/* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)	/* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)	/* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)	/* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)	/* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)	/* No writeback */
#define SrcWrite    ((u64)1 << 46)	/* Write back src operand */
#define NoMod       ((u64)1 << 47)	/* Mod field is ignored */
#define Intercept   ((u64)1 << 48)	/* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)	/* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)	/* No big real mode */
#define PrivUD      ((u64)1 << 51)	/* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)	/* Near branches */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
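
/*
 * Illustrative sketch, not part of the real decode flow: each operand
 * specifier above is a 5-bit field packed into the 64-bit flags word, so
 * the decoder can recover it with a shift and a mask.  For a hypothetical
 * table entry d = DstReg | SrcMem | ModRM:
 *
 *	unsigned dst_op = (d >> DstShift) & OpMask;	== OpReg
 *	unsigned src_op = (d >> SrcShift) & OpMask;	== OpMem
 *	bool has_modrm  = (d & ModRM) != 0;		== true
 */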
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
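
/*
 * A minimal sketch of the "reached by calculation" idea above (the real
 * dispatch lives in fastop(), declared below; names here are purely
 * illustrative): the size variants of an em_foo table are laid out back
 * to back, FASTOP_SIZE bytes apart, in byte/word/long/quad order, so the
 * entry for a given operand size is found by arithmetic:
 *
 *	void (*fop)(struct fastop *) = em_foo;
 *	fop += ilog2(ctxt->dst.bytes) * FASTOP_SIZE;	1->b 2->w 4->l 8->q
 */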
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID		(1<<21)
#define EFLG_VIP	(1<<20)
#define EFLG_VIF	(1<<19)
#define EFLG_AC		(1<<18)
#define EFLG_VM		(1<<17)
#define EFLG_RF		(1<<16)
#define EFLG_IOPL	(3<<12)
#define EFLG_NT		(1<<14)
#define EFLG_OF		(1<<11)
#define EFLG_DF		(1<<10)
#define EFLG_IF		(1<<9)
#define EFLG_TF		(1<<8)
#define EFLG_SF		(1<<7)
#define EFLG_ZF		(1<<6)
#define EFLG_AF		(1<<4)
#define EFLG_PF		(1<<2)
#define EFLG_CF		(1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
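
/*
 * Worked example for the granularity scaling above: a descriptor with
 * limit = 0xfffff and g = 1 scales to (0xfffff << 12) | 0xfff =
 * 0xffffffff, i.e. a 4GB segment; with g = 0 the limit is in bytes and
 * is used as-is.
 */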
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) +
	     (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
	*max_size = 0;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff)
				goto bad;
			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment or readable code segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* 15UL ^ cur_size == 15 - cur_size here, since cur_size <= 15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
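
/*
 * Illustrative use of the fetch macros above (a sketch; the real call
 * sites are in the decoder further down this file): each invocation
 * consumes bytes from the prefetch cache and advances _eip, jumping to
 * the caller's local "done" label on failure, e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);		opcode byte
 *	ctxt->modrm = insn_fetch(u8, ctxt);	ModRM byte
 */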
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
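
/*
 * Worked example for the 16-bit decode above (illustrative): ModRM byte
 * 0x46 followed by displacement 0x10 decodes as mod=1, rm=6, i.e.
 * [BP+0x10]; because the base register is BP, the rm==6/mod!=0 test
 * selects SS as the default segment rather than DS.
 */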
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
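
/*
 * Worked example (illustrative): for a 16-bit "bt [mem], reg" with a bit
 * index of 20 in the source register, mask is ~15, so sv = 16; the
 * effective address is advanced by sv >> 3 = 2 bytes, and the in-word
 * bit offset left in src.val is 20 & 15 = 4.
 */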
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 64-bit guest running a 32-bit compat application gets #UD.
	 * While this behaviour could be fixed up (by emulating syscall
	 * to match the AMD response), the reverse is impossible: AMD
	 * CPUs cannot be made to behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
2403 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2405 const struct x86_emulate_ops *ops = ctxt->ops;
2406 struct desc_struct cs, ss;
2407 u64 msr_data, rcx, rdx;
2409 u16 cs_sel = 0, ss_sel = 0;
2411 /* inject #GP if in real mode or Virtual 8086 mode */
2412 if (ctxt->mode == X86EMUL_MODE_REAL ||
2413 ctxt->mode == X86EMUL_MODE_VM86)
2414 return emulate_gp(ctxt, 0);
2416 setup_syscalls_segments(ctxt, &cs, &ss);
2418 if ((ctxt->rex_prefix & 0x8) != 0x0)
2419 usermode = X86EMUL_MODE_PROT64;
2421 usermode = X86EMUL_MODE_PROT32;
2423 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2424 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2428 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2430 case X86EMUL_MODE_PROT32:
2431 cs_sel = (u16)(msr_data + 16);
2432 if ((msr_data & 0xfffc) == 0x0)
2433 return emulate_gp(ctxt, 0);
2434 ss_sel = (u16)(msr_data + 24);
2438 case X86EMUL_MODE_PROT64:
2439 cs_sel = (u16)(msr_data + 32);
2440 if (msr_data == 0x0)
2441 return emulate_gp(ctxt, 0);
2442 ss_sel = cs_sel + 8;
2445 if (is_noncanonical_address(rcx) ||
2446 is_noncanonical_address(rdx))
2447 return emulate_gp(ctxt, 0);
2450 cs_sel |= SELECTOR_RPL_MASK;
2451 ss_sel |= SELECTOR_RPL_MASK;
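/*
 * Worked example of the selector arithmetic above, assuming
 * IA32_SYSENTER_CS holds 0x0010; SYSEXIT expects a fixed GDT layout
 * relative to that selector, with RPL forced to 3 for user mode:
 *
 *	32-bit exit: cs_sel = 0x0010 + 16 = 0x0020 | 3 = 0x0023
 *	             ss_sel = 0x0010 + 24 = 0x0028 | 3 = 0x002b
 *	64-bit exit: cs_sel = 0x0010 + 32 = 0x0030 | 3 = 0x0033
 *	             ss_sel = cs_sel + 8  = 0x0038 | 3 = 0x003b
 */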
2453 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2454 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2457 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2459 return X86EMUL_CONTINUE;
2462 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2465 if (ctxt->mode == X86EMUL_MODE_REAL)
2467 if (ctxt->mode == X86EMUL_MODE_VM86)
2469 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2470 return ctxt->ops->cpl(ctxt) > iopl;
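/*
 * Worked example: IOPL occupies EFLAGS bits 13:12.  With
 * eflags == 0x00001202, iopl = (0x1202 & 0x3000) >> 12 = 1, so code
 * running at CPL 3 fails the check above (3 > 1) while CPL 0 or 1
 * passes.
 */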
2473 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2476 const struct x86_emulate_ops *ops = ctxt->ops;
2477 struct desc_struct tr_seg;
2480 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2481 unsigned mask = (1 << len) - 1;
2484 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2487 if (desc_limit_scaled(&tr_seg) < 103)
2489 base = get_desc_base(&tr_seg);
2490 #ifdef CONFIG_X86_64
2491 base |= ((u64)base3) << 32;
2493 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2494 if (r != X86EMUL_CONTINUE)
2496 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2498 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2499 if (r != X86EMUL_CONTINUE)
2501 if ((perm >> bit_idx) & mask)
2506 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2512 if (emulator_bad_iopl(ctxt))
2513 if (!emulator_io_port_access_allowed(ctxt, port, len))
2516 ctxt->perm_ok = true;
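/*
 * Worked example of the I/O bitmap walk in
 * emulator_io_port_access_allowed() above, assuming a one-byte access
 * to port 0x3f9: the u16 at TSS offset 102 supplies the bitmap base,
 * the permission bits live at base + 0x3f9/8 = base + 0x7f, and
 * bit_idx = 0x3f9 & 7 = 1 with mask = (1 << 1) - 1.  The access is
 * allowed only if the selected bits read as zero; a u16 is fetched so
 * that accesses straddling a byte boundary are covered as well.
 */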
2521 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2522 struct tss_segment_16 *tss)
2524 tss->ip = ctxt->_eip;
2525 tss->flag = ctxt->eflags;
2526 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2527 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2528 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2529 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2530 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2531 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2532 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2533 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2535 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2536 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2537 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2538 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2539 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2542 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2543 struct tss_segment_16 *tss)
2548 ctxt->_eip = tss->ip;
2549 ctxt->eflags = tss->flag | 2;
2550 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2551 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2552 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2553 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2554 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2555 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2556 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2557 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2560 * SDM says that segment selectors are loaded before segment
2563 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2564 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2565 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2566 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2567 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2572 * Now load segment descriptors. If a fault happens at this stage,
2573 * it is handled in the context of the new task.
2575 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2577 if (ret != X86EMUL_CONTINUE)
2579 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2581 if (ret != X86EMUL_CONTINUE)
2583 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2585 if (ret != X86EMUL_CONTINUE)
2587 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2589 if (ret != X86EMUL_CONTINUE)
2591 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2593 if (ret != X86EMUL_CONTINUE)
2596 return X86EMUL_CONTINUE;
2599 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2600 u16 tss_selector, u16 old_tss_sel,
2601 ulong old_tss_base, struct desc_struct *new_desc)
2603 const struct x86_emulate_ops *ops = ctxt->ops;
2604 struct tss_segment_16 tss_seg;
2606 u32 new_tss_base = get_desc_base(new_desc);
2608 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2610 if (ret != X86EMUL_CONTINUE)
2611 /* FIXME: need to provide precise fault address */
2614 save_state_to_tss16(ctxt, &tss_seg);
2616 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2618 if (ret != X86EMUL_CONTINUE)
2619 /* FIXME: need to provide precise fault address */
2622 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2624 if (ret != X86EMUL_CONTINUE)
2625 /* FIXME: need to provide precise fault address */
2628 if (old_tss_sel != 0xffff) {
2629 tss_seg.prev_task_link = old_tss_sel;
2631 ret = ops->write_std(ctxt, new_tss_base,
2632 &tss_seg.prev_task_link,
2633 sizeof tss_seg.prev_task_link,
2635 if (ret != X86EMUL_CONTINUE)
2636 /* FIXME: need to provide precise fault address */
2640 return load_state_from_tss16(ctxt, &tss_seg);
2643 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2644 struct tss_segment_32 *tss)
2646 /* CR3 and the LDT selector are intentionally not saved */
2647 tss->eip = ctxt->_eip;
2648 tss->eflags = ctxt->eflags;
2649 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2650 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2651 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2652 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2653 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2654 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2655 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2656 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2658 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2659 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2660 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2661 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2662 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2663 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2666 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2667 struct tss_segment_32 *tss)
2672 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2673 return emulate_gp(ctxt, 0);
2674 ctxt->_eip = tss->eip;
2675 ctxt->eflags = tss->eflags | 2;
2677 /* General purpose registers */
2678 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2679 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2680 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2681 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2682 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2683 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2684 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2685 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2688 * SDM says that segment selectors are loaded before segment
2689 * descriptors. This is important because CPL checks will
2692 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2693 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2694 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2695 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2696 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2697 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2698 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2701 * If we're switching between Protected Mode and VM86, we need to make
2702 * sure to update the mode before loading the segment descriptors so
2703 * that the selectors are interpreted correctly.
2705 if (ctxt->eflags & X86_EFLAGS_VM) {
2706 ctxt->mode = X86EMUL_MODE_VM86;
2709 ctxt->mode = X86EMUL_MODE_PROT32;
2714 * Now load segment descriptors. If a fault happens at this stage,
2715 * it is handled in the context of the new task.
2717 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2719 if (ret != X86EMUL_CONTINUE)
2721 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2723 if (ret != X86EMUL_CONTINUE)
2725 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2727 if (ret != X86EMUL_CONTINUE)
2729 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2731 if (ret != X86EMUL_CONTINUE)
2733 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2735 if (ret != X86EMUL_CONTINUE)
2737 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2739 if (ret != X86EMUL_CONTINUE)
2741 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2743 if (ret != X86EMUL_CONTINUE)
2746 return X86EMUL_CONTINUE;
2749 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2750 u16 tss_selector, u16 old_tss_sel,
2751 ulong old_tss_base, struct desc_struct *new_desc)
2753 const struct x86_emulate_ops *ops = ctxt->ops;
2754 struct tss_segment_32 tss_seg;
2756 u32 new_tss_base = get_desc_base(new_desc);
2757 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2758 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2760 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2762 if (ret != X86EMUL_CONTINUE)
2763 /* FIXME: need to provide precise fault address */
2766 save_state_to_tss32(ctxt, &tss_seg);
2768 /* Only GP registers and segment selectors are saved */
2769 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2770 ldt_sel_offset - eip_offset, &ctxt->exception);
2771 if (ret != X86EMUL_CONTINUE)
2772 /* FIXME: need to provide precise fault address */
2775 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2777 if (ret != X86EMUL_CONTINUE)
2778 /* FIXME: need to provide precise fault address */
2781 if (old_tss_sel != 0xffff) {
2782 tss_seg.prev_task_link = old_tss_sel;
2784 ret = ops->write_std(ctxt, new_tss_base,
2785 &tss_seg.prev_task_link,
2786 sizeof tss_seg.prev_task_link,
2788 if (ret != X86EMUL_CONTINUE)
2789 /* FIXME: need to provide precise fault address */
2793 return load_state_from_tss32(ctxt, &tss_seg);
2796 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2797 u16 tss_selector, int idt_index, int reason,
2798 bool has_error_code, u32 error_code)
2800 const struct x86_emulate_ops *ops = ctxt->ops;
2801 struct desc_struct curr_tss_desc, next_tss_desc;
2803 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2804 ulong old_tss_base =
2805 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2809 /* FIXME: old_tss_base == ~0 ? */
2811 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2812 if (ret != X86EMUL_CONTINUE)
2814 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2815 if (ret != X86EMUL_CONTINUE)
2818 /* FIXME: check that next_tss_desc is tss */
2821 * Check privileges. The three cases are task switch caused by...
2823 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2824 * 2. Exception/IRQ/iret: No check is performed
2825 * 3. jmp/call to TSS/task-gate: No check is performed since the
2826 * hardware checks it before exiting.
2828 if (reason == TASK_SWITCH_GATE) {
2829 if (idt_index != -1) {
2830 /* Software interrupts */
2831 struct desc_struct task_gate_desc;
2834 ret = read_interrupt_descriptor(ctxt, idt_index,
2836 if (ret != X86EMUL_CONTINUE)
2839 dpl = task_gate_desc.dpl;
2840 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2841 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2845 desc_limit = desc_limit_scaled(&next_tss_desc);
2846 if (!next_tss_desc.p ||
2847 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2848 desc_limit < 0x2b)) {
2849 return emulate_ts(ctxt, tss_selector & 0xfffc);
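/*
 * The limits above are sizeof(struct tss_segment_32) - 1 = 0x67 and
 * sizeof(struct tss_segment_16) - 1 = 0x2b: a 32-bit TSS (type & 8)
 * must cover at least 104 bytes and a 16-bit TSS at least 44,
 * otherwise the switch faults with #TS.
 */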
2852 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2853 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2854 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2857 if (reason == TASK_SWITCH_IRET)
2858 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2860 /* Set the back link to the previous task only if the NT bit is set
2861 in eflags; note that old_tss_sel is not used after this point */
2862 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2863 old_tss_sel = 0xffff;
2865 if (next_tss_desc.type & 8)
2866 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2867 old_tss_base, &next_tss_desc);
2869 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2870 old_tss_base, &next_tss_desc);
2871 if (ret != X86EMUL_CONTINUE)
2874 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2875 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2877 if (reason != TASK_SWITCH_IRET) {
2878 next_tss_desc.type |= (1 << 1); /* set busy flag */
2879 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2882 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2883 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2885 if (has_error_code) {
2886 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2887 ctxt->lock_prefix = 0;
2888 ctxt->src.val = (unsigned long) error_code;
2889 ret = em_push(ctxt);
2895 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2896 u16 tss_selector, int idt_index, int reason,
2897 bool has_error_code, u32 error_code)
2901 invalidate_registers(ctxt);
2902 ctxt->_eip = ctxt->eip;
2903 ctxt->dst.type = OP_NONE;
2905 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2906 has_error_code, error_code);
2908 if (rc == X86EMUL_CONTINUE) {
2909 ctxt->eip = ctxt->_eip;
2910 writeback_registers(ctxt);
2913 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2916 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2919 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2921 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2922 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2925 static int em_das(struct x86_emulate_ctxt *ctxt)
2928 bool af, cf, old_cf;
2930 cf = ctxt->eflags & X86_EFLAGS_CF;
2936 af = ctxt->eflags & X86_EFLAGS_AF;
2937 if ((al & 0x0f) > 9 || af) {
2939 cf = old_cf | (al >= 250);
2944 if (old_al > 0x99 || old_cf) {
2950 /* Set PF, ZF, SF */
2951 ctxt->src.type = OP_IMM;
2953 ctxt->src.bytes = 1;
2954 fastop(ctxt, em_or);
2955 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2957 ctxt->eflags |= X86_EFLAGS_CF;
2959 ctxt->eflags |= X86_EFLAGS_AF;
2960 return X86EMUL_CONTINUE;
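/*
 * Worked DAS example: after the packed-BCD subtraction 0x51 - 0x24 the
 * CPU leaves al = 0x2d with AF set.  The low nibble 0xd is > 9, so
 * al -= 6 gives 0x27, the correct BCD result of 51 - 24.  The original
 * al did not exceed 0x99 and CF was clear, so no 0x60 adjustment
 * follows.
 */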
2963 static int em_aam(struct x86_emulate_ctxt *ctxt)
2967 if (ctxt->src.val == 0)
2968 return emulate_de(ctxt);
2970 al = ctxt->dst.val & 0xff;
2971 ah = al / ctxt->src.val;
2972 al %= ctxt->src.val;
2974 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2976 /* Set PF, ZF, SF */
2977 ctxt->src.type = OP_IMM;
2979 ctxt->src.bytes = 1;
2980 fastop(ctxt, em_or);
2982 return X86EMUL_CONTINUE;
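/*
 * Worked AAM example with the usual immediate of 10 (encoding d4 0a):
 * al = 0x25 (37 decimal) yields ah = 37 / 10 = 3 and al = 37 % 10 = 7,
 * i.e. AX = 0x0307.  An immediate of zero raises #DE, as checked above.
 */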
2985 static int em_aad(struct x86_emulate_ctxt *ctxt)
2987 u8 al = ctxt->dst.val & 0xff;
2988 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2990 al = (al + (ah * ctxt->src.val)) & 0xff;
2992 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2994 /* Set PF, ZF, SF */
2995 ctxt->src.type = OP_IMM;
2997 ctxt->src.bytes = 1;
2998 fastop(ctxt, em_or);
3000 return X86EMUL_CONTINUE;
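/*
 * Worked AAD example, the inverse of AAM: with ah = 3, al = 7 and the
 * usual immediate of 10 (encoding d5 0a), al becomes
 * (7 + 3 * 10) & 0xff = 0x25 and ah is cleared, leaving AX = 0x0025.
 */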
3003 static int em_call(struct x86_emulate_ctxt *ctxt)
3006 long rel = ctxt->src.val;
3008 ctxt->src.val = (unsigned long)ctxt->_eip;
3009 rc = jmp_rel(ctxt, rel);
3010 if (rc != X86EMUL_CONTINUE)
3012 return em_push(ctxt);
3015 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3020 struct desc_struct old_desc, new_desc;
3021 const struct x86_emulate_ops *ops = ctxt->ops;
3022 int cpl = ctxt->ops->cpl(ctxt);
3024 old_eip = ctxt->_eip;
3025 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3027 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3028 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3030 if (rc != X86EMUL_CONTINUE)
3031 return X86EMUL_CONTINUE;
3033 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3034 if (rc != X86EMUL_CONTINUE)
3037 ctxt->src.val = old_cs;
3039 if (rc != X86EMUL_CONTINUE)
3042 ctxt->src.val = old_eip;
3044 /* If we failed, we tainted the memory, but the very least we should
3046 if (rc != X86EMUL_CONTINUE)
3050 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3055 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3060 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3061 if (rc != X86EMUL_CONTINUE)
3063 rc = assign_eip_near(ctxt, eip);
3064 if (rc != X86EMUL_CONTINUE)
3066 rsp_increment(ctxt, ctxt->src.val);
3067 return X86EMUL_CONTINUE;
3070 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3072 /* Write back the register source. */
3073 ctxt->src.val = ctxt->dst.val;
3074 write_register_operand(&ctxt->src);
3076 /* Write back the memory destination with implicit LOCK prefix. */
3077 ctxt->dst.val = ctxt->src.orig_val;
3078 ctxt->lock_prefix = 1;
3079 return X86EMUL_CONTINUE;
3082 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3084 ctxt->dst.val = ctxt->src2.val;
3085 return fastop(ctxt, em_imul);
3088 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3090 ctxt->dst.type = OP_REG;
3091 ctxt->dst.bytes = ctxt->src.bytes;
3092 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3093 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3095 return X86EMUL_CONTINUE;
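/*
 * The expression above replicates the source's sign bit across the
 * destination: for 16-bit CWD, src = 0x8123 gives (0x8123 >> 15) = 1,
 * so ~(1 - 1) = ~0 puts 0xffff in DX, while src = 0x7123 gives
 * ~(0 - 1) = ~(all ones) = 0.
 */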
3098 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3102 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3103 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3104 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3105 return X86EMUL_CONTINUE;
3108 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3112 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3113 return emulate_gp(ctxt, 0);
3114 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3115 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3116 return X86EMUL_CONTINUE;
3119 static int em_mov(struct x86_emulate_ctxt *ctxt)
3121 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3122 return X86EMUL_CONTINUE;
3125 #define FFL(x) bit(X86_FEATURE_##x)
3127 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3129 u32 ebx, ecx, edx, eax = 1;
3133 * Check MOVBE is set in the guest-visible CPUID leaf.
3135 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3136 if (!(ecx & FFL(MOVBE)))
3137 return emulate_ud(ctxt);
3139 switch (ctxt->op_bytes) {
3142 * From MOVBE definition: "...When the operand size is 16 bits,
3143 * the upper word of the destination register remains unchanged
3146 * Casting either ->valptr or ->val to u16 breaks strict-aliasing
3147 * rules, so we have to do the operation almost by hand.
3149 tmp = (u16)ctxt->src.val;
3150 ctxt->dst.val &= ~0xffffUL;
3151 ctxt->dst.val |= (unsigned long)swab16(tmp);
3154 ctxt->dst.val = swab32((u32)ctxt->src.val);
3157 ctxt->dst.val = swab64(ctxt->src.val);
3162 return X86EMUL_CONTINUE;
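/*
 * Worked MOVBE example: a 16-bit source of 0x1234 stores
 * swab16(0x1234) = 0x3412 into the low word of the destination and
 * leaves the upper bits untouched, per the quoted definition; the 32-
 * and 64-bit cases byte-swap the whole value, e.g.
 * swab32(0x11223344) = 0x44332211.
 */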
3165 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3167 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3168 return emulate_gp(ctxt, 0);
3170 /* Disable writeback. */
3171 ctxt->dst.type = OP_NONE;
3172 return X86EMUL_CONTINUE;
3175 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3179 if (ctxt->mode == X86EMUL_MODE_PROT64)
3180 val = ctxt->src.val & ~0ULL;
3182 val = ctxt->src.val & ~0U;
3184 /* #UD condition is already handled. */
3185 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3186 return emulate_gp(ctxt, 0);
3188 /* Disable writeback. */
3189 ctxt->dst.type = OP_NONE;
3190 return X86EMUL_CONTINUE;
3193 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3197 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3198 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3199 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3200 return emulate_gp(ctxt, 0);
3202 return X86EMUL_CONTINUE;
3205 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3209 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3210 return emulate_gp(ctxt, 0);
3212 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3213 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3214 return X86EMUL_CONTINUE;
3217 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3219 if (ctxt->modrm_reg > VCPU_SREG_GS)
3220 return emulate_ud(ctxt);
3222 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3223 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3224 ctxt->dst.bytes = 2;
3225 return X86EMUL_CONTINUE;
3228 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3230 u16 sel = ctxt->src.val;
3232 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3233 return emulate_ud(ctxt);
3235 if (ctxt->modrm_reg == VCPU_SREG_SS)
3236 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3238 /* Disable writeback. */
3239 ctxt->dst.type = OP_NONE;
3240 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3243 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3245 u16 sel = ctxt->src.val;
3247 /* Disable writeback. */
3248 ctxt->dst.type = OP_NONE;
3249 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3252 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3254 u16 sel = ctxt->src.val;
3256 /* Disable writeback. */
3257 ctxt->dst.type = OP_NONE;
3258 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3261 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3266 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3267 if (rc == X86EMUL_CONTINUE)
3268 ctxt->ops->invlpg(ctxt, linear);
3269 /* Disable writeback. */
3270 ctxt->dst.type = OP_NONE;
3271 return X86EMUL_CONTINUE;
3274 static int em_clts(struct x86_emulate_ctxt *ctxt)
3278 cr0 = ctxt->ops->get_cr(ctxt, 0);
3280 ctxt->ops->set_cr(ctxt, 0, cr0);
3281 return X86EMUL_CONTINUE;
3284 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3286 int rc = ctxt->ops->fix_hypercall(ctxt);
3288 if (rc != X86EMUL_CONTINUE)
3291 /* Let the processor re-execute the fixed hypercall */
3292 ctxt->_eip = ctxt->eip;
3293 /* Disable writeback. */
3294 ctxt->dst.type = OP_NONE;
3295 return X86EMUL_CONTINUE;
3298 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3299 void (*get)(struct x86_emulate_ctxt *ctxt,
3300 struct desc_ptr *ptr))
3302 struct desc_ptr desc_ptr;
3304 if (ctxt->mode == X86EMUL_MODE_PROT64)
3306 get(ctxt, &desc_ptr);
3307 if (ctxt->op_bytes == 2) {
3309 desc_ptr.address &= 0x00ffffff;
3311 /* Disable writeback. */
3312 ctxt->dst.type = OP_NONE;
3313 return segmented_write(ctxt, ctxt->dst.addr.mem,
3314 &desc_ptr, 2 + ctxt->op_bytes);
3317 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3319 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3322 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3324 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3327 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3329 struct desc_ptr desc_ptr;
3332 if (ctxt->mode == X86EMUL_MODE_PROT64)
3334 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3335 &desc_ptr.size, &desc_ptr.address,
3337 if (rc != X86EMUL_CONTINUE)
3339 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3340 is_noncanonical_address(desc_ptr.address))
3341 return emulate_gp(ctxt, 0);
3343 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3345 ctxt->ops->set_idt(ctxt, &desc_ptr);
3346 /* Disable writeback. */
3347 ctxt->dst.type = OP_NONE;
3348 return X86EMUL_CONTINUE;
3351 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3353 return em_lgdt_lidt(ctxt, true);
3356 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3360 rc = ctxt->ops->fix_hypercall(ctxt);
3362 /* Disable writeback. */
3363 ctxt->dst.type = OP_NONE;
3367 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3369 return em_lgdt_lidt(ctxt, false);
3372 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3374 if (ctxt->dst.type == OP_MEM)
3375 ctxt->dst.bytes = 2;
3376 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3377 return X86EMUL_CONTINUE;
3380 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3382 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3383 | (ctxt->src.val & 0x0f));
3384 ctxt->dst.type = OP_NONE;
3385 return X86EMUL_CONTINUE;
3388 static int em_loop(struct x86_emulate_ctxt *ctxt)
3390 int rc = X86EMUL_CONTINUE;
3392 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3393 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3394 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3395 rc = jmp_rel(ctxt, ctxt->src.val);
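/*
 * The XOR above maps a LOOPcc opcode onto the matching Jcc condition
 * code: 0xe0 (LOOPNE) ^ 0x5 = 0xe5, whose low nibble 5 is the
 * "not zero" condition, and 0xe1 (LOOPE) ^ 0x5 = 0xe4, nibble 4,
 * "zero".  Plain LOOP (0xe2) is special-cased and ignores ZF.
 */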
3400 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3402 int rc = X86EMUL_CONTINUE;
3404 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3405 rc = jmp_rel(ctxt, ctxt->src.val);
3410 static int em_in(struct x86_emulate_ctxt *ctxt)
3412 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3414 return X86EMUL_IO_NEEDED;
3416 return X86EMUL_CONTINUE;
3419 static int em_out(struct x86_emulate_ctxt *ctxt)
3421 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3423 /* Disable writeback. */
3424 ctxt->dst.type = OP_NONE;
3425 return X86EMUL_CONTINUE;
3428 static int em_cli(struct x86_emulate_ctxt *ctxt)
3430 if (emulator_bad_iopl(ctxt))
3431 return emulate_gp(ctxt, 0);
3433 ctxt->eflags &= ~X86_EFLAGS_IF;
3434 return X86EMUL_CONTINUE;
3437 static int em_sti(struct x86_emulate_ctxt *ctxt)
3439 if (emulator_bad_iopl(ctxt))
3440 return emulate_gp(ctxt, 0);
3442 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3443 ctxt->eflags |= X86_EFLAGS_IF;
3444 return X86EMUL_CONTINUE;
3447 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3449 u32 eax, ebx, ecx, edx;
3451 eax = reg_read(ctxt, VCPU_REGS_RAX);
3452 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3453 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3454 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3455 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3456 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3457 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3458 return X86EMUL_CONTINUE;
3461 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3465 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3466 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3468 ctxt->eflags &= ~0xffUL;
3469 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3470 return X86EMUL_CONTINUE;
3473 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3475 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3476 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3477 return X86EMUL_CONTINUE;
3480 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3482 switch (ctxt->op_bytes) {
3483 #ifdef CONFIG_X86_64
3485 asm("bswap %0" : "+r"(ctxt->dst.val));
3489 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3492 return X86EMUL_CONTINUE;
3495 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3497 /* emulating clflush regardless of cpuid */
3498 return X86EMUL_CONTINUE;
3501 static bool valid_cr(int nr)
3513 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3515 if (!valid_cr(ctxt->modrm_reg))
3516 return emulate_ud(ctxt);
3518 return X86EMUL_CONTINUE;
3521 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3523 u64 new_val = ctxt->src.val64;
3524 int cr = ctxt->modrm_reg;
3527 static u64 cr_reserved_bits[] = {
3528 0xffffffff00000000ULL,
3529 0, 0, 0, /* CR3 checked later */
3536 return emulate_ud(ctxt);
3538 if (new_val & cr_reserved_bits[cr])
3539 return emulate_gp(ctxt, 0);
3544 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3545 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3546 return emulate_gp(ctxt, 0);
3548 cr4 = ctxt->ops->get_cr(ctxt, 4);
3549 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3551 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3552 !(cr4 & X86_CR4_PAE))
3553 return emulate_gp(ctxt, 0);
3560 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3561 if (efer & EFER_LMA)
3562 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3565 return emulate_gp(ctxt, 0);
3570 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3572 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3573 return emulate_gp(ctxt, 0);
3579 return X86EMUL_CONTINUE;
3582 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3586 ctxt->ops->get_dr(ctxt, 7, &dr7);
3588 /* Check if DR7.Global_Enable is set */
3589 return dr7 & (1 << 13);
3592 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3594 int dr = ctxt->modrm_reg;
3598 return emulate_ud(ctxt);
3600 cr4 = ctxt->ops->get_cr(ctxt, 4);
3601 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3602 return emulate_ud(ctxt);
3604 if (check_dr7_gd(ctxt)) {
3607 ctxt->ops->get_dr(ctxt, 6, &dr6);
3609 dr6 |= DR6_BD | DR6_RTM;
3610 ctxt->ops->set_dr(ctxt, 6, dr6);
3611 return emulate_db(ctxt);
3614 return X86EMUL_CONTINUE;
3617 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3619 u64 new_val = ctxt->src.val64;
3620 int dr = ctxt->modrm_reg;
3622 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3623 return emulate_gp(ctxt, 0);
3625 return check_dr_read(ctxt);
3628 static int check_svme(struct x86_emulate_ctxt *ctxt)
3632 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3634 if (!(efer & EFER_SVME))
3635 return emulate_ud(ctxt);
3637 return X86EMUL_CONTINUE;
3640 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3642 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3644 /* Valid physical address? */
3645 if (rax & 0xffff000000000000ULL)
3646 return emulate_gp(ctxt, 0);
3648 return check_svme(ctxt);
3651 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3653 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3655 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3656 return emulate_ud(ctxt);
3658 return X86EMUL_CONTINUE;
3661 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3663 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3664 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3666 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3667 ctxt->ops->check_pmc(ctxt, rcx))
3668 return emulate_gp(ctxt, 0);
3670 return X86EMUL_CONTINUE;
3673 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3675 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3676 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3677 return emulate_gp(ctxt, 0);
3679 return X86EMUL_CONTINUE;
3682 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3684 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3685 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3686 return emulate_gp(ctxt, 0);
3688 return X86EMUL_CONTINUE;
3691 #define D(_y) { .flags = (_y) }
3692 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3693 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3694 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3695 #define N D(NotImpl)
3696 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3697 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3698 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3699 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3700 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3701 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3702 #define II(_f, _e, _i) \
3703 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3704 #define IIP(_f, _e, _i, _p) \
3705 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3706 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3707 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3709 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3710 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3711 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3712 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3713 #define I2bvIP(_f, _e, _i, _p) \
3714 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3716 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3717 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3718 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
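/*
 * For illustration, F6ALU(Lock, em_add) expands to the six classic ALU
 * encodings every 8086 arithmetic opcode block provides, in table
 * order:
 *
 *	op r/m8, r8	op r/m, r	(memory destination, Lock-able)
 *	op r8, r/m8	op r, r/m	(register destination, ~Lock)
 *	op al, imm8	op {e}ax, imm	(accumulator forms, ~Lock)
 *
 * which is exactly the 0x00 - 0x05 slice of opcode_table below.
 */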
3720 static const struct opcode group7_rm0[] = {
3722 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3726 static const struct opcode group7_rm1[] = {
3727 DI(SrcNone | Priv, monitor),
3728 DI(SrcNone | Priv, mwait),
3732 static const struct opcode group7_rm3[] = {
3733 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3734 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3735 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3736 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3737 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3738 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3739 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3740 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3743 static const struct opcode group7_rm7[] = {
3745 DIP(SrcNone, rdtscp, check_rdtsc),
3749 static const struct opcode group1[] = {
3751 F(Lock | PageTable, em_or),
3754 F(Lock | PageTable, em_and),
3760 static const struct opcode group1A[] = {
3761 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3764 static const struct opcode group2[] = {
3765 F(DstMem | ModRM, em_rol),
3766 F(DstMem | ModRM, em_ror),
3767 F(DstMem | ModRM, em_rcl),
3768 F(DstMem | ModRM, em_rcr),
3769 F(DstMem | ModRM, em_shl),
3770 F(DstMem | ModRM, em_shr),
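/* /6 below is the undocumented SAL encoding, which aliases SHL (/4). */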
3771 F(DstMem | ModRM, em_shl),
3772 F(DstMem | ModRM, em_sar),
3775 static const struct opcode group3[] = {
3776 F(DstMem | SrcImm | NoWrite, em_test),
3777 F(DstMem | SrcImm | NoWrite, em_test),
3778 F(DstMem | SrcNone | Lock, em_not),
3779 F(DstMem | SrcNone | Lock, em_neg),
3780 F(DstXacc | Src2Mem, em_mul_ex),
3781 F(DstXacc | Src2Mem, em_imul_ex),
3782 F(DstXacc | Src2Mem, em_div_ex),
3783 F(DstXacc | Src2Mem, em_idiv_ex),
3786 static const struct opcode group4[] = {
3787 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3788 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3792 static const struct opcode group5[] = {
3793 F(DstMem | SrcNone | Lock, em_inc),
3794 F(DstMem | SrcNone | Lock, em_dec),
3795 I(SrcMem | NearBranch, em_call_near_abs),
3796 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3797 I(SrcMem | NearBranch, em_jmp_abs),
3798 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3799 I(SrcMem | Stack, em_push), D(Undefined),
3802 static const struct opcode group6[] = {
3805 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3806 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3810 static const struct group_dual group7 = { {
3811 II(Mov | DstMem, em_sgdt, sgdt),
3812 II(Mov | DstMem, em_sidt, sidt),
3813 II(SrcMem | Priv, em_lgdt, lgdt),
3814 II(SrcMem | Priv, em_lidt, lidt),
3815 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3816 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3817 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3821 N, EXT(0, group7_rm3),
3822 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3823 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3827 static const struct opcode group8[] = {
3829 F(DstMem | SrcImmByte | NoWrite, em_bt),
3830 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3831 F(DstMem | SrcImmByte | Lock, em_btr),
3832 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3835 static const struct group_dual group9 = { {
3836 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3838 N, N, N, N, N, N, N, N,
3841 static const struct opcode group11[] = {
3842 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3846 static const struct gprefix pfx_0f_ae_7 = {
3847 I(SrcMem | ByteOp, em_clflush), N, N, N,
3850 static const struct group_dual group15 = { {
3851 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3853 N, N, N, N, N, N, N, N,
3856 static const struct gprefix pfx_0f_6f_0f_7f = {
3857 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3860 static const struct gprefix pfx_0f_2b = {
3861 I(0, em_mov), I(0, em_mov), N, N,
3864 static const struct gprefix pfx_0f_28_0f_29 = {
3865 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3868 static const struct gprefix pfx_0f_e7 = {
3869 N, I(Sse, em_mov), N, N,
3872 static const struct escape escape_d9 = { {
3873 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3876 N, N, N, N, N, N, N, N,
3878 N, N, N, N, N, N, N, N,
3880 N, N, N, N, N, N, N, N,
3882 N, N, N, N, N, N, N, N,
3884 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, N, N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3893 static const struct escape escape_db = { {
3894 N, N, N, N, N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3899 N, N, N, N, N, N, N, N,
3901 N, N, N, N, N, N, N, N,
3903 N, N, N, N, N, N, N, N,
3905 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, N, N, N, N, N,
3911 N, N, N, N, N, N, N, N,
3914 static const struct escape escape_dd = { {
3915 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3918 N, N, N, N, N, N, N, N,
3920 N, N, N, N, N, N, N, N,
3922 N, N, N, N, N, N, N, N,
3924 N, N, N, N, N, N, N, N,
3926 N, N, N, N, N, N, N, N,
3928 N, N, N, N, N, N, N, N,
3930 N, N, N, N, N, N, N, N,
3932 N, N, N, N, N, N, N, N,
3935 static const struct opcode opcode_table[256] = {
3937 F6ALU(Lock, em_add),
3938 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3939 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3941 F6ALU(Lock | PageTable, em_or),
3942 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3945 F6ALU(Lock, em_adc),
3946 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3947 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3949 F6ALU(Lock, em_sbb),
3950 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3951 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3953 F6ALU(Lock | PageTable, em_and), N, N,
3955 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3957 F6ALU(Lock, em_xor), N, N,
3959 F6ALU(NoWrite, em_cmp), N, N,
3961 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3963 X8(I(SrcReg | Stack, em_push)),
3965 X8(I(DstReg | Stack, em_pop)),
3967 I(ImplicitOps | Stack | No64, em_pusha),
3968 I(ImplicitOps | Stack | No64, em_popa),
3969 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3972 I(SrcImm | Mov | Stack, em_push),
3973 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3974 I(SrcImmByte | Mov | Stack, em_push),
3975 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3976 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3977 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3979 X16(D(SrcImmByte | NearBranch)),
3981 G(ByteOp | DstMem | SrcImm, group1),
3982 G(DstMem | SrcImm, group1),
3983 G(ByteOp | DstMem | SrcImm | No64, group1),
3984 G(DstMem | SrcImmByte, group1),
3985 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3986 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3988 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3989 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3990 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3991 D(ModRM | SrcMem | NoAccess | DstReg),
3992 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3995 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3997 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3998 I(SrcImmFAddr | No64, em_call_far), N,
3999 II(ImplicitOps | Stack, em_pushf, pushf),
4000 II(ImplicitOps | Stack, em_popf, popf),
4001 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4003 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4004 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4005 I2bv(SrcSI | DstDI | Mov | String, em_mov),
4006 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4008 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4009 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4010 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4011 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4013 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4015 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4017 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4018 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4019 I(ImplicitOps | NearBranch, em_ret),
4020 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4021 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4022 G(ByteOp, group11), G(0, group11),
4024 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4025 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4026 I(ImplicitOps | Stack, em_ret_far),
4027 D(ImplicitOps), DI(SrcImmByte, intn),
4028 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4030 G(Src2One | ByteOp, group2), G(Src2One, group2),
4031 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4032 I(DstAcc | SrcImmUByte | No64, em_aam),
4033 I(DstAcc | SrcImmUByte | No64, em_aad),
4034 F(DstAcc | ByteOp | No64, em_salc),
4035 I(DstAcc | SrcXLat | ByteOp, em_mov),
4037 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4039 X3(I(SrcImmByte | NearBranch, em_loop)),
4040 I(SrcImmByte | NearBranch, em_jcxz),
4041 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4042 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4044 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4045 I(SrcImmFAddr | No64, em_jmp_far),
4046 D(SrcImmByte | ImplicitOps | NearBranch),
4047 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4048 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4050 N, DI(ImplicitOps, icebp), N, N,
4051 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4052 G(ByteOp, group3), G(0, group3),
4054 D(ImplicitOps), D(ImplicitOps),
4055 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4056 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4059 static const struct opcode twobyte_table[256] = {
4061 G(0, group6), GD(0, &group7), N, N,
4062 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4063 II(ImplicitOps | Priv, em_clts, clts), N,
4064 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4065 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4067 N, N, N, N, N, N, N, N,
4068 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4069 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4071 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4072 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4073 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4075 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4078 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4079 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4080 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4083 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4084 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4085 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4086 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4087 I(ImplicitOps | EmulateOnUD, em_sysenter),
4088 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4090 N, N, N, N, N, N, N, N,
4092 X16(D(DstReg | SrcMem | ModRM)),
4094 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4099 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4104 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4106 X16(D(SrcImm | NearBranch)),
4108 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4110 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4111 II(ImplicitOps, em_cpuid, cpuid),
4112 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4113 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4114 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4116 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4117 DI(ImplicitOps, rsm),
4118 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4119 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4120 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4121 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4123 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4124 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4125 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4126 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4127 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4128 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4132 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4133 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4134 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4136 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4137 N, D(DstMem | SrcReg | ModRM | Mov),
4138 N, N, N, GD(0, &group9),
4140 X8(I(DstReg, em_bswap)),
4142 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4144 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4145 N, N, N, N, N, N, N, N,
4147 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4150 static const struct gprefix three_byte_0f_38_f0 = {
4151 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4154 static const struct gprefix three_byte_0f_38_f1 = {
4155 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4159 * Insns below are selected by the prefix and indexed by the third opcode
4162 static const struct opcode opcode_map_0f_38[256] = {
4164 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4166 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4168 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4169 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4188 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4192 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4198 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4199 unsigned size, bool sign_extension)
4201 int rc = X86EMUL_CONTINUE;
4205 op->addr.mem.ea = ctxt->_eip;
4206 /* NB. Immediates are sign-extended as necessary. */
4207 switch (op->bytes) {
4209 op->val = insn_fetch(s8, ctxt);
4212 op->val = insn_fetch(s16, ctxt);
4215 op->val = insn_fetch(s32, ctxt);
4218 op->val = insn_fetch(s64, ctxt);
4221 if (!sign_extension) {
4222 switch (op->bytes) {
4230 op->val &= 0xffffffff;
4238 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4241 int rc = X86EMUL_CONTINUE;
4245 decode_register_operand(ctxt, op);
4248 rc = decode_imm(ctxt, op, 1, false);
4251 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4255 if (ctxt->d & BitOp)
4256 fetch_bit_operand(ctxt);
4257 op->orig_val = op->val;
4260 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4264 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4265 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4266 fetch_register_operand(op);
4267 op->orig_val = op->val;
4271 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4272 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4273 fetch_register_operand(op);
4274 op->orig_val = op->val;
4277 if (ctxt->d & ByteOp) {
4282 op->bytes = ctxt->op_bytes;
4283 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4284 fetch_register_operand(op);
4285 op->orig_val = op->val;
4289 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4291 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4292 op->addr.mem.seg = VCPU_SREG_ES;
4299 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4300 fetch_register_operand(op);
4304 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4307 rc = decode_imm(ctxt, op, 1, true);
4314 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4317 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4320 ctxt->memop.bytes = 1;
4321 if (ctxt->memop.type == OP_REG) {
4322 ctxt->memop.addr.reg = decode_register(ctxt,
4323 ctxt->modrm_rm, true);
4324 fetch_register_operand(&ctxt->memop);
4328 ctxt->memop.bytes = 2;
4331 ctxt->memop.bytes = 4;
4334 rc = decode_imm(ctxt, op, 2, false);
4337 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4341 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4343 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4344 op->addr.mem.seg = ctxt->seg_override;
4350 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4352 register_address(ctxt,
4353 reg_read(ctxt, VCPU_REGS_RBX) +
4354 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4355 op->addr.mem.seg = ctxt->seg_override;
4360 op->addr.mem.ea = ctxt->_eip;
4361 op->bytes = ctxt->op_bytes + 2;
4362 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4365 ctxt->memop.bytes = ctxt->op_bytes + 2;
4368 op->val = VCPU_SREG_ES;
4371 op->val = VCPU_SREG_CS;
4374 op->val = VCPU_SREG_SS;
4377 op->val = VCPU_SREG_DS;
4380 op->val = VCPU_SREG_FS;
4383 op->val = VCPU_SREG_GS;
4386 /* Special instructions do their own operand decoding. */
4388 op->type = OP_NONE; /* Disable writeback. */
4396 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4398 int rc = X86EMUL_CONTINUE;
4399 int mode = ctxt->mode;
4400 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4401 bool op_prefix = false;
4402 bool has_seg_override = false;
4403 struct opcode opcode;
4405 ctxt->memop.type = OP_NONE;
4406 ctxt->memopp = NULL;
4407 ctxt->_eip = ctxt->eip;
4408 ctxt->fetch.ptr = ctxt->fetch.data;
4409 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4410 ctxt->opcode_len = 1;
4412 memcpy(ctxt->fetch.data, insn, insn_len);
4414 rc = __do_insn_fetch_bytes(ctxt, 1);
4415 if (rc != X86EMUL_CONTINUE)
4420 case X86EMUL_MODE_REAL:
4421 case X86EMUL_MODE_VM86:
4422 case X86EMUL_MODE_PROT16:
4423 def_op_bytes = def_ad_bytes = 2;
4425 case X86EMUL_MODE_PROT32:
4426 def_op_bytes = def_ad_bytes = 4;
4428 #ifdef CONFIG_X86_64
4429 case X86EMUL_MODE_PROT64:
4435 return EMULATION_FAILED;
4438 ctxt->op_bytes = def_op_bytes;
4439 ctxt->ad_bytes = def_ad_bytes;
4441 /* Legacy prefixes. */
4443 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4444 case 0x66: /* operand-size override */
4446 /* switch between 2/4 bytes */
4447 ctxt->op_bytes = def_op_bytes ^ 6;
4449 case 0x67: /* address-size override */
4450 if (mode == X86EMUL_MODE_PROT64)
4451 /* switch between 4/8 bytes */
4452 ctxt->ad_bytes = def_ad_bytes ^ 12;
4454 /* switch between 2/4 bytes */
4455 ctxt->ad_bytes = def_ad_bytes ^ 6;
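/*
 * The XOR trick above toggles between the two legal sizes without a
 * branch: 2 ^ 6 = 4 and 4 ^ 6 = 2 flip between 16 and 32 bits, while
 * 8 ^ 12 = 4 flips a 64-bit default address size down to 32 bits.
 */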
4457 case 0x26: /* ES override */
4458 case 0x2e: /* CS override */
4459 case 0x36: /* SS override */
4460 case 0x3e: /* DS override */
4461 has_seg_override = true;
4462 ctxt->seg_override = (ctxt->b >> 3) & 3;
4464 case 0x64: /* FS override */
4465 case 0x65: /* GS override */
4466 has_seg_override = true;
4467 ctxt->seg_override = ctxt->b & 7;
4469 case 0x40 ... 0x4f: /* REX */
4470 if (mode != X86EMUL_MODE_PROT64)
4472 ctxt->rex_prefix = ctxt->b;
4474 case 0xf0: /* LOCK */
4475 ctxt->lock_prefix = 1;
4477 case 0xf2: /* REPNE/REPNZ */
4478 case 0xf3: /* REP/REPE/REPZ */
4479 ctxt->rep_prefix = ctxt->b;
4485 /* Any legacy prefix after a REX prefix nullifies its effect. */
4487 ctxt->rex_prefix = 0;
4493 if (ctxt->rex_prefix & 8)
4494 ctxt->op_bytes = 8; /* REX.W */
4496 /* Opcode byte(s). */
4497 opcode = opcode_table[ctxt->b];
4498 /* Two-byte opcode? */
4499 if (ctxt->b == 0x0f) {
4500 ctxt->opcode_len = 2;
4501 ctxt->b = insn_fetch(u8, ctxt);
4502 opcode = twobyte_table[ctxt->b];
4504 /* 0F_38 opcode map */
4505 if (ctxt->b == 0x38) {
4506 ctxt->opcode_len = 3;
4507 ctxt->b = insn_fetch(u8, ctxt);
4508 opcode = opcode_map_0f_38[ctxt->b];
4511 ctxt->d = opcode.flags;
4513 if (ctxt->d & ModRM)
4514 ctxt->modrm = insn_fetch(u8, ctxt);
4516 /* VEX-prefixed instructions are not implemented */
4517 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4518 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4522 while (ctxt->d & GroupMask) {
4523 switch (ctxt->d & GroupMask) {
4525 goffset = (ctxt->modrm >> 3) & 7;
4526 opcode = opcode.u.group[goffset];
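/*
 * Example of the Group lookup above: opcode 0xff decodes via
 * G(0, group5), so a ModRM byte of 0x25 has reg field
 * (0x25 >> 3) & 7 = 4 and selects group5[4], em_jmp_abs, i.e.
 * JMP r/m.
 */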
4529 goffset = (ctxt->modrm >> 3) & 7;
4530 if ((ctxt->modrm >> 6) == 3)
4531 opcode = opcode.u.gdual->mod3[goffset];
4533 opcode = opcode.u.gdual->mod012[goffset];
4536 goffset = ctxt->modrm & 7;
4537 opcode = opcode.u.group[goffset];
4540 if (ctxt->rep_prefix && op_prefix)
4541 return EMULATION_FAILED;
4542 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4543 switch (simd_prefix) {
4544 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4545 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4546 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4547 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4551 if (ctxt->modrm > 0xbf)
4552 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4554 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4557 return EMULATION_FAILED;
4560 ctxt->d &= ~(u64)GroupMask;
4561 ctxt->d |= opcode.flags;
4566 return EMULATION_FAILED;
4568 ctxt->execute = opcode.u.execute;
4570 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4571 return EMULATION_FAILED;
4573 if (unlikely(ctxt->d &
4574 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4576 * These are copied unconditionally here, and checked unconditionally
4577 * in x86_emulate_insn.
4579 ctxt->check_perm = opcode.check_perm;
4580 ctxt->intercept = opcode.intercept;
4582 if (ctxt->d & NotImpl)
4583 return EMULATION_FAILED;
4585 if (mode == X86EMUL_MODE_PROT64) {
4586 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4588 else if (ctxt->d & NearBranch)
4592 if (ctxt->d & Op3264) {
4593 if (mode == X86EMUL_MODE_PROT64)
4600 ctxt->op_bytes = 16;
4601 else if (ctxt->d & Mmx)
4605 /* ModRM and SIB bytes. */
4606 if (ctxt->d & ModRM) {
4607 rc = decode_modrm(ctxt, &ctxt->memop);
4608 if (!has_seg_override) {
4609 has_seg_override = true;
4610 ctxt->seg_override = ctxt->modrm_seg;
4612 } else if (ctxt->d & MemAbs)
4613 rc = decode_abs(ctxt, &ctxt->memop);
4614 if (rc != X86EMUL_CONTINUE)
4617 if (!has_seg_override)
4618 ctxt->seg_override = VCPU_SREG_DS;
4620 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4623 * Decode and fetch the source operand: register, memory
4626 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4627 if (rc != X86EMUL_CONTINUE)
4631 * Decode and fetch the second source operand: register, memory
4634 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4635 if (rc != X86EMUL_CONTINUE)
4638 /* Decode and fetch the destination operand: register or memory. */
4639 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4641 if (ctxt->rip_relative)
4642 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4645 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4648 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4650 return ctxt->d & PageTable;
4653 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4655 /* The second termination condition applies only to REPE
4656 * and REPNE. If the repeat string operation prefix is
4657 * REPE/REPZ or REPNE/REPNZ, test the corresponding
4658 * termination condition:
4659 * - if REPE/REPZ and ZF = 0 then done
4660 * - if REPNE/REPNZ and ZF = 1 then done
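 *
 * For example, "repe cmpsb" (0xa6) stops as soon as a compared byte
 * pair differs and clears ZF, while "repne scasb" (0xae) stops once a
 * match sets ZF; a plain "rep movs" only ever terminates on the count.
 */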
4662 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4663 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4664 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4665 ((ctxt->eflags & EFLG_ZF) == 0))
4666 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4667 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4673 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4677 ctxt->ops->get_fpu(ctxt);
4678 asm volatile("1: fwait \n\t"
4680 ".pushsection .fixup,\"ax\" \n\t"
4682 "movb $1, %[fault] \n\t"
4685 _ASM_EXTABLE(1b, 3b)
4686 : [fault]"+qm"(fault));
4687 ctxt->ops->put_fpu(ctxt);
4689 if (unlikely(fault))
4690 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4692 return X86EMUL_CONTINUE;
4695 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4698 if (op->type == OP_MM)
4699 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4702 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4704 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4705 if (!(ctxt->d & ByteOp))
4706 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4707 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4708 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4710 : "c"(ctxt->src2.val));
4711 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4712 if (!fop) /* exception is returned in fop variable */
4713 return emulate_de(ctxt);
4714 return X86EMUL_CONTINUE;
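/*
 * A sketch of the fastop calling convention implied by the asm
 * constraints above (explanatory, not authoritative): dst.val travels
 * in rAX ("+a"), src.val in rDX ("+d"), src2.val in rCX ("c") and the
 * flags image in rDI ("+D").  The width variant is chosen by pointer
 * arithmetic: for dst.bytes == 4, __ffs(4) = 2 advances two
 * FASTOP_SIZE stubs past the byte variant to the 32-bit one.
 */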
4717 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4719 memset(&ctxt->rip_relative, 0,
4720 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4722 ctxt->io_read.pos = 0;
4723 ctxt->io_read.end = 0;
4724 ctxt->mem_read.end = 0;
4727 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4729 const struct x86_emulate_ops *ops = ctxt->ops;
4730 int rc = X86EMUL_CONTINUE;
4731 int saved_dst_type = ctxt->dst.type;
4733 ctxt->mem_read.pos = 0;
4735 /* LOCK prefix is allowed only with some instructions */
4736 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4737 rc = emulate_ud(ctxt);
4741 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4742 rc = emulate_ud(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
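	/*
	 * Operand fetch phase: NoAccess instructions (e.g. lea) only use
	 * the effective address computed during decode, so the memory
	 * read itself is skipped for them.
	 */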
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;
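	/*
	 * The pre-execution destination value matters for locked
	 * read-modify-write instructions: writeback can then use a
	 * cmpxchg against orig_val to make the memory update atomic.
	 */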

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
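	/*
	 * Only one-byte opcodes without an ->execute callback reach the
	 * switch below.
	 */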
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;  /* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
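	/*
	 * For 0x98 the operand size selects the variant: op_bytes == 2 is
	 * cbw (AL sign-extended into AX), 4 is cwde (AX into EAX) and 8 is
	 * cdqe (EAX into RAX), hence the s8/s16/s32 casts of the
	 * accumulator above.
	 */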
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);
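		/*
		 * count can exceed one when several iterations of a string
		 * instruction were performed in a single pass, so RCX is
		 * decremented by the number of iterations actually done
		 * rather than by one.
		 */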
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}
	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
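	/*
	 * The internal X86EMUL_* codes are folded into the coarser
	 * EMULATION_* values here: everything except an unhandleable
	 * instruction (including a fault to be injected into the guest)
	 * is reported to the caller as EMULATION_OK.
	 */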

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
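	/*
	 * cmov subtlety: with a 32-bit operand size in 64-bit mode the
	 * destination register is zero-extended even when the condition
	 * is false, so writeback is suppressed only outside that case.
	 */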
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}