/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
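/*
 * Worked example (informative only): an opcode described as
 * DstMem | SrcReg | ModRM packs OpMem into the destination field and
 * OpReg into the source field of the 64-bit ctxt->d word, so the
 * decoder can recover each operand kind with a shift and a mask:
 *
 *	dst_kind  = (ctxt->d >> DstShift)  & OpMask;	yields OpMem
 *	src_kind  = (ctxt->d >> SrcShift)  & OpMask;	yields OpReg
 *	src2_kind = (ctxt->d >> Src2Shift) & OpMask;	yields OpNone
 */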
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be emulated.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)  /* mul/div-style DX:AX accumulator pair */
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
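/*
 * Dispatch sketch (informative, assuming the layout described above): a
 * FASTOP2 table lays the byte/word/long/quad variants out at offsets 0,
 * 8, 16 and 24 from em_<op>, so a caller can select the variant for the
 * current operand size with pointer arithmetic instead of a jump table:
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	4-byte op: +16
 */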
	int (*execute)(struct x86_emulate_ctxt *ctxt);
	const struct opcode *group;
	const struct group_dual *gdual;
	const struct gprefix *gprefix;
	const struct escape *esc;
	void (*fastop)(struct fastop *fake);
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);

	struct opcode mod012[8];
	struct opcode mod3[8];

	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;

	struct opcode high[64];
/* EFLAGS bit definitions. */
#define EFLG_ID   (1<<21)
#define EFLG_VIP  (1<<20)
#define EFLG_VIF  (1<<19)
#define EFLG_AC   (1<<18)
#define EFLG_VM   (1<<17)
#define EFLG_RF   (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT   (1<<14)
#define EFLG_OF   (1<<11)
#define EFLG_DF   (1<<10)
#define EFLG_IF   (1<<9)
#define EFLG_TF   (1<<8)
#define EFLG_SF   (1<<7)
#define EFLG_ZF   (1<<6)
#define EFLG_AF   (1<<4)
#define EFLG_PF   (1<<2)
#define EFLG_CF   (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
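/*
 * Sketch of how EFLAGS_MASK is applied around a fastop call (informative;
 * it mirrors the fastop() dispatcher declared below): only the arithmetic
 * flags flow into and back out of the emulated operation, all other guest
 * flags are preserved:
 *
 *	flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
 *	... invoke the fastop stub ...
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */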
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	ON64(FOP1E(op##q, rax)) \

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	ON64(FOP1E(op, rcx)) \

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	ON64(FOP1EEX(op, rcx)) \

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
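/*
 * Informative note: each stub is "setcc %al; ret" -- three bytes plus
 * one -- so the ".align 4" above packs the sixteen condition variants
 * into a dense table.  test_cc() below indexes it directly: condition
 * 0x4 (ZF set) lands on the "sete %al" stub at em_setcc + 4 * 0x4.
 */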
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
static void assign_masked(ulong *dest, ulong src, ulong mask)
	*dest = (*dest & ~mask) | (src & mask);

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
	return (1UL << (ctxt->ad_bytes << 3)) - 1;

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
static int stack_size(struct x86_emulate_ctxt *ctxt)
	/* e.g. mask 0xffff -> 2 bytes, 0xffffffff -> 4, ~0UL -> 8 */
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	return reg & ad_mask(ctxt);

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
	return address_mask(ctxt, reg);

static void masked_increment(ulong *reg, ulong mask, int inc)
	assign_masked(reg, *reg + inc, mask);

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
static u32 desc_limit_scaled(struct desc_struct *desc)
	u32 limit = get_desc_limit(desc);

	/* g=1 means 4K granularity: e.g. a raw limit of 0xfffff scales to 0xffffffff */
	return desc->g ? (limit << 12) | 0xfff : limit;
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;

static int emulate_db(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DB_VECTOR, 0, false);

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, GP_VECTOR, err, true);

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, SS_VECTOR, err, true);

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, UD_VECTOR, 0, false);

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
	return emulate_exception(ctxt, TS_VECTOR, err, true);

static int emulate_de(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, DE_VECTOR, 0, false);

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 bool cs_l)
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
	return assign_eip_near(ctxt, ctxt->_eip + rel);
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) +
	    (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			lim = desc.d ? 0xffffffff : 0xffff;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);
			/* data segment or readable code segment */
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false, linear);

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data, unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
/*
 * Prefetch the remaining bytes of the instruction without crossing a page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);  /* 15 ^ cur_size == 15 - cur_size, as cur_size <= 15 */
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn.  So,
	 * if we still do not have enough bytes, we must have hit the
	 * 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	return X86EMUL_CONTINUE;
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * A byte-sized operand with no REX prefix selects the legacy high-byte
 * registers: modrm_reg values 4..7 decode to AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     bool byteop)
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
	int rc;

	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
static u8 test_cc(unsigned int condition, unsigned long flags)
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
static void fetch_register_operand(struct operand *op)
	switch (op->bytes) {
	case 1: op->val = *(u8 *)op->addr.reg;  break;
	case 2: op->val = *(u16 *)op->addr.reg; break;
	case 4: op->val = *(u32 *)op->addr.reg; break;
	case 8: op->val = *(u64 *)op->addr.reg; break;
	}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	}
	ctxt->ops->put_fpu(ctxt);

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	}
	ctxt->ops->put_fpu(ctxt);
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	}
	ctxt->ops->put_fpu(ctxt);

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	}
	ctxt->ops->put_fpu(ctxt);
static int em_fninit(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		read_sse_reg(ctxt, &op->vec_val, reg);
	if (ctxt->d & Mmx) {

	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
		if (ctxt->d & Mmx) {
			op->addr.mm = ctxt->modrm_rm & 7;
		fetch_register_operand(op);
		return rc;
	}

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(u16, ctxt);
		switch (ctxt->modrm_rm) {
			modrm_ea += bx + si;
			modrm_ea += bx + di;
			modrm_ea += bp + si;
			modrm_ea += bp + di;
		if (ctxt->modrm_mod != 0)
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
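		/*
		 * Worked example (informative): for "mov 0x1234(%bp,%si),%ax"
		 * the ModRM byte is 0x82 -- mod=2 (disp16), reg=0 (AX),
		 * rm=2 (BP+SI) -- so modrm_ea = bp + si + 0x1234, truncated
		 * to 16 bits, with SS as the default segment because BP is
		 * involved.
		 */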
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(s32, ctxt);
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
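/*
 * SIB example (informative): "mov 0x8(%rax,%rbx,4), %ecx" encodes rm=4
 * (a SIB byte follows) and sib=0x98 -- scale=2, index=3 (RBX), base=0
 * (RAX) -- giving modrm_ea = rax + (rbx << 2) + 0x8.
 */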
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
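/*
 * Worked example (informative): "bts %eax, (%rdi)" with eax == 100 and
 * dst.bytes == 4 gives mask == ~31, so sv == 96: the effective address
 * is advanced by 96 / 8 == 12 bytes and the bit offset is reduced to
 * 100 & 31 == 4, i.e. bit 4 of the dword at 12(%rdi).
 */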
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data, unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data, unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
	}
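/*
 * Example of the read-ahead sizing (informative): for "rep insb" with
 * ecx == 300, DF clear and rdi 0xf00 bytes into a page, in_page comes
 * out as 0x100, so n = min3(0x100, sizeof(rc->data), 300) bytes are
 * fetched in a single pio_in_emulated callback and then drained from
 * the cache on subsequent iterations.
 */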
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* nonconforming code segment */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
static void write_register_operand(struct operand *op)
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
		write_register_operand(op);
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
		return segmented_write(ctxt,
		return segmented_write(ctxt,
				       op->bytes * op->count);
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
	return X86EMUL_CONTINUE;
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);

static int em_push(struct x86_emulate_ctxt *ctxt)
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;

static int em_pop(struct x86_emulate_ctxt *ctxt)
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;

static int em_popf(struct x86_emulate_ctxt *ctxt)
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
static int em_enter(struct x86_emulate_ctxt *ctxt)
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;

static int em_leave(struct x86_emulate_ctxt *ctxt)
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
static int em_pusha(struct x86_emulate_ctxt *ctxt)
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;

static int em_pushf(struct x86_emulate_ctxt *ctxt)
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);

static int em_popa(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	const struct x86_emulate_ops *ops = ctxt->ops;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
static int em_iret(struct x86_emulate_ctxt *ctxt)
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
	return assign_eip_near(ctxt, ctxt->src.val);

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;

static int em_ret(struct x86_emulate_ctxt *ctxt)
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;

static int em_lseg(struct x86_emulate_ctxt *ctxt)
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->dpl = 0;		/* will be adjusted later */

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall is always enabled in long mode, so the check only
	 * needs to become vendor specific (via cpuid) when other modes
	 * are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * Even a 64-bit guest running a 32-bit compat application will
	 * raise #UD!  While this behaviour could be fixed (by emulating
	 * the AMD response), AMD CPUs cannot be made to behave like
	 * Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
static int em_syscall(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);
	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject a #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);
	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
2445 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2448 if (ctxt->mode == X86EMUL_MODE_REAL)
2450 if (ctxt->mode == X86EMUL_MODE_VM86)
2452 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2453 return ctxt->ops->cpl(ctxt) > iopl;
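/*
 * When IOPL does not permit the access, the TSS I/O permission bitmap
 * decides. The word at TSS offset 102 holds the bitmap offset; port N
 * maps to bit (N & 7) of the byte at bitmap + N/8, and a set bit
 * denies access. E.g. checking 2 bytes at port 0x3f9 reads the 16-bit
 * word at bitmap + 0x7f and tests bits 1-2, so accesses that straddle
 * a byte boundary are still covered by the single read below.
 */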
2456 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2459 const struct x86_emulate_ops *ops = ctxt->ops;
2460 struct desc_struct tr_seg;
2463 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2464 unsigned mask = (1 << len) - 1;
2467 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2470 if (desc_limit_scaled(&tr_seg) < 103)
2472 base = get_desc_base(&tr_seg);
2473 #ifdef CONFIG_X86_64
2474 base |= ((u64)base3) << 32;
2476 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2477 if (r != X86EMUL_CONTINUE)
2479 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2481 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2482 if (r != X86EMUL_CONTINUE)
2484 if ((perm >> bit_idx) & mask)
static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2495 if (emulator_bad_iopl(ctxt))
2496 if (!emulator_io_port_access_allowed(ctxt, port, len))
2499 ctxt->perm_ok = true;
2504 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2505 struct tss_segment_16 *tss)
2507 tss->ip = ctxt->_eip;
2508 tss->flag = ctxt->eflags;
2509 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2510 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2511 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2512 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2513 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2514 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2515 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2516 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2518 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2519 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2520 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2521 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2522 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2525 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2526 struct tss_segment_16 *tss)
2531 ctxt->_eip = tss->ip;
2532 ctxt->eflags = tss->flag | 2;
2533 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2534 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2535 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2536 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2537 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2538 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2539 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2540 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
* SDM says that segment selectors are loaded before segment
* descriptors.
2546 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2547 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2548 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2549 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2550 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
* Now load the segment descriptors. If a fault happens at this stage,
* it is handled in the context of the new task.
2558 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2560 if (ret != X86EMUL_CONTINUE)
2562 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2564 if (ret != X86EMUL_CONTINUE)
2566 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2568 if (ret != X86EMUL_CONTINUE)
2570 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2579 return X86EMUL_CONTINUE;
2582 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2583 u16 tss_selector, u16 old_tss_sel,
2584 ulong old_tss_base, struct desc_struct *new_desc)
2586 const struct x86_emulate_ops *ops = ctxt->ops;
2587 struct tss_segment_16 tss_seg;
2589 u32 new_tss_base = get_desc_base(new_desc);
2591 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2593 if (ret != X86EMUL_CONTINUE)
2594 /* FIXME: need to provide precise fault address */
2597 save_state_to_tss16(ctxt, &tss_seg);
2599 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2601 if (ret != X86EMUL_CONTINUE)
2602 /* FIXME: need to provide precise fault address */
2605 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2607 if (ret != X86EMUL_CONTINUE)
2608 /* FIXME: need to provide precise fault address */
2611 if (old_tss_sel != 0xffff) {
2612 tss_seg.prev_task_link = old_tss_sel;
2614 ret = ops->write_std(ctxt, new_tss_base,
2615 &tss_seg.prev_task_link,
2616 sizeof tss_seg.prev_task_link,
2618 if (ret != X86EMUL_CONTINUE)
2619 /* FIXME: need to provide precise fault address */
2623 return load_state_from_tss16(ctxt, &tss_seg);
2626 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2627 struct tss_segment_32 *tss)
/* CR3 and the LDT selector are intentionally not saved */
2630 tss->eip = ctxt->_eip;
2631 tss->eflags = ctxt->eflags;
2632 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2633 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2634 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2635 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2636 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2637 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2638 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2639 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2641 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2642 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2643 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2644 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2645 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2646 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2649 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2650 struct tss_segment_32 *tss)
2655 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2656 return emulate_gp(ctxt, 0);
2657 ctxt->_eip = tss->eip;
2658 ctxt->eflags = tss->eflags | 2;
2660 /* General purpose registers */
2661 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2662 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2663 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2664 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2665 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2666 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2667 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2668 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
2675 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2676 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2677 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2678 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2679 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2680 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2681 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2684 * If we're switching between Protected Mode and VM86, we need to make
2685 * sure to update the mode before loading the segment descriptors so
2686 * that the selectors are interpreted correctly.
2688 if (ctxt->eflags & X86_EFLAGS_VM) {
2689 ctxt->mode = X86EMUL_MODE_VM86;
2692 ctxt->mode = X86EMUL_MODE_PROT32;
* Now load the segment descriptors. If a fault happens at this stage,
* it is handled in the context of the new task.
2700 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2702 if (ret != X86EMUL_CONTINUE)
2704 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2706 if (ret != X86EMUL_CONTINUE)
2708 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2710 if (ret != X86EMUL_CONTINUE)
2712 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2726 if (ret != X86EMUL_CONTINUE)
2729 return X86EMUL_CONTINUE;
2732 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2733 u16 tss_selector, u16 old_tss_sel,
2734 ulong old_tss_base, struct desc_struct *new_desc)
2736 const struct x86_emulate_ops *ops = ctxt->ops;
2737 struct tss_segment_32 tss_seg;
2739 u32 new_tss_base = get_desc_base(new_desc);
2740 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2741 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2743 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2745 if (ret != X86EMUL_CONTINUE)
2746 /* FIXME: need to provide precise fault address */
2749 save_state_to_tss32(ctxt, &tss_seg);
2751 /* Only GP registers and segment selectors are saved */
2752 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2753 ldt_sel_offset - eip_offset, &ctxt->exception);
2754 if (ret != X86EMUL_CONTINUE)
2755 /* FIXME: need to provide precise fault address */
2758 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2760 if (ret != X86EMUL_CONTINUE)
2761 /* FIXME: need to provide precise fault address */
2764 if (old_tss_sel != 0xffff) {
2765 tss_seg.prev_task_link = old_tss_sel;
2767 ret = ops->write_std(ctxt, new_tss_base,
2768 &tss_seg.prev_task_link,
2769 sizeof tss_seg.prev_task_link,
2771 if (ret != X86EMUL_CONTINUE)
2772 /* FIXME: need to provide precise fault address */
2776 return load_state_from_tss32(ctxt, &tss_seg);
2779 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2780 u16 tss_selector, int idt_index, int reason,
2781 bool has_error_code, u32 error_code)
2783 const struct x86_emulate_ops *ops = ctxt->ops;
2784 struct desc_struct curr_tss_desc, next_tss_desc;
2786 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2787 ulong old_tss_base =
2788 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2792 /* FIXME: old_tss_base == ~0 ? */
2794 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2795 if (ret != X86EMUL_CONTINUE)
2797 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2798 if (ret != X86EMUL_CONTINUE)
2801 /* FIXME: check that next_tss_desc is tss */
2804 * Check privileges. The three cases are task switch caused by...
2806 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2807 * 2. Exception/IRQ/iret: No check is performed
2808 * 3. jmp/call to TSS: Check against DPL of the TSS
2810 if (reason == TASK_SWITCH_GATE) {
2811 if (idt_index != -1) {
2812 /* Software interrupts */
2813 struct desc_struct task_gate_desc;
2816 ret = read_interrupt_descriptor(ctxt, idt_index,
2818 if (ret != X86EMUL_CONTINUE)
2821 dpl = task_gate_desc.dpl;
2822 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2823 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2825 } else if (reason != TASK_SWITCH_IRET) {
2826 int dpl = next_tss_desc.dpl;
2827 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2828 return emulate_gp(ctxt, tss_selector);
2832 desc_limit = desc_limit_scaled(&next_tss_desc);
2833 if (!next_tss_desc.p ||
2834 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2835 desc_limit < 0x2b)) {
2836 return emulate_ts(ctxt, tss_selector & 0xfffc);
2839 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2840 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2841 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2844 if (reason == TASK_SWITCH_IRET)
2845 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/* Set the back link to the previous task only if the NT bit is set in
   eflags; note that old_tss_sel is not used after this point. */
2849 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2850 old_tss_sel = 0xffff;
2852 if (next_tss_desc.type & 8)
2853 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2854 old_tss_base, &next_tss_desc);
2856 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2857 old_tss_base, &next_tss_desc);
2858 if (ret != X86EMUL_CONTINUE)
2861 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2862 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2864 if (reason != TASK_SWITCH_IRET) {
2865 next_tss_desc.type |= (1 << 1); /* set busy flag */
2866 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2869 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2870 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2872 if (has_error_code) {
2873 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2874 ctxt->lock_prefix = 0;
2875 ctxt->src.val = (unsigned long) error_code;
2876 ret = em_push(ctxt);
2882 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2883 u16 tss_selector, int idt_index, int reason,
2884 bool has_error_code, u32 error_code)
2888 invalidate_registers(ctxt);
2889 ctxt->_eip = ctxt->eip;
2890 ctxt->dst.type = OP_NONE;
2892 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2893 has_error_code, error_code);
2895 if (rc == X86EMUL_CONTINUE) {
2896 ctxt->eip = ctxt->_eip;
2897 writeback_registers(ctxt);
2900 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2903 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2906 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2908 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2909 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2912 static int em_das(struct x86_emulate_ctxt *ctxt)
2915 bool af, cf, old_cf;
2917 cf = ctxt->eflags & X86_EFLAGS_CF;
2923 af = ctxt->eflags & X86_EFLAGS_AF;
2924 if ((al & 0x0f) > 9 || af) {
2926 cf = old_cf | (al >= 250);
2931 if (old_al > 0x99 || old_cf) {
2937 /* Set PF, ZF, SF */
2938 ctxt->src.type = OP_IMM;
2940 ctxt->src.bytes = 1;
2941 fastop(ctxt, em_or);
2942 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2944 ctxt->eflags |= X86_EFLAGS_CF;
2946 ctxt->eflags |= X86_EFLAGS_AF;
2947 return X86EMUL_CONTINUE;
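/*
 * Worked example for em_das() above: after SUB computes
 * 0x53 - 0x08 = 0x4b with AF set, the low nibble 0xb > 9 triggers
 * al -= 6, giving 0x45, the packed-BCD result of 53 - 8.
 */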
2950 static int em_aam(struct x86_emulate_ctxt *ctxt)
2954 if (ctxt->src.val == 0)
2955 return emulate_de(ctxt);
2957 al = ctxt->dst.val & 0xff;
2958 ah = al / ctxt->src.val;
2959 al %= ctxt->src.val;
2961 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2963 /* Set PF, ZF, SF */
2964 ctxt->src.type = OP_IMM;
2966 ctxt->src.bytes = 1;
2967 fastop(ctxt, em_or);
2969 return X86EMUL_CONTINUE;
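/*
 * AAM example: with AL = 0x35 (53 decimal) and the default base 10,
 * the division above yields AH = 5 and AL = 3, i.e. AX = 0x0503.
 */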
2972 static int em_aad(struct x86_emulate_ctxt *ctxt)
2974 u8 al = ctxt->dst.val & 0xff;
2975 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2977 al = (al + (ah * ctxt->src.val)) & 0xff;
2979 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2981 /* Set PF, ZF, SF */
2982 ctxt->src.type = OP_IMM;
2984 ctxt->src.bytes = 1;
2985 fastop(ctxt, em_or);
2987 return X86EMUL_CONTINUE;
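/*
 * AAD example: with AH = 5, AL = 3 and the default base 10, the code
 * above computes AL = (3 + 5 * 10) & 0xff = 0x35 (53 decimal); AH is
 * cleared because only the upper word of the destination is kept.
 */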
2990 static int em_call(struct x86_emulate_ctxt *ctxt)
2993 long rel = ctxt->src.val;
2995 ctxt->src.val = (unsigned long)ctxt->_eip;
2996 rc = jmp_rel(ctxt, rel);
2997 if (rc != X86EMUL_CONTINUE)
2999 return em_push(ctxt);
3002 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3007 struct desc_struct old_desc, new_desc;
3008 const struct x86_emulate_ops *ops = ctxt->ops;
3009 int cpl = ctxt->ops->cpl(ctxt);
3011 old_eip = ctxt->_eip;
3012 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3014 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3015 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3017 if (rc != X86EMUL_CONTINUE)
return rc;
3020 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3021 if (rc != X86EMUL_CONTINUE)
3024 ctxt->src.val = old_cs;
3026 if (rc != X86EMUL_CONTINUE)
3029 ctxt->src.val = old_eip;
/* If we failed, we tainted the memory, but at the very least we
   should restore cs. */
3033 if (rc != X86EMUL_CONTINUE)
3037 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3042 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3047 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3048 if (rc != X86EMUL_CONTINUE)
3050 rc = assign_eip_near(ctxt, eip);
3051 if (rc != X86EMUL_CONTINUE)
3053 rsp_increment(ctxt, ctxt->src.val);
3054 return X86EMUL_CONTINUE;
3057 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3059 /* Write back the register source. */
3060 ctxt->src.val = ctxt->dst.val;
3061 write_register_operand(&ctxt->src);
3063 /* Write back the memory destination with implicit LOCK prefix. */
3064 ctxt->dst.val = ctxt->src.orig_val;
3065 ctxt->lock_prefix = 1;
3066 return X86EMUL_CONTINUE;
3069 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3071 ctxt->dst.val = ctxt->src2.val;
3072 return fastop(ctxt, em_imul);
3075 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3077 ctxt->dst.type = OP_REG;
3078 ctxt->dst.bytes = ctxt->src.bytes;
3079 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
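/*
 * Replicate the sign bit of the source into every bit of RDX: the
 * shift leaves 0 or 1, subtracting 1 gives all-zeros or all-ones, and
 * the ~ inverts that; e.g. CDQ with a negative EAX sets EDX to
 * 0xffffffff.
 */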
3080 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3082 return X86EMUL_CONTINUE;
3085 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3089 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3090 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3091 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3092 return X86EMUL_CONTINUE;
3095 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3099 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3100 return emulate_gp(ctxt, 0);
3101 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3102 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3103 return X86EMUL_CONTINUE;
3106 static int em_mov(struct x86_emulate_ctxt *ctxt)
3108 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3109 return X86EMUL_CONTINUE;
3112 #define FFL(x) bit(X86_FEATURE_##x)
3114 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3116 u32 ebx, ecx, edx, eax = 1;
3120 * Check MOVBE is set in the guest-visible CPUID leaf.
3122 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3123 if (!(ecx & FFL(MOVBE)))
3124 return emulate_ud(ctxt);
3126 switch (ctxt->op_bytes) {
* From the MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Casting either ->valptr or ->val to u16 breaks strict aliasing
* rules, so we have to do the operation almost by hand.
3136 tmp = (u16)ctxt->src.val;
3137 ctxt->dst.val &= ~0xffffUL;
3138 ctxt->dst.val |= (unsigned long)swab16(tmp);
3141 ctxt->dst.val = swab32((u32)ctxt->src.val);
3144 ctxt->dst.val = swab64(ctxt->src.val);
3149 return X86EMUL_CONTINUE;
3152 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3154 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3155 return emulate_gp(ctxt, 0);
3157 /* Disable writeback. */
3158 ctxt->dst.type = OP_NONE;
3159 return X86EMUL_CONTINUE;
3162 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3166 if (ctxt->mode == X86EMUL_MODE_PROT64)
3167 val = ctxt->src.val & ~0ULL;
3169 val = ctxt->src.val & ~0U;
3171 /* #UD condition is already handled. */
3172 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3173 return emulate_gp(ctxt, 0);
3175 /* Disable writeback. */
3176 ctxt->dst.type = OP_NONE;
3177 return X86EMUL_CONTINUE;
3180 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3184 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3185 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3186 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3187 return emulate_gp(ctxt, 0);
3189 return X86EMUL_CONTINUE;
3192 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3196 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3197 return emulate_gp(ctxt, 0);
3199 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3200 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3201 return X86EMUL_CONTINUE;
3204 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3206 if (ctxt->modrm_reg > VCPU_SREG_GS)
3207 return emulate_ud(ctxt);
3209 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3210 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3211 ctxt->dst.bytes = 2;
3212 return X86EMUL_CONTINUE;
3215 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3217 u16 sel = ctxt->src.val;
3219 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3220 return emulate_ud(ctxt);
3222 if (ctxt->modrm_reg == VCPU_SREG_SS)
3223 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3225 /* Disable writeback. */
3226 ctxt->dst.type = OP_NONE;
3227 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3230 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3232 u16 sel = ctxt->src.val;
3234 /* Disable writeback. */
3235 ctxt->dst.type = OP_NONE;
3236 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3239 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3241 u16 sel = ctxt->src.val;
3243 /* Disable writeback. */
3244 ctxt->dst.type = OP_NONE;
3245 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3248 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3253 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3254 if (rc == X86EMUL_CONTINUE)
3255 ctxt->ops->invlpg(ctxt, linear);
3256 /* Disable writeback. */
3257 ctxt->dst.type = OP_NONE;
3258 return X86EMUL_CONTINUE;
3261 static int em_clts(struct x86_emulate_ctxt *ctxt)
3265 cr0 = ctxt->ops->get_cr(ctxt, 0);
3267 ctxt->ops->set_cr(ctxt, 0, cr0);
3268 return X86EMUL_CONTINUE;
3271 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3273 int rc = ctxt->ops->fix_hypercall(ctxt);
3275 if (rc != X86EMUL_CONTINUE)
3278 /* Let the processor re-execute the fixed hypercall */
3279 ctxt->_eip = ctxt->eip;
3280 /* Disable writeback. */
3281 ctxt->dst.type = OP_NONE;
3282 return X86EMUL_CONTINUE;
3285 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3286 void (*get)(struct x86_emulate_ctxt *ctxt,
3287 struct desc_ptr *ptr))
3289 struct desc_ptr desc_ptr;
3291 if (ctxt->mode == X86EMUL_MODE_PROT64)
3293 get(ctxt, &desc_ptr);
3294 if (ctxt->op_bytes == 2) {
3296 desc_ptr.address &= 0x00ffffff;
3298 /* Disable writeback. */
3299 ctxt->dst.type = OP_NONE;
3300 return segmented_write(ctxt, ctxt->dst.addr.mem,
3301 &desc_ptr, 2 + ctxt->op_bytes);
3304 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3306 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3309 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3311 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3314 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3316 struct desc_ptr desc_ptr;
3319 if (ctxt->mode == X86EMUL_MODE_PROT64)
3321 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3322 &desc_ptr.size, &desc_ptr.address,
3324 if (rc != X86EMUL_CONTINUE)
3326 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3327 /* Disable writeback. */
3328 ctxt->dst.type = OP_NONE;
3329 return X86EMUL_CONTINUE;
3332 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3336 rc = ctxt->ops->fix_hypercall(ctxt);
3338 /* Disable writeback. */
3339 ctxt->dst.type = OP_NONE;
3343 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3345 struct desc_ptr desc_ptr;
3348 if (ctxt->mode == X86EMUL_MODE_PROT64)
3350 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3351 &desc_ptr.size, &desc_ptr.address,
3353 if (rc != X86EMUL_CONTINUE)
3355 ctxt->ops->set_idt(ctxt, &desc_ptr);
3356 /* Disable writeback. */
3357 ctxt->dst.type = OP_NONE;
3358 return X86EMUL_CONTINUE;
3361 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3363 if (ctxt->dst.type == OP_MEM)
3364 ctxt->dst.bytes = 2;
3365 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3366 return X86EMUL_CONTINUE;
3369 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3371 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3372 | (ctxt->src.val & 0x0f));
3373 ctxt->dst.type = OP_NONE;
3374 return X86EMUL_CONTINUE;
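/*
 * LOOPNE/LOOPE/LOOP are opcodes 0xe0/0xe1/0xe2. For the first two,
 * ctxt->b ^ 0x5 maps 0xe0 to condition code 5 (NZ) and 0xe1 to
 * condition code 4 (Z), so test_cc() checks ZF with the right sense;
 * plain LOOP (0xe2) does not test ZF at all.
 */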
3377 static int em_loop(struct x86_emulate_ctxt *ctxt)
3379 int rc = X86EMUL_CONTINUE;
3381 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3382 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3383 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3384 rc = jmp_rel(ctxt, ctxt->src.val);
3389 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3391 int rc = X86EMUL_CONTINUE;
3393 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3394 rc = jmp_rel(ctxt, ctxt->src.val);
3399 static int em_in(struct x86_emulate_ctxt *ctxt)
3401 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3403 return X86EMUL_IO_NEEDED;
3405 return X86EMUL_CONTINUE;
3408 static int em_out(struct x86_emulate_ctxt *ctxt)
3410 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3412 /* Disable writeback. */
3413 ctxt->dst.type = OP_NONE;
3414 return X86EMUL_CONTINUE;
3417 static int em_cli(struct x86_emulate_ctxt *ctxt)
3419 if (emulator_bad_iopl(ctxt))
3420 return emulate_gp(ctxt, 0);
3422 ctxt->eflags &= ~X86_EFLAGS_IF;
3423 return X86EMUL_CONTINUE;
3426 static int em_sti(struct x86_emulate_ctxt *ctxt)
3428 if (emulator_bad_iopl(ctxt))
3429 return emulate_gp(ctxt, 0);
3431 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3432 ctxt->eflags |= X86_EFLAGS_IF;
3433 return X86EMUL_CONTINUE;
3436 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3438 u32 eax, ebx, ecx, edx;
3440 eax = reg_read(ctxt, VCPU_REGS_RAX);
3441 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3442 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3443 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3444 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3445 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3446 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3447 return X86EMUL_CONTINUE;
3450 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3454 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3455 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3457 ctxt->eflags &= ~0xffUL;
3458 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3459 return X86EMUL_CONTINUE;
3462 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3464 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3465 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3466 return X86EMUL_CONTINUE;
3469 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3471 switch (ctxt->op_bytes) {
3472 #ifdef CONFIG_X86_64
3474 asm("bswap %0" : "+r"(ctxt->dst.val));
3478 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3481 return X86EMUL_CONTINUE;
3484 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3486 /* emulating clflush regardless of cpuid */
3487 return X86EMUL_CONTINUE;
3490 static bool valid_cr(int nr)
3502 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3504 if (!valid_cr(ctxt->modrm_reg))
3505 return emulate_ud(ctxt);
3507 return X86EMUL_CONTINUE;
3510 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3512 u64 new_val = ctxt->src.val64;
3513 int cr = ctxt->modrm_reg;
3516 static u64 cr_reserved_bits[] = {
3517 0xffffffff00000000ULL,
3518 0, 0, 0, /* CR3 checked later */
3525 return emulate_ud(ctxt);
3527 if (new_val & cr_reserved_bits[cr])
3528 return emulate_gp(ctxt, 0);
3533 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3534 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3535 return emulate_gp(ctxt, 0);
3537 cr4 = ctxt->ops->get_cr(ctxt, 4);
3538 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3540 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3541 !(cr4 & X86_CR4_PAE))
3542 return emulate_gp(ctxt, 0);
3549 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3550 if (efer & EFER_LMA)
3551 rsvd = CR3_L_MODE_RESERVED_BITS;
3554 return emulate_gp(ctxt, 0);
3559 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3561 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3562 return emulate_gp(ctxt, 0);
3568 return X86EMUL_CONTINUE;
3571 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3575 ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.GD (general detect enable, bit 13) is set */
3578 return dr7 & (1 << 13);
3581 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3583 int dr = ctxt->modrm_reg;
3587 return emulate_ud(ctxt);
3589 cr4 = ctxt->ops->get_cr(ctxt, 4);
3590 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3591 return emulate_ud(ctxt);
3593 if (check_dr7_gd(ctxt)) {
3596 ctxt->ops->get_dr(ctxt, 6, &dr6);
3598 dr6 |= DR6_BD | DR6_RTM;
3599 ctxt->ops->set_dr(ctxt, 6, dr6);
3600 return emulate_db(ctxt);
3603 return X86EMUL_CONTINUE;
3606 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3608 u64 new_val = ctxt->src.val64;
3609 int dr = ctxt->modrm_reg;
3611 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3612 return emulate_gp(ctxt, 0);
3614 return check_dr_read(ctxt);
3617 static int check_svme(struct x86_emulate_ctxt *ctxt)
3621 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3623 if (!(efer & EFER_SVME))
3624 return emulate_ud(ctxt);
3626 return X86EMUL_CONTINUE;
3629 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3631 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3633 /* Valid physical address? */
3634 if (rax & 0xffff000000000000ULL)
3635 return emulate_gp(ctxt, 0);
3637 return check_svme(ctxt);
3640 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3642 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3644 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3645 return emulate_ud(ctxt);
3647 return X86EMUL_CONTINUE;
3650 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3652 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3653 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3655 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3656 ctxt->ops->check_pmc(ctxt, rcx))
3657 return emulate_gp(ctxt, 0);
3659 return X86EMUL_CONTINUE;
3662 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3664 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3666 return emulate_gp(ctxt, 0);
3668 return X86EMUL_CONTINUE;
3671 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3673 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3675 return emulate_gp(ctxt, 0);
3677 return X86EMUL_CONTINUE;
3680 #define D(_y) { .flags = (_y) }
3681 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3682 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3683 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3684 #define N D(NotImpl)
3685 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3686 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3687 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3688 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3689 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3690 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3691 #define II(_f, _e, _i) \
3692 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3693 #define IIP(_f, _e, _i, _p) \
3694 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3695 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3696 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3698 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3699 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3700 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3701 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3702 #define I2bvIP(_f, _e, _i, _p) \
3703 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3705 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3706 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3707 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
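/*
 * F6ALU expands to the six classic ALU encodings in opcode order:
 * r/m8,r8; r/m,r; r8,r/m8; r,r/m; AL,imm8; rAX,imm. Lock is masked
 * off for the forms whose destination is not memory, since only the
 * memory-destination forms may take a LOCK prefix.
 */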
3709 static const struct opcode group7_rm0[] = {
3711 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3715 static const struct opcode group7_rm1[] = {
3716 DI(SrcNone | Priv, monitor),
3717 DI(SrcNone | Priv, mwait),
3721 static const struct opcode group7_rm3[] = {
3722 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3723 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3724 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3725 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3726 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3727 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3728 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3729 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3732 static const struct opcode group7_rm7[] = {
3734 DIP(SrcNone, rdtscp, check_rdtsc),
3738 static const struct opcode group1[] = {
3740 F(Lock | PageTable, em_or),
3743 F(Lock | PageTable, em_and),
3749 static const struct opcode group1A[] = {
3750 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3753 static const struct opcode group2[] = {
3754 F(DstMem | ModRM, em_rol),
3755 F(DstMem | ModRM, em_ror),
3756 F(DstMem | ModRM, em_rcl),
3757 F(DstMem | ModRM, em_rcr),
3758 F(DstMem | ModRM, em_shl),
3759 F(DstMem | ModRM, em_shr),
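/* /6 is SAL, an undocumented alias of SHL, hence em_shl again: */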
3760 F(DstMem | ModRM, em_shl),
3761 F(DstMem | ModRM, em_sar),
3764 static const struct opcode group3[] = {
3765 F(DstMem | SrcImm | NoWrite, em_test),
3766 F(DstMem | SrcImm | NoWrite, em_test),
3767 F(DstMem | SrcNone | Lock, em_not),
3768 F(DstMem | SrcNone | Lock, em_neg),
3769 F(DstXacc | Src2Mem, em_mul_ex),
3770 F(DstXacc | Src2Mem, em_imul_ex),
3771 F(DstXacc | Src2Mem, em_div_ex),
3772 F(DstXacc | Src2Mem, em_idiv_ex),
3775 static const struct opcode group4[] = {
3776 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3777 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3781 static const struct opcode group5[] = {
3782 F(DstMem | SrcNone | Lock, em_inc),
3783 F(DstMem | SrcNone | Lock, em_dec),
3784 I(SrcMem | NearBranch, em_call_near_abs),
3785 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3786 I(SrcMem | NearBranch, em_jmp_abs),
3787 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3788 I(SrcMem | Stack, em_push), D(Undefined),
3791 static const struct opcode group6[] = {
3794 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3795 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3799 static const struct group_dual group7 = { {
3800 II(Mov | DstMem, em_sgdt, sgdt),
3801 II(Mov | DstMem, em_sidt, sidt),
3802 II(SrcMem | Priv, em_lgdt, lgdt),
3803 II(SrcMem | Priv, em_lidt, lidt),
3804 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3805 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3806 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3810 N, EXT(0, group7_rm3),
3811 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3812 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3816 static const struct opcode group8[] = {
3818 F(DstMem | SrcImmByte | NoWrite, em_bt),
3819 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3820 F(DstMem | SrcImmByte | Lock, em_btr),
3821 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3824 static const struct group_dual group9 = { {
3825 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3827 N, N, N, N, N, N, N, N,
3830 static const struct opcode group11[] = {
3831 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3835 static const struct gprefix pfx_0f_ae_7 = {
3836 I(SrcMem | ByteOp, em_clflush), N, N, N,
3839 static const struct group_dual group15 = { {
3840 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3842 N, N, N, N, N, N, N, N,
3845 static const struct gprefix pfx_0f_6f_0f_7f = {
3846 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3849 static const struct gprefix pfx_0f_2b = {
3850 I(0, em_mov), I(0, em_mov), N, N,
3853 static const struct gprefix pfx_0f_28_0f_29 = {
3854 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3857 static const struct gprefix pfx_0f_e7 = {
3858 N, I(Sse, em_mov), N, N,
3861 static const struct escape escape_d9 = { {
3862 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3865 N, N, N, N, N, N, N, N,
3867 N, N, N, N, N, N, N, N,
3869 N, N, N, N, N, N, N, N,
3871 N, N, N, N, N, N, N, N,
3873 N, N, N, N, N, N, N, N,
3875 N, N, N, N, N, N, N, N,
3877 N, N, N, N, N, N, N, N,
3879 N, N, N, N, N, N, N, N,
3882 static const struct escape escape_db = { {
3883 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, N, N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3896 N, N, N, N, N, N, N, N,
3898 N, N, N, N, N, N, N, N,
3900 N, N, N, N, N, N, N, N,
3903 static const struct escape escape_dd = { {
3904 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, N, N, N, N, N,
3911 N, N, N, N, N, N, N, N,
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3917 N, N, N, N, N, N, N, N,
3919 N, N, N, N, N, N, N, N,
3921 N, N, N, N, N, N, N, N,
3924 static const struct opcode opcode_table[256] = {
3926 F6ALU(Lock, em_add),
3927 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3928 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3930 F6ALU(Lock | PageTable, em_or),
3931 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3934 F6ALU(Lock, em_adc),
3935 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3936 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3938 F6ALU(Lock, em_sbb),
3939 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3940 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3942 F6ALU(Lock | PageTable, em_and), N, N,
3944 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3946 F6ALU(Lock, em_xor), N, N,
3948 F6ALU(NoWrite, em_cmp), N, N,
3950 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3952 X8(I(SrcReg | Stack, em_push)),
3954 X8(I(DstReg | Stack, em_pop)),
3956 I(ImplicitOps | Stack | No64, em_pusha),
3957 I(ImplicitOps | Stack | No64, em_popa),
3958 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3961 I(SrcImm | Mov | Stack, em_push),
3962 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3963 I(SrcImmByte | Mov | Stack, em_push),
3964 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3965 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3966 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3968 X16(D(SrcImmByte | NearBranch)),
3970 G(ByteOp | DstMem | SrcImm, group1),
3971 G(DstMem | SrcImm, group1),
3972 G(ByteOp | DstMem | SrcImm | No64, group1),
3973 G(DstMem | SrcImmByte, group1),
3974 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3975 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3977 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3978 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3979 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3980 D(ModRM | SrcMem | NoAccess | DstReg),
3981 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3984 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3986 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3987 I(SrcImmFAddr | No64, em_call_far), N,
3988 II(ImplicitOps | Stack, em_pushf, pushf),
3989 II(ImplicitOps | Stack, em_popf, popf),
3990 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3992 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3993 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3994 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3995 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3997 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3998 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3999 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4000 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
4002 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4004 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4006 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4007 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4008 I(ImplicitOps | NearBranch, em_ret),
4009 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4010 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4011 G(ByteOp, group11), G(0, group11),
4013 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4014 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4015 I(ImplicitOps | Stack, em_ret_far),
4016 D(ImplicitOps), DI(SrcImmByte, intn),
4017 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4019 G(Src2One | ByteOp, group2), G(Src2One, group2),
4020 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4021 I(DstAcc | SrcImmUByte | No64, em_aam),
4022 I(DstAcc | SrcImmUByte | No64, em_aad),
4023 F(DstAcc | ByteOp | No64, em_salc),
4024 I(DstAcc | SrcXLat | ByteOp, em_mov),
4026 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4028 X3(I(SrcImmByte | NearBranch, em_loop)),
4029 I(SrcImmByte | NearBranch, em_jcxz),
4030 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4031 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4033 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4034 I(SrcImmFAddr | No64, em_jmp_far),
4035 D(SrcImmByte | ImplicitOps | NearBranch),
4036 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4037 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4039 N, DI(ImplicitOps, icebp), N, N,
4040 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4041 G(ByteOp, group3), G(0, group3),
4043 D(ImplicitOps), D(ImplicitOps),
4044 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4045 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4048 static const struct opcode twobyte_table[256] = {
4050 G(0, group6), GD(0, &group7), N, N,
4051 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4052 II(ImplicitOps | Priv, em_clts, clts), N,
4053 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4054 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4056 N, N, N, N, N, N, N, N,
4057 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4058 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4060 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4061 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4062 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4064 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4067 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4068 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4069 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4072 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4073 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4074 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4075 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4076 I(ImplicitOps | EmulateOnUD, em_sysenter),
4077 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4079 N, N, N, N, N, N, N, N,
4081 X16(D(DstReg | SrcMem | ModRM)),
4083 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4088 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4093 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4095 X16(D(SrcImm | NearBranch)),
4097 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4099 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4100 II(ImplicitOps, em_cpuid, cpuid),
4101 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4102 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4103 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4105 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4106 DI(ImplicitOps, rsm),
4107 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4108 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4109 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4110 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4112 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4113 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4114 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4115 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4116 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4117 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4121 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4122 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4123 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4125 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4126 N, D(DstMem | SrcReg | ModRM | Mov),
4127 N, N, N, GD(0, &group9),
4129 X8(I(DstReg, em_bswap)),
4131 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4133 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4134 N, N, N, N, N, N, N, N,
4136 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4139 static const struct gprefix three_byte_0f_38_f0 = {
4140 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4143 static const struct gprefix three_byte_0f_38_f1 = {
4144 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
* The insns below are selected by their mandatory prefix; the table
* itself is indexed by the third opcode byte.
4151 static const struct opcode opcode_map_0f_38[256] = {
4153 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4155 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4157 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4158 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4177 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4181 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4187 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4188 unsigned size, bool sign_extension)
4190 int rc = X86EMUL_CONTINUE;
4194 op->addr.mem.ea = ctxt->_eip;
4195 /* NB. Immediates are sign-extended as necessary. */
4196 switch (op->bytes) {
4198 op->val = insn_fetch(s8, ctxt);
4201 op->val = insn_fetch(s16, ctxt);
4204 op->val = insn_fetch(s32, ctxt);
4207 op->val = insn_fetch(s64, ctxt);
4210 if (!sign_extension) {
4211 switch (op->bytes) {
4219 op->val &= 0xffffffff;
4227 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4230 int rc = X86EMUL_CONTINUE;
4234 decode_register_operand(ctxt, op);
4237 rc = decode_imm(ctxt, op, 1, false);
4240 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4244 if (ctxt->d & BitOp)
4245 fetch_bit_operand(ctxt);
4246 op->orig_val = op->val;
4249 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4253 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4254 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4255 fetch_register_operand(op);
4256 op->orig_val = op->val;
4260 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4261 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4262 fetch_register_operand(op);
4263 op->orig_val = op->val;
4266 if (ctxt->d & ByteOp) {
4271 op->bytes = ctxt->op_bytes;
4272 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4273 fetch_register_operand(op);
4274 op->orig_val = op->val;
4278 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4280 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4281 op->addr.mem.seg = VCPU_SREG_ES;
4288 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4289 fetch_register_operand(op);
4293 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4296 rc = decode_imm(ctxt, op, 1, true);
4303 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4306 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4309 ctxt->memop.bytes = 1;
4310 if (ctxt->memop.type == OP_REG) {
4311 ctxt->memop.addr.reg = decode_register(ctxt,
4312 ctxt->modrm_rm, true);
4313 fetch_register_operand(&ctxt->memop);
4317 ctxt->memop.bytes = 2;
4320 ctxt->memop.bytes = 4;
4323 rc = decode_imm(ctxt, op, 2, false);
4326 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4330 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4332 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4333 op->addr.mem.seg = ctxt->seg_override;
4339 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4341 register_address(ctxt,
4342 reg_read(ctxt, VCPU_REGS_RBX) +
4343 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4344 op->addr.mem.seg = ctxt->seg_override;
4349 op->addr.mem.ea = ctxt->_eip;
4350 op->bytes = ctxt->op_bytes + 2;
4351 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4354 ctxt->memop.bytes = ctxt->op_bytes + 2;
4357 op->val = VCPU_SREG_ES;
4360 op->val = VCPU_SREG_CS;
4363 op->val = VCPU_SREG_SS;
4366 op->val = VCPU_SREG_DS;
4369 op->val = VCPU_SREG_FS;
4372 op->val = VCPU_SREG_GS;
4375 /* Special instructions do their own operand decoding. */
4377 op->type = OP_NONE; /* Disable writeback. */
4385 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4387 int rc = X86EMUL_CONTINUE;
4388 int mode = ctxt->mode;
4389 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4390 bool op_prefix = false;
4391 bool has_seg_override = false;
4392 struct opcode opcode;
4394 ctxt->memop.type = OP_NONE;
4395 ctxt->memopp = NULL;
4396 ctxt->_eip = ctxt->eip;
4397 ctxt->fetch.ptr = ctxt->fetch.data;
4398 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4399 ctxt->opcode_len = 1;
4401 memcpy(ctxt->fetch.data, insn, insn_len);
4403 rc = __do_insn_fetch_bytes(ctxt, 1);
4404 if (rc != X86EMUL_CONTINUE)
4409 case X86EMUL_MODE_REAL:
4410 case X86EMUL_MODE_VM86:
4411 case X86EMUL_MODE_PROT16:
4412 def_op_bytes = def_ad_bytes = 2;
4414 case X86EMUL_MODE_PROT32:
4415 def_op_bytes = def_ad_bytes = 4;
4417 #ifdef CONFIG_X86_64
4418 case X86EMUL_MODE_PROT64:
4424 return EMULATION_FAILED;
4427 ctxt->op_bytes = def_op_bytes;
4428 ctxt->ad_bytes = def_ad_bytes;
4430 /* Legacy prefixes. */
4432 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4433 case 0x66: /* operand-size override */
4435 /* switch between 2/4 bytes */
4436 ctxt->op_bytes = def_op_bytes ^ 6;
4438 case 0x67: /* address-size override */
4439 if (mode == X86EMUL_MODE_PROT64)
4440 /* switch between 4/8 bytes */
4441 ctxt->ad_bytes = def_ad_bytes ^ 12;
4443 /* switch between 2/4 bytes */
4444 ctxt->ad_bytes = def_ad_bytes ^ 6;
4446 case 0x26: /* ES override */
4447 case 0x2e: /* CS override */
4448 case 0x36: /* SS override */
4449 case 0x3e: /* DS override */
4450 has_seg_override = true;
4451 ctxt->seg_override = (ctxt->b >> 3) & 3;
4453 case 0x64: /* FS override */
4454 case 0x65: /* GS override */
4455 has_seg_override = true;
4456 ctxt->seg_override = ctxt->b & 7;
4458 case 0x40 ... 0x4f: /* REX */
4459 if (mode != X86EMUL_MODE_PROT64)
4461 ctxt->rex_prefix = ctxt->b;
4463 case 0xf0: /* LOCK */
4464 ctxt->lock_prefix = 1;
4466 case 0xf2: /* REPNE/REPNZ */
4467 case 0xf3: /* REP/REPE/REPZ */
4468 ctxt->rep_prefix = ctxt->b;
4474 /* Any legacy prefix after a REX prefix nullifies its effect. */
4476 ctxt->rex_prefix = 0;
4482 if (ctxt->rex_prefix & 8)
4483 ctxt->op_bytes = 8; /* REX.W */
4485 /* Opcode byte(s). */
4486 opcode = opcode_table[ctxt->b];
4487 /* Two-byte opcode? */
4488 if (ctxt->b == 0x0f) {
4489 ctxt->opcode_len = 2;
4490 ctxt->b = insn_fetch(u8, ctxt);
4491 opcode = twobyte_table[ctxt->b];
4493 /* 0F_38 opcode map */
4494 if (ctxt->b == 0x38) {
4495 ctxt->opcode_len = 3;
4496 ctxt->b = insn_fetch(u8, ctxt);
4497 opcode = opcode_map_0f_38[ctxt->b];
4500 ctxt->d = opcode.flags;
4502 if (ctxt->d & ModRM)
4503 ctxt->modrm = insn_fetch(u8, ctxt);
/* VEX-prefixed instructions are not implemented */
4506 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4507 (mode == X86EMUL_MODE_PROT64 ||
4508 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4512 while (ctxt->d & GroupMask) {
4513 switch (ctxt->d & GroupMask) {
4515 goffset = (ctxt->modrm >> 3) & 7;
4516 opcode = opcode.u.group[goffset];
4519 goffset = (ctxt->modrm >> 3) & 7;
4520 if ((ctxt->modrm >> 6) == 3)
4521 opcode = opcode.u.gdual->mod3[goffset];
4523 opcode = opcode.u.gdual->mod012[goffset];
4526 goffset = ctxt->modrm & 7;
4527 opcode = opcode.u.group[goffset];
4530 if (ctxt->rep_prefix && op_prefix)
4531 return EMULATION_FAILED;
4532 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4533 switch (simd_prefix) {
4534 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4535 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4536 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4537 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4541 if (ctxt->modrm > 0xbf)
4542 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4544 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4547 return EMULATION_FAILED;
4550 ctxt->d &= ~(u64)GroupMask;
4551 ctxt->d |= opcode.flags;
4556 return EMULATION_FAILED;
4558 ctxt->execute = opcode.u.execute;
4560 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4561 return EMULATION_FAILED;
4563 if (unlikely(ctxt->d &
4564 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4566 * These are copied unconditionally here, and checked unconditionally
4567 * in x86_emulate_insn.
4569 ctxt->check_perm = opcode.check_perm;
4570 ctxt->intercept = opcode.intercept;
4572 if (ctxt->d & NotImpl)
4573 return EMULATION_FAILED;
4575 if (mode == X86EMUL_MODE_PROT64) {
4576 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4578 else if (ctxt->d & NearBranch)
4582 if (ctxt->d & Op3264) {
4583 if (mode == X86EMUL_MODE_PROT64)
4590 ctxt->op_bytes = 16;
4591 else if (ctxt->d & Mmx)
4595 /* ModRM and SIB bytes. */
4596 if (ctxt->d & ModRM) {
4597 rc = decode_modrm(ctxt, &ctxt->memop);
4598 if (!has_seg_override) {
4599 has_seg_override = true;
4600 ctxt->seg_override = ctxt->modrm_seg;
4602 } else if (ctxt->d & MemAbs)
4603 rc = decode_abs(ctxt, &ctxt->memop);
4604 if (rc != X86EMUL_CONTINUE)
4607 if (!has_seg_override)
4608 ctxt->seg_override = VCPU_SREG_DS;
4610 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4613 * Decode and fetch the source operand: register, memory
4616 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4617 if (rc != X86EMUL_CONTINUE)
4621 * Decode and fetch the second source operand: register, memory
4624 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4625 if (rc != X86EMUL_CONTINUE)
4628 /* Decode and fetch the destination operand: register or memory. */
4629 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4631 if (ctxt->rip_relative)
4632 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4635 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4638 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4640 return ctxt->d & PageTable;
4643 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
/* The second termination condition applies only to REPE
* and REPNE. If the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ, test the corresponding
* termination condition:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
4652 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4653 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4654 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4655 ((ctxt->eflags & EFLG_ZF) == 0))
4656 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4657 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
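/*
 * Executing fwait forces delivery of any pending x87 exception; the
 * exception-table fixup in the asm below turns the resulting fault
 * into fault = 1, which is then reflected into the guest as #MF
 * instead of being taken by the host.
 */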
4663 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4667 ctxt->ops->get_fpu(ctxt);
4668 asm volatile("1: fwait \n\t"
4670 ".pushsection .fixup,\"ax\" \n\t"
4672 "movb $1, %[fault] \n\t"
4675 _ASM_EXTABLE(1b, 3b)
4676 : [fault]"+qm"(fault));
4677 ctxt->ops->put_fpu(ctxt);
4679 if (unlikely(fault))
4680 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4682 return X86EMUL_CONTINUE;
4685 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4688 if (op->type == OP_MM)
4689 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4692 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4694 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4695 if (!(ctxt->d & ByteOp))
4696 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4697 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4698 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4700 : "c"(ctxt->src2.val));
4701 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4702 if (!fop) /* exception is returned in fop variable */
4703 return emulate_de(ctxt);
4704 return X86EMUL_CONTINUE;
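/*
 * fastop() calling convention: the flag-producing stubs above take
 * dst in rax, src in rdx, src2 in rcx and the flags image via rdi,
 * and are laid out in FASTOP_SIZE-aligned size variants, so the
 * __ffs() offset selects the 1/2/4/8-byte flavour of the operation.
 */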
4707 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4709 memset(&ctxt->rip_relative, 0,
4710 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4712 ctxt->io_read.pos = 0;
4713 ctxt->io_read.end = 0;
4714 ctxt->mem_read.end = 0;
4717 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4719 const struct x86_emulate_ops *ops = ctxt->ops;
4720 int rc = X86EMUL_CONTINUE;
4721 int saved_dst_type = ctxt->dst.type;
4723 ctxt->mem_read.pos = 0;
4725 /* LOCK prefix is allowed only with some instructions */
4726 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4727 rc = emulate_ud(ctxt);
4731 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4732 rc = emulate_ud(ctxt);
4736 if (unlikely(ctxt->d &
4737 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4738 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4739 (ctxt->d & Undefined)) {
4740 rc = emulate_ud(ctxt);
4744 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4745 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4746 rc = emulate_ud(ctxt);
4750 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4751 rc = emulate_nm(ctxt);
4755 if (ctxt->d & Mmx) {
4756 rc = flush_pending_x87_faults(ctxt);
4757 if (rc != X86EMUL_CONTINUE)
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
4763 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4764 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4765 if (!(ctxt->d & Mov))
4766 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
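		/* e.g. "hlt" executed at CPL 3 is rejected here. */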
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
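			/* e.g. "rep movsb" with a zero count retires as a nop. */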
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

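	/*
	 * EFLAGS.RF suppresses instruction breakpoints for one instruction.
	 * Keep it set while a REP string instruction is still iterating so
	 * that restarting the insn does not retrigger #DB, and clear it
	 * for everything else.
	 */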
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;

			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
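		/* Sign-extend the low half of the accumulator in place:
		 * op_bytes 2/4/8 selects cbw, cwde or cdqe respectively.
		 */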
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
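			/*
			 * Below: with no read ahead buffer (r->end == 0) the
			 * emulation loop is left at every 1024th iteration
			 * (RCX & 0x3ff == 0); with a buffer in use it is left
			 * once the buffer is drained (r->end == r->pos).
			 */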
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}