/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
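/*
 * The operand types above are packed into the opcode flags word: the
 * destination field at DstShift, the source at SrcShift, and the second
 * source at Src2Shift.  A hedged sketch of how a field is pulled back out
 * (this mirrors what the decode logic elsewhere in this file does, not a
 * verbatim quote of it):
 *
 *	unsigned int dst_type = (ctxt->d >> DstShift) & OpMask;
 *	unsigned int src_type = (ctxt->d >> SrcShift) & OpMask;
 */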
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)  /* Memory operand is absolute displacement */
#define String      (1<<13)  /* String instruction (rep capable) */
#define Stack       (1<<14)  /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)  /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)  /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)  /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)  /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)  /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)  /* Escape to coprocessor instruction */
#define Sse         (1<<18)  /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)  /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)  /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)  /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)  /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)  /* No Such Instruction */
#define Lock        (1<<26)  /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)  /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)  /* instruction used to write page table */
#define NotImpl     (1 << 30)  /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID   (1<<21)
#define EFLG_VIP  (1<<20)
#define EFLG_VIF  (1<<19)
#define EFLG_AC   (1<<18)
#define EFLG_VM   (1<<17)
#define EFLG_RF   (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT   (1<<14)
#define EFLG_OF   (1<<11)
#define EFLG_DF   (1<<10)
#define EFLG_IF   (1<<9)
#define EFLG_TF   (1<<8)
#define EFLG_SF   (1<<7)
#define EFLG_ZF   (1<<6)
#define EFLG_AF   (1<<4)
#define EFLG_PF   (1<<2)
#define EFLG_CF   (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
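/*
 * Guest GPRs are accessed lazily through the small cache above: reg_read()
 * pulls a register into _regs[] on first use, reg_write() and reg_rmw()
 * additionally mark it dirty, and only the dirty subset is flushed back by
 * writeback_registers().  Illustrative usage (not taken verbatim from a
 * caller):
 *
 *	*reg_write(ctxt, VCPU_REGS_RCX) = 0;	// cached and marked dirty
 *	writeback_registers(ctxt);		// flushes only RCX
 */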
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
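/*
 * fastop() (defined later in this file) dispatches into a FASTOP table by
 * operand size.  Because every entry is exactly FASTOP_SIZE bytes and the
 * operand sizes are 1/2/4/8, the entry offset can be computed instead of
 * looked up; roughly (a sketch, not necessarily the verbatim
 * implementation):
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */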
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET
#define FOP1E(op, dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END
#define FOP2E(op, dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END
#define FOP3E(op, dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
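/*
 * Fastop entries that can fault (the DIV/IDIV variants built with FOP1EEX)
 * carry an _ASM_EXTABLE fixup pointing at kvm_fastop_exception, which
 * clears %esi -- the "ex" register of the fastop calling convention.  A
 * NULL fastop pointer on return therefore signals that the instruction
 * faulted; a minimal sketch of the check fastop() performs is roughly:
 *
 *	if (!fop)
 *		return emulate_de(ctxt);
 */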
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
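/*
 * Worked example: with a 16-bit stack segment (ss.d == 0), stack_mask()
 * returns 0xffff and stack_size() yields (__fls(0xffff) + 1) >> 3 == 2;
 * with a 32-bit segment the mask is 0xffffffff and the size is 4.  In
 * 64-bit mode the mask is ~0UL and stack_size() is 8.
 */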
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 int cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
#ifdef CONFIG_X86_64
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst >> 32) != 0))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
#endif
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) +
	     (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
	*max_size = 0;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			return emulate_gp(ctxt, 0);

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
				goto bad;
			*max_size = 0x10000 - la;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment or readable code segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if we still do not have
	 * enough bytes, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
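/*
 * Typical decoder usage of the fetch macros, as a sketch (the real decode
 * loop lives in x86_decode_insn(), outside this excerpt):
 *
 *	ctxt->b = insn_fetch(u8, ctxt);		// opcode byte
 *	ctxt->modrm = insn_fetch(u8, ctxt);	// ModRM byte, if any
 *
 * Both macros bail out to the enclosing function's "done" label on a
 * failed fetch, so they can only be used where such a label exists.
 */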
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
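/*
 * The switch is needed because the XMM register number must be encoded as
 * part of each movdqa instruction; inline asm cannot select %xmmN with a
 * runtime value.  The same pattern repeats for the write direction and
 * for the MMX registers below.
 */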
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
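/*
 * Worked example for the bit-test adjustment above: BT mem, reg with a
 * 4-byte operand and a source bit offset of 100 gives mask = ~31, so
 * sv = 96 and the effective address advances by 96 >> 3 == 12 bytes; the
 * remaining in-word offset becomes 100 & 31 == 4, i.e. bit 4 of the dword
 * at mem+12, matching hardware behaviour.
 */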
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
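/*
 * mem_read caches what has already been read so that when emulation of the
 * same instruction is restarted (for example after an exit to userspace),
 * earlier reads are replayed from the cache rather than re-issued against
 * guest memory.
 */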
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
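/*
 * For REP INS this refill batches the port reads: with size == 2 and a
 * repeat count in RCX, up to min3(room left in the destination page,
 * sizeof(rc->data) / 2, count) units are fetched by a single
 * ->pio_in_emulated() call and then drained from rc->data on subsequent
 * iterations of the instruction.
 */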
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
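/*
 * Note the OP_MEM path: when the instruction carried a LOCK prefix, the
 * write is performed as a compare-and-exchange against the operand's
 * original value, so a concurrent guest update of the same location is
 * detected instead of being overwritten blindly.
 */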
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
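/*
 * This follows CMPXCHG8B semantics: EDX:EAX is compared with the 64-bit
 * memory operand; on mismatch, EDX:EAX is loaded from memory and ZF is
 * cleared, while on match, ECX:EBX is stored to the operand (via the
 * dst.val64 writeback) and ZF is set.
 */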
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor-specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat app will #UD!  While
	 * this behaviour could be fixed (by emulating) to match the AMD
	 * response, AMD CPUs can't be made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
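/*
 * The selector arithmetic above follows the architected MSR_STAR layout:
 * bits 47:32 hold the SYSCALL CS selector, and the SS selector is defined
 * as that value + 8, which is why ss_sel = cs_sel + 8 needs no separate
 * MSR read.
 */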
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}
2387 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2389 const struct x86_emulate_ops *ops = ctxt->ops;
2390 struct desc_struct cs, ss;
2391 u64 msr_data, rcx, rdx;
2393 u16 cs_sel = 0, ss_sel = 0;
2395 /* inject #GP if in real mode or Virtual 8086 mode */
2396 if (ctxt->mode == X86EMUL_MODE_REAL ||
2397 ctxt->mode == X86EMUL_MODE_VM86)
2398 return emulate_gp(ctxt, 0);
2400 setup_syscalls_segments(ctxt, &cs, &ss);
2402 if ((ctxt->rex_prefix & 0x8) != 0x0)
2403 usermode = X86EMUL_MODE_PROT64;
2405 usermode = X86EMUL_MODE_PROT32;
2407 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2408 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2412 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2413 switch (usermode) {
2414 case X86EMUL_MODE_PROT32:
2415 cs_sel = (u16)(msr_data + 16);
2416 if ((msr_data & 0xfffc) == 0x0)
2417 return emulate_gp(ctxt, 0);
2418 ss_sel = (u16)(msr_data + 24);
2419 rcx = (u32)rcx;
2420 rdx = (u32)rdx;
2421 break;
2422 case X86EMUL_MODE_PROT64:
2423 cs_sel = (u16)(msr_data + 32);
2424 if (msr_data == 0x0)
2425 return emulate_gp(ctxt, 0);
2426 ss_sel = cs_sel + 8;
2427 cs.d = 0;
2428 cs.l = 1;
2429 if (is_noncanonical_address(rcx) ||
2430 is_noncanonical_address(rdx))
2431 return emulate_gp(ctxt, 0);
2432 break;
2433 }
2434 cs_sel |= SELECTOR_RPL_MASK;
2435 ss_sel |= SELECTOR_RPL_MASK;
2437 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2438 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2440 ctxt->_eip = rdx;
2441 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2443 return X86EMUL_CONTINUE;
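/*
 * SYSEXIT derives the user selectors from IA32_SYSENTER_CS: CS + 16
 * (32-bit) or CS + 32 (64-bit), with SS at CS + 8 in both cases, and
 * forces RPL 3 via SELECTOR_RPL_MASK. The return RIP comes from RDX
 * and the user RSP from RCX, hence the canonicality checks above.
 */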
2446 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2447 {
2448 int iopl;
2449 if (ctxt->mode == X86EMUL_MODE_REAL)
2450 return false;
2451 if (ctxt->mode == X86EMUL_MODE_VM86)
2452 return true;
2453 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2454 return ctxt->ops->cpl(ctxt) > iopl;
2457 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2460 const struct x86_emulate_ops *ops = ctxt->ops;
2461 struct desc_struct tr_seg;
2464 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2465 unsigned mask = (1 << len) - 1;
2468 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2469 if (!tr_seg.p)
2470 return false;
2471 if (desc_limit_scaled(&tr_seg) < 103)
2472 return false;
2473 base = get_desc_base(&tr_seg);
2474 #ifdef CONFIG_X86_64
2475 base |= ((u64)base3) << 32;
2476 #endif
2477 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2478 if (r != X86EMUL_CONTINUE)
2479 return false;
2480 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2481 return false;
2482 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2483 if (r != X86EMUL_CONTINUE)
2484 return false;
2485 if ((perm >> bit_idx) & mask)
2486 return false;
2488 return true;
2490 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2491 u16 port, u16 len)
2492 {
2493 if (ctxt->perm_ok)
2494 return true;
2496 if (emulator_bad_iopl(ctxt))
2497 if (!emulator_io_port_access_allowed(ctxt, port, len))
2498 return false;
2500 ctxt->perm_ok = true;
2502 return true;
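/*
 * The check above implements the hardware I/O permission scheme: the
 * TSS must be present with a limit of at least 103 bytes, the word at
 * offset 102 points to the I/O permission bitmap, and an access is
 * allowed only if all 'len' bits for the port range are clear. Two
 * bytes are read so a port range spanning a byte boundary is still
 * checked correctly.
 */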
2505 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2506 struct tss_segment_16 *tss)
2508 tss->ip = ctxt->_eip;
2509 tss->flag = ctxt->eflags;
2510 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2511 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2512 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2513 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2514 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2515 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2516 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2517 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2519 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2520 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2521 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2522 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2523 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2526 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2527 struct tss_segment_16 *tss)
2528 {
2529 int ret;
2530 u8 cpl;
2532 ctxt->_eip = tss->ip;
2533 ctxt->eflags = tss->flag | 2;
2534 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2535 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2536 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2537 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2538 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2539 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2540 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2541 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2544 * SDM says that segment selectors are loaded before segment
2545 * descriptors.
2546 */
2547 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2548 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2549 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2550 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2551 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2553 cpl = tss->cs & 3;
2555 /*
2556 * Now load segment descriptors. If a fault happens at this stage,
2557 * it is handled in the context of the new task.
2558 */
2559 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2560 true, NULL);
2561 if (ret != X86EMUL_CONTINUE)
2562 return ret;
2563 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2564 true, NULL);
2565 if (ret != X86EMUL_CONTINUE)
2566 return ret;
2567 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2568 true, NULL);
2569 if (ret != X86EMUL_CONTINUE)
2570 return ret;
2571 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2572 true, NULL);
2573 if (ret != X86EMUL_CONTINUE)
2574 return ret;
2575 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2576 true, NULL);
2577 if (ret != X86EMUL_CONTINUE)
2578 return ret;
2580 return X86EMUL_CONTINUE;
2583 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2584 u16 tss_selector, u16 old_tss_sel,
2585 ulong old_tss_base, struct desc_struct *new_desc)
2587 const struct x86_emulate_ops *ops = ctxt->ops;
2588 struct tss_segment_16 tss_seg;
2589 int ret;
2590 u32 new_tss_base = get_desc_base(new_desc);
2592 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2593 &ctxt->exception);
2594 if (ret != X86EMUL_CONTINUE)
2595 /* FIXME: need to provide precise fault address */
2596 return ret;
2598 save_state_to_tss16(ctxt, &tss_seg);
2600 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2601 &ctxt->exception);
2602 if (ret != X86EMUL_CONTINUE)
2603 /* FIXME: need to provide precise fault address */
2604 return ret;
2606 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2607 &ctxt->exception);
2608 if (ret != X86EMUL_CONTINUE)
2609 /* FIXME: need to provide precise fault address */
2610 return ret;
2612 if (old_tss_sel != 0xffff) {
2613 tss_seg.prev_task_link = old_tss_sel;
2615 ret = ops->write_std(ctxt, new_tss_base,
2616 &tss_seg.prev_task_link,
2617 sizeof tss_seg.prev_task_link,
2618 &ctxt->exception);
2619 if (ret != X86EMUL_CONTINUE)
2620 /* FIXME: need to provide precise fault address */
2621 return ret;
2622 }
2624 return load_state_from_tss16(ctxt, &tss_seg);
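/*
 * A 16-bit task switch is thus: save live state into the old TSS,
 * read the new one, optionally chain the old TR selector into the new
 * TSS's back link (for CALL/gate switches, so IRET can return), and
 * finally load CPU state from the new TSS image.
 */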
2627 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2628 struct tss_segment_32 *tss)
2630 /* CR3 and the LDT selector are intentionally not saved */
2631 tss->eip = ctxt->_eip;
2632 tss->eflags = ctxt->eflags;
2633 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2634 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2635 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2636 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2637 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2638 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2639 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2640 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2642 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2643 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2644 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2645 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2646 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2647 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2650 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2651 struct tss_segment_32 *tss)
2652 {
2653 int ret;
2654 u8 cpl;
2656 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2657 return emulate_gp(ctxt, 0);
2658 ctxt->_eip = tss->eip;
2659 ctxt->eflags = tss->eflags | 2;
2661 /* General purpose registers */
2662 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2663 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2664 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2665 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2666 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2667 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2668 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2669 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2672 * SDM says that segment selectors are loaded before segment
2673 * descriptors. This is important because CPL checks will
2674 * use CS.RPL.
2675 */
2676 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2677 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2678 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2679 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2680 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2681 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2682 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2685 * If we're switching between Protected Mode and VM86, we need to make
2686 * sure to update the mode before loading the segment descriptors so
2687 * that the selectors are interpreted correctly.
2689 if (ctxt->eflags & X86_EFLAGS_VM) {
2690 ctxt->mode = X86EMUL_MODE_VM86;
2691 cpl = 3;
2692 } else {
2693 ctxt->mode = X86EMUL_MODE_PROT32;
2694 cpl = tss->cs & 3;
2695 }
2697 /*
2698 * Now load segment descriptors. If a fault happens at this stage,
2699 * it is handled in the context of the new task.
2700 */
2701 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2702 cpl, true, NULL);
2703 if (ret != X86EMUL_CONTINUE)
2704 return ret;
2705 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2706 true, NULL);
2707 if (ret != X86EMUL_CONTINUE)
2708 return ret;
2709 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2710 true, NULL);
2711 if (ret != X86EMUL_CONTINUE)
2712 return ret;
2713 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2714 true, NULL);
2715 if (ret != X86EMUL_CONTINUE)
2716 return ret;
2717 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2718 true, NULL);
2719 if (ret != X86EMUL_CONTINUE)
2720 return ret;
2721 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2722 true, NULL);
2723 if (ret != X86EMUL_CONTINUE)
2724 return ret;
2725 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2726 true, NULL);
2727 if (ret != X86EMUL_CONTINUE)
2728 return ret;
2730 return X86EMUL_CONTINUE;
2733 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2734 u16 tss_selector, u16 old_tss_sel,
2735 ulong old_tss_base, struct desc_struct *new_desc)
2737 const struct x86_emulate_ops *ops = ctxt->ops;
2738 struct tss_segment_32 tss_seg;
2739 int ret;
2740 u32 new_tss_base = get_desc_base(new_desc);
2741 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2742 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2744 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2745 &ctxt->exception);
2746 if (ret != X86EMUL_CONTINUE)
2747 /* FIXME: need to provide precise fault address */
2748 return ret;
2750 save_state_to_tss32(ctxt, &tss_seg);
2752 /* Only GP registers and segment selectors are saved */
2753 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2754 ldt_sel_offset - eip_offset, &ctxt->exception);
2755 if (ret != X86EMUL_CONTINUE)
2756 /* FIXME: need to provide precise fault address */
2757 return ret;
2759 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2760 &ctxt->exception);
2761 if (ret != X86EMUL_CONTINUE)
2762 /* FIXME: need to provide precise fault address */
2763 return ret;
2765 if (old_tss_sel != 0xffff) {
2766 tss_seg.prev_task_link = old_tss_sel;
2768 ret = ops->write_std(ctxt, new_tss_base,
2769 &tss_seg.prev_task_link,
2770 sizeof tss_seg.prev_task_link,
2771 &ctxt->exception);
2772 if (ret != X86EMUL_CONTINUE)
2773 /* FIXME: need to provide precise fault address */
2774 return ret;
2775 }
2777 return load_state_from_tss32(ctxt, &tss_seg);
2780 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2781 u16 tss_selector, int idt_index, int reason,
2782 bool has_error_code, u32 error_code)
2784 const struct x86_emulate_ops *ops = ctxt->ops;
2785 struct desc_struct curr_tss_desc, next_tss_desc;
2787 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2788 ulong old_tss_base =
2789 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2793 /* FIXME: old_tss_base == ~0 ? */
2795 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2796 if (ret != X86EMUL_CONTINUE)
2797 return ret;
2798 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2799 if (ret != X86EMUL_CONTINUE)
2800 return ret;
2802 /* FIXME: check that next_tss_desc is tss */
2804 /*
2805 * Check privileges. The three cases are task switch caused by...
2807 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2808 * 2. Exception/IRQ/iret: No check is performed
2809 * 3. jmp/call to TSS: Check against DPL of the TSS
2810 */
2811 if (reason == TASK_SWITCH_GATE) {
2812 if (idt_index != -1) {
2813 /* Software interrupts */
2814 struct desc_struct task_gate_desc;
2817 ret = read_interrupt_descriptor(ctxt, idt_index,
2819 if (ret != X86EMUL_CONTINUE)
2822 dpl = task_gate_desc.dpl;
2823 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2824 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2826 } else if (reason != TASK_SWITCH_IRET) {
2827 int dpl = next_tss_desc.dpl;
2828 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2829 return emulate_gp(ctxt, tss_selector);
2833 desc_limit = desc_limit_scaled(&next_tss_desc);
2834 if (!next_tss_desc.p ||
2835 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2836 desc_limit < 0x2b)) {
2837 return emulate_ts(ctxt, tss_selector & 0xfffc);
2840 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2841 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2842 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2845 if (reason == TASK_SWITCH_IRET)
2846 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2848 /* set back link to prev task only if NT bit is set in eflags;
2849 note that old_tss_sel is not used after this point */
2850 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2851 old_tss_sel = 0xffff;
2853 if (next_tss_desc.type & 8)
2854 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2855 old_tss_base, &next_tss_desc);
2857 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2858 old_tss_base, &next_tss_desc);
2859 if (ret != X86EMUL_CONTINUE)
2860 return ret;
2862 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2863 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2865 if (reason != TASK_SWITCH_IRET) {
2866 next_tss_desc.type |= (1 << 1); /* set busy flag */
2867 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2870 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2871 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2873 if (has_error_code) {
2874 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2875 ctxt->lock_prefix = 0;
2876 ctxt->src.val = (unsigned long) error_code;
2877 ret = em_push(ctxt);
2878 }
2880 return ret;
2883 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2884 u16 tss_selector, int idt_index, int reason,
2885 bool has_error_code, u32 error_code)
2889 invalidate_registers(ctxt);
2890 ctxt->_eip = ctxt->eip;
2891 ctxt->dst.type = OP_NONE;
2893 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2894 has_error_code, error_code);
2896 if (rc == X86EMUL_CONTINUE) {
2897 ctxt->eip = ctxt->_eip;
2898 writeback_registers(ctxt);
2901 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
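/*
 * In short, a task switch here: checks DPL against the task gate or
 * TSS selector (except for IRET/IRQ), verifies the new TSS is present
 * and large enough, clears the old descriptor's busy bit for JMP/IRET,
 * switches the TSS image, sets NT and the back link for CALL/gate
 * switches, marks the new TSS busy, sets CR0.TS, loads TR, and pushes
 * any error code onto the new task's stack.
 */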
2904 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2905 struct operand *op)
2906 {
2907 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2909 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2910 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
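/*
 * String instructions advance RSI/RDI by one element per iteration;
 * with EFLAGS.DF set they move backwards, hence the negated count.
 * op->count lets a repeated operation step several elements at once.
 */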
2913 static int em_das(struct x86_emulate_ctxt *ctxt)
2914 {
2915 u8 al, old_al;
2916 bool af, cf, old_cf;
2918 cf = ctxt->eflags & X86_EFLAGS_CF;
2919 al = ctxt->dst.val;
2921 old_al = al;
2922 old_cf = cf;
2923 cf = false;
2924 af = ctxt->eflags & X86_EFLAGS_AF;
2925 if ((al & 0x0f) > 9 || af) {
2926 al -= 6;
2927 cf = old_cf | (al >= 250);
2928 af = true;
2929 } else {
2930 af = false;
2931 }
2932 if (old_al > 0x99 || old_cf) {
2933 al -= 0x60;
2934 cf = true;
2935 }
2937 ctxt->dst.val = al;
2938 /* Set PF, ZF, SF */
2939 ctxt->src.type = OP_IMM;
2940 ctxt->src.val = 0;
2941 ctxt->src.bytes = 1;
2942 fastop(ctxt, em_or);
2943 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2944 if (cf)
2945 ctxt->eflags |= X86_EFLAGS_CF;
2946 if (af)
2947 ctxt->eflags |= X86_EFLAGS_AF;
2948 return X86EMUL_CONTINUE;
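/*
 * Worked example: after 0x23 - 0x07 = 0x1C, DAS sees a low nibble of
 * 0xC > 9, subtracts 6 to give AL = 0x16 and sets AF, yielding the
 * correct packed-BCD result 23 - 07 = 16.
 */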
2951 static int em_aam(struct x86_emulate_ctxt *ctxt)
2952 {
2953 u8 al, ah;
2955 if (ctxt->src.val == 0)
2956 return emulate_de(ctxt);
2958 al = ctxt->dst.val & 0xff;
2959 ah = al / ctxt->src.val;
2960 al %= ctxt->src.val;
2962 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2964 /* Set PF, ZF, SF */
2965 ctxt->src.type = OP_IMM;
2966 ctxt->src.val = 0;
2967 ctxt->src.bytes = 1;
2968 fastop(ctxt, em_or);
2970 return X86EMUL_CONTINUE;
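/*
 * AAM divides AL by the immediate (10 by default): AL = 0x3F (63)
 * becomes AH = 6, AL = 3. An immediate of zero raises #DE, checked
 * above before the division.
 */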
2973 static int em_aad(struct x86_emulate_ctxt *ctxt)
2975 u8 al = ctxt->dst.val & 0xff;
2976 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2978 al = (al + (ah * ctxt->src.val)) & 0xff;
2980 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2982 /* Set PF, ZF, SF */
2983 ctxt->src.type = OP_IMM;
2984 ctxt->src.val = 0;
2985 ctxt->src.bytes = 1;
2986 fastop(ctxt, em_or);
2988 return X86EMUL_CONTINUE;
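/*
 * AAD is the inverse of AAM: AL = AH * imm + AL, and AH is cleared by
 * the 0xffff0000 mask above. E.g. AH = 2, AL = 5 with the default
 * base 10 gives AL = 25 (0x19).
 */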
2991 static int em_call(struct x86_emulate_ctxt *ctxt)
2994 long rel = ctxt->src.val;
2996 ctxt->src.val = (unsigned long)ctxt->_eip;
2997 rc = jmp_rel(ctxt, rel);
2998 if (rc != X86EMUL_CONTINUE)
2999 return rc;
3000 return em_push(ctxt);
3003 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3008 struct desc_struct old_desc, new_desc;
3009 const struct x86_emulate_ops *ops = ctxt->ops;
3010 int cpl = ctxt->ops->cpl(ctxt);
3012 old_eip = ctxt->_eip;
3013 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3015 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3016 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3017 &new_desc);
3018 if (rc != X86EMUL_CONTINUE)
3019 return rc;
3021 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3022 if (rc != X86EMUL_CONTINUE)
3023 goto fail;
3025 ctxt->src.val = old_cs;
3026 rc = em_push(ctxt);
3027 if (rc != X86EMUL_CONTINUE)
3028 goto fail;
3030 ctxt->src.val = old_eip;
3031 rc = em_push(ctxt);
3032 /* If we failed, we tainted the memory, but the very least we should
3033 restore cs */
3034 if (rc != X86EMUL_CONTINUE)
3035 goto fail;
3036 return rc;
3037 fail:
3038 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3039 return rc;
3043 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3044 {
3045 int rc;
3046 unsigned long eip;
3048 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3049 if (rc != X86EMUL_CONTINUE)
3050 return rc;
3051 rc = assign_eip_near(ctxt, eip);
3052 if (rc != X86EMUL_CONTINUE)
3053 return rc;
3054 rsp_increment(ctxt, ctxt->src.val);
3055 return X86EMUL_CONTINUE;
3058 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3060 /* Write back the register source. */
3061 ctxt->src.val = ctxt->dst.val;
3062 write_register_operand(&ctxt->src);
3064 /* Write back the memory destination with implicit LOCK prefix. */
3065 ctxt->dst.val = ctxt->src.orig_val;
3066 ctxt->lock_prefix = 1;
3067 return X86EMUL_CONTINUE;
3070 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3072 ctxt->dst.val = ctxt->src2.val;
3073 return fastop(ctxt, em_imul);
3076 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3078 ctxt->dst.type = OP_REG;
3079 ctxt->dst.bytes = ctxt->src.bytes;
3080 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3081 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3083 return X86EMUL_CONTINUE;
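/*
 * The expression above broadcasts the sign bit of the accumulator:
 * (src >> (bits - 1)) is 0 or 1, so ~((x) - 1) is 0 or all ones,
 * which is exactly what CWD/CDQ/CQO store into DX/EDX/RDX.
 */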
3086 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3090 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3091 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3092 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3093 return X86EMUL_CONTINUE;
3096 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3100 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3101 return emulate_gp(ctxt, 0);
3102 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3104 return X86EMUL_CONTINUE;
3107 static int em_mov(struct x86_emulate_ctxt *ctxt)
3109 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3110 return X86EMUL_CONTINUE;
3113 #define FFL(x) bit(X86_FEATURE_##x)
3115 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3117 u32 ebx, ecx, edx, eax = 1;
3118 u16 tmp;
3120 /*
3121 * Check MOVBE is set in the guest-visible CPUID leaf.
3122 */
3123 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3124 if (!(ecx & FFL(MOVBE)))
3125 return emulate_ud(ctxt);
3127 switch (ctxt->op_bytes) {
3128 case 2:
3129 /*
3130 * From MOVBE definition: "...When the operand size is 16 bits,
3131 * the upper word of the destination register remains unchanged
3132 * ..."
3133 *
3134 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3135 * rules, so we have to do the operation almost by hand.
3136 */
3137 tmp = (u16)ctxt->src.val;
3138 ctxt->dst.val &= ~0xffffUL;
3139 ctxt->dst.val |= (unsigned long)swab16(tmp);
3140 break;
3141 case 4:
3142 ctxt->dst.val = swab32((u32)ctxt->src.val);
3143 break;
3144 case 8:
3145 ctxt->dst.val = swab64(ctxt->src.val);
3146 break;
3147 default:
3148 BUG();
3149 }
3150 return X86EMUL_CONTINUE;
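/*
 * Example: a 16-bit MOVBE of 0x1234 stores swab16(0x1234) = 0x3412
 * into the low word while, per the quoted definition, the upper bytes
 * of the destination register are left untouched.
 */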
3153 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3155 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3156 return emulate_gp(ctxt, 0);
3158 /* Disable writeback. */
3159 ctxt->dst.type = OP_NONE;
3160 return X86EMUL_CONTINUE;
3163 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3167 if (ctxt->mode == X86EMUL_MODE_PROT64)
3168 val = ctxt->src.val & ~0ULL;
3169 else
3170 val = ctxt->src.val & ~0U;
3172 /* #UD condition is already handled. */
3173 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3174 return emulate_gp(ctxt, 0);
3176 /* Disable writeback. */
3177 ctxt->dst.type = OP_NONE;
3178 return X86EMUL_CONTINUE;
3181 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3185 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3186 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3187 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3188 return emulate_gp(ctxt, 0);
3190 return X86EMUL_CONTINUE;
3193 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3197 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3198 return emulate_gp(ctxt, 0);
3200 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3201 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3202 return X86EMUL_CONTINUE;
3205 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3207 if (ctxt->modrm_reg > VCPU_SREG_GS)
3208 return emulate_ud(ctxt);
3210 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3211 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3212 ctxt->dst.bytes = 2;
3213 return X86EMUL_CONTINUE;
3216 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3218 u16 sel = ctxt->src.val;
3220 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3221 return emulate_ud(ctxt);
3223 if (ctxt->modrm_reg == VCPU_SREG_SS)
3224 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3226 /* Disable writeback. */
3227 ctxt->dst.type = OP_NONE;
3228 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3231 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3233 u16 sel = ctxt->src.val;
3235 /* Disable writeback. */
3236 ctxt->dst.type = OP_NONE;
3237 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3240 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3242 u16 sel = ctxt->src.val;
3244 /* Disable writeback. */
3245 ctxt->dst.type = OP_NONE;
3246 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3249 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3254 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3255 if (rc == X86EMUL_CONTINUE)
3256 ctxt->ops->invlpg(ctxt, linear);
3257 /* Disable writeback. */
3258 ctxt->dst.type = OP_NONE;
3259 return X86EMUL_CONTINUE;
3262 static int em_clts(struct x86_emulate_ctxt *ctxt)
3263 {
3264 ulong cr0;
3266 cr0 = ctxt->ops->get_cr(ctxt, 0);
3267 cr0 &= ~X86_CR0_TS;
3268 ctxt->ops->set_cr(ctxt, 0, cr0);
3269 return X86EMUL_CONTINUE;
3272 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3274 int rc = ctxt->ops->fix_hypercall(ctxt);
3276 if (rc != X86EMUL_CONTINUE)
3277 return rc;
3279 /* Let the processor re-execute the fixed hypercall */
3280 ctxt->_eip = ctxt->eip;
3281 /* Disable writeback. */
3282 ctxt->dst.type = OP_NONE;
3283 return X86EMUL_CONTINUE;
3286 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3287 void (*get)(struct x86_emulate_ctxt *ctxt,
3288 struct desc_ptr *ptr))
3290 struct desc_ptr desc_ptr;
3292 if (ctxt->mode == X86EMUL_MODE_PROT64)
3293 ctxt->op_bytes = 8;
3294 get(ctxt, &desc_ptr);
3295 if (ctxt->op_bytes == 2) {
3296 ctxt->op_bytes = 4;
3297 desc_ptr.address &= 0x00ffffff;
3298 }
3299 /* Disable writeback. */
3300 ctxt->dst.type = OP_NONE;
3301 return segmented_write(ctxt, ctxt->dst.addr.mem,
3302 &desc_ptr, 2 + ctxt->op_bytes);
3305 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3307 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3310 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3312 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3315 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3317 struct desc_ptr desc_ptr;
3320 if (ctxt->mode == X86EMUL_MODE_PROT64)
3321 ctxt->op_bytes = 8;
3322 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3323 &desc_ptr.size, &desc_ptr.address,
3324 ctxt->op_bytes);
3325 if (rc != X86EMUL_CONTINUE)
3326 return rc;
3327 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3328 /* Disable writeback. */
3329 ctxt->dst.type = OP_NONE;
3330 return X86EMUL_CONTINUE;
3333 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3337 rc = ctxt->ops->fix_hypercall(ctxt);
3339 /* Disable writeback. */
3340 ctxt->dst.type = OP_NONE;
3341 return rc;
3344 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3346 struct desc_ptr desc_ptr;
3349 if (ctxt->mode == X86EMUL_MODE_PROT64)
3350 ctxt->op_bytes = 8;
3351 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3352 &desc_ptr.size, &desc_ptr.address,
3353 ctxt->op_bytes);
3354 if (rc != X86EMUL_CONTINUE)
3355 return rc;
3356 ctxt->ops->set_idt(ctxt, &desc_ptr);
3357 /* Disable writeback. */
3358 ctxt->dst.type = OP_NONE;
3359 return X86EMUL_CONTINUE;
3362 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3364 if (ctxt->dst.type == OP_MEM)
3365 ctxt->dst.bytes = 2;
3366 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3367 return X86EMUL_CONTINUE;
3370 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3372 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3373 | (ctxt->src.val & 0x0f));
3374 ctxt->dst.type = OP_NONE;
3375 return X86EMUL_CONTINUE;
3378 static int em_loop(struct x86_emulate_ctxt *ctxt)
3380 int rc = X86EMUL_CONTINUE;
3382 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3383 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3384 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3385 rc = jmp_rel(ctxt, ctxt->src.val);
3390 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3392 int rc = X86EMUL_CONTINUE;
3394 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3395 rc = jmp_rel(ctxt, ctxt->src.val);
3400 static int em_in(struct x86_emulate_ctxt *ctxt)
3402 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3403 &ctxt->dst.val))
3404 return X86EMUL_IO_NEEDED;
3406 return X86EMUL_CONTINUE;
3409 static int em_out(struct x86_emulate_ctxt *ctxt)
3411 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3412 &ctxt->src.val, 1);
3413 /* Disable writeback. */
3414 ctxt->dst.type = OP_NONE;
3415 return X86EMUL_CONTINUE;
3418 static int em_cli(struct x86_emulate_ctxt *ctxt)
3420 if (emulator_bad_iopl(ctxt))
3421 return emulate_gp(ctxt, 0);
3423 ctxt->eflags &= ~X86_EFLAGS_IF;
3424 return X86EMUL_CONTINUE;
3427 static int em_sti(struct x86_emulate_ctxt *ctxt)
3429 if (emulator_bad_iopl(ctxt))
3430 return emulate_gp(ctxt, 0);
3432 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3433 ctxt->eflags |= X86_EFLAGS_IF;
3434 return X86EMUL_CONTINUE;
3437 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3439 u32 eax, ebx, ecx, edx;
3441 eax = reg_read(ctxt, VCPU_REGS_RAX);
3442 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3443 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3444 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3445 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3446 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3447 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3448 return X86EMUL_CONTINUE;
3451 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3455 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3456 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3458 ctxt->eflags &= ~0xffUL;
3459 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3460 return X86EMUL_CONTINUE;
3463 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3465 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3466 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3467 return X86EMUL_CONTINUE;
3470 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3472 switch (ctxt->op_bytes) {
3473 #ifdef CONFIG_X86_64
3474 case 8:
3475 asm("bswap %0" : "+r"(ctxt->dst.val));
3476 break;
3477 #endif
3478 default:
3479 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3480 break;
3481 }
3482 return X86EMUL_CONTINUE;
3485 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3487 /* emulating clflush regardless of cpuid */
3488 return X86EMUL_CONTINUE;
3491 static bool valid_cr(int nr)
3492 {
3493 switch (nr) {
3494 case 0:
3495 case 2 ... 4:
3496 case 8:
3497 return true;
3498 default:
3499 return false;
3500 }
3501 }
3503 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3505 if (!valid_cr(ctxt->modrm_reg))
3506 return emulate_ud(ctxt);
3508 return X86EMUL_CONTINUE;
3511 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3513 u64 new_val = ctxt->src.val64;
3514 int cr = ctxt->modrm_reg;
3517 static u64 cr_reserved_bits[] = {
3518 0xffffffff00000000ULL,
3519 0, 0, 0, /* CR3 checked later */
3520 CR4_RESERVED_BITS,
3521 0, 0, 0,
3522 CR8_RESERVED_BITS,
3523 };
3525 if (!valid_cr(cr))
3526 return emulate_ud(ctxt);
3528 if (new_val & cr_reserved_bits[cr])
3529 return emulate_gp(ctxt, 0);
3531 switch (cr) {
3532 case 0: {
3533 u64 cr4;
3534 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3535 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3536 return emulate_gp(ctxt, 0);
3538 cr4 = ctxt->ops->get_cr(ctxt, 4);
3539 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3541 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3542 !(cr4 & X86_CR4_PAE))
3543 return emulate_gp(ctxt, 0);
3545 break;
3546 }
3547 case 3: {
3548 u64 rsvd = 0;
3550 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3551 if (efer & EFER_LMA)
3552 rsvd = CR3_L_MODE_RESERVED_BITS;
3554 if (new_val & rsvd)
3555 return emulate_gp(ctxt, 0);
3557 break;
3558 }
3559 case 4: {
3560 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3562 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3563 return emulate_gp(ctxt, 0);
3565 break;
3566 }
3567 }
3569 return X86EMUL_CONTINUE;
3572 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3576 ctxt->ops->get_dr(ctxt, 7, &dr7);
3578 /* Check if DR7.Global_Enable is set */
3579 return dr7 & (1 << 13);
3582 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3584 int dr = ctxt->modrm_reg;
3588 return emulate_ud(ctxt);
3590 cr4 = ctxt->ops->get_cr(ctxt, 4);
3591 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3592 return emulate_ud(ctxt);
3594 if (check_dr7_gd(ctxt)) {
3595 ulong dr6;
3597 ctxt->ops->get_dr(ctxt, 6, &dr6);
3598 dr6 &= ~15;
3599 dr6 |= DR6_BD | DR6_RTM;
3600 ctxt->ops->set_dr(ctxt, 6, dr6);
3601 return emulate_db(ctxt);
3604 return X86EMUL_CONTINUE;
3607 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3609 u64 new_val = ctxt->src.val64;
3610 int dr = ctxt->modrm_reg;
3612 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3613 return emulate_gp(ctxt, 0);
3615 return check_dr_read(ctxt);
3618 static int check_svme(struct x86_emulate_ctxt *ctxt)
3622 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3624 if (!(efer & EFER_SVME))
3625 return emulate_ud(ctxt);
3627 return X86EMUL_CONTINUE;
3630 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3632 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3634 /* Valid physical address? */
3635 if (rax & 0xffff000000000000ULL)
3636 return emulate_gp(ctxt, 0);
3638 return check_svme(ctxt);
3641 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3643 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3645 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3646 return emulate_ud(ctxt);
3648 return X86EMUL_CONTINUE;
3651 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3653 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3654 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3656 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3657 ctxt->ops->check_pmc(ctxt, rcx))
3658 return emulate_gp(ctxt, 0);
3660 return X86EMUL_CONTINUE;
3663 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3665 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3666 if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3667 return emulate_gp(ctxt, 0);
3669 return X86EMUL_CONTINUE;
3672 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3674 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3675 if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3676 return emulate_gp(ctxt, 0);
3678 return X86EMUL_CONTINUE;
3681 #define D(_y) { .flags = (_y) }
3682 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3683 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3684 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3685 #define N D(NotImpl)
3686 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3687 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3688 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3689 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3690 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3691 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3692 #define II(_f, _e, _i) \
3693 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3694 #define IIP(_f, _e, _i, _p) \
3695 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3696 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3697 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3699 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3700 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3701 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3702 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3703 #define I2bvIP(_f, _e, _i, _p) \
3704 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3706 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3707 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3708 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
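/*
 * Reading the tables below: each macro builds one decode-table entry.
 * D/I/F describe a plain, emulated or fast-path op; G/GD/E/GP defer to
 * a group, mod-split group dual, FPU escape or mandatory-prefix table.
 * The 2bv variants emit a byte-sized entry followed by a word/long
 * one, so e.g. F6ALU(Lock, em_add) expands to the six r/m-reg,
 * reg-r/m and acc-imm forms that make up a classic ALU opcode row.
 */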
3710 static const struct opcode group7_rm0[] = {
3712 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3716 static const struct opcode group7_rm1[] = {
3717 DI(SrcNone | Priv, monitor),
3718 DI(SrcNone | Priv, mwait),
3722 static const struct opcode group7_rm3[] = {
3723 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3724 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3725 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3726 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3727 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3728 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3729 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3730 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3733 static const struct opcode group7_rm7[] = {
3735 DIP(SrcNone, rdtscp, check_rdtsc),
3739 static const struct opcode group1[] = {
3740 F(Lock, em_add),
3741 F(Lock | PageTable, em_or),
3742 F(Lock, em_adc),
3743 F(Lock, em_sbb),
3744 F(Lock | PageTable, em_and),
3745 F(Lock, em_sub),
3746 F(Lock, em_xor),
3747 F(NoWrite, em_cmp),
3748 };
3750 static const struct opcode group1A[] = {
3751 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3754 static const struct opcode group2[] = {
3755 F(DstMem | ModRM, em_rol),
3756 F(DstMem | ModRM, em_ror),
3757 F(DstMem | ModRM, em_rcl),
3758 F(DstMem | ModRM, em_rcr),
3759 F(DstMem | ModRM, em_shl),
3760 F(DstMem | ModRM, em_shr),
3761 F(DstMem | ModRM, em_shl),
3762 F(DstMem | ModRM, em_sar),
3765 static const struct opcode group3[] = {
3766 F(DstMem | SrcImm | NoWrite, em_test),
3767 F(DstMem | SrcImm | NoWrite, em_test),
3768 F(DstMem | SrcNone | Lock, em_not),
3769 F(DstMem | SrcNone | Lock, em_neg),
3770 F(DstXacc | Src2Mem, em_mul_ex),
3771 F(DstXacc | Src2Mem, em_imul_ex),
3772 F(DstXacc | Src2Mem, em_div_ex),
3773 F(DstXacc | Src2Mem, em_idiv_ex),
3776 static const struct opcode group4[] = {
3777 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3778 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3782 static const struct opcode group5[] = {
3783 F(DstMem | SrcNone | Lock, em_inc),
3784 F(DstMem | SrcNone | Lock, em_dec),
3785 I(SrcMem | NearBranch, em_call_near_abs),
3786 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3787 I(SrcMem | NearBranch, em_jmp_abs),
3788 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3789 I(SrcMem | Stack, em_push), D(Undefined),
3792 static const struct opcode group6[] = {
3793 DI(Prot | DstMem, sldt),
3794 DI(Prot | DstMem, str),
3795 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3796 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3800 static const struct group_dual group7 = { {
3801 II(Mov | DstMem, em_sgdt, sgdt),
3802 II(Mov | DstMem, em_sidt, sidt),
3803 II(SrcMem | Priv, em_lgdt, lgdt),
3804 II(SrcMem | Priv, em_lidt, lidt),
3805 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3806 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3807 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3808 }, {
3809 EXT(0, group7_rm0),
3810 EXT(0, group7_rm1),
3811 N, EXT(0, group7_rm3),
3812 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3813 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3814 EXT(0, group7_rm7),
3815 } };
3817 static const struct opcode group8[] = {
3818 N, N, N, N,
3819 F(DstMem | SrcImmByte | NoWrite, em_bt),
3820 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3821 F(DstMem | SrcImmByte | Lock, em_btr),
3822 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3825 static const struct group_dual group9 = { {
3826 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3828 N, N, N, N, N, N, N, N,
3831 static const struct opcode group11[] = {
3832 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3836 static const struct gprefix pfx_0f_ae_7 = {
3837 I(SrcMem | ByteOp, em_clflush), N, N, N,
3840 static const struct group_dual group15 = { {
3841 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3843 N, N, N, N, N, N, N, N,
3846 static const struct gprefix pfx_0f_6f_0f_7f = {
3847 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3850 static const struct gprefix pfx_0f_2b = {
3851 I(0, em_mov), I(0, em_mov), N, N,
3854 static const struct gprefix pfx_0f_28_0f_29 = {
3855 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3858 static const struct gprefix pfx_0f_e7 = {
3859 N, I(Sse, em_mov), N, N,
3862 static const struct escape escape_d9 = { {
3863 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3872 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3876 N, N, N, N, N, N, N, N,
3878 N, N, N, N, N, N, N, N,
3880 N, N, N, N, N, N, N, N,
3883 static const struct escape escape_db = { {
3884 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3893 N, N, N, N, N, N, N, N,
3895 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3899 N, N, N, N, N, N, N, N,
3901 N, N, N, N, N, N, N, N,
3904 static const struct escape escape_dd = { {
3905 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3914 N, N, N, N, N, N, N, N,
3916 N, N, N, N, N, N, N, N,
3918 N, N, N, N, N, N, N, N,
3920 N, N, N, N, N, N, N, N,
3922 N, N, N, N, N, N, N, N,
3925 static const struct opcode opcode_table[256] = {
3927 F6ALU(Lock, em_add),
3928 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3929 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3931 F6ALU(Lock | PageTable, em_or),
3932 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3935 F6ALU(Lock, em_adc),
3936 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3937 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3939 F6ALU(Lock, em_sbb),
3940 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3941 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3943 F6ALU(Lock | PageTable, em_and), N, N,
3945 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3947 F6ALU(Lock, em_xor), N, N,
3949 F6ALU(NoWrite, em_cmp), N, N,
3951 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3953 X8(I(SrcReg | Stack, em_push)),
3955 X8(I(DstReg | Stack, em_pop)),
3957 I(ImplicitOps | Stack | No64, em_pusha),
3958 I(ImplicitOps | Stack | No64, em_popa),
3959 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3962 I(SrcImm | Mov | Stack, em_push),
3963 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3964 I(SrcImmByte | Mov | Stack, em_push),
3965 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3966 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3967 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3969 X16(D(SrcImmByte | NearBranch)),
3971 G(ByteOp | DstMem | SrcImm, group1),
3972 G(DstMem | SrcImm, group1),
3973 G(ByteOp | DstMem | SrcImm | No64, group1),
3974 G(DstMem | SrcImmByte, group1),
3975 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3976 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3978 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3979 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3980 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3981 D(ModRM | SrcMem | NoAccess | DstReg),
3982 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3985 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3987 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3988 I(SrcImmFAddr | No64, em_call_far), N,
3989 II(ImplicitOps | Stack, em_pushf, pushf),
3990 II(ImplicitOps | Stack, em_popf, popf),
3991 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3993 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3994 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3995 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3996 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3998 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3999 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4000 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4001 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
4003 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4005 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4007 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4008 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4009 I(ImplicitOps | NearBranch, em_ret),
4010 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4011 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4012 G(ByteOp, group11), G(0, group11),
4014 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4015 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4016 I(ImplicitOps | Stack, em_ret_far),
4017 D(ImplicitOps), DI(SrcImmByte, intn),
4018 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4020 G(Src2One | ByteOp, group2), G(Src2One, group2),
4021 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4022 I(DstAcc | SrcImmUByte | No64, em_aam),
4023 I(DstAcc | SrcImmUByte | No64, em_aad),
4024 F(DstAcc | ByteOp | No64, em_salc),
4025 I(DstAcc | SrcXLat | ByteOp, em_mov),
4027 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4029 X3(I(SrcImmByte | NearBranch, em_loop)),
4030 I(SrcImmByte | NearBranch, em_jcxz),
4031 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4032 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4034 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4035 I(SrcImmFAddr | No64, em_jmp_far),
4036 D(SrcImmByte | ImplicitOps | NearBranch),
4037 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4038 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4040 N, DI(ImplicitOps, icebp), N, N,
4041 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4042 G(ByteOp, group3), G(0, group3),
4044 D(ImplicitOps), D(ImplicitOps),
4045 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4046 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4049 static const struct opcode twobyte_table[256] = {
4051 G(0, group6), GD(0, &group7), N, N,
4052 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4053 II(ImplicitOps | Priv, em_clts, clts), N,
4054 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4055 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4057 N, N, N, N, N, N, N, N,
4058 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4059 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4061 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4062 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4063 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4065 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4068 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4069 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4070 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4073 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4074 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4075 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4076 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4077 I(ImplicitOps | EmulateOnUD, em_sysenter),
4078 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4080 N, N, N, N, N, N, N, N,
4082 X16(D(DstReg | SrcMem | ModRM)),
4084 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4089 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4094 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4096 X16(D(SrcImm | NearBranch)),
4098 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4100 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4101 II(ImplicitOps, em_cpuid, cpuid),
4102 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4103 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4104 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4106 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4107 DI(ImplicitOps, rsm),
4108 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4109 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4110 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4111 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4113 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4114 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4115 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4116 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4117 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4118 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4122 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4123 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4124 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4126 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4127 N, D(DstMem | SrcReg | ModRM | Mov),
4128 N, N, N, GD(0, &group9),
4130 X8(I(DstReg, em_bswap)),
4132 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4134 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4135 N, N, N, N, N, N, N, N,
4137 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4140 static const struct gprefix three_byte_0f_38_f0 = {
4141 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4144 static const struct gprefix three_byte_0f_38_f1 = {
4145 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4149 * Insns below are selected by the prefix, which is indexed by the third
4150 * opcode byte.
4151 */
4152 static const struct opcode opcode_map_0f_38[256] = {
4154 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4156 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4158 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4159 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4178 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4179 {
4180 unsigned size;
4182 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4183 if (size == 8)
4184 size = 4;
4185 return size;
4186 }
4188 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4189 unsigned size, bool sign_extension)
4190 {
4191 int rc = X86EMUL_CONTINUE;
4193 op->type = OP_IMM;
4194 op->bytes = size;
4195 op->addr.mem.ea = ctxt->_eip;
4196 /* NB. Immediates are sign-extended as necessary. */
4197 switch (op->bytes) {
4198 case 1:
4199 op->val = insn_fetch(s8, ctxt);
4200 break;
4201 case 2:
4202 op->val = insn_fetch(s16, ctxt);
4203 break;
4204 case 4:
4205 op->val = insn_fetch(s32, ctxt);
4206 break;
4207 case 8:
4208 op->val = insn_fetch(s64, ctxt);
4209 break;
4210 }
4211 if (!sign_extension) {
4212 switch (op->bytes) {
4213 case 1:
4214 op->val &= 0xff;
4215 break;
4216 case 2:
4217 op->val &= 0xffff;
4218 break;
4219 case 4:
4220 op->val &= 0xffffffff;
4221 break;
4222 }
4223 }
4224 done:
4225 return rc;
4226 }
4228 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4231 int rc = X86EMUL_CONTINUE;
4235 decode_register_operand(ctxt, op);
4238 rc = decode_imm(ctxt, op, 1, false);
4241 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4245 if (ctxt->d & BitOp)
4246 fetch_bit_operand(ctxt);
4247 op->orig_val = op->val;
4250 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4254 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4255 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4256 fetch_register_operand(op);
4257 op->orig_val = op->val;
4261 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4262 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4263 fetch_register_operand(op);
4264 op->orig_val = op->val;
4267 if (ctxt->d & ByteOp) {
4272 op->bytes = ctxt->op_bytes;
4273 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4274 fetch_register_operand(op);
4275 op->orig_val = op->val;
4279 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4281 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4282 op->addr.mem.seg = VCPU_SREG_ES;
4289 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4290 fetch_register_operand(op);
4294 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4297 rc = decode_imm(ctxt, op, 1, true);
4304 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4307 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4310 ctxt->memop.bytes = 1;
4311 if (ctxt->memop.type == OP_REG) {
4312 ctxt->memop.addr.reg = decode_register(ctxt,
4313 ctxt->modrm_rm, true);
4314 fetch_register_operand(&ctxt->memop);
4318 ctxt->memop.bytes = 2;
4321 ctxt->memop.bytes = 4;
4324 rc = decode_imm(ctxt, op, 2, false);
4327 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4331 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4333 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4334 op->addr.mem.seg = ctxt->seg_override;
4340 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4342 register_address(ctxt,
4343 reg_read(ctxt, VCPU_REGS_RBX) +
4344 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4345 op->addr.mem.seg = ctxt->seg_override;
4350 op->addr.mem.ea = ctxt->_eip;
4351 op->bytes = ctxt->op_bytes + 2;
4352 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4355 ctxt->memop.bytes = ctxt->op_bytes + 2;
4358 op->val = VCPU_SREG_ES;
4361 op->val = VCPU_SREG_CS;
4364 op->val = VCPU_SREG_SS;
4367 op->val = VCPU_SREG_DS;
4370 op->val = VCPU_SREG_FS;
4373 op->val = VCPU_SREG_GS;
4376 /* Special instructions do their own operand decoding. */
4378 op->type = OP_NONE; /* Disable writeback. */
4386 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4388 int rc = X86EMUL_CONTINUE;
4389 int mode = ctxt->mode;
4390 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4391 bool op_prefix = false;
4392 bool has_seg_override = false;
4393 struct opcode opcode;
4395 ctxt->memop.type = OP_NONE;
4396 ctxt->memopp = NULL;
4397 ctxt->_eip = ctxt->eip;
4398 ctxt->fetch.ptr = ctxt->fetch.data;
4399 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4400 ctxt->opcode_len = 1;
4401 if (insn_len > 0)
4402 memcpy(ctxt->fetch.data, insn, insn_len);
4403 else {
4404 rc = __do_insn_fetch_bytes(ctxt, 1);
4405 if (rc != X86EMUL_CONTINUE)
4406 return rc;
4407 }
4409 switch (mode) {
4410 case X86EMUL_MODE_REAL:
4411 case X86EMUL_MODE_VM86:
4412 case X86EMUL_MODE_PROT16:
4413 def_op_bytes = def_ad_bytes = 2;
4415 case X86EMUL_MODE_PROT32:
4416 def_op_bytes = def_ad_bytes = 4;
4418 #ifdef CONFIG_X86_64
4419 case X86EMUL_MODE_PROT64:
4420 def_op_bytes = 4;
4421 def_ad_bytes = 8;
4422 break;
4423 #endif
4424 default:
4425 return EMULATION_FAILED;
4426 }
4428 ctxt->op_bytes = def_op_bytes;
4429 ctxt->ad_bytes = def_ad_bytes;
4431 /* Legacy prefixes. */
4432 for (;;) {
4433 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4434 case 0x66: /* operand-size override */
4435 op_prefix = true;
4436 /* switch between 2/4 bytes */
4437 ctxt->op_bytes = def_op_bytes ^ 6;
4438 break;
4439 case 0x67: /* address-size override */
4440 if (mode == X86EMUL_MODE_PROT64)
4441 /* switch between 4/8 bytes */
4442 ctxt->ad_bytes = def_ad_bytes ^ 12;
4443 else
4444 /* switch between 2/4 bytes */
4445 ctxt->ad_bytes = def_ad_bytes ^ 6;
4446 break;
4447 case 0x26: /* ES override */
4448 case 0x2e: /* CS override */
4449 case 0x36: /* SS override */
4450 case 0x3e: /* DS override */
4451 has_seg_override = true;
4452 ctxt->seg_override = (ctxt->b >> 3) & 3;
4453 break;
4454 case 0x64: /* FS override */
4455 case 0x65: /* GS override */
4456 has_seg_override = true;
4457 ctxt->seg_override = ctxt->b & 7;
4458 break;
4459 case 0x40 ... 0x4f: /* REX */
4460 if (mode != X86EMUL_MODE_PROT64)
4461 goto done_prefixes;
4462 ctxt->rex_prefix = ctxt->b;
4463 continue;
4464 case 0xf0: /* LOCK */
4465 ctxt->lock_prefix = 1;
4466 break;
4467 case 0xf2: /* REPNE/REPNZ */
4468 case 0xf3: /* REP/REPE/REPZ */
4469 ctxt->rep_prefix = ctxt->b;
4470 break;
4471 default:
4472 goto done_prefixes;
4473 }
4475 /* Any legacy prefix after a REX prefix nullifies its effect. */
4477 ctxt->rex_prefix = 0;
4478 }
4480 done_prefixes:
4483 if (ctxt->rex_prefix & 8)
4484 ctxt->op_bytes = 8; /* REX.W */
4486 /* Opcode byte(s). */
4487 opcode = opcode_table[ctxt->b];
4488 /* Two-byte opcode? */
4489 if (ctxt->b == 0x0f) {
4490 ctxt->opcode_len = 2;
4491 ctxt->b = insn_fetch(u8, ctxt);
4492 opcode = twobyte_table[ctxt->b];
4494 /* 0F_38 opcode map */
4495 if (ctxt->b == 0x38) {
4496 ctxt->opcode_len = 3;
4497 ctxt->b = insn_fetch(u8, ctxt);
4498 opcode = opcode_map_0f_38[ctxt->b];
4501 ctxt->d = opcode.flags;
4503 if (ctxt->d & ModRM)
4504 ctxt->modrm = insn_fetch(u8, ctxt);
4506 /* vex-prefix instructions are not implemented */
4507 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4508 (mode == X86EMUL_MODE_PROT64 ||
4509 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4510 ctxt->d = NotImpl;
4511 }
4513 while (ctxt->d & GroupMask) {
4514 switch (ctxt->d & GroupMask) {
4515 case Group:
4516 goffset = (ctxt->modrm >> 3) & 7;
4517 opcode = opcode.u.group[goffset];
4518 break;
4519 case GroupDual:
4520 goffset = (ctxt->modrm >> 3) & 7;
4521 if ((ctxt->modrm >> 6) == 3)
4522 opcode = opcode.u.gdual->mod3[goffset];
4523 else
4524 opcode = opcode.u.gdual->mod012[goffset];
4525 break;
4526 case RMExt:
4527 goffset = ctxt->modrm & 7;
4528 opcode = opcode.u.group[goffset];
4529 break;
4530 case Prefix:
4531 if (ctxt->rep_prefix && op_prefix)
4532 return EMULATION_FAILED;
4533 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4534 switch (simd_prefix) {
4535 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4536 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4537 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4538 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4539 }
4540 break;
4541 case Escape:
4542 if (ctxt->modrm > 0xbf)
4543 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4544 else
4545 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4546 break;
4547 default:
4548 return EMULATION_FAILED;
4549 }
4551 ctxt->d &= ~(u64)GroupMask;
4552 ctxt->d |= opcode.flags;
4553 }
4555 /* Unrecognised? */
4556 if (ctxt->d == 0)
4557 return EMULATION_FAILED;
4559 ctxt->execute = opcode.u.execute;
4561 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4562 return EMULATION_FAILED;
4564 if (unlikely(ctxt->d &
4565 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4566 /*
4567 * These are copied unconditionally here, and checked unconditionally
4568 * in x86_emulate_insn.
4569 */
4570 ctxt->check_perm = opcode.check_perm;
4571 ctxt->intercept = opcode.intercept;
4573 if (ctxt->d & NotImpl)
4574 return EMULATION_FAILED;
4576 if (mode == X86EMUL_MODE_PROT64) {
4577 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4578 ctxt->op_bytes = 8;
4579 else if (ctxt->d & NearBranch)
4580 ctxt->op_bytes = 8;
4581 }
4583 if (ctxt->d & Op3264) {
4584 if (mode == X86EMUL_MODE_PROT64)
4585 ctxt->op_bytes = 8;
4586 else
4587 ctxt->op_bytes = 4;
4588 }
4590 if (ctxt->d & Sse)
4591 ctxt->op_bytes = 16;
4592 else if (ctxt->d & Mmx)
4593 ctxt->op_bytes = 8;
4596 /* ModRM and SIB bytes. */
4597 if (ctxt->d & ModRM) {
4598 rc = decode_modrm(ctxt, &ctxt->memop);
4599 if (!has_seg_override) {
4600 has_seg_override = true;
4601 ctxt->seg_override = ctxt->modrm_seg;
4603 } else if (ctxt->d & MemAbs)
4604 rc = decode_abs(ctxt, &ctxt->memop);
4605 if (rc != X86EMUL_CONTINUE)
4606 goto done;
4608 if (!has_seg_override)
4609 ctxt->seg_override = VCPU_SREG_DS;
4611 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4614 * Decode and fetch the source operand: register, memory
4617 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4618 if (rc != X86EMUL_CONTINUE)
4619 goto done;
4622 * Decode and fetch the second source operand: register, memory
4625 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4626 if (rc != X86EMUL_CONTINUE)
4627 goto done;
4629 /* Decode and fetch the destination operand: register or memory. */
4630 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4632 if (ctxt->rip_relative)
4633 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4635 done:
4636 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
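/*
 * Decode order, as implemented above: consume legacy and REX prefixes,
 * look the opcode up in the one-, two- or three-byte table, resolve
 * Group/GroupDual/RMExt/Prefix/Escape indirections via ModRM and the
 * SIMD prefix, fold the resulting flags into ctxt->d, then decode the
 * ModRM/SIB or absolute memory operand and the Src/Src2/Dst operands
 * described by the packed operand fields.
 */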
4639 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4641 return ctxt->d & PageTable;
4644 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4646 /* The second termination condition only applies for REPE
4647 * and REPNE. Test if the repeat string operation prefix is
4648 * REPE/REPZ or REPNE/REPNZ and, if it is, check the
4649 * corresponding termination condition, according to:
4650 * - if REPE/REPZ and ZF = 0 then done
4651 * - if REPNE/REPNZ and ZF = 1 then done
4653 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4654 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4655 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4656 ((ctxt->eflags & EFLG_ZF) == 0))
4657 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4658 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4659 return true;
4661 return false;
4664 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4665 {
4666 u8 fault = 0;
4668 ctxt->ops->get_fpu(ctxt);
4669 asm volatile("1: fwait \n\t"
4670 "2: \n\t"
4671 ".pushsection .fixup,\"ax\" \n\t"
4672 "3: \n\t"
4673 "movb $1, %[fault] \n\t"
4674 "jmp 2b \n\t"
4675 ".popsection \n\t"
4676 _ASM_EXTABLE(1b, 3b)
4677 : [fault]"+qm"(fault));
4678 ctxt->ops->put_fpu(ctxt);
4680 if (unlikely(fault))
4681 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4683 return X86EMUL_CONTINUE;
4686 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4689 if (op->type == OP_MM)
4690 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4693 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4695 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4696 if (!(ctxt->d & ByteOp))
4697 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4698 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4699 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4701 : "c"(ctxt->src2.val));
4702 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4703 if (!fop) /* exception is returned in fop variable */
4704 return emulate_de(ctxt);
4705 return X86EMUL_CONTINUE;
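/*
 * Calling convention for the fastop stubs invoked above: dst in RAX,
 * src in RDX, src2 in RCX and the flags word in RDI; the size variants
 * of each stub are laid out FASTOP_SIZE bytes apart, indexed here via
 * __ffs(dst.bytes). A stub signals a divide fault by returning a NULL
 * fop pointer, which is turned into #DE above.
 */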
4708 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4710 memset(&ctxt->rip_relative, 0,
4711 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4713 ctxt->io_read.pos = 0;
4714 ctxt->io_read.end = 0;
4715 ctxt->mem_read.end = 0;
4718 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4720 const struct x86_emulate_ops *ops = ctxt->ops;
4721 int rc = X86EMUL_CONTINUE;
4722 int saved_dst_type = ctxt->dst.type;
4724 ctxt->mem_read.pos = 0;
4726 /* LOCK prefix is allowed only with some instructions */
4727 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4728 rc = emulate_ud(ctxt);
4729 goto done;
4730 }
4732 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4733 rc = emulate_ud(ctxt);
4737 if (unlikely(ctxt->d &
4738 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4739 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4740 (ctxt->d & Undefined)) {
4741 rc = emulate_ud(ctxt);
4745 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4746 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4747 rc = emulate_ud(ctxt);
4751 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4752 rc = emulate_nm(ctxt);
		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		/* Privileged instructions can be executed only at CPL 0. */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}
		/* Instruction can only be executed in protected mode. */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}
		/* Do instruction-specific permission checks. */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes share the same first termination condition. */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
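	/*
	 * Fetch memory operands: source, second source, and, unless the
	 * instruction is a pure store (Mov), the current destination value.
	 */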
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}
	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* Optimisation: avoid the slow emulated read if Mov. */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:
	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
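	/*
	 * Prefer the handler installed by the decoder; Fastop handlers use
	 * the register-based calling convention implemented in fastop()
	 * above.
	 */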
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}
	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
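	/* One-byte opcodes that do not have a dedicated ->execute handler. */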
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;	/* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
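	/*
	 * Writeback stage: commit the computed operand values.  SrcWrite
	 * marks the few instructions that also modify their source operand.
	 */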
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
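	/*
	 * For repeated string instructions, account for the iterations just
	 * performed and decide whether to restart the instruction, re-enter
	 * the guest, or fall through to the final RIP update.
	 */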
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
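/* Two-byte (0x0f-prefixed) opcodes without a dedicated ->execute handler. */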
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
						       : (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val
						      : (u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
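/*
 * Thin wrappers that let callers outside the emulator invalidate or
 * flush the emulator's cached copy of the guest registers.
 */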
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}