1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
169 #define NearBranch ((u64)1 << 52) /* Near branches */
171 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
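/*
 * Worked example (illustrative, not part of the original source): an
 * entry described as DstMem | SrcReg | ModRM stores OpMem in bits 5:1
 * and OpReg in bits 10:6 of the 64-bit flags word, so the decoder
 * recovers each operand type with a shift and a mask:
 *
 *	dst_type  = (ctxt->d >> DstShift)  & OpMask;	/- OpMem -/
 *	src_type  = (ctxt->d >> SrcShift)  & OpMask;	/- OpReg -/
 *	src2_type = (ctxt->d >> Src2Shift) & OpMask;	/- OpNone -/
 */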
173 #define X2(x...) x, x
174 #define X3(x...) X2(x), x
175 #define X4(x...) X2(x), X2(x)
176 #define X5(x...) X4(x), x
177 #define X6(x...) X4(x), X2(x)
178 #define X7(x...) X4(x), X3(x)
179 #define X8(x...) X4(x), X4(x)
180 #define X16(x...) X8(x), X8(x)
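/*
 * Example (illustrative): the X-macros replicate one table entry, so a
 * hypothetical opcode-table fragment such as
 *
 *	X16(D(SrcImmByte | NearBranch))
 *
 * (assuming the D() entry-builder macro used later in this file)
 * expands to sixteen identical entries; this is how a run of opcodes
 * like 0x70..0x7f, the Jcc family, shares one decode description.
 */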
182 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
183 #define FASTOP_SIZE 8
186 * fastop functions have a special calling convention:
* dst:   rax    (in/out)
* src:   rdx    (in/out)
* src2:  rcx    (in)
* flags: rflags (in/out)
192 * ex: rsi (in:fastop pointer, out:zero if exception)
194 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
195 * different operand sizes can be reached by calculation, rather than a jump
196 * table (which would be bigger than the code).
198 * fastop functions are declared as taking a never-defined fastop parameter,
199 * so they can't be called from C directly.
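/*
 * Illustrative sketch of the dispatch this layout enables (variable
 * names hypothetical): the handler for a given operand size lies at a
 * fixed offset from the byte-sized stub, roughly
 *
 *	void (*fop)(struct fastop *) = (void *)em_add +
 *			FASTOP_SIZE * ilog2(ctxt->dst.bytes);
 *
 * giving +0 for 1-byte, +8 for 2-byte, +16 for 4-byte and +24 for
 * 8-byte operands.
 */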
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
209 const struct opcode *group;
210 const struct group_dual *gdual;
211 const struct gprefix *gprefix;
212 const struct escape *esc;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
235 /* EFLAGS bit definitions. */
236 #define EFLG_ID (1<<21)
237 #define EFLG_VIP (1<<20)
238 #define EFLG_VIF (1<<19)
239 #define EFLG_AC (1<<18)
240 #define EFLG_VM (1<<17)
241 #define EFLG_RF (1<<16)
242 #define EFLG_IOPL (3<<12)
243 #define EFLG_NT (1<<14)
244 #define EFLG_OF (1<<11)
245 #define EFLG_DF (1<<10)
246 #define EFLG_IF (1<<9)
247 #define EFLG_TF (1<<8)
248 #define EFLG_SF (1<<7)
249 #define EFLG_ZF (1<<6)
250 #define EFLG_AF (1<<4)
251 #define EFLG_PF (1<<2)
252 #define EFLG_CF (1<<0)
254 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
255 #define EFLG_RESERVED_ONE_MASK 2
257 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
259 if (!(ctxt->regs_valid & (1 << nr))) {
260 ctxt->regs_valid |= 1 << nr;
261 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
263 return ctxt->_regs[nr];
266 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
268 ctxt->regs_valid |= 1 << nr;
269 ctxt->regs_dirty |= 1 << nr;
270 return &ctxt->_regs[nr];
273 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
279 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
unsigned reg;

for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
284 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
287 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
289 ctxt->regs_dirty = 0;
290 ctxt->regs_valid = 0;
294 * These EFLAGS bits are restored from saved value during emulation, and
295 * any changes are written back to the saved value after emulation.
297 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
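/*
 * Example (a sketch of how this mask is applied): after an emulated ALU
 * op produces new flags, only the arithmetic bits are merged back, so
 * bits such as IF or TF are never clobbered:
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 */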
305 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
307 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
308 #define FOP_RET "ret \n\t"
310 #define FOP_START(op) \
311 extern void em_##op(struct fastop *fake); \
312 asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
".type em_" #op ", @function \n\t" \
"em_" #op ": \n\t"

#define FOP_END \
".popsection")
320 #define FOPNOP() FOP_ALIGN FOP_RET
322 #define FOP1E(op, dst) \
323 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
325 #define FOP1EEX(op, dst) \
326 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
328 #define FASTOP1(op) \
333 ON64(FOP1E(op##q, rax)) \
336 /* 1-operand, using src2 (for MUL/DIV r/m) */
337 #define FASTOP1SRC2(op, name) \
342 ON64(FOP1E(op, rcx)) \
345 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
346 #define FASTOP1SRC2EX(op, name) \
351 ON64(FOP1EEX(op, rcx)) \
354 #define FOP2E(op, dst, src) \
355 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
357 #define FASTOP2(op) \
359 FOP2E(op##b, al, dl) \
360 FOP2E(op##w, ax, dx) \
361 FOP2E(op##l, eax, edx) \
362 ON64(FOP2E(op##q, rax, rdx)) \
365 /* 2 operand, word only */
366 #define FASTOP2W(op) \
369 FOP2E(op##w, ax, dx) \
370 FOP2E(op##l, eax, edx) \
371 ON64(FOP2E(op##q, rax, rdx)) \
374 /* 2 operand, src is CL */
375 #define FASTOP2CL(op) \
377 FOP2E(op##b, al, cl) \
378 FOP2E(op##w, ax, cl) \
379 FOP2E(op##l, eax, cl) \
380 ON64(FOP2E(op##q, rax, cl)) \
383 #define FOP3E(op, dst, src, src2) \
384 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
386 /* 3-operand, word-only, src2=cl */
387 #define FASTOP3WCL(op) \
390 FOP3E(op##w, ax, dx, cl) \
391 FOP3E(op##l, eax, edx, cl) \
392 ON64(FOP3E(op##q, rax, rdx, cl)) \
395 /* Special case for SETcc - 1 instruction per cc */
396 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
398 asm(".global kvm_fastop_exception \n"
399 "kvm_fastop_exception: xor %esi, %esi; ret");
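/*
 * Illustrative expansion (abridged): FASTOP2(add) emits four stubs,
 * each aligned to FASTOP_SIZE bytes, under the single label em_add:
 *
 *	em_add:	addb %dl, %al; ret
 *		addw %dx, %ax; ret
 *		addl %edx, %eax; ret
 *		addq %rdx, %rax; ret	(64-bit builds only, via ON64)
 */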
420 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
423 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
424 enum x86_intercept intercept,
425 enum x86_intercept_stage stage)
427 struct x86_instruction_info info = {
428 .intercept = intercept,
429 .rep_prefix = ctxt->rep_prefix,
430 .modrm_mod = ctxt->modrm_mod,
431 .modrm_reg = ctxt->modrm_reg,
432 .modrm_rm = ctxt->modrm_rm,
433 .src_val = ctxt->src.val64,
434 .dst_val = ctxt->dst.val64,
435 .src_bytes = ctxt->src.bytes,
436 .dst_bytes = ctxt->dst.bytes,
437 .ad_bytes = ctxt->ad_bytes,
438 .next_rip = ctxt->eip,
441 return ctxt->ops->intercept(ctxt, &info, stage);
444 static void assign_masked(ulong *dest, ulong src, ulong mask)
446 *dest = (*dest & ~mask) | (src & mask);
449 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
451 return (1UL << (ctxt->ad_bytes << 3)) - 1;
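/*
 * Example: ad_mask() yields 0xffff for a 16-bit address size and
 * 0xffffffff for a 32-bit one. The 64-bit case never reaches it;
 * address_mask() below returns the register unmasked when ad_bytes
 * equals sizeof(unsigned long).
 */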
454 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
u16 sel;
struct desc_struct ss;

if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
462 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
465 static int stack_size(struct x86_emulate_ctxt *ctxt)
467 return (__fls(stack_mask(ctxt)) + 1) >> 3;
470 /* Access/update address held in a register, based on addressing mode. */
471 static inline unsigned long
472 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
480 static inline unsigned long
481 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
483 return address_mask(ctxt, reg);
486 static void masked_increment(ulong *reg, ulong mask, int inc)
488 assign_masked(reg, *reg + inc, mask);
492 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
ulong mask;

if (ctxt->ad_bytes == sizeof(unsigned long))
mask = ~0UL;
else
mask = ad_mask(ctxt);
500 masked_increment(reg, mask, inc);
503 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
505 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
508 static u32 desc_limit_scaled(struct desc_struct *desc)
510 u32 limit = get_desc_limit(desc);
512 return desc->g ? (limit << 12) | 0xfff : limit;
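/*
 * Worked example: a descriptor with limit 0xfffff and G=1 scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, a 4GB flat segment; with G=0
 * the limit is interpreted in bytes, for at most 1MB - 1.
 */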
515 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;

return ctxt->ops->get_cached_segment_base(ctxt, seg);
523 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
524 u32 error, bool valid)
527 ctxt->exception.vector = vec;
528 ctxt->exception.error_code = error;
529 ctxt->exception.error_code_valid = valid;
530 return X86EMUL_PROPAGATE_FAULT;
533 static int emulate_db(struct x86_emulate_ctxt *ctxt)
535 return emulate_exception(ctxt, DB_VECTOR, 0, false);
538 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
540 return emulate_exception(ctxt, GP_VECTOR, err, true);
543 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
545 return emulate_exception(ctxt, SS_VECTOR, err, true);
548 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
550 return emulate_exception(ctxt, UD_VECTOR, 0, false);
553 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
555 return emulate_exception(ctxt, TS_VECTOR, err, true);
558 static int emulate_de(struct x86_emulate_ctxt *ctxt)
560 return emulate_exception(ctxt, DE_VECTOR, 0, false);
563 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
565 return emulate_exception(ctxt, NM_VECTOR, 0, false);
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
bool cs_l)
switch (ctxt->op_bytes) {
case 2:
ctxt->_eip = (u16)dst;
break;
case 4:
ctxt->_eip = (u32)dst;
break;
#ifdef CONFIG_X86_64
case 8:
if ((cs_l && is_noncanonical_address(dst)) ||
(!cs_l && (dst >> 32) != 0))
return emulate_gp(ctxt, 0);
ctxt->_eip = dst;
break;
#endif
default:
WARN(1, "unsupported eip assignment size\n");
}
589 return X86EMUL_CONTINUE;
592 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
594 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
597 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
599 return assign_eip_near(ctxt, ctxt->_eip + rel);
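/*
 * Example: a short JMP with rel = -2 and op_bytes == 2 computes
 * _eip + rel, and assign_eip_far() then truncates the result to 16
 * bits (the "case 2" above), so IP wraps within the code segment as
 * it does on real hardware.
 */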
602 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
u16 selector;
struct desc_struct desc;

ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
int seg)
u16 dummy;
u32 base3;
struct desc_struct desc;
618 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
619 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
623 * x86 defines three classes of vector instructions: explicitly
624 * aligned, explicitly unaligned, and the rest, which change behaviour
625 * depending on whether they're AVX encoded or not.
627 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
628 * subject to the same check.
630 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
if (likely(size < 16))
return false;

if (ctxt->d & Aligned)
return true;
else if (ctxt->d & Unaligned)
return false;
else if (ctxt->d & Avx)
return false;
else
return true;
645 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
646 struct segmented_address addr,
647 unsigned *max_size, unsigned size,
bool write, bool fetch,
ulong *linear)
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
unsigned cpl;
658 la = seg_base(ctxt, addr.seg) +
659 (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
661 switch (ctxt->mode) {
662 case X86EMUL_MODE_PROT64:
663 if (is_noncanonical_address(la))
664 return emulate_gp(ctxt, 0);
666 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
goto bad;
break;
default:
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
675 /* code segment in protected mode or read-only data segment */
676 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
679 /* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
682 lim = desc_limit_scaled(&desc);
683 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
684 (ctxt->d & NoBigReal)) {
685 /* la is between zero and 0xffff */
if (la > 0xffff)
goto bad;
*max_size = 0x10000 - la;
689 } else if ((desc.type & 8) || !(desc.type & 4)) {
690 /* expand-up segment */
if (addr.ea > lim)
goto bad;
*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
695 /* expand-down segment */
if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
goto bad;
*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
if (size > *max_size)
goto bad;
705 cpl = ctxt->ops->cpl(ctxt);
if (!(desc.type & 8)) {
/* data segment */
if (cpl > desc.dpl)
goto bad;
} else if ((desc.type & 8) && !(desc.type & 4)) {
/* nonconforming code segment */
if (cpl != desc.dpl)
goto bad;
} else if ((desc.type & 8) && (desc.type & 4)) {
/* conforming code segment */
if (cpl < desc.dpl)
goto bad;
}
break;
}
if (ctxt->mode != X86EMUL_MODE_PROT64)
la &= (u32)-1;
723 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
724 return emulate_gp(ctxt, 0);
*linear = la;
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
729 return emulate_ss(ctxt, 0);
else
return emulate_gp(ctxt, 0);
734 static int linearize(struct x86_emulate_ctxt *ctxt,
735 struct segmented_address addr,
736 unsigned size, bool write,
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false, linear);
744 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data, unsigned size)
int rc;
ulong linear;

rc = linearize(ctxt, addr, size, false, &linear);
753 if (rc != X86EMUL_CONTINUE)
755 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
* Prefetch the remaining bytes of the instruction without crossing a
* page boundary if they are not in the fetch cache yet.
762 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
765 unsigned size, max_size;
766 unsigned long linear;
767 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
768 struct segmented_address addr = { .seg = VCPU_SREG_CS,
769 .ea = ctxt->eip + cur_size };
772 * We do not know exactly how many bytes will be needed, and
773 * __linearize is expensive, so fetch as much as possible. We
774 * just have to avoid going beyond the 15 byte limit, the end
775 * of the segment, or the end of the page.
777 * __linearize is called with size 0 so that it does not do any
* boundary check itself. Instead, we use max_size to check
* against op_size.
781 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
782 if (unlikely(rc != X86EMUL_CONTINUE))
/* 15UL ^ cur_size equals 15UL - cur_size here, since cur_size <= 15 */
size = min_t(unsigned, 15UL ^ cur_size, max_size);
786 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
* One instruction can only straddle two pages, and one page has
* already been fetched at the beginning of x86_decode_insn. So, if
* there still are not enough bytes, we must have hit the 15-byte
* instruction-length limit.
794 if (unlikely(size < op_size))
795 return emulate_gp(ctxt, 0);
797 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
798 size, &ctxt->exception);
799 if (unlikely(rc != X86EMUL_CONTINUE))
801 ctxt->fetch.end += size;
802 return X86EMUL_CONTINUE;
805 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
808 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
810 if (unlikely(done_size < size))
811 return __do_insn_fetch_bytes(ctxt, size - done_size);
813 return X86EMUL_CONTINUE;
816 /* Fetch next part of the instruction being emulated. */
817 #define insn_fetch(_type, _ctxt) \
820 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
821 if (rc != X86EMUL_CONTINUE) \
823 ctxt->_eip += sizeof(_type); \
824 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
825 ctxt->fetch.ptr += sizeof(_type); \
829 #define insn_fetch_arr(_arr, _size, _ctxt) \
831 rc = do_insn_fetch_bytes(_ctxt, _size); \
832 if (rc != X86EMUL_CONTINUE) \
834 ctxt->_eip += (_size); \
835 memcpy(_arr, ctxt->fetch.ptr, _size); \
836 ctxt->fetch.ptr += (_size); \
840 * Given the 'reg' portion of a ModRM byte, and a register block, return a
841 * pointer into the block that addresses the relevant register.
* AH, CH, DH and BH are decoded as the high bytes of AX, CX, DX and BX
* when the instruction has no REX prefix and uses a byte operand.
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
850 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
851 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
857 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
858 struct segmented_address addr,
859 u16 *size, unsigned long *address, int op_bytes)
int rc;

if (op_bytes == 2)
op_bytes = 3;
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
867 if (rc != X86EMUL_CONTINUE)
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
884 FASTOP1SRC2(mul, mul_ex);
885 FASTOP1SRC2(imul, imul_ex);
886 FASTOP1SRC2EX(div, div_ex);
887 FASTOP1SRC2EX(idiv, idiv_ex);
916 static u8 test_cc(unsigned int condition, unsigned long flags)
u8 rc;
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
921 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
922 asm("push %[flags]; popf; call *%[fastop]"
923 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
927 static void fetch_register_operand(struct operand *op)
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
945 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
947 ctxt->ops->get_fpu(ctxt);
949 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
950 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
951 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
952 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
953 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
954 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
955 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
956 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
958 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
959 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
960 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
961 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
962 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
963 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
964 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
965 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
969 ctxt->ops->put_fpu(ctxt);
972 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
975 ctxt->ops->get_fpu(ctxt);
977 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
978 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
979 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
980 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
981 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
982 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
983 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
984 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
986 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
987 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
988 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
989 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
990 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
991 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
992 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
993 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
997 ctxt->ops->put_fpu(ctxt);
1000 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1002 ctxt->ops->get_fpu(ctxt);
1004 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1005 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1006 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1007 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1008 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1009 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1010 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1011 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1014 ctxt->ops->put_fpu(ctxt);
1017 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1019 ctxt->ops->get_fpu(ctxt);
1021 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1022 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1023 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1024 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1025 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1026 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1027 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1028 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1031 ctxt->ops->put_fpu(ctxt);
1034 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1036 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1037 return emulate_nm(ctxt);
1039 ctxt->ops->get_fpu(ctxt);
1040 asm volatile("fninit");
1041 ctxt->ops->put_fpu(ctxt);
1042 return X86EMUL_CONTINUE;
1045 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
u16 fcw;

if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1050 return emulate_nm(ctxt);
1052 ctxt->ops->get_fpu(ctxt);
1053 asm volatile("fnstcw %0": "+m"(fcw));
1054 ctxt->ops->put_fpu(ctxt);
1056 /* force 2 byte destination */
1057 ctxt->dst.bytes = 2;
1058 ctxt->dst.val = fcw;
1060 return X86EMUL_CONTINUE;
1063 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
u16 fsw;

if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1068 return emulate_nm(ctxt);
1070 ctxt->ops->get_fpu(ctxt);
1071 asm volatile("fnstsw %0": "+m"(fsw));
1072 ctxt->ops->put_fpu(ctxt);
1074 /* force 2 byte destination */
1075 ctxt->dst.bytes = 2;
1076 ctxt->dst.val = fsw;
1078 return X86EMUL_CONTINUE;
1081 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1084 unsigned reg = ctxt->modrm_reg;
1086 if (!(ctxt->d & ModRM))
1087 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}

op->type = OP_REG;
1105 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1106 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1108 fetch_register_operand(op);
1109 op->orig_val = op->val;
1112 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1114 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1115 ctxt->modrm_seg = VCPU_SREG_SS;
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
1126 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1127 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1128 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1130 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1131 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1132 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1133 ctxt->modrm_seg = VCPU_SREG_DS;
1135 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
1159 if (ctxt->ad_bytes == 2) {
1160 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1161 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1162 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1163 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1165 /* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
1205 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1206 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1207 ctxt->modrm_seg = VCPU_SREG_SS;
1208 modrm_ea = (u16)modrm_ea;
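/*
 * Worked example for the 16-bit decode above: ModRM byte 0x46 with
 * displacement 0x10 has mod=1, rm=6, giving modrm_ea = bp + 0x10;
 * because the base is BP, modrm_seg is switched to SS.
 */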
1210 /* 32/64-bit ModR/M decode. */
1211 if ((ctxt->modrm_rm & 7) == 4) {
1212 sib = insn_fetch(u8, ctxt);
1213 index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
1217 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1218 modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
1225 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1226 if (ctxt->mode == X86EMUL_MODE_PROT64)
1227 ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
1230 modrm_ea += reg_read(ctxt, base_reg);
1231 adjust_modrm_seg(ctxt, base_reg);
1233 switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 5)
modrm_ea += insn_fetch(s32, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
1246 op->addr.mem.ea = modrm_ea;
1247 if (ctxt->ad_bytes != 8)
1248 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
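/*
 * Worked example for the SIB path above: ModRM 0x04 selects a SIB
 * byte; SIB 0x88 (scale=2, index=1, base=0) yields EAX + ECX*4.
 * An index field of 4 means "no index register".
 */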
1254 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1257 int rc = X86EMUL_CONTINUE;
1260 switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
1275 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
long sv = 0, mask;

if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1280 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1282 if (ctxt->src.bytes == 2)
1283 sv = (s16)ctxt->src.val & (s16)mask;
1284 else if (ctxt->src.bytes == 4)
1285 sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
1289 ctxt->dst.addr.mem.ea += (sv >> 3);
1292 /* only subword offset */
1293 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
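/*
 * Worked example: for a bit test like "bt %cx, mem" with src.val = 35
 * and a 16-bit destination, mask is ~15 and sv = 32, so the memory
 * operand is advanced by sv >> 3 = 4 bytes and the remaining in-word
 * bit offset becomes 35 & 15 = 3.
 */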
1296 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1297 unsigned long addr, void *dest, unsigned size)
1300 struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
1305 WARN_ON((mc->end + size) >= sizeof(mc->data));
1307 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
if (rc != X86EMUL_CONTINUE)
return rc;

mc->end += size;

read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;

return X86EMUL_CONTINUE;
1320 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1321 struct segmented_address addr,
1328 rc = linearize(ctxt, addr, size, false, &linear);
1329 if (rc != X86EMUL_CONTINUE)
1331 return read_emulated(ctxt, linear, data, size);
1334 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1335 struct segmented_address addr,
1342 rc = linearize(ctxt, addr, size, true, &linear);
1343 if (rc != X86EMUL_CONTINUE)
1345 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1349 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1350 struct segmented_address addr,
1351 const void *orig_data, const void *data,
1357 rc = linearize(ctxt, addr, size, true, &linear);
1358 if (rc != X86EMUL_CONTINUE)
1360 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1361 size, &ctxt->exception);
1364 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1365 unsigned int size, unsigned short port,
1368 struct read_cache *rc = &ctxt->io_read;
1370 if (rc->pos == rc->end) { /* refill pio read ahead */
1371 unsigned int in_page, n;
1372 unsigned int count = ctxt->rep_prefix ?
1373 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1374 in_page = (ctxt->eflags & EFLG_DF) ?
1375 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1376 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1377 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
1386 if (ctxt->rep_prefix && (ctxt->d & String) &&
1387 !(ctxt->eflags & EFLG_DF)) {
1388 ctxt->dst.data = rc->data + rc->pos;
1389 ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
1399 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1400 u16 index, struct desc_struct *desc)
struct desc_ptr dt;
ulong addr;

ctxt->ops->get_idt(ctxt, &dt);
1407 if (dt.size < index * 8 + 7)
1408 return emulate_gp(ctxt, index << 3 | 0x2);
1410 addr = dt.address + index * 8;
1411 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1415 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1416 u16 selector, struct desc_ptr *dt)
1418 const struct x86_emulate_ops *ops = ctxt->ops;
1421 if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
u32 base3;
1425 memset (dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
1430 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1431 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1433 ops->get_gdt(ctxt, dt);
/* allowed only for 8-byte segment descriptors */
1437 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1438 u16 selector, struct desc_struct *desc,
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
1445 get_descriptor_table_ptr(ctxt, selector, &dt);
1447 if (dt.size < index * 8 + 7)
1448 return emulate_gp(ctxt, selector & 0xfffc);
1450 *desc_addr_p = addr = dt.address + index * 8;
1451 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
/* allowed only for 8-byte segment descriptors */
1456 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1457 u16 selector, struct desc_struct *desc)
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
1463 get_descriptor_table_ptr(ctxt, selector, &dt);
1465 if (dt.size < index * 8 + 7)
1466 return emulate_gp(ctxt, selector & 0xfffc);
1468 addr = dt.address + index * 8;
1469 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1473 /* Does not support long mode */
1474 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1475 u16 selector, int seg, u8 cpl,
1476 bool in_task_switch,
1477 struct desc_struct *desc)
1479 struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
1483 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;

memset(&seg_desc, 0, sizeof seg_desc);
1491 if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
1494 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
1497 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1498 /* VM86 needs a clean new segment descriptor */
1499 set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}

rpl = selector & 3;
1510 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1511 if ((seg == VCPU_SREG_CS
1512 || (seg == VCPU_SREG_SS
1513 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
1518 /* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
1525 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1526 if (ret != X86EMUL_CONTINUE)
1529 err_code = selector & 0xfffc;
1530 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1532 /* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s)
goto exception;
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}

dpl = seg_desc.dpl;

switch (seg) {
case VCPU_SREG_SS:
* segment is not a writable data segment or segment
* selector's RPL != CPL or segment descriptor's DPL != CPL
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
1565 /* in long-mode d/b must be clear if l is set */
1566 if (seg_desc.d && seg_desc.l) {
u64 efer = 0;

ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
1574 /* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
1580 old_desc = seg_desc;
1581 seg_desc.type |= 2; /* busy */
1582 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1583 sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
1587 case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
1591 default: /* DS, ES, FS, or GS */
1593 * segment is not a data or readable code segment or
1594 * ((segment is a data or nonconforming code segment)
1595 * and (both RPL and CPL > DPL))
1597 if ((seg_desc.type & 0xa) == 0x8 ||
1598 (((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
1607 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
1610 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1611 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1612 sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
1625 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1626 u16 selector, int seg)
1628 u8 cpl = ctxt->ops->cpl(ctxt);
1629 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1632 static void write_register_operand(struct operand *op)
1634 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1635 switch (op->bytes) {
case 1:
*(u8 *)op->addr.reg = (u8)op->val;
break;
case 2:
*(u16 *)op->addr.reg = (u16)op->val;
break;
case 4:
*op->addr.reg = (u32)op->val;
break; /* 64b: zero-extend */
case 8:
*op->addr.reg = op->val;
break;
}
1651 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
1688 return X86EMUL_CONTINUE;
1691 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1693 struct segmented_address addr;
1695 rsp_increment(ctxt, -bytes);
1696 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1697 addr.seg = VCPU_SREG_SS;
1699 return segmented_write(ctxt, addr, data, bytes);
1702 static int em_push(struct x86_emulate_ctxt *ctxt)
1704 /* Disable writeback. */
1705 ctxt->dst.type = OP_NONE;
1706 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1709 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1710 void *dest, int len)
1713 struct segmented_address addr;
1715 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1716 addr.seg = VCPU_SREG_SS;
1717 rc = segmented_read(ctxt, addr, dest, len);
1718 if (rc != X86EMUL_CONTINUE)
1721 rsp_increment(ctxt, len);
1725 static int em_pop(struct x86_emulate_ctxt *ctxt)
1727 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1730 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1731 void *dest, int len)
1734 unsigned long val, change_mask;
1735 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1736 int cpl = ctxt->ops->cpl(ctxt);
1738 rc = emulate_pop(ctxt, &val, len);
1739 if (rc != X86EMUL_CONTINUE)
1742 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1743 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1745 switch(ctxt->mode) {
1746 case X86EMUL_MODE_PROT64:
1747 case X86EMUL_MODE_PROT32:
1748 case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= EFLG_IOPL;
if (cpl <= iopl)
change_mask |= EFLG_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= EFLG_IF;
break;
1759 default: /* real mode */
1760 change_mask |= (EFLG_IOPL | EFLG_IF);
1764 *(unsigned long *)dest =
1765 (ctxt->eflags & ~change_mask) | (val & change_mask);
1770 static int em_popf(struct x86_emulate_ctxt *ctxt)
1772 ctxt->dst.type = OP_REG;
1773 ctxt->dst.addr.reg = &ctxt->eflags;
1774 ctxt->dst.bytes = ctxt->op_bytes;
1775 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1778 static int em_enter(struct x86_emulate_ctxt *ctxt)
1781 unsigned frame_size = ctxt->src.val;
1782 unsigned nesting_level = ctxt->src2.val & 31;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
1788 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1789 rc = push(ctxt, &rbp, stack_size(ctxt));
1790 if (rc != X86EMUL_CONTINUE)
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
1794 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
1797 return X86EMUL_CONTINUE;
1800 static int em_leave(struct x86_emulate_ctxt *ctxt)
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
1804 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1807 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1809 int seg = ctxt->src2.val;
1811 ctxt->src.val = get_segment_selector(ctxt, seg);
1813 return em_push(ctxt);
1816 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1818 int seg = ctxt->src2.val;
1819 unsigned long selector;
1822 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1823 if (rc != X86EMUL_CONTINUE)
1826 if (ctxt->modrm_reg == VCPU_SREG_SS)
1827 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1829 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1833 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1835 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1836 int rc = X86EMUL_CONTINUE;
1837 int reg = VCPU_REGS_RAX;
1839 while (reg <= VCPU_REGS_RDI) {
ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp : reg_read(ctxt, reg);

rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
1853 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1855 ctxt->src.val = (unsigned long)ctxt->eflags;
1856 return em_push(ctxt);
1859 static int em_popa(struct x86_emulate_ctxt *ctxt)
1861 int rc = X86EMUL_CONTINUE;
1862 int reg = VCPU_REGS_RDI;
1864 while (reg >= VCPU_REGS_RAX) {
1865 if (reg == VCPU_REGS_RSP) {
1866 rsp_increment(ctxt, ctxt->op_bytes);
1870 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1871 if (rc != X86EMUL_CONTINUE)
1878 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
1887 /* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
1893 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
1905 ops->get_idt(ctxt, &dt);
1907 eip_addr = dt.address + (irq << 2);
1908 cs_addr = dt.address + (irq << 2) + 2;
1910 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1911 if (rc != X86EMUL_CONTINUE)
1914 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1915 if (rc != X86EMUL_CONTINUE)
1918 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;

ctxt->_eip = eip;
1927 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1931 invalidate_registers(ctxt);
1932 rc = __emulate_int_real(ctxt, irq);
1933 if (rc == X86EMUL_CONTINUE)
1934 writeback_registers(ctxt);
1938 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1940 switch(ctxt->mode) {
1941 case X86EMUL_MODE_REAL:
1942 return __emulate_int_real(ctxt, irq);
1943 case X86EMUL_MODE_VM86:
1944 case X86EMUL_MODE_PROT16:
1945 case X86EMUL_MODE_PROT32:
1946 case X86EMUL_MODE_PROT64:
default:
/* Protected mode interrupts are not implemented yet */
1949 return X86EMUL_UNHANDLEABLE;
1953 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1955 int rc = X86EMUL_CONTINUE;
1956 unsigned long temp_eip = 0;
1957 unsigned long temp_eflags = 0;
1958 unsigned long cs = 0;
1959 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1960 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1961 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1962 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1964 /* TODO: Add stack limit check */
1966 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1968 if (rc != X86EMUL_CONTINUE)
1971 if (temp_eip & ~0xffff)
1972 return emulate_gp(ctxt, 0);
1974 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1976 if (rc != X86EMUL_CONTINUE)
1979 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1981 if (rc != X86EMUL_CONTINUE)
1984 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1986 if (rc != X86EMUL_CONTINUE)
1989 ctxt->_eip = temp_eip;
1992 if (ctxt->op_bytes == 4)
1993 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1994 else if (ctxt->op_bytes == 2) {
1995 ctxt->eflags &= ~0xffff;
1996 ctxt->eflags |= temp_eflags;
1999 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2000 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2005 static int em_iret(struct x86_emulate_ctxt *ctxt)
2007 switch(ctxt->mode) {
2008 case X86EMUL_MODE_REAL:
2009 return emulate_iret_real(ctxt);
2010 case X86EMUL_MODE_VM86:
2011 case X86EMUL_MODE_PROT16:
2012 case X86EMUL_MODE_PROT32:
2013 case X86EMUL_MODE_PROT64:
default:
/* iret from protected mode is not implemented yet */
2016 return X86EMUL_UNHANDLEABLE;
2020 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2023 unsigned short sel, old_sel;
2024 struct desc_struct old_desc, new_desc;
2025 const struct x86_emulate_ops *ops = ctxt->ops;
2026 u8 cpl = ctxt->ops->cpl(ctxt);
2028 /* Assignment of RIP may only fail in 64-bit mode */
2029 if (ctxt->mode == X86EMUL_MODE_PROT64)
2030 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2033 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
&new_desc);
2037 if (rc != X86EMUL_CONTINUE)
2040 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2041 if (rc != X86EMUL_CONTINUE) {
2042 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2043 /* assigning eip failed; restore the old cs */
2044 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2050 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2052 return assign_eip_near(ctxt, ctxt->src.val);
2055 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2060 old_eip = ctxt->_eip;
2061 rc = assign_eip_near(ctxt, ctxt->src.val);
2062 if (rc != X86EMUL_CONTINUE)
ctxt->src.val = old_eip;
return em_push(ctxt);
2069 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2071 u64 old = ctxt->dst.orig_val64;
2073 if (ctxt->dst.bytes == 16)
2074 return X86EMUL_UNHANDLEABLE;
2076 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2077 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2078 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2079 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2080 ctxt->eflags &= ~EFLG_ZF;
2082 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2083 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2085 ctxt->eflags |= EFLG_ZF;
2087 return X86EMUL_CONTINUE;
2090 static int em_ret(struct x86_emulate_ctxt *ctxt)
2095 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2096 if (rc != X86EMUL_CONTINUE)
2099 return assign_eip_near(ctxt, eip);
2102 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2105 unsigned long eip, cs;
u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
2108 struct desc_struct old_desc, new_desc;
2109 const struct x86_emulate_ops *ops = ctxt->ops;
2111 if (ctxt->mode == X86EMUL_MODE_PROT64)
2112 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2115 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2116 if (rc != X86EMUL_CONTINUE)
2118 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2119 if (rc != X86EMUL_CONTINUE)
2121 /* Outer-privilege level return is not implemented */
2122 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2123 return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
&new_desc);
2126 if (rc != X86EMUL_CONTINUE)
2128 rc = assign_eip_far(ctxt, eip, new_desc.l);
2129 if (rc != X86EMUL_CONTINUE) {
2130 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2131 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2136 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2140 rc = em_ret_far(ctxt);
2141 if (rc != X86EMUL_CONTINUE)
2143 rsp_increment(ctxt, ctxt->src.val);
2144 return X86EMUL_CONTINUE;
2147 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2149 /* Save real source value, then compare EAX against destination. */
2150 ctxt->dst.orig_val = ctxt->dst.val;
2151 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2152 ctxt->src.orig_val = ctxt->src.val;
2153 ctxt->src.val = ctxt->dst.orig_val;
2154 fastop(ctxt, em_cmp);
2156 if (ctxt->eflags & EFLG_ZF) {
2157 /* Success: write back to memory. */
2158 ctxt->dst.val = ctxt->src.orig_val;
2160 /* Failure: write the value we saw to EAX. */
2161 ctxt->dst.type = OP_REG;
2162 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2163 ctxt->dst.val = ctxt->dst.orig_val;
2165 return X86EMUL_CONTINUE;
2168 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2170 int seg = ctxt->src2.val;
2174 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2176 rc = load_segment_descriptor(ctxt, sel, seg);
2177 if (rc != X86EMUL_CONTINUE)
2180 ctxt->dst.val = ctxt->src.val;
2185 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2186 struct desc_struct *cs, struct desc_struct *ss)
2188 cs->l = 0; /* will be adjusted later */
2189 set_desc_base(cs, 0); /* flat segment */
2190 cs->g = 1; /* 4kb granularity */
2191 set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
2199 set_desc_base(ss, 0); /* flat segment */
2200 set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
2211 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2213 u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2217 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2218 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2219 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2222 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2224 const struct x86_emulate_ops *ops = ctxt->ops;
2225 u32 eax, ebx, ecx, edx;
* syscall should always be enabled in long mode, so the cpuid-based
* vendor check is only needed when one of the other modes is active...
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;

eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2238 * Intel ("GenuineIntel")
2239 * remark: Intel CPUs only support "syscall" in 64bit
2240 * longmode. Also an 64bit guest with a
2241 * 32bit compat-app running will #UD !! While this
2242 * behaviour can be fixed (by emulating) into AMD
2243 * response - CPUs of AMD can't behave like Intel.
2245 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2246 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
2250 /* AMD ("AuthenticAMD") */
2251 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2252 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
2256 /* AMD ("AMDisbetter!") */
2257 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2258 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
2266 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2268 const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
2274 /* syscall is not available in real mode */
2275 if (ctxt->mode == X86EMUL_MODE_REAL ||
2276 ctxt->mode == X86EMUL_MODE_VM86)
2277 return emulate_ud(ctxt);
2279 if (!(em_syscall_is_enabled(ctxt)))
2280 return emulate_ud(ctxt);
2282 ops->get_msr(ctxt, MSR_EFER, &efer);
2283 setup_syscalls_segments(ctxt, &cs, &ss);
2285 if (!(efer & EFER_SCE))
2286 return emulate_ud(ctxt);
2288 ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
2291 ss_sel = (u16)(msr_data + 8);
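/*
 * Example of the MSR_STAR layout used here (after the >>= 32 above):
 * bits 47:32 of STAR hold the kernel's SYSCALL CS, and SS is defined
 * as that selector + 8, so STAR[47:32] = 0x10 gives CS = 0x10 and
 * SS = 0x18.
 */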
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
2297 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2298 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2300 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2301 if (efer & EFER_LMA) {
2302 #ifdef CONFIG_X86_64
2303 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
2308 ctxt->_eip = msr_data;
2310 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2311 ctxt->eflags &= ~msr_data;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
2316 ctxt->_eip = (u32)msr_data;
2318 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2321 return X86EMUL_CONTINUE;
2324 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2326 const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
2332 ops->get_msr(ctxt, MSR_EFER, &efer);
2333 /* inject #GP if in real mode */
2334 if (ctxt->mode == X86EMUL_MODE_REAL)
2335 return emulate_gp(ctxt, 0);
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
2341 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2342 && !vendor_intel(ctxt))
2343 return emulate_ud(ctxt);
2345 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2346 * Therefore, we inject an #UD.
2348 if (ctxt->mode == X86EMUL_MODE_PROT64)
2349 return emulate_ud(ctxt);
2351 setup_syscalls_segments(ctxt, &cs, &ss);
2353 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2354 switch (ctxt->mode) {
2355 case X86EMUL_MODE_PROT32:
2356 if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
break;
2359 case X86EMUL_MODE_PROT64:
2360 if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
break;
}
2367 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2368 cs_sel = (u16)msr_data;
2369 cs_sel &= ~SELECTOR_RPL_MASK;
2370 ss_sel = cs_sel + 8;
2371 ss_sel &= ~SELECTOR_RPL_MASK;
if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
cs.d = 0;
cs.l = 1;
}
2377 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2378 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2380 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2381 ctxt->_eip = msr_data;
2383 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2384 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2386 return X86EMUL_CONTINUE;
2389 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2391 const struct x86_emulate_ops *ops = ctxt->ops;
2392 struct desc_struct cs, ss;
2393 u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
2397 /* inject #GP if in real mode or Virtual 8086 mode */
2398 if (ctxt->mode == X86EMUL_MODE_REAL ||
2399 ctxt->mode == X86EMUL_MODE_VM86)
2400 return emulate_gp(ctxt, 0);
2402 setup_syscalls_segments(ctxt, &cs, &ss);
2404 if ((ctxt->rex_prefix & 0x8) != 0x0)
2405 usermode = X86EMUL_MODE_PROT64;
2407 usermode = X86EMUL_MODE_PROT32;
2409 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2410 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2414 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (usermode) {
case X86EMUL_MODE_PROT32:
2417 cs_sel = (u16)(msr_data + 16);
2418 if ((msr_data & 0xfffc) == 0x0)
2419 return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
break;
2424 case X86EMUL_MODE_PROT64:
2425 cs_sel = (u16)(msr_data + 32);
2426 if (msr_data == 0x0)
2427 return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
2431 if (is_noncanonical_address(rcx) ||
2432 is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
2436 cs_sel |= SELECTOR_RPL_MASK;
2437 ss_sel |= SELECTOR_RPL_MASK;
2439 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2440 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2443 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2445 return X86EMUL_CONTINUE;
2448 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2451 if (ctxt->mode == X86EMUL_MODE_REAL)
2453 if (ctxt->mode == X86EMUL_MODE_VM86)
2455 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2456 return ctxt->ops->cpl(ctxt) > iopl;
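/*
 * Consult the I/O permission bitmap in the TSS: the 16-bit bitmap
 * offset lives at byte 102 of the TSS, and each I/O port is one bit in
 * the bitmap. Two bytes are read so that accesses spanning a byte
 * boundary are handled; any set bit in the covered range denies
 * access.
 */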
2459 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2462 const struct x86_emulate_ops *ops = ctxt->ops;
2463 struct desc_struct tr_seg;
2466 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2467 unsigned mask = (1 << len) - 1;
2470 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2473 if (desc_limit_scaled(&tr_seg) < 103)
2475 base = get_desc_base(&tr_seg);
2476 #ifdef CONFIG_X86_64
2477 base |= ((u64)base3) << 32;
2479 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2480 if (r != X86EMUL_CONTINUE)
2482 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2484 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2485 if (r != X86EMUL_CONTINUE)
2487 if ((perm >> bit_idx) & mask)
2492 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2498 if (emulator_bad_iopl(ctxt))
2499 if (!emulator_io_port_access_allowed(ctxt, port, len))
2502 ctxt->perm_ok = true;
2507 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2508 struct tss_segment_16 *tss)
2510 tss->ip = ctxt->_eip;
2511 tss->flag = ctxt->eflags;
2512 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2513 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2514 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2515 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2516 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2517 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2518 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2519 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2521 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2522 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2523 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2524 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2525 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2528 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2529 struct tss_segment_16 *tss)
2534 ctxt->_eip = tss->ip;
2535 ctxt->eflags = tss->flag | 2;
2536 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2537 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2538 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2539 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2540 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2541 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2542 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2543 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2546 * SDM says that segment selectors are loaded before segment descriptors.
2549 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2550 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2551 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2552 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2553 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2558 * Now load the segment descriptors. If a fault happens at this stage,
2559 * it is handled in the context of the new task.
2561 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2563 if (ret != X86EMUL_CONTINUE)
2565 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2567 if (ret != X86EMUL_CONTINUE)
2569 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2571 if (ret != X86EMUL_CONTINUE)
2573 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2575 if (ret != X86EMUL_CONTINUE)
2577 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2579 if (ret != X86EMUL_CONTINUE)
2582 return X86EMUL_CONTINUE;
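/*
 * 16-bit task switch: spill the outgoing register/segment state into
 * the old TSS, read the new TSS, chain the back link when the switch
 * nests (CALL/INT), then load the incoming state. Faults taken while
 * loading the new descriptors are delivered in the context of the new
 * task.
 */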
2585 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2586 u16 tss_selector, u16 old_tss_sel,
2587 ulong old_tss_base, struct desc_struct *new_desc)
2589 const struct x86_emulate_ops *ops = ctxt->ops;
2590 struct tss_segment_16 tss_seg;
2592 u32 new_tss_base = get_desc_base(new_desc);
2594 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2596 if (ret != X86EMUL_CONTINUE)
2597 /* FIXME: need to provide precise fault address */
2600 save_state_to_tss16(ctxt, &tss_seg);
2602 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2604 if (ret != X86EMUL_CONTINUE)
2605 /* FIXME: need to provide precise fault address */
2608 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2610 if (ret != X86EMUL_CONTINUE)
2611 /* FIXME: need to provide precise fault address */
2614 if (old_tss_sel != 0xffff) {
2615 tss_seg.prev_task_link = old_tss_sel;
2617 ret = ops->write_std(ctxt, new_tss_base,
2618 &tss_seg.prev_task_link,
2619 sizeof tss_seg.prev_task_link,
2621 if (ret != X86EMUL_CONTINUE)
2622 /* FIXME: need to provide precise fault address */
2626 return load_state_from_tss16(ctxt, &tss_seg);
2629 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2630 struct tss_segment_32 *tss)
2632 /* CR3 and the LDT selector are intentionally not saved */
2633 tss->eip = ctxt->_eip;
2634 tss->eflags = ctxt->eflags;
2635 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2636 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2637 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2638 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2639 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2640 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2641 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2642 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2644 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2645 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2646 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2647 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2648 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2649 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2652 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2653 struct tss_segment_32 *tss)
2658 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2659 return emulate_gp(ctxt, 0);
2660 ctxt->_eip = tss->eip;
2661 ctxt->eflags = tss->eflags | 2;
2663 /* General purpose registers */
2664 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2665 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2666 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2667 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2668 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2669 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2670 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2671 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2674 * SDM says that segment selectors are loaded before segment
2675 * descriptors. This is important because CPL checks will use CS.RPL.
2678 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2679 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2680 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2681 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2682 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2683 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2684 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2687 * If we're switching between Protected Mode and VM86, we need to make
2688 * sure to update the mode before loading the segment descriptors so
2689 * that the selectors are interpreted correctly.
2691 if (ctxt->eflags & X86_EFLAGS_VM) {
2692 ctxt->mode = X86EMUL_MODE_VM86;
2695 ctxt->mode = X86EMUL_MODE_PROT32;
2700 * Now load the segment descriptors. If a fault happens at this stage,
2701 * it is handled in the context of the new task.
2703 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2705 if (ret != X86EMUL_CONTINUE)
2707 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2709 if (ret != X86EMUL_CONTINUE)
2711 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2713 if (ret != X86EMUL_CONTINUE)
2715 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2717 if (ret != X86EMUL_CONTINUE)
2719 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2721 if (ret != X86EMUL_CONTINUE)
2723 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2725 if (ret != X86EMUL_CONTINUE)
2727 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2729 if (ret != X86EMUL_CONTINUE)
2732 return X86EMUL_CONTINUE;
2735 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2736 u16 tss_selector, u16 old_tss_sel,
2737 ulong old_tss_base, struct desc_struct *new_desc)
2739 const struct x86_emulate_ops *ops = ctxt->ops;
2740 struct tss_segment_32 tss_seg;
2742 u32 new_tss_base = get_desc_base(new_desc);
2743 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2744 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2746 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2748 if (ret != X86EMUL_CONTINUE)
2749 /* FIXME: need to provide precise fault address */
2752 save_state_to_tss32(ctxt, &tss_seg);
2754 /* Only GP registers and segment selectors are saved */
2755 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2756 ldt_sel_offset - eip_offset, &ctxt->exception);
2757 if (ret != X86EMUL_CONTINUE)
2758 /* FIXME: need to provide precise fault address */
2761 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2763 if (ret != X86EMUL_CONTINUE)
2764 /* FIXME: need to provide precise fault address */
2767 if (old_tss_sel != 0xffff) {
2768 tss_seg.prev_task_link = old_tss_sel;
2770 ret = ops->write_std(ctxt, new_tss_base,
2771 &tss_seg.prev_task_link,
2772 sizeof tss_seg.prev_task_link,
2774 if (ret != X86EMUL_CONTINUE)
2775 /* FIXME: need to provide precise fault address */
2779 return load_state_from_tss32(ctxt, &tss_seg);
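/*
 * Common task-switch driver: validates the new TSS descriptor,
 * performs the privilege checks required for the given switch reason,
 * saves outgoing state into the old TSS, loads incoming state from the
 * new one (16- or 32-bit format, chosen by the descriptor type), and
 * maintains the busy bits, the back link and EFLAGS.NT.
 */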
2782 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2783 u16 tss_selector, int idt_index, int reason,
2784 bool has_error_code, u32 error_code)
2786 const struct x86_emulate_ops *ops = ctxt->ops;
2787 struct desc_struct curr_tss_desc, next_tss_desc;
2789 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2790 ulong old_tss_base =
2791 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2795 /* FIXME: old_tss_base == ~0 ? */
2797 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2798 if (ret != X86EMUL_CONTINUE)
2800 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2801 if (ret != X86EMUL_CONTINUE)
2804 /* FIXME: check that next_tss_desc is tss */
2807 * Check privileges. The three cases are task switch caused by...
2809 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2810 * 2. Exception/IRQ/iret: No check is performed
2811 * 3. jmp/call to TSS: Check against DPL of the TSS
2813 if (reason == TASK_SWITCH_GATE) {
2814 if (idt_index != -1) {
2815 /* Software interrupts */
2816 struct desc_struct task_gate_desc;
2819 ret = read_interrupt_descriptor(ctxt, idt_index,
2821 if (ret != X86EMUL_CONTINUE)
2824 dpl = task_gate_desc.dpl;
2825 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2826 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2828 } else if (reason != TASK_SWITCH_IRET) {
2829 int dpl = next_tss_desc.dpl;
2830 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2831 return emulate_gp(ctxt, tss_selector);
2835 desc_limit = desc_limit_scaled(&next_tss_desc);
2836 if (!next_tss_desc.p ||
2837 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2838 desc_limit < 0x2b)) {
2839 return emulate_ts(ctxt, tss_selector & 0xfffc);
2842 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2843 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2844 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2847 if (reason == TASK_SWITCH_IRET)
2848 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2850 /* Set the back link to the previous task only if the NT bit is set in
2851 EFLAGS; note that old_tss_sel is not used after this point. */
2852 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2853 old_tss_sel = 0xffff;
2855 if (next_tss_desc.type & 8)
2856 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2857 old_tss_base, &next_tss_desc);
2859 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2860 old_tss_base, &next_tss_desc);
2861 if (ret != X86EMUL_CONTINUE)
2864 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2865 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2867 if (reason != TASK_SWITCH_IRET) {
2868 next_tss_desc.type |= (1 << 1); /* set busy flag */
2869 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2872 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2873 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2875 if (has_error_code) {
2876 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2877 ctxt->lock_prefix = 0;
2878 ctxt->src.val = (unsigned long) error_code;
2879 ret = em_push(ctxt);
2885 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2886 u16 tss_selector, int idt_index, int reason,
2887 bool has_error_code, u32 error_code)
2891 invalidate_registers(ctxt);
2892 ctxt->_eip = ctxt->eip;
2893 ctxt->dst.type = OP_NONE;
2895 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2896 has_error_code, error_code);
2898 if (rc == X86EMUL_CONTINUE) {
2899 ctxt->eip = ctxt->_eip;
2900 writeback_registers(ctxt);
2903 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2906 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2909 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2911 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2912 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
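/*
 * DAS adjusts AL after a packed-BCD subtraction (see the SDM
 * pseudo-code): if AF is set or the low nibble exceeds 9, subtract 6
 * from AL and update CF/AF; if the original AL exceeded 0x99 or CF was
 * set, subtract another 0x60 and set CF.
 */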
2915 static int em_das(struct x86_emulate_ctxt *ctxt)
2918 bool af, cf, old_cf;
2920 cf = ctxt->eflags & X86_EFLAGS_CF;
2926 af = ctxt->eflags & X86_EFLAGS_AF;
2927 if ((al & 0x0f) > 9 || af) {
2929 cf = old_cf | (al >= 250);
2934 if (old_al > 0x99 || old_cf) {
2940 /* Set PF, ZF, SF */
2941 ctxt->src.type = OP_IMM;
2943 ctxt->src.bytes = 1;
2944 fastop(ctxt, em_or);
2945 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2947 ctxt->eflags |= X86_EFLAGS_CF;
2949 ctxt->eflags |= X86_EFLAGS_AF;
2950 return X86EMUL_CONTINUE;
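/*
 * AAM: unsigned divide of AL by the immediate (10 by default),
 * quotient to AH, remainder to AL; a zero immediate raises #DE.
 */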
2953 static int em_aam(struct x86_emulate_ctxt *ctxt)
2957 if (ctxt->src.val == 0)
2958 return emulate_de(ctxt);
2960 al = ctxt->dst.val & 0xff;
2961 ah = al / ctxt->src.val;
2962 al %= ctxt->src.val;
2964 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2966 /* Set PF, ZF, SF */
2967 ctxt->src.type = OP_IMM;
2969 ctxt->src.bytes = 1;
2970 fastop(ctxt, em_or);
2972 return X86EMUL_CONTINUE;
2975 static int em_aad(struct x86_emulate_ctxt *ctxt)
2977 u8 al = ctxt->dst.val & 0xff;
2978 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2980 al = (al + (ah * ctxt->src.val)) & 0xff;
2982 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2984 /* Set PF, ZF, SF */
2985 ctxt->src.type = OP_IMM;
2987 ctxt->src.bytes = 1;
2988 fastop(ctxt, em_or);
2990 return X86EMUL_CONTINUE;
2993 static int em_call(struct x86_emulate_ctxt *ctxt)
2996 long rel = ctxt->src.val;
2998 ctxt->src.val = (unsigned long)ctxt->_eip;
2999 rc = jmp_rel(ctxt, rel);
3000 if (rc != X86EMUL_CONTINUE)
3002 return em_push(ctxt);
3005 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3010 struct desc_struct old_desc, new_desc;
3011 const struct x86_emulate_ops *ops = ctxt->ops;
3012 int cpl = ctxt->ops->cpl(ctxt);
3014 old_eip = ctxt->_eip;
3015 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3017 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3018 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3020 if (rc != X86EMUL_CONTINUE)
3021 return X86EMUL_CONTINUE;
3023 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3024 if (rc != X86EMUL_CONTINUE)
3027 ctxt->src.val = old_cs;
3029 if (rc != X86EMUL_CONTINUE)
3032 ctxt->src.val = old_eip;
3034 /* If we failed, we tainted the memory, but at the very least we should restore CS. */
3036 if (rc != X86EMUL_CONTINUE)
3040 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3045 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3050 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3051 if (rc != X86EMUL_CONTINUE)
3053 rc = assign_eip_near(ctxt, eip);
3054 if (rc != X86EMUL_CONTINUE)
3056 rsp_increment(ctxt, ctxt->src.val);
3057 return X86EMUL_CONTINUE;
3060 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3062 /* Write back the register source. */
3063 ctxt->src.val = ctxt->dst.val;
3064 write_register_operand(&ctxt->src);
3066 /* Write back the memory destination with implicit LOCK prefix. */
3067 ctxt->dst.val = ctxt->src.orig_val;
3068 ctxt->lock_prefix = 1;
3069 return X86EMUL_CONTINUE;
3072 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3074 ctxt->dst.val = ctxt->src2.val;
3075 return fastop(ctxt, em_imul);
3078 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3080 ctxt->dst.type = OP_REG;
3081 ctxt->dst.bytes = ctxt->src.bytes;
3082 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
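/*
 * Replicate the sign bit of the source into every bit of DX/EDX/RDX:
 * (val >> (bits - 1)) is 1 for a negative source, and ~(1 - 1) is
 * all-ones; for a non-negative source, ~(0 - 1) is zero.
 */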
3083 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3085 return X86EMUL_CONTINUE;
3088 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3092 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3093 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3094 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3095 return X86EMUL_CONTINUE;
3098 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3102 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3103 return emulate_gp(ctxt, 0);
3104 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3105 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3106 return X86EMUL_CONTINUE;
3109 static int em_mov(struct x86_emulate_ctxt *ctxt)
3111 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3112 return X86EMUL_CONTINUE;
3115 #define FFL(x) bit(X86_FEATURE_##x)
3117 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3119 u32 ebx, ecx, edx, eax = 1;
3123 * Check that MOVBE is set in the guest-visible CPUID leaf.
3125 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3126 if (!(ecx & FFL(MOVBE)))
3127 return emulate_ud(ctxt);
3129 switch (ctxt->op_bytes) {
3132 * From MOVBE definition: "...When the operand size is 16 bits,
3133 * the upper word of the destination register remains unchanged ..."
3136 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3137 * rules, so we have to do the operation almost by hand.
3139 tmp = (u16)ctxt->src.val;
3140 ctxt->dst.val &= ~0xffffUL;
3141 ctxt->dst.val |= (unsigned long)swab16(tmp);
3144 ctxt->dst.val = swab32((u32)ctxt->src.val);
3147 ctxt->dst.val = swab64(ctxt->src.val);
3152 return X86EMUL_CONTINUE;
3155 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3157 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3158 return emulate_gp(ctxt, 0);
3160 /* Disable writeback. */
3161 ctxt->dst.type = OP_NONE;
3162 return X86EMUL_CONTINUE;
3165 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3169 if (ctxt->mode == X86EMUL_MODE_PROT64)
3170 val = ctxt->src.val & ~0ULL;
3172 val = ctxt->src.val & ~0U;
3174 /* #UD condition is already handled. */
3175 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3176 return emulate_gp(ctxt, 0);
3178 /* Disable writeback. */
3179 ctxt->dst.type = OP_NONE;
3180 return X86EMUL_CONTINUE;
3183 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3187 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3188 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3189 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3190 return emulate_gp(ctxt, 0);
3192 return X86EMUL_CONTINUE;
3195 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3199 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3200 return emulate_gp(ctxt, 0);
3202 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3203 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3204 return X86EMUL_CONTINUE;
3207 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3209 if (ctxt->modrm_reg > VCPU_SREG_GS)
3210 return emulate_ud(ctxt);
3212 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3213 return X86EMUL_CONTINUE;
3216 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3218 u16 sel = ctxt->src.val;
3220 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3221 return emulate_ud(ctxt);
3223 if (ctxt->modrm_reg == VCPU_SREG_SS)
3224 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3226 /* Disable writeback. */
3227 ctxt->dst.type = OP_NONE;
3228 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3231 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3233 u16 sel = ctxt->src.val;
3235 /* Disable writeback. */
3236 ctxt->dst.type = OP_NONE;
3237 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3240 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3242 u16 sel = ctxt->src.val;
3244 /* Disable writeback. */
3245 ctxt->dst.type = OP_NONE;
3246 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3249 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3254 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3255 if (rc == X86EMUL_CONTINUE)
3256 ctxt->ops->invlpg(ctxt, linear);
3257 /* Disable writeback. */
3258 ctxt->dst.type = OP_NONE;
3259 return X86EMUL_CONTINUE;
3262 static int em_clts(struct x86_emulate_ctxt *ctxt)
3266 cr0 = ctxt->ops->get_cr(ctxt, 0);
3268 ctxt->ops->set_cr(ctxt, 0, cr0);
3269 return X86EMUL_CONTINUE;
3272 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3274 int rc = ctxt->ops->fix_hypercall(ctxt);
3276 if (rc != X86EMUL_CONTINUE)
3279 /* Let the processor re-execute the fixed hypercall */
3280 ctxt->_eip = ctxt->eip;
3281 /* Disable writeback. */
3282 ctxt->dst.type = OP_NONE;
3283 return X86EMUL_CONTINUE;
3286 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3287 void (*get)(struct x86_emulate_ctxt *ctxt,
3288 struct desc_ptr *ptr))
3290 struct desc_ptr desc_ptr;
3292 if (ctxt->mode == X86EMUL_MODE_PROT64)
3294 get(ctxt, &desc_ptr);
3295 if (ctxt->op_bytes == 2) {
3297 desc_ptr.address &= 0x00ffffff;
3299 /* Disable writeback. */
3300 ctxt->dst.type = OP_NONE;
3301 return segmented_write(ctxt, ctxt->dst.addr.mem,
3302 &desc_ptr, 2 + ctxt->op_bytes);
3305 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3307 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3310 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3312 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3315 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3317 struct desc_ptr desc_ptr;
3320 if (ctxt->mode == X86EMUL_MODE_PROT64)
3322 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3323 &desc_ptr.size, &desc_ptr.address,
3325 if (rc != X86EMUL_CONTINUE)
3327 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3328 /* Disable writeback. */
3329 ctxt->dst.type = OP_NONE;
3330 return X86EMUL_CONTINUE;
3333 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3337 rc = ctxt->ops->fix_hypercall(ctxt);
3339 /* Disable writeback. */
3340 ctxt->dst.type = OP_NONE;
3344 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3346 struct desc_ptr desc_ptr;
3349 if (ctxt->mode == X86EMUL_MODE_PROT64)
3351 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3352 &desc_ptr.size, &desc_ptr.address,
3354 if (rc != X86EMUL_CONTINUE)
3356 ctxt->ops->set_idt(ctxt, &desc_ptr);
3357 /* Disable writeback. */
3358 ctxt->dst.type = OP_NONE;
3359 return X86EMUL_CONTINUE;
3362 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3364 if (ctxt->dst.type == OP_MEM)
3365 ctxt->dst.bytes = 2;
3366 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3367 return X86EMUL_CONTINUE;
3370 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3372 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3373 | (ctxt->src.val & 0x0f));
3374 ctxt->dst.type = OP_NONE;
3375 return X86EMUL_CONTINUE;
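/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0): decrement *CX and branch while
 * it is non-zero; for the E/NE forms, XORing the opcode with 0x5 maps
 * it onto the matching ZF condition code for test_cc().
 */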
3378 static int em_loop(struct x86_emulate_ctxt *ctxt)
3380 int rc = X86EMUL_CONTINUE;
3382 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3383 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3384 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3385 rc = jmp_rel(ctxt, ctxt->src.val);
3390 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3392 int rc = X86EMUL_CONTINUE;
3394 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3395 rc = jmp_rel(ctxt, ctxt->src.val);
3400 static int em_in(struct x86_emulate_ctxt *ctxt)
3402 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3404 return X86EMUL_IO_NEEDED;
3406 return X86EMUL_CONTINUE;
3409 static int em_out(struct x86_emulate_ctxt *ctxt)
3411 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3413 /* Disable writeback. */
3414 ctxt->dst.type = OP_NONE;
3415 return X86EMUL_CONTINUE;
3418 static int em_cli(struct x86_emulate_ctxt *ctxt)
3420 if (emulator_bad_iopl(ctxt))
3421 return emulate_gp(ctxt, 0);
3423 ctxt->eflags &= ~X86_EFLAGS_IF;
3424 return X86EMUL_CONTINUE;
3427 static int em_sti(struct x86_emulate_ctxt *ctxt)
3429 if (emulator_bad_iopl(ctxt))
3430 return emulate_gp(ctxt, 0);
3432 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3433 ctxt->eflags |= X86_EFLAGS_IF;
3434 return X86EMUL_CONTINUE;
3437 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3439 u32 eax, ebx, ecx, edx;
3441 eax = reg_read(ctxt, VCPU_REGS_RAX);
3442 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3443 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3444 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3445 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3446 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3447 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3448 return X86EMUL_CONTINUE;
3451 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3455 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3456 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3458 ctxt->eflags &= ~0xffUL;
3459 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3460 return X86EMUL_CONTINUE;
3463 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3465 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3466 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3467 return X86EMUL_CONTINUE;
3470 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3472 switch (ctxt->op_bytes) {
3473 #ifdef CONFIG_X86_64
3475 asm("bswap %0" : "+r"(ctxt->dst.val));
3479 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3482 return X86EMUL_CONTINUE;
3485 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3487 /* emulating clflush regardless of cpuid */
3488 return X86EMUL_CONTINUE;
3491 static bool valid_cr(int nr)
3503 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3505 if (!valid_cr(ctxt->modrm_reg))
3506 return emulate_ud(ctxt);
3508 return X86EMUL_CONTINUE;
3511 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3513 u64 new_val = ctxt->src.val64;
3514 int cr = ctxt->modrm_reg;
3517 static u64 cr_reserved_bits[] = {
3518 0xffffffff00000000ULL,
3519 0, 0, 0, /* CR3 checked later */
3526 return emulate_ud(ctxt);
3528 if (new_val & cr_reserved_bits[cr])
3529 return emulate_gp(ctxt, 0);
3534 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3535 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3536 return emulate_gp(ctxt, 0);
3538 cr4 = ctxt->ops->get_cr(ctxt, 4);
3539 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3541 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3542 !(cr4 & X86_CR4_PAE))
3543 return emulate_gp(ctxt, 0);
3550 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3551 if (efer & EFER_LMA)
3552 rsvd = CR3_L_MODE_RESERVED_BITS;
3555 return emulate_gp(ctxt, 0);
3560 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3562 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3563 return emulate_gp(ctxt, 0);
3569 return X86EMUL_CONTINUE;
3572 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3576 ctxt->ops->get_dr(ctxt, 7, &dr7);
3578 /* Check if DR7.Global_Enable is set */
3579 return dr7 & (1 << 13);
3582 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3584 int dr = ctxt->modrm_reg;
3588 return emulate_ud(ctxt);
3590 cr4 = ctxt->ops->get_cr(ctxt, 4);
3591 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3592 return emulate_ud(ctxt);
3594 if (check_dr7_gd(ctxt))
3595 return emulate_db(ctxt);
3597 return X86EMUL_CONTINUE;
3600 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3602 u64 new_val = ctxt->src.val64;
3603 int dr = ctxt->modrm_reg;
3605 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3606 return emulate_gp(ctxt, 0);
3608 return check_dr_read(ctxt);
3611 static int check_svme(struct x86_emulate_ctxt *ctxt)
3615 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3617 if (!(efer & EFER_SVME))
3618 return emulate_ud(ctxt);
3620 return X86EMUL_CONTINUE;
3623 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3625 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3627 /* Valid physical address? */
3628 if (rax & 0xffff000000000000ULL)
3629 return emulate_gp(ctxt, 0);
3631 return check_svme(ctxt);
3634 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3636 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3638 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3639 return emulate_ud(ctxt);
3641 return X86EMUL_CONTINUE;
3644 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3646 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3647 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3649 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3650 ctxt->ops->check_pmc(ctxt, rcx))
3651 return emulate_gp(ctxt, 0);
3653 return X86EMUL_CONTINUE;
3656 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3658 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3659 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3660 return emulate_gp(ctxt, 0);
3662 return X86EMUL_CONTINUE;
3665 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3667 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3668 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3669 return emulate_gp(ctxt, 0);
3671 return X86EMUL_CONTINUE;
3674 #define D(_y) { .flags = (_y) }
3675 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3676 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3677 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3678 #define N D(NotImpl)
3679 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3680 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3681 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3682 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3683 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3684 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3685 #define II(_f, _e, _i) \
3686 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3687 #define IIP(_f, _e, _i, _p) \
3688 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3689 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3690 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3692 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3693 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3694 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3695 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3696 #define I2bvIP(_f, _e, _i, _p) \
3697 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3699 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3700 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3701 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
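/*
 * Shorthand used by the tables below: D() is a decode-only entry, I()
 * binds an emulation callback, F() a fastop callback, II()/DI() add an
 * intercept, and the *IP variants add a ->check_perm hook. The 2bv
 * helpers expand to a byte-sized and a word/long-sized variant, and
 * F6ALU expands to the six classic ALU encodings (r/m,reg; reg,r/m;
 * acc,imm; each in both widths).
 */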
3703 static const struct opcode group7_rm0[] = {
3705 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3709 static const struct opcode group7_rm1[] = {
3710 DI(SrcNone | Priv, monitor),
3711 DI(SrcNone | Priv, mwait),
3715 static const struct opcode group7_rm3[] = {
3716 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3717 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3718 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3719 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3720 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3721 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3722 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3723 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3726 static const struct opcode group7_rm7[] = {
3728 DIP(SrcNone, rdtscp, check_rdtsc),
3732 static const struct opcode group1[] = {
3734 F(Lock | PageTable, em_or),
3737 F(Lock | PageTable, em_and),
3743 static const struct opcode group1A[] = {
3744 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3747 static const struct opcode group2[] = {
3748 F(DstMem | ModRM, em_rol),
3749 F(DstMem | ModRM, em_ror),
3750 F(DstMem | ModRM, em_rcl),
3751 F(DstMem | ModRM, em_rcr),
3752 F(DstMem | ModRM, em_shl),
3753 F(DstMem | ModRM, em_shr),
3754 F(DstMem | ModRM, em_shl),
3755 F(DstMem | ModRM, em_sar),
3758 static const struct opcode group3[] = {
3759 F(DstMem | SrcImm | NoWrite, em_test),
3760 F(DstMem | SrcImm | NoWrite, em_test),
3761 F(DstMem | SrcNone | Lock, em_not),
3762 F(DstMem | SrcNone | Lock, em_neg),
3763 F(DstXacc | Src2Mem, em_mul_ex),
3764 F(DstXacc | Src2Mem, em_imul_ex),
3765 F(DstXacc | Src2Mem, em_div_ex),
3766 F(DstXacc | Src2Mem, em_idiv_ex),
3769 static const struct opcode group4[] = {
3770 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3771 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3775 static const struct opcode group5[] = {
3776 F(DstMem | SrcNone | Lock, em_inc),
3777 F(DstMem | SrcNone | Lock, em_dec),
3778 I(SrcMem | NearBranch, em_call_near_abs),
3779 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3780 I(SrcMem | NearBranch, em_jmp_abs),
3781 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3782 I(SrcMem | Stack, em_push), D(Undefined),
3785 static const struct opcode group6[] = {
3788 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3789 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3793 static const struct group_dual group7 = { {
3794 II(Mov | DstMem, em_sgdt, sgdt),
3795 II(Mov | DstMem, em_sidt, sidt),
3796 II(SrcMem | Priv, em_lgdt, lgdt),
3797 II(SrcMem | Priv, em_lidt, lidt),
3798 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3799 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3800 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3804 N, EXT(0, group7_rm3),
3805 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3806 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3810 static const struct opcode group8[] = {
3812 F(DstMem | SrcImmByte | NoWrite, em_bt),
3813 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3814 F(DstMem | SrcImmByte | Lock, em_btr),
3815 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3818 static const struct group_dual group9 = { {
3819 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3821 N, N, N, N, N, N, N, N,
3824 static const struct opcode group11[] = {
3825 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3829 static const struct gprefix pfx_0f_ae_7 = {
3830 I(SrcMem | ByteOp, em_clflush), N, N, N,
3833 static const struct group_dual group15 = { {
3834 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3836 N, N, N, N, N, N, N, N,
3839 static const struct gprefix pfx_0f_6f_0f_7f = {
3840 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3843 static const struct gprefix pfx_0f_2b = {
3844 I(0, em_mov), I(0, em_mov), N, N,
3847 static const struct gprefix pfx_0f_28_0f_29 = {
3848 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3851 static const struct gprefix pfx_0f_e7 = {
3852 N, I(Sse, em_mov), N, N,
3855 static const struct escape escape_d9 = { {
3856 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3859 N, N, N, N, N, N, N, N,
3861 N, N, N, N, N, N, N, N,
3863 N, N, N, N, N, N, N, N,
3865 N, N, N, N, N, N, N, N,
3867 N, N, N, N, N, N, N, N,
3869 N, N, N, N, N, N, N, N,
3871 N, N, N, N, N, N, N, N,
3873 N, N, N, N, N, N, N, N,
3876 static const struct escape escape_db = { {
3877 N, N, N, N, N, N, N, N,
3880 N, N, N, N, N, N, N, N,
3882 N, N, N, N, N, N, N, N,
3884 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3890 N, N, N, N, N, N, N, N,
3892 N, N, N, N, N, N, N, N,
3894 N, N, N, N, N, N, N, N,
3897 static const struct escape escape_dd = { {
3898 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3901 N, N, N, N, N, N, N, N,
3903 N, N, N, N, N, N, N, N,
3905 N, N, N, N, N, N, N, N,
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, N, N, N, N, N,
3911 N, N, N, N, N, N, N, N,
3913 N, N, N, N, N, N, N, N,
3915 N, N, N, N, N, N, N, N,
3918 static const struct opcode opcode_table[256] = {
3920 F6ALU(Lock, em_add),
3921 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3922 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3924 F6ALU(Lock | PageTable, em_or),
3925 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3928 F6ALU(Lock, em_adc),
3929 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3930 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3932 F6ALU(Lock, em_sbb),
3933 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3934 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3936 F6ALU(Lock | PageTable, em_and), N, N,
3938 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3940 F6ALU(Lock, em_xor), N, N,
3942 F6ALU(NoWrite, em_cmp), N, N,
3944 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3946 X8(I(SrcReg | Stack, em_push)),
3948 X8(I(DstReg | Stack, em_pop)),
3950 I(ImplicitOps | Stack | No64, em_pusha),
3951 I(ImplicitOps | Stack | No64, em_popa),
3952 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3955 I(SrcImm | Mov | Stack, em_push),
3956 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3957 I(SrcImmByte | Mov | Stack, em_push),
3958 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3959 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3960 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3962 X16(D(SrcImmByte | NearBranch)),
3964 G(ByteOp | DstMem | SrcImm, group1),
3965 G(DstMem | SrcImm, group1),
3966 G(ByteOp | DstMem | SrcImm | No64, group1),
3967 G(DstMem | SrcImmByte, group1),
3968 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3969 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3971 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3972 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3973 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3974 D(ModRM | SrcMem | NoAccess | DstReg),
3975 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3978 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3980 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3981 I(SrcImmFAddr | No64, em_call_far), N,
3982 II(ImplicitOps | Stack, em_pushf, pushf),
3983 II(ImplicitOps | Stack, em_popf, popf),
3984 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3986 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3987 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3988 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3989 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3991 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3992 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3993 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3994 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3996 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3998 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4000 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4001 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4002 I(ImplicitOps | NearBranch, em_ret),
4003 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4004 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4005 G(ByteOp, group11), G(0, group11),
4007 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4008 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4009 I(ImplicitOps | Stack, em_ret_far),
4010 D(ImplicitOps), DI(SrcImmByte, intn),
4011 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4013 G(Src2One | ByteOp, group2), G(Src2One, group2),
4014 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4015 I(DstAcc | SrcImmUByte | No64, em_aam),
4016 I(DstAcc | SrcImmUByte | No64, em_aad),
4017 F(DstAcc | ByteOp | No64, em_salc),
4018 I(DstAcc | SrcXLat | ByteOp, em_mov),
4020 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4022 X3(I(SrcImmByte | NearBranch, em_loop)),
4023 I(SrcImmByte | NearBranch, em_jcxz),
4024 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4025 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4027 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4028 I(SrcImmFAddr | No64, em_jmp_far),
4029 D(SrcImmByte | ImplicitOps | NearBranch),
4030 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4031 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4033 N, DI(ImplicitOps, icebp), N, N,
4034 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4035 G(ByteOp, group3), G(0, group3),
4037 D(ImplicitOps), D(ImplicitOps),
4038 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4039 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4042 static const struct opcode twobyte_table[256] = {
4044 G(0, group6), GD(0, &group7), N, N,
4045 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4046 II(ImplicitOps | Priv, em_clts, clts), N,
4047 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4048 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4050 N, N, N, N, N, N, N, N,
4051 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4052 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4054 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4055 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4056 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4058 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4061 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4062 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4063 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4066 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4067 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4068 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4069 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4070 I(ImplicitOps | EmulateOnUD, em_sysenter),
4071 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4073 N, N, N, N, N, N, N, N,
4075 X16(D(DstReg | SrcMem | ModRM)),
4077 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4082 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4087 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4089 X16(D(SrcImm | NearBranch)),
4091 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4093 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4094 II(ImplicitOps, em_cpuid, cpuid),
4095 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4096 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4097 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4099 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4100 DI(ImplicitOps, rsm),
4101 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4102 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4103 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4104 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4106 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4107 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4108 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4109 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4110 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4111 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4115 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4116 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4117 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4119 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4120 N, D(DstMem | SrcReg | ModRM | Mov),
4121 N, N, N, GD(0, &group9),
4123 X8(I(DstReg, em_bswap)),
4125 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4127 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4128 N, N, N, N, N, N, N, N,
4130 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4133 static const struct gprefix three_byte_0f_38_f0 = {
4134 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4137 static const struct gprefix three_byte_0f_38_f1 = {
4138 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4142 * Insns below are selected by the prefix, which is indexed by the third opcode byte.
4145 static const struct opcode opcode_map_0f_38[256] = {
4147 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4149 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4151 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4152 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4171 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4175 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4181 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4182 unsigned size, bool sign_extension)
4184 int rc = X86EMUL_CONTINUE;
4188 op->addr.mem.ea = ctxt->_eip;
4189 /* NB. Immediates are sign-extended as necessary. */
4190 switch (op->bytes) {
4192 op->val = insn_fetch(s8, ctxt);
4195 op->val = insn_fetch(s16, ctxt);
4198 op->val = insn_fetch(s32, ctxt);
4201 op->val = insn_fetch(s64, ctxt);
4204 if (!sign_extension) {
4205 switch (op->bytes) {
4213 op->val &= 0xffffffff;
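/*
 * Materialise a single operand from its 5-bit Op* descriptor (the
 * OpMask fields of ctxt->d): registers and accumulators are fetched
 * directly, string operands compute their SI/DI-based address, and
 * immediates are pulled from the instruction stream via decode_imm().
 */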
4221 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4224 int rc = X86EMUL_CONTINUE;
4228 decode_register_operand(ctxt, op);
4231 rc = decode_imm(ctxt, op, 1, false);
4234 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4238 if (ctxt->d & BitOp)
4239 fetch_bit_operand(ctxt);
4240 op->orig_val = op->val;
4243 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4247 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4248 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4249 fetch_register_operand(op);
4250 op->orig_val = op->val;
4254 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4255 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4256 fetch_register_operand(op);
4257 op->orig_val = op->val;
4260 if (ctxt->d & ByteOp) {
4265 op->bytes = ctxt->op_bytes;
4266 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4267 fetch_register_operand(op);
4268 op->orig_val = op->val;
4272 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4274 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4275 op->addr.mem.seg = VCPU_SREG_ES;
4282 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4283 fetch_register_operand(op);
4287 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4290 rc = decode_imm(ctxt, op, 1, true);
4297 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4300 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4303 ctxt->memop.bytes = 1;
4304 if (ctxt->memop.type == OP_REG) {
4305 ctxt->memop.addr.reg = decode_register(ctxt,
4306 ctxt->modrm_rm, true);
4307 fetch_register_operand(&ctxt->memop);
4311 ctxt->memop.bytes = 2;
4314 ctxt->memop.bytes = 4;
4317 rc = decode_imm(ctxt, op, 2, false);
4320 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4324 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4326 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4327 op->addr.mem.seg = ctxt->seg_override;
4333 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4335 register_address(ctxt,
4336 reg_read(ctxt, VCPU_REGS_RBX) +
4337 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4338 op->addr.mem.seg = ctxt->seg_override;
4343 op->addr.mem.ea = ctxt->_eip;
4344 op->bytes = ctxt->op_bytes + 2;
4345 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4348 ctxt->memop.bytes = ctxt->op_bytes + 2;
4351 op->val = VCPU_SREG_ES;
4354 op->val = VCPU_SREG_CS;
4357 op->val = VCPU_SREG_SS;
4360 op->val = VCPU_SREG_DS;
4363 op->val = VCPU_SREG_FS;
4366 op->val = VCPU_SREG_GS;
4369 /* Special instructions do their own operand decoding. */
4371 op->type = OP_NONE; /* Disable writeback. */
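/*
 * Main decode entry point: consume legacy/REX prefixes, look the
 * opcode up in the one-, two- or three-byte tables, resolve any
 * group/dual/prefix/escape indirection via ModRM or the mandatory
 * prefix, and finally decode the source, second-source and destination
 * operands described by the opcode flags.
 */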
4379 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4381 int rc = X86EMUL_CONTINUE;
4382 int mode = ctxt->mode;
4383 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4384 bool op_prefix = false;
4385 bool has_seg_override = false;
4386 struct opcode opcode;
4388 ctxt->memop.type = OP_NONE;
4389 ctxt->memopp = NULL;
4390 ctxt->_eip = ctxt->eip;
4391 ctxt->fetch.ptr = ctxt->fetch.data;
4392 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4393 ctxt->opcode_len = 1;
4395 memcpy(ctxt->fetch.data, insn, insn_len);
4397 rc = __do_insn_fetch_bytes(ctxt, 1);
4398 if (rc != X86EMUL_CONTINUE)
4403 case X86EMUL_MODE_REAL:
4404 case X86EMUL_MODE_VM86:
4405 case X86EMUL_MODE_PROT16:
4406 def_op_bytes = def_ad_bytes = 2;
4408 case X86EMUL_MODE_PROT32:
4409 def_op_bytes = def_ad_bytes = 4;
4411 #ifdef CONFIG_X86_64
4412 case X86EMUL_MODE_PROT64:
4418 return EMULATION_FAILED;
4421 ctxt->op_bytes = def_op_bytes;
4422 ctxt->ad_bytes = def_ad_bytes;
4424 /* Legacy prefixes. */
4426 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4427 case 0x66: /* operand-size override */
4429 /* switch between 2/4 bytes */
4430 ctxt->op_bytes = def_op_bytes ^ 6;
4432 case 0x67: /* address-size override */
4433 if (mode == X86EMUL_MODE_PROT64)
4434 /* switch between 4/8 bytes */
4435 ctxt->ad_bytes = def_ad_bytes ^ 12;
4437 /* switch between 2/4 bytes */
4438 ctxt->ad_bytes = def_ad_bytes ^ 6;
4440 case 0x26: /* ES override */
4441 case 0x2e: /* CS override */
4442 case 0x36: /* SS override */
4443 case 0x3e: /* DS override */
4444 has_seg_override = true;
4445 ctxt->seg_override = (ctxt->b >> 3) & 3;
4447 case 0x64: /* FS override */
4448 case 0x65: /* GS override */
4449 has_seg_override = true;
4450 ctxt->seg_override = ctxt->b & 7;
4452 case 0x40 ... 0x4f: /* REX */
4453 if (mode != X86EMUL_MODE_PROT64)
4455 ctxt->rex_prefix = ctxt->b;
4457 case 0xf0: /* LOCK */
4458 ctxt->lock_prefix = 1;
4460 case 0xf2: /* REPNE/REPNZ */
4461 case 0xf3: /* REP/REPE/REPZ */
4462 ctxt->rep_prefix = ctxt->b;
4468 /* Any legacy prefix after a REX prefix nullifies its effect. */
4470 ctxt->rex_prefix = 0;
4476 if (ctxt->rex_prefix & 8)
4477 ctxt->op_bytes = 8; /* REX.W */
4479 /* Opcode byte(s). */
4480 opcode = opcode_table[ctxt->b];
4481 /* Two-byte opcode? */
4482 if (ctxt->b == 0x0f) {
4483 ctxt->opcode_len = 2;
4484 ctxt->b = insn_fetch(u8, ctxt);
4485 opcode = twobyte_table[ctxt->b];
4487 /* 0F_38 opcode map */
4488 if (ctxt->b == 0x38) {
4489 ctxt->opcode_len = 3;
4490 ctxt->b = insn_fetch(u8, ctxt);
4491 opcode = opcode_map_0f_38[ctxt->b];
4494 ctxt->d = opcode.flags;
4496 if (ctxt->d & ModRM)
4497 ctxt->modrm = insn_fetch(u8, ctxt);
4499 /* VEX-prefixed instructions are not implemented */
4500 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4501 (mode == X86EMUL_MODE_PROT64 ||
4502 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4506 while (ctxt->d & GroupMask) {
4507 switch (ctxt->d & GroupMask) {
4509 goffset = (ctxt->modrm >> 3) & 7;
4510 opcode = opcode.u.group[goffset];
4513 goffset = (ctxt->modrm >> 3) & 7;
4514 if ((ctxt->modrm >> 6) == 3)
4515 opcode = opcode.u.gdual->mod3[goffset];
4517 opcode = opcode.u.gdual->mod012[goffset];
4520 goffset = ctxt->modrm & 7;
4521 opcode = opcode.u.group[goffset];
4524 if (ctxt->rep_prefix && op_prefix)
4525 return EMULATION_FAILED;
4526 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4527 switch (simd_prefix) {
4528 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4529 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4530 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4531 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4535 if (ctxt->modrm > 0xbf)
4536 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4538 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4541 return EMULATION_FAILED;
4544 ctxt->d &= ~(u64)GroupMask;
4545 ctxt->d |= opcode.flags;
4550 return EMULATION_FAILED;
4552 ctxt->execute = opcode.u.execute;
4554 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4555 return EMULATION_FAILED;
4557 if (unlikely(ctxt->d &
4558 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
4560 * These are copied unconditionally here, and checked unconditionally
4561 * in x86_emulate_insn.
4563 ctxt->check_perm = opcode.check_perm;
4564 ctxt->intercept = opcode.intercept;
4566 if (ctxt->d & NotImpl)
4567 return EMULATION_FAILED;
4569 if (mode == X86EMUL_MODE_PROT64) {
4570 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4572 else if (ctxt->d & NearBranch)
4576 if (ctxt->d & Op3264) {
4577 if (mode == X86EMUL_MODE_PROT64)
4584 ctxt->op_bytes = 16;
4585 else if (ctxt->d & Mmx)
4589 /* ModRM and SIB bytes. */
4590 if (ctxt->d & ModRM) {
4591 rc = decode_modrm(ctxt, &ctxt->memop);
4592 if (!has_seg_override) {
4593 has_seg_override = true;
4594 ctxt->seg_override = ctxt->modrm_seg;
4596 } else if (ctxt->d & MemAbs)
4597 rc = decode_abs(ctxt, &ctxt->memop);
4598 if (rc != X86EMUL_CONTINUE)
4601 if (!has_seg_override)
4602 ctxt->seg_override = VCPU_SREG_DS;
4604 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4607 * Decode and fetch the source operand: register, memory, or immediate.
4610 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4611 if (rc != X86EMUL_CONTINUE)
4615 * Decode and fetch the second source operand: register, memory, or immediate.
4618 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4619 if (rc != X86EMUL_CONTINUE)
4622 /* Decode and fetch the destination operand: register or memory. */
4623 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4625 if (ctxt->rip_relative)
4626 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4629 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4632 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4634 return ctxt->d & PageTable;
4637 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4639 /* The second termination condition applies only to REPE
4640 * and REPNE. Test whether the repeat string operation prefix is
4641 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
4642 * termination condition:
4643 * - if REPE/REPZ and ZF = 0 then done
4644 * - if REPNE/REPNZ and ZF = 1 then done
4646 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4647 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4648 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4649 ((ctxt->eflags & EFLG_ZF) == 0))
4650 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4651 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
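/*
 * Execute FWAIT with an exception fixup so that a pending x87
 * exception surfaces here, where it can be reflected into the guest as
 * #MF, instead of faulting later in the host.
 */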
4657 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4661 ctxt->ops->get_fpu(ctxt);
4662 asm volatile("1: fwait \n\t"
4664 ".pushsection .fixup,\"ax\" \n\t"
4666 "movb $1, %[fault] \n\t"
4669 _ASM_EXTABLE(1b, 3b)
4670 : [fault]"+qm"(fault));
4671 ctxt->ops->put_fpu(ctxt);
4673 if (unlikely(fault))
4674 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4676 return X86EMUL_CONTINUE;
4679 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4682 if (op->type == OP_MM)
4683 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
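/*
 * Dispatch to a fastop implementation. Fastop handlers use a bare
 * calling convention: dst in RAX, src in RDX, src2 in RCX, and flags
 * passed in and out around the call via pushf/popf. Each operand size
 * has its own stub, FASTOP_SIZE bytes apart, indexed by log2 of the
 * operand width; a NULL continuation signals a #DE (e.g. from a
 * division).
 */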
4686 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4688 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4689 if (!(ctxt->d & ByteOp))
4690 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4691 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4692 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4694 : "c"(ctxt->src2.val));
4695 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4696 if (!fop) /* exception is returned in fop variable */
4697 return emulate_de(ctxt);
4698 return X86EMUL_CONTINUE;
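/*
 * Reset the per-instruction decode state. The memset relies on
 * struct x86_emulate_ctxt laying out everything from rip_relative up
 * to (but not including) modrm contiguously.
 */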
4701 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4703 memset(&ctxt->rip_relative, 0,
4704 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4706 ctxt->io_read.pos = 0;
4707 ctxt->io_read.end = 0;
4708 ctxt->mem_read.end = 0;
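/*
 * Main execution entry point, run after decode: perform the generic
 * #UD/privilege/mode checks and guest-mode intercepts, read the memory
 * operands, invoke the handler (->execute or a fastop), then fall
 * through to writeback and the RIP/EFLAGS update.
 */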
4711 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4713 const struct x86_emulate_ops *ops = ctxt->ops;
4714 int rc = X86EMUL_CONTINUE;
4715 int saved_dst_type = ctxt->dst.type;
4717 ctxt->mem_read.pos = 0;
4719 /* LOCK prefix is allowed only with some instructions */
4720 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4721 rc = emulate_ud(ctxt);
4725 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4726 rc = emulate_ud(ctxt);
4730 if (unlikely(ctxt->d &
4731 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4732 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4733 (ctxt->d & Undefined)) {
4734 rc = emulate_ud(ctxt);
4738 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4739 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4740 rc = emulate_ud(ctxt);
4744 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4745 rc = emulate_nm(ctxt);
4749 if (ctxt->d & Mmx) {
4750 rc = flush_pending_x87_faults(ctxt);
4751 if (rc != X86EMUL_CONTINUE)
4754 * Now that we know the FPU is exception safe, we can fetch operands from it.
4757 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4758 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4759 if (!(ctxt->d & Mov))
4760 fetch_possible_mmx_operand(ctxt, &ctxt->dst);

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
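
	/*
	 * Decode-time checks passed; now bring in memory operands.
	 * Register operand values were already filled in during decode.
	 */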
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
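
	/*
	 * Most opcodes are handled through an ->execute callback or a
	 * fastop stub installed by the decode tables; the opcode switches
	 * below only cover the remaining special cases.
	 */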
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;	/* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;
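
	/*
	 * String-instruction post-processing: advance RSI/RDI according to
	 * the direction flag, decrement RCX by the number of elements
	 * processed, and decide whether to restart the instruction,
	 * re-enter the guest, or finish.
	 */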
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
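
	/*
	 * Fallback switch for two-byte (0x0f-prefixed) opcodes that are
	 * not routed through an ->execute callback by the decode tables.
	 */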
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
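
/*
 * Thin wrappers so callers outside the emulator can drop or flush the
 * GPR cache maintained inside struct x86_emulate_ctxt.
 */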
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}