1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
35 #define OpNone 0ull
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AL/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
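/*
 * Worked example of the operand-field packing above (illustrative only;
 * DstShift and SrcShift are defined just below): each operand slot in an
 * opcode's 64-bit decode flags is a 5-bit field, so for an entry built as
 * (OpReg << DstShift) | (OpMem << SrcShift) the types are recovered with
 *
 *	dst_op = (d >> DstShift) & OpMask;	yielding OpReg (2)
 *	src_op = (d >> SrcShift) & OpMask;	yielding OpMem (3)
 */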
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
82 #define DstShift 1
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
93 /* Source operand type. */
94 #define SrcShift 6
95 #define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
129 /* Destination is only written; never read. */
130 #define Mov (1<<20)
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
139 #define No64 (1<<28)
140 #define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
170 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
172 #define X2(x...) x, x
173 #define X3(x...) X2(x), x
174 #define X4(x...) X2(x), X2(x)
175 #define X5(x...) X4(x), x
176 #define X6(x...) X4(x), X2(x)
177 #define X7(x...) X4(x), X3(x)
178 #define X8(x...) X4(x), X4(x)
179 #define X16(x...) X8(x), X8(x)
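/*
 * Example of the repetition helpers above: X16(N) expands to sixteen
 * copies of N, which the opcode tables elsewhere in this file use to
 * fill runs of identical entries (e.g. the sixteen short-Jcc slots).
 */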
181 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
182 #define FASTOP_SIZE 8
185 * fastop functions have a special calling convention:
187 * dst: rax (in/out)
188 * src: rdx (in/out)
189 * src2: rcx (in)
190 * flags: rflags (in/out)
191 * ex: rsi (in:fastop pointer, out:zero if exception)
193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
194 * different operand sizes can be reached by calculation, rather than a jump
195 * table (which would be bigger than the code).
197 * fastop functions are declared as taking a never-defined fastop parameter,
198 * so they can't be called from C directly.
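/*
 * A sketch of the size dispatch (assuming the b/w/l/q emission order of
 * the FASTOP* macros below): the variant for an operand size of 'bytes'
 * lives at fop + ilog2(bytes) * FASTOP_SIZE, so e.g. for FASTOP2(add),
 * em_add + 0/1/2/3 * FASTOP_SIZE are the addb/addw/addl/addq stubs.
 */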
207 int (*execute)(struct x86_emulate_ctxt *ctxt);
208 const struct opcode *group;
209 const struct group_dual *gdual;
210 const struct gprefix *gprefix;
211 const struct escape *esc;
212 void (*fastop)(struct fastop *fake);
214 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
218 struct opcode mod012[8];
219 struct opcode mod3[8];
223 struct opcode pfx_no;
224 struct opcode pfx_66;
225 struct opcode pfx_f2;
226 struct opcode pfx_f3;
231 struct opcode high[64];
234 /* EFLAGS bit definitions. */
235 #define EFLG_ID (1<<21)
236 #define EFLG_VIP (1<<20)
237 #define EFLG_VIF (1<<19)
238 #define EFLG_AC (1<<18)
239 #define EFLG_VM (1<<17)
240 #define EFLG_RF (1<<16)
241 #define EFLG_IOPL (3<<12)
242 #define EFLG_NT (1<<14)
243 #define EFLG_OF (1<<11)
244 #define EFLG_DF (1<<10)
245 #define EFLG_IF (1<<9)
246 #define EFLG_TF (1<<8)
247 #define EFLG_SF (1<<7)
248 #define EFLG_ZF (1<<6)
249 #define EFLG_AF (1<<4)
250 #define EFLG_PF (1<<2)
251 #define EFLG_CF (1<<0)
253 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254 #define EFLG_RESERVED_ONE_MASK 2
256 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
258 if (!(ctxt->regs_valid & (1 << nr))) {
259 ctxt->regs_valid |= 1 << nr;
260 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
262 return ctxt->_regs[nr];
265 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->regs_dirty |= 1 << nr;
269 return &ctxt->_regs[nr];
272 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 return reg_write(ctxt, nr);
278 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
282 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
283 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
286 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
288 ctxt->regs_dirty = 0;
289 ctxt->regs_valid = 0;
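/*
 * Minimal usage sketch of the lazy GPR cache above; this helper is
 * hypothetical and only illustrates the read/write/flush protocol.
 */
static inline void emulator_rbx_incr_example(struct x86_emulate_ctxt *ctxt)
{
	ulong rbx = reg_read(ctxt, VCPU_REGS_RBX);	/* fills the cache via ->read_gpr() */
	*reg_write(ctxt, VCPU_REGS_RBX) = rbx + 1;	/* marks RBX dirty */
	writeback_registers(ctxt);			/* flushes dirty regs via ->write_gpr() */
}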
293 * These EFLAGS bits are restored from saved value during emulation, and
294 * any changes are written back to the saved value after emulation.
296 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
304 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
306 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
307 #define FOP_RET "ret \n\t"
309 #define FOP_START(op) \
310 extern void em_##op(struct fastop *fake); \
311 asm(".pushsection .text, \"ax\" \n\t" \
312 ".global em_" #op " \n\t" \
319 #define FOPNOP() FOP_ALIGN FOP_RET
321 #define FOP1E(op, dst) \
322 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
324 #define FOP1EEX(op, dst) \
325 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
327 #define FASTOP1(op) \
332 ON64(FOP1E(op##q, rax)) \
335 /* 1-operand, using src2 (for MUL/DIV r/m) */
336 #define FASTOP1SRC2(op, name) \
341 ON64(FOP1E(op, rcx)) \
344 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
345 #define FASTOP1SRC2EX(op, name) \
350 ON64(FOP1EEX(op, rcx)) \
353 #define FOP2E(op, dst, src) \
354 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
356 #define FASTOP2(op) \
358 FOP2E(op##b, al, dl) \
359 FOP2E(op##w, ax, dx) \
360 FOP2E(op##l, eax, edx) \
361 ON64(FOP2E(op##q, rax, rdx)) \
364 /* 2 operand, word only */
365 #define FASTOP2W(op) \
368 FOP2E(op##w, ax, dx) \
369 FOP2E(op##l, eax, edx) \
370 ON64(FOP2E(op##q, rax, rdx)) \
373 /* 2 operand, src is CL */
374 #define FASTOP2CL(op) \
376 FOP2E(op##b, al, cl) \
377 FOP2E(op##w, ax, cl) \
378 FOP2E(op##l, eax, cl) \
379 ON64(FOP2E(op##q, rax, cl)) \
382 #define FOP3E(op, dst, src, src2) \
383 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
385 /* 3-operand, word-only, src2=cl */
386 #define FASTOP3WCL(op) \
389 FOP3E(op##w, ax, dx, cl) \
390 FOP3E(op##l, eax, edx, cl) \
391 ON64(FOP3E(op##q, rax, rdx, cl)) \
394 /* Special case for SETcc - 1 instruction per cc */
395 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
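/*
 * The stubs emitted by FOP_SETCC are exactly 4 bytes apart (setcc %al is
 * 3 bytes plus a 1-byte ret), which is why test_cc() below can index them
 * as em_setcc + 4 * (condition & 0xf).
 */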
397 asm(".global kvm_fastop_exception \n"
398 "kvm_fastop_exception: xor %esi, %esi; ret");
419 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
422 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
423 enum x86_intercept intercept,
424 enum x86_intercept_stage stage)
426 struct x86_instruction_info info = {
427 .intercept = intercept,
428 .rep_prefix = ctxt->rep_prefix,
429 .modrm_mod = ctxt->modrm_mod,
430 .modrm_reg = ctxt->modrm_reg,
431 .modrm_rm = ctxt->modrm_rm,
432 .src_val = ctxt->src.val64,
433 .dst_val = ctxt->dst.val64,
434 .src_bytes = ctxt->src.bytes,
435 .dst_bytes = ctxt->dst.bytes,
436 .ad_bytes = ctxt->ad_bytes,
437 .next_rip = ctxt->eip,
440 return ctxt->ops->intercept(ctxt, &info, stage);
443 static void assign_masked(ulong *dest, ulong src, ulong mask)
445 *dest = (*dest & ~mask) | (src & mask);
448 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
450 return (1UL << (ctxt->ad_bytes << 3)) - 1;
453 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
456 struct desc_struct ss;
458 if (ctxt->mode == X86EMUL_MODE_PROT64)
459 return ~0UL;
460 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
461 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
464 static int stack_size(struct x86_emulate_ctxt *ctxt)
466 return (__fls(stack_mask(ctxt)) + 1) >> 3;
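/* e.g. for a 16-bit stack: __fls(0xffff) == 15, so (15 + 1) >> 3 == 2 bytes */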
469 /* Access/update address held in a register, based on addressing mode. */
470 static inline unsigned long
471 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
473 if (ctxt->ad_bytes == sizeof(unsigned long))
476 return reg & ad_mask(ctxt);
479 static inline unsigned long
480 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
482 return address_mask(ctxt, reg);
485 static void masked_increment(ulong *reg, ulong mask, int inc)
487 assign_masked(reg, *reg + inc, mask);
491 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
495 if (ctxt->ad_bytes == sizeof(unsigned long))
498 mask = ad_mask(ctxt);
499 masked_increment(reg, mask, inc);
502 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
507 static u32 desc_limit_scaled(struct desc_struct *desc)
509 u32 limit = get_desc_limit(desc);
511 return desc->g ? (limit << 12) | 0xfff : limit;
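/*
 * Worked example: with g == 1 a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB segment.
 */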
514 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
516 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
517 return 0;
519 return ctxt->ops->get_cached_segment_base(ctxt, seg);
522 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
523 u32 error, bool valid)
526 ctxt->exception.vector = vec;
527 ctxt->exception.error_code = error;
528 ctxt->exception.error_code_valid = valid;
529 return X86EMUL_PROPAGATE_FAULT;
532 static int emulate_db(struct x86_emulate_ctxt *ctxt)
534 return emulate_exception(ctxt, DB_VECTOR, 0, false);
537 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
539 return emulate_exception(ctxt, GP_VECTOR, err, true);
542 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
544 return emulate_exception(ctxt, SS_VECTOR, err, true);
547 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
549 return emulate_exception(ctxt, UD_VECTOR, 0, false);
552 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
554 return emulate_exception(ctxt, TS_VECTOR, err, true);
557 static int emulate_de(struct x86_emulate_ctxt *ctxt)
559 return emulate_exception(ctxt, DE_VECTOR, 0, false);
562 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
564 return emulate_exception(ctxt, NM_VECTOR, 0, false);
567 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
570 switch (ctxt->op_bytes) {
571 case 2:
572 ctxt->_eip = (u16)dst;
573 break;
574 case 4:
575 ctxt->_eip = (u32)dst;
576 break;
577 case 8:
578 if ((cs_l && is_noncanonical_address(dst)) ||
579 (!cs_l && (dst & ~(u32)-1)))
580 return emulate_gp(ctxt, 0);
581 ctxt->_eip = dst;
582 break;
583 default:
584 WARN(1, "unsupported eip assignment size\n");
585 }
586 return X86EMUL_CONTINUE;
589 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
591 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
594 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
596 return assign_eip_near(ctxt, ctxt->_eip + rel);
599 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
602 struct desc_struct desc;
604 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
608 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
613 struct desc_struct desc;
615 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
616 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
620 * x86 defines three classes of vector instructions: explicitly
621 * aligned, explicitly unaligned, and the rest, which change behaviour
622 * depending on whether they're AVX encoded or not.
624 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
625 * subject to the same check.
627 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
629 if (likely(size < 16))
630 return false;
632 if (ctxt->d & Aligned)
633 return true;
634 else if (ctxt->d & Unaligned)
635 return false;
636 else if (ctxt->d & Avx)
637 return false;
638 else
639 return true;
642 static int __linearize(struct x86_emulate_ctxt *ctxt,
643 struct segmented_address addr,
644 unsigned *max_size, unsigned size,
645 bool write, bool fetch,
648 struct desc_struct desc;
655 la = seg_base(ctxt, addr.seg) + addr.ea;
657 switch (ctxt->mode) {
658 case X86EMUL_MODE_PROT64:
659 if (((signed long)la << 16) >> 16 != la)
660 return emulate_gp(ctxt, 0);
662 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
663 if (size > *max_size)
667 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
671 /* code segment in protected mode or read-only data segment */
672 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
673 || !(desc.type & 2)) && write)
675 /* unreadable code segment */
676 if (!fetch && (desc.type & 8) && !(desc.type & 2))
678 lim = desc_limit_scaled(&desc);
679 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
680 (ctxt->d & NoBigReal)) {
681 /* la is between zero and 0xffff */
684 *max_size = 0x10000 - la;
685 } else if ((desc.type & 8) || !(desc.type & 4)) {
686 /* expand-up segment */
689 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
691 /* expand-down segment */
694 lim = desc.d ? 0xffffffff : 0xffff;
697 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
699 if (size > *max_size)
701 cpl = ctxt->ops->cpl(ctxt);
702 if (!(desc.type & 8)) {
706 } else if ((desc.type & 8) && !(desc.type & 4)) {
707 /* nonconforming code segment */
710 } else if ((desc.type & 8) && (desc.type & 4)) {
711 /* conforming code segment */
717 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
718 la &= (u32)-1;
719 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
720 return emulate_gp(ctxt, 0);
722 return X86EMUL_CONTINUE;
724 if (addr.seg == VCPU_SREG_SS)
725 return emulate_ss(ctxt, 0);
727 return emulate_gp(ctxt, 0);
730 static int linearize(struct x86_emulate_ctxt *ctxt,
731 struct segmented_address addr,
732 unsigned size, bool write,
736 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
740 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
741 struct segmented_address addr,
748 rc = linearize(ctxt, addr, size, false, &linear);
749 if (rc != X86EMUL_CONTINUE)
751 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
755 * Prefetch the remaining bytes of the instruction without crossing page
756 * boundary if they are not in fetch_cache yet.
758 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
761 unsigned size, max_size;
762 unsigned long linear;
763 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
764 struct segmented_address addr = { .seg = VCPU_SREG_CS,
765 .ea = ctxt->eip + cur_size };
768 * We do not know exactly how many bytes will be needed, and
769 * __linearize is expensive, so fetch as much as possible. We
770 * just have to avoid going beyond the 15 byte limit, the end
771 * of the segment, or the end of the page.
773 * __linearize is called with size 0 so that it does not do any
774 * boundary check itself. Instead, we use max_size to check against op_size.
777 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
778 if (unlikely(rc != X86EMUL_CONTINUE))
781 size = min_t(unsigned, 15UL ^ cur_size, max_size); /* 15 ^ cur_size == 15 - cur_size, since cur_size <= 15 */
782 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
785 * One instruction can only straddle two pages,
786 * and one page has already been loaded at the
787 * beginning of x86_decode_insn. So, if we still
788 * do not have enough bytes, we must have hit the 15-byte limit.
790 if (unlikely(size < op_size))
791 return emulate_gp(ctxt, 0);
793 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
794 size, &ctxt->exception);
795 if (unlikely(rc != X86EMUL_CONTINUE))
797 ctxt->fetch.end += size;
798 return X86EMUL_CONTINUE;
801 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
804 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
806 if (unlikely(done_size < size))
807 return __do_insn_fetch_bytes(ctxt, size - done_size);
809 return X86EMUL_CONTINUE;
812 /* Fetch next part of the instruction being emulated. */
813 #define insn_fetch(_type, _ctxt) \
816 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
817 if (rc != X86EMUL_CONTINUE) \
819 ctxt->_eip += sizeof(_type); \
820 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
821 ctxt->fetch.ptr += sizeof(_type); \
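/*
 * Usage sketch: inside a decode routine that has 'rc' and a 'done'
 * label in scope, e.g.
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *
 * fetches one byte and advances both _eip and the fetch pointer.
 */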
825 #define insn_fetch_arr(_arr, _size, _ctxt) \
827 rc = do_insn_fetch_bytes(_ctxt, _size); \
828 if (rc != X86EMUL_CONTINUE) \
830 ctxt->_eip += (_size); \
831 memcpy(_arr, ctxt->fetch.ptr, _size); \
832 ctxt->fetch.ptr += (_size); \
836 * Given the 'reg' portion of a ModRM byte, and a register block, return a
837 * pointer into the block that addresses the relevant register.
838 * The high-byte registers AH, CH, DH and BH are decoded only for byte operands when there is no REX prefix.
840 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
844 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
846 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
847 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
849 p = reg_rmw(ctxt, modrm_reg);
853 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
854 struct segmented_address addr,
855 u16 *size, unsigned long *address, int op_bytes)
862 rc = segmented_read_std(ctxt, addr, size, 2);
863 if (rc != X86EMUL_CONTINUE)
866 rc = segmented_read_std(ctxt, addr, address, op_bytes);
880 FASTOP1SRC2(mul, mul_ex);
881 FASTOP1SRC2(imul, imul_ex);
882 FASTOP1SRC2EX(div, div_ex);
883 FASTOP1SRC2EX(idiv, idiv_ex);
912 static u8 test_cc(unsigned int condition, unsigned long flags)
915 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
917 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
918 asm("push %[flags]; popf; call *%[fastop]"
919 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
923 static void fetch_register_operand(struct operand *op)
927 op->val = *(u8 *)op->addr.reg;
930 op->val = *(u16 *)op->addr.reg;
933 op->val = *(u32 *)op->addr.reg;
936 op->val = *(u64 *)op->addr.reg;
941 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
943 ctxt->ops->get_fpu(ctxt);
945 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
946 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
947 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
948 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
949 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
950 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
951 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
952 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
954 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
955 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
956 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
957 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
958 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
959 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
960 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
961 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
965 ctxt->ops->put_fpu(ctxt);
968 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
971 ctxt->ops->get_fpu(ctxt);
973 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
974 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
975 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
976 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
977 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
978 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
979 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
980 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
982 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
983 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
984 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
985 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
986 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
987 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
988 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
989 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
993 ctxt->ops->put_fpu(ctxt);
996 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
998 ctxt->ops->get_fpu(ctxt);
1000 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1001 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1002 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1003 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1004 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1005 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1006 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1007 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1010 ctxt->ops->put_fpu(ctxt);
1013 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1015 ctxt->ops->get_fpu(ctxt);
1017 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1018 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1019 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1020 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1021 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1022 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1023 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1024 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1027 ctxt->ops->put_fpu(ctxt);
1030 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1032 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1033 return emulate_nm(ctxt);
1035 ctxt->ops->get_fpu(ctxt);
1036 asm volatile("fninit");
1037 ctxt->ops->put_fpu(ctxt);
1038 return X86EMUL_CONTINUE;
1041 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1045 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1046 return emulate_nm(ctxt);
1048 ctxt->ops->get_fpu(ctxt);
1049 asm volatile("fnstcw %0": "+m"(fcw));
1050 ctxt->ops->put_fpu(ctxt);
1052 /* force 2 byte destination */
1053 ctxt->dst.bytes = 2;
1054 ctxt->dst.val = fcw;
1056 return X86EMUL_CONTINUE;
1059 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1063 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1064 return emulate_nm(ctxt);
1066 ctxt->ops->get_fpu(ctxt);
1067 asm volatile("fnstsw %0": "+m"(fsw));
1068 ctxt->ops->put_fpu(ctxt);
1070 /* force 2 byte destination */
1071 ctxt->dst.bytes = 2;
1072 ctxt->dst.val = fsw;
1074 return X86EMUL_CONTINUE;
1077 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1080 unsigned reg = ctxt->modrm_reg;
1082 if (!(ctxt->d & ModRM))
1083 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1085 if (ctxt->d & Sse) {
1089 read_sse_reg(ctxt, &op->vec_val, reg);
1092 if (ctxt->d & Mmx) {
1101 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1102 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1104 fetch_register_operand(op);
1105 op->orig_val = op->val;
1108 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1110 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1111 ctxt->modrm_seg = VCPU_SREG_SS;
1114 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1118 int index_reg, base_reg, scale;
1119 int rc = X86EMUL_CONTINUE;
1122 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1123 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1124 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1126 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1127 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1128 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1129 ctxt->modrm_seg = VCPU_SREG_DS;
1131 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1133 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1134 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1136 if (ctxt->d & Sse) {
1139 op->addr.xmm = ctxt->modrm_rm;
1140 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1143 if (ctxt->d & Mmx) {
1146 op->addr.mm = ctxt->modrm_rm & 7;
1149 fetch_register_operand(op);
1155 if (ctxt->ad_bytes == 2) {
1156 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1157 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1158 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1159 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1161 /* 16-bit ModR/M decode. */
1162 switch (ctxt->modrm_mod) {
1164 if (ctxt->modrm_rm == 6)
1165 modrm_ea += insn_fetch(u16, ctxt);
1168 modrm_ea += insn_fetch(s8, ctxt);
1171 modrm_ea += insn_fetch(u16, ctxt);
1174 switch (ctxt->modrm_rm) {
1176 modrm_ea += bx + si;
1179 modrm_ea += bx + di;
1182 modrm_ea += bp + si;
1185 modrm_ea += bp + di;
1194 if (ctxt->modrm_mod != 0)
1201 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1202 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1203 ctxt->modrm_seg = VCPU_SREG_SS;
1204 modrm_ea = (u16)modrm_ea;
1206 /* 32/64-bit ModR/M decode. */
1207 if ((ctxt->modrm_rm & 7) == 4) {
1208 sib = insn_fetch(u8, ctxt);
1209 index_reg |= (sib >> 3) & 7;
1210 base_reg |= sib & 7;
1213 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1214 modrm_ea += insn_fetch(s32, ctxt);
1216 modrm_ea += reg_read(ctxt, base_reg);
1217 adjust_modrm_seg(ctxt, base_reg);
1219 if (index_reg != 4)
1220 modrm_ea += reg_read(ctxt, index_reg) << scale;
1221 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1222 if (ctxt->mode == X86EMUL_MODE_PROT64)
1223 ctxt->rip_relative = 1;
1225 base_reg = ctxt->modrm_rm;
1226 modrm_ea += reg_read(ctxt, base_reg);
1227 adjust_modrm_seg(ctxt, base_reg);
1229 switch (ctxt->modrm_mod) {
1231 if (ctxt->modrm_rm == 5)
1232 modrm_ea += insn_fetch(s32, ctxt);
1235 modrm_ea += insn_fetch(s8, ctxt);
1238 modrm_ea += insn_fetch(s32, ctxt);
1242 op->addr.mem.ea = modrm_ea;
1243 if (ctxt->ad_bytes != 8)
1244 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1250 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1253 int rc = X86EMUL_CONTINUE;
1256 switch (ctxt->ad_bytes) {
1258 op->addr.mem.ea = insn_fetch(u16, ctxt);
1261 op->addr.mem.ea = insn_fetch(u32, ctxt);
1264 op->addr.mem.ea = insn_fetch(u64, ctxt);
1271 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1275 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1276 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1278 if (ctxt->src.bytes == 2)
1279 sv = (s16)ctxt->src.val & (s16)mask;
1280 else if (ctxt->src.bytes == 4)
1281 sv = (s32)ctxt->src.val & (s32)mask;
1283 sv = (s64)ctxt->src.val & (s64)mask;
1285 ctxt->dst.addr.mem.ea += (sv >> 3);
1288 /* only subword offset */
1289 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
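/*
 * Worked example for the adjustment above: a bit test with register
 * offset 100 on a 4-byte operand gives mask == ~31, sv == 96; the
 * effective address moves by 96 >> 3 == 12 bytes and the remaining
 * in-word bit offset becomes 100 & 31 == 4.
 */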
1292 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1293 unsigned long addr, void *dest, unsigned size)
1296 struct read_cache *mc = &ctxt->mem_read;
1298 if (mc->pos < mc->end)
1299 goto read_cached;
1301 WARN_ON((mc->end + size) >= sizeof(mc->data));
1303 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1305 if (rc != X86EMUL_CONTINUE)
1310 read_cached:
1311 memcpy(dest, mc->data + mc->pos, size);
1313 return X86EMUL_CONTINUE;
1316 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1317 struct segmented_address addr,
1324 rc = linearize(ctxt, addr, size, false, &linear);
1325 if (rc != X86EMUL_CONTINUE)
1327 return read_emulated(ctxt, linear, data, size);
1330 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1331 struct segmented_address addr,
1338 rc = linearize(ctxt, addr, size, true, &linear);
1339 if (rc != X86EMUL_CONTINUE)
1341 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1345 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1346 struct segmented_address addr,
1347 const void *orig_data, const void *data,
1353 rc = linearize(ctxt, addr, size, true, &linear);
1354 if (rc != X86EMUL_CONTINUE)
1356 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1357 size, &ctxt->exception);
1360 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1361 unsigned int size, unsigned short port,
1364 struct read_cache *rc = &ctxt->io_read;
1366 if (rc->pos == rc->end) { /* refill pio read ahead */
1367 unsigned int in_page, n;
1368 unsigned int count = ctxt->rep_prefix ?
1369 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1370 in_page = (ctxt->eflags & EFLG_DF) ?
1371 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1372 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1373 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1376 rc->pos = rc->end = 0;
1377 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1378 return 0;
1382 if (ctxt->rep_prefix && (ctxt->d & String) &&
1383 !(ctxt->eflags & EFLG_DF)) {
1384 ctxt->dst.data = rc->data + rc->pos;
1385 ctxt->dst.type = OP_MEM_STR;
1386 ctxt->dst.count = (rc->end - rc->pos) / size;
1389 memcpy(dest, rc->data + rc->pos, size);
1395 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1396 u16 index, struct desc_struct *desc)
1401 ctxt->ops->get_idt(ctxt, &dt);
1403 if (dt.size < index * 8 + 7)
1404 return emulate_gp(ctxt, index << 3 | 0x2);
1406 addr = dt.address + index * 8;
1407 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1411 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1412 u16 selector, struct desc_ptr *dt)
1414 const struct x86_emulate_ops *ops = ctxt->ops;
1417 if (selector & 1 << 2) {
1418 struct desc_struct desc;
1421 memset(dt, 0, sizeof(*dt));
1422 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1426 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1427 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1429 ops->get_gdt(ctxt, dt);
1432 /* allowed only for 8-byte segment descriptors */
1433 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1434 u16 selector, struct desc_struct *desc,
1438 u16 index = selector >> 3;
1441 get_descriptor_table_ptr(ctxt, selector, &dt);
1443 if (dt.size < index * 8 + 7)
1444 return emulate_gp(ctxt, selector & 0xfffc);
1446 *desc_addr_p = addr = dt.address + index * 8;
1447 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1451 /* allowed only for 8-byte segment descriptors */
1452 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1453 u16 selector, struct desc_struct *desc)
1456 u16 index = selector >> 3;
1459 get_descriptor_table_ptr(ctxt, selector, &dt);
1461 if (dt.size < index * 8 + 7)
1462 return emulate_gp(ctxt, selector & 0xfffc);
1464 addr = dt.address + index * 8;
1465 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1469 /* Does not support long mode */
1470 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1471 u16 selector, int seg, u8 cpl,
1472 bool in_task_switch,
1473 struct desc_struct *desc)
1475 struct desc_struct seg_desc, old_desc;
1477 unsigned err_vec = GP_VECTOR;
1479 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1485 memset(&seg_desc, 0, sizeof seg_desc);
1487 if (ctxt->mode == X86EMUL_MODE_REAL) {
1488 /* set real mode segment descriptor (keep limit etc. for unreal mode) */
1490 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1491 set_desc_base(&seg_desc, selector << 4);
1493 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1494 /* VM86 needs a clean new segment descriptor */
1495 set_desc_base(&seg_desc, selector << 4);
1496 set_desc_limit(&seg_desc, 0xffff);
1506 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1507 if ((seg == VCPU_SREG_CS
1508 || (seg == VCPU_SREG_SS
1509 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1510 || seg == VCPU_SREG_TR)
1511 && null_selector)
1512 goto exception;
1514 /* TR should be in GDT only */
1515 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1516 goto exception;
1518 if (null_selector) /* for NULL selector skip all following checks */
1519 goto load;
1521 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1522 if (ret != X86EMUL_CONTINUE)
1525 err_code = selector & 0xfffc;
1526 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1528 /* can't load system descriptor into segment selector */
1529 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1533 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1542 * segment is not a writable data segment, or the segment
1543 * selector's RPL != CPL, or the segment's DPL != CPL
1545 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1549 if (!(seg_desc.type & 8))
1552 if (seg_desc.type & 4) {
1558 if (rpl > cpl || dpl != cpl)
1561 /* in long-mode d/b must be clear if l is set */
1562 if (seg_desc.d && seg_desc.l) {
1565 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1566 if (efer & EFER_LMA)
1570 /* CS(RPL) <- CPL */
1571 selector = (selector & 0xfffc) | cpl;
1574 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1576 old_desc = seg_desc;
1577 seg_desc.type |= 2; /* busy */
1578 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1579 sizeof(seg_desc), &ctxt->exception);
1580 if (ret != X86EMUL_CONTINUE)
1583 case VCPU_SREG_LDTR:
1584 if (seg_desc.s || seg_desc.type != 2)
1587 default: /* DS, ES, FS, or GS */
1589 * segment is not a data or readable code segment or
1590 * ((segment is a data or nonconforming code segment)
1591 * and (both RPL and CPL > DPL))
1593 if ((seg_desc.type & 0xa) == 0x8 ||
1594 (((seg_desc.type & 0xc) != 0xc) &&
1595 (rpl > dpl && cpl > dpl)))
1601 /* mark segment as accessed */
1602 seg_desc.type |= 1;
1603 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1604 if (ret != X86EMUL_CONTINUE)
1606 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1607 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1608 sizeof(base3), &ctxt->exception);
1609 if (ret != X86EMUL_CONTINUE)
1613 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1616 return X86EMUL_CONTINUE;
1618 return emulate_exception(ctxt, err_vec, err_code, true);
1621 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1622 u16 selector, int seg)
1624 u8 cpl = ctxt->ops->cpl(ctxt);
1625 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1628 static void write_register_operand(struct operand *op)
1630 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1631 switch (op->bytes) {
1633 *(u8 *)op->addr.reg = (u8)op->val;
1636 *(u16 *)op->addr.reg = (u16)op->val;
1639 *op->addr.reg = (u32)op->val;
1640 break; /* 64b: zero-extend */
1642 *op->addr.reg = op->val;
1647 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1651 write_register_operand(op);
1654 if (ctxt->lock_prefix)
1655 return segmented_cmpxchg(ctxt,
1661 return segmented_write(ctxt,
1667 return segmented_write(ctxt,
1670 op->bytes * op->count);
1673 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1676 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1684 return X86EMUL_CONTINUE;
1687 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1689 struct segmented_address addr;
1691 rsp_increment(ctxt, -bytes);
1692 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1693 addr.seg = VCPU_SREG_SS;
1695 return segmented_write(ctxt, addr, data, bytes);
1698 static int em_push(struct x86_emulate_ctxt *ctxt)
1700 /* Disable writeback. */
1701 ctxt->dst.type = OP_NONE;
1702 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1705 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1706 void *dest, int len)
1709 struct segmented_address addr;
1711 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1712 addr.seg = VCPU_SREG_SS;
1713 rc = segmented_read(ctxt, addr, dest, len);
1714 if (rc != X86EMUL_CONTINUE)
1717 rsp_increment(ctxt, len);
1721 static int em_pop(struct x86_emulate_ctxt *ctxt)
1723 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1726 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1727 void *dest, int len)
1730 unsigned long val, change_mask;
1731 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1732 int cpl = ctxt->ops->cpl(ctxt);
1734 rc = emulate_pop(ctxt, &val, len);
1735 if (rc != X86EMUL_CONTINUE)
1738 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1739 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1741 switch (ctxt->mode) {
1742 case X86EMUL_MODE_PROT64:
1743 case X86EMUL_MODE_PROT32:
1744 case X86EMUL_MODE_PROT16:
1746 change_mask |= EFLG_IOPL;
1748 change_mask |= EFLG_IF;
1750 case X86EMUL_MODE_VM86:
1752 return emulate_gp(ctxt, 0);
1753 change_mask |= EFLG_IF;
1755 default: /* real mode */
1756 change_mask |= (EFLG_IOPL | EFLG_IF);
1760 *(unsigned long *)dest =
1761 (ctxt->eflags & ~change_mask) | (val & change_mask);
1766 static int em_popf(struct x86_emulate_ctxt *ctxt)
1768 ctxt->dst.type = OP_REG;
1769 ctxt->dst.addr.reg = &ctxt->eflags;
1770 ctxt->dst.bytes = ctxt->op_bytes;
1771 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1774 static int em_enter(struct x86_emulate_ctxt *ctxt)
1777 unsigned frame_size = ctxt->src.val;
1778 unsigned nesting_level = ctxt->src2.val & 31;
1782 return X86EMUL_UNHANDLEABLE;
1784 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1785 rc = push(ctxt, &rbp, stack_size(ctxt));
1786 if (rc != X86EMUL_CONTINUE)
1788 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1790 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1791 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1793 return X86EMUL_CONTINUE;
1796 static int em_leave(struct x86_emulate_ctxt *ctxt)
1798 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1800 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1803 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1805 int seg = ctxt->src2.val;
1807 ctxt->src.val = get_segment_selector(ctxt, seg);
1809 return em_push(ctxt);
1812 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1814 int seg = ctxt->src2.val;
1815 unsigned long selector;
1818 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1819 if (rc != X86EMUL_CONTINUE)
1822 if (ctxt->modrm_reg == VCPU_SREG_SS)
1823 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1825 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1829 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1831 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1832 int rc = X86EMUL_CONTINUE;
1833 int reg = VCPU_REGS_RAX;
1835 while (reg <= VCPU_REGS_RDI) {
1836 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1837 old_esp : reg_read(ctxt, reg);
1840 if (rc != X86EMUL_CONTINUE)
1849 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1851 ctxt->src.val = (unsigned long)ctxt->eflags;
1852 return em_push(ctxt);
1855 static int em_popa(struct x86_emulate_ctxt *ctxt)
1857 int rc = X86EMUL_CONTINUE;
1858 int reg = VCPU_REGS_RDI;
1860 while (reg >= VCPU_REGS_RAX) {
1861 if (reg == VCPU_REGS_RSP) {
1862 rsp_increment(ctxt, ctxt->op_bytes);
1866 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1867 if (rc != X86EMUL_CONTINUE)
1874 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1876 const struct x86_emulate_ops *ops = ctxt->ops;
1883 /* TODO: Add limit checks */
1884 ctxt->src.val = ctxt->eflags;
1886 if (rc != X86EMUL_CONTINUE)
1889 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1891 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1893 if (rc != X86EMUL_CONTINUE)
1896 ctxt->src.val = ctxt->_eip;
1898 if (rc != X86EMUL_CONTINUE)
1901 ops->get_idt(ctxt, &dt);
1903 eip_addr = dt.address + (irq << 2);
1904 cs_addr = dt.address + (irq << 2) + 2;
1906 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1907 if (rc != X86EMUL_CONTINUE)
1910 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1911 if (rc != X86EMUL_CONTINUE)
1914 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1915 if (rc != X86EMUL_CONTINUE)
1923 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1927 invalidate_registers(ctxt);
1928 rc = __emulate_int_real(ctxt, irq);
1929 if (rc == X86EMUL_CONTINUE)
1930 writeback_registers(ctxt);
1934 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1936 switch (ctxt->mode) {
1937 case X86EMUL_MODE_REAL:
1938 return __emulate_int_real(ctxt, irq);
1939 case X86EMUL_MODE_VM86:
1940 case X86EMUL_MODE_PROT16:
1941 case X86EMUL_MODE_PROT32:
1942 case X86EMUL_MODE_PROT64:
1944 /* Protected-mode interrupts are not implemented yet */
1945 return X86EMUL_UNHANDLEABLE;
1949 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1951 int rc = X86EMUL_CONTINUE;
1952 unsigned long temp_eip = 0;
1953 unsigned long temp_eflags = 0;
1954 unsigned long cs = 0;
1955 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1956 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1957 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1958 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1960 /* TODO: Add stack limit check */
1962 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1964 if (rc != X86EMUL_CONTINUE)
1967 if (temp_eip & ~0xffff)
1968 return emulate_gp(ctxt, 0);
1970 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1972 if (rc != X86EMUL_CONTINUE)
1975 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1977 if (rc != X86EMUL_CONTINUE)
1980 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1982 if (rc != X86EMUL_CONTINUE)
1985 ctxt->_eip = temp_eip;
1988 if (ctxt->op_bytes == 4)
1989 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1990 else if (ctxt->op_bytes == 2) {
1991 ctxt->eflags &= ~0xffff;
1992 ctxt->eflags |= temp_eflags;
1995 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1996 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2001 static int em_iret(struct x86_emulate_ctxt *ctxt)
2003 switch (ctxt->mode) {
2004 case X86EMUL_MODE_REAL:
2005 return emulate_iret_real(ctxt);
2006 case X86EMUL_MODE_VM86:
2007 case X86EMUL_MODE_PROT16:
2008 case X86EMUL_MODE_PROT32:
2009 case X86EMUL_MODE_PROT64:
2011 /* iret from protected mode is not implemented yet */
2012 return X86EMUL_UNHANDLEABLE;
2016 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2019 unsigned short sel, old_sel;
2020 struct desc_struct old_desc, new_desc;
2021 const struct x86_emulate_ops *ops = ctxt->ops;
2022 u8 cpl = ctxt->ops->cpl(ctxt);
2024 /* Assignment of RIP may only fail in 64-bit mode */
2025 if (ctxt->mode == X86EMUL_MODE_PROT64)
2026 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2029 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2031 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2033 if (rc != X86EMUL_CONTINUE)
2036 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2037 if (rc != X86EMUL_CONTINUE) {
2038 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2039 /* assigning eip failed; restore the old cs */
2040 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2046 static int em_grp45(struct x86_emulate_ctxt *ctxt)
2048 int rc = X86EMUL_CONTINUE;
2050 switch (ctxt->modrm_reg) {
2051 case 2: /* call near abs */ {
2053 old_eip = ctxt->_eip;
2054 rc = assign_eip_near(ctxt, ctxt->src.val);
2055 if (rc != X86EMUL_CONTINUE)
2057 ctxt->src.val = old_eip;
2061 case 4: /* jmp abs */
2062 rc = assign_eip_near(ctxt, ctxt->src.val);
2064 case 5: /* jmp far */
2065 rc = em_jmp_far(ctxt);
2074 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2076 u64 old = ctxt->dst.orig_val64;
2078 if (ctxt->dst.bytes == 16)
2079 return X86EMUL_UNHANDLEABLE;
2081 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2082 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2083 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2084 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2085 ctxt->eflags &= ~EFLG_ZF;
2087 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2088 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2090 ctxt->eflags |= EFLG_ZF;
2092 return X86EMUL_CONTINUE;
2095 static int em_ret(struct x86_emulate_ctxt *ctxt)
2100 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2101 if (rc != X86EMUL_CONTINUE)
2104 return assign_eip_near(ctxt, eip);
2107 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2110 unsigned long eip, cs;
2112 int cpl = ctxt->ops->cpl(ctxt);
2113 struct desc_struct old_desc, new_desc;
2114 const struct x86_emulate_ops *ops = ctxt->ops;
2116 if (ctxt->mode == X86EMUL_MODE_PROT64)
2117 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2120 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2121 if (rc != X86EMUL_CONTINUE)
2123 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2124 if (rc != X86EMUL_CONTINUE)
2126 /* Outer-privilege level return is not implemented */
2127 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2128 return X86EMUL_UNHANDLEABLE;
2129 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2131 if (rc != X86EMUL_CONTINUE)
2133 rc = assign_eip_far(ctxt, eip, new_desc.l);
2134 if (rc != X86EMUL_CONTINUE) {
2135 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2136 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2141 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2145 rc = em_ret_far(ctxt);
2146 if (rc != X86EMUL_CONTINUE)
2148 rsp_increment(ctxt, ctxt->src.val);
2149 return X86EMUL_CONTINUE;
2152 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2154 /* Save real source value, then compare EAX against destination. */
2155 ctxt->dst.orig_val = ctxt->dst.val;
2156 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2157 ctxt->src.orig_val = ctxt->src.val;
2158 ctxt->src.val = ctxt->dst.orig_val;
2159 fastop(ctxt, em_cmp);
2161 if (ctxt->eflags & EFLG_ZF) {
2162 /* Success: write back to memory. */
2163 ctxt->dst.val = ctxt->src.orig_val;
2165 /* Failure: write the value we saw to EAX. */
2166 ctxt->dst.type = OP_REG;
2167 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2168 ctxt->dst.val = ctxt->dst.orig_val;
2170 return X86EMUL_CONTINUE;
2173 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2175 int seg = ctxt->src2.val;
2179 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2181 rc = load_segment_descriptor(ctxt, sel, seg);
2182 if (rc != X86EMUL_CONTINUE)
2185 ctxt->dst.val = ctxt->src.val;
2190 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2191 struct desc_struct *cs, struct desc_struct *ss)
2193 cs->l = 0; /* will be adjusted later */
2194 set_desc_base(cs, 0); /* flat segment */
2195 cs->g = 1; /* 4kb granularity */
2196 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2197 cs->type = 0x0b; /* Read, Execute, Accessed */
2199 cs->dpl = 0; /* will be adjusted later */
2204 set_desc_base(ss, 0); /* flat segment */
2205 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2206 ss->g = 1; /* 4kb granularity */
2208 ss->type = 0x03; /* Read/Write, Accessed */
2209 ss->d = 1; /* 32bit stack segment */
2216 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2218 u32 eax, ebx, ecx, edx;
2221 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2222 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2223 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2224 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2227 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2229 const struct x86_emulate_ops *ops = ctxt->ops;
2230 u32 eax, ebx, ecx, edx;
2233 * syscall should always be enabled in long mode, so the check only
2234 * becomes vendor-specific (via cpuid) when other modes are active...
2236 if (ctxt->mode == X86EMUL_MODE_PROT64)
2241 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2243 * Intel ("GenuineIntel")
2244 * remark: Intel CPUs only support "syscall" in 64-bit long
2245 * mode. A 64-bit guest running a 32-bit compat app will
2246 * therefore #UD. While this behaviour could be emulated to
2247 * match the AMD response, AMD CPUs cannot be made to behave
2248 * like Intel.
2250 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2251 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2252 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2255 /* AMD ("AuthenticAMD") */
2256 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2257 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2258 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2261 /* AMD ("AMDisbetter!") */
2262 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2263 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2264 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2267 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2271 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2273 const struct x86_emulate_ops *ops = ctxt->ops;
2274 struct desc_struct cs, ss;
2279 /* syscall is not available in real mode */
2280 if (ctxt->mode == X86EMUL_MODE_REAL ||
2281 ctxt->mode == X86EMUL_MODE_VM86)
2282 return emulate_ud(ctxt);
2284 if (!em_syscall_is_enabled(ctxt))
2285 return emulate_ud(ctxt);
2287 ops->get_msr(ctxt, MSR_EFER, &efer);
2288 setup_syscalls_segments(ctxt, &cs, &ss);
2290 if (!(efer & EFER_SCE))
2291 return emulate_ud(ctxt);
2293 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2294 msr_data >>= 32;
2295 cs_sel = (u16)(msr_data & 0xfffc);
2296 ss_sel = (u16)(msr_data + 8);
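/*
 * Sketch of the MSR_STAR layout relied on above: after the >> 32,
 * msr_data holds the SYSCALL CS selector in its low 16 bits; SS is
 * defined architecturally as that selector + 8.
 */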
2298 if (efer & EFER_LMA) {
2302 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2303 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2305 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2306 if (efer & EFER_LMA) {
2307 #ifdef CONFIG_X86_64
2308 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2311 ctxt->mode == X86EMUL_MODE_PROT64 ?
2312 MSR_LSTAR : MSR_CSTAR, &msr_data);
2313 ctxt->_eip = msr_data;
2315 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2316 ctxt->eflags &= ~msr_data;
2320 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2321 ctxt->_eip = (u32)msr_data;
2323 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2326 return X86EMUL_CONTINUE;
2329 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2331 const struct x86_emulate_ops *ops = ctxt->ops;
2332 struct desc_struct cs, ss;
2337 ops->get_msr(ctxt, MSR_EFER, &efer);
2338 /* inject #GP if in real mode */
2339 if (ctxt->mode == X86EMUL_MODE_REAL)
2340 return emulate_gp(ctxt, 0);
2343 * Not recognized on AMD in compat mode (but is recognized in legacy mode).
2346 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2347 && !vendor_intel(ctxt))
2348 return emulate_ud(ctxt);
2350 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2351 * Therefore, we inject a #UD.
2353 if (ctxt->mode == X86EMUL_MODE_PROT64)
2354 return emulate_ud(ctxt);
2356 setup_syscalls_segments(ctxt, &cs, &ss);
2358 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2359 switch (ctxt->mode) {
2360 case X86EMUL_MODE_PROT32:
2361 if ((msr_data & 0xfffc) == 0x0)
2362 return emulate_gp(ctxt, 0);
2364 case X86EMUL_MODE_PROT64:
2365 if (msr_data == 0x0)
2366 return emulate_gp(ctxt, 0);
2372 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2373 cs_sel = (u16)msr_data;
2374 cs_sel &= ~SELECTOR_RPL_MASK;
2375 ss_sel = cs_sel + 8;
2376 ss_sel &= ~SELECTOR_RPL_MASK;
2377 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2382 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2383 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2385 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2386 ctxt->_eip = msr_data;
2388 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2389 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2391 return X86EMUL_CONTINUE;
2394 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2396 const struct x86_emulate_ops *ops = ctxt->ops;
2397 struct desc_struct cs, ss;
2398 u64 msr_data, rcx, rdx;
2400 u16 cs_sel = 0, ss_sel = 0;
2402 /* inject #GP if in real mode or Virtual 8086 mode */
2403 if (ctxt->mode == X86EMUL_MODE_REAL ||
2404 ctxt->mode == X86EMUL_MODE_VM86)
2405 return emulate_gp(ctxt, 0);
2407 setup_syscalls_segments(ctxt, &cs, &ss);
2409 if ((ctxt->rex_prefix & 0x8) != 0x0)
2410 usermode = X86EMUL_MODE_PROT64;
2412 usermode = X86EMUL_MODE_PROT32;
2414 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2415 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2419 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2420 switch (usermode) {
2421 case X86EMUL_MODE_PROT32:
2422 cs_sel = (u16)(msr_data + 16);
2423 if ((msr_data & 0xfffc) == 0x0)
2424 return emulate_gp(ctxt, 0);
2425 ss_sel = (u16)(msr_data + 24);
2427 case X86EMUL_MODE_PROT64:
2428 cs_sel = (u16)(msr_data + 32);
2429 if (msr_data == 0x0)
2430 return emulate_gp(ctxt, 0);
2431 ss_sel = cs_sel + 8;
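/*
 * The selector arithmetic above follows the SYSEXIT definition: from
 * the SYSENTER_CS base, 32-bit user CS/SS are +16/+24 and 64-bit user
 * CS/SS are +32/+40 (hence cs_sel + 8).
 */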
2434 if (is_noncanonical_address(rcx) ||
2435 is_noncanonical_address(rdx))
2436 return emulate_gp(ctxt, 0);
2439 cs_sel |= SELECTOR_RPL_MASK;
2440 ss_sel |= SELECTOR_RPL_MASK;
2442 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2443 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2446 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2448 return X86EMUL_CONTINUE;
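/*
 * Selector arithmetic above, spelled out (example MSR value assumed):
 * with IA32_SYSENTER_CS = 0x0010,
 *
 *	32-bit exit: cs_sel = 0x0010 + 16 = 0x0020, ss_sel = 0x0028
 *	64-bit exit: cs_sel = 0x0010 + 32 = 0x0030, ss_sel = 0x0038
 *
 * Both selectors then get SELECTOR_RPL_MASK ORed in, since SYSEXIT
 * always returns to RPL 3.
 */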
2451 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2454 if (ctxt->mode == X86EMUL_MODE_REAL)
2456 if (ctxt->mode == X86EMUL_MODE_VM86)
2458 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2459 return ctxt->ops->cpl(ctxt) > iopl;
2462 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2465 const struct x86_emulate_ops *ops = ctxt->ops;
2466 struct desc_struct tr_seg;
2469 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2470 unsigned mask = (1 << len) - 1;
2473 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2476 if (desc_limit_scaled(&tr_seg) < 103)
2478 base = get_desc_base(&tr_seg);
2479 #ifdef CONFIG_X86_64
2480 base |= ((u64)base3) << 32;
2482 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2483 if (r != X86EMUL_CONTINUE)
2485 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2487 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2488 if (r != X86EMUL_CONTINUE)
2490 if ((perm >> bit_idx) & mask)
2495 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2501 if (emulator_bad_iopl(ctxt))
2502 if (!emulator_io_port_access_allowed(ctxt, port, len))
2505 ctxt->perm_ok = true;
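/*
 * Worked example for the TSS I/O bitmap walk above (port and length
 * are illustrative): for a 1-byte access to port 0x3f8,
 *
 *	byte offset = 0x3f8 / 8 = 127	(added to io_bitmap_ptr)
 *	bit_idx     = 0x3f8 & 7 = 0
 *	mask        = (1 << 1) - 1 = 0x1
 *
 * The access is allowed only if all "len" bits starting at bit_idx are
 * clear in the 16 bits read from the bitmap.
 */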
2510 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2511 struct tss_segment_16 *tss)
2513 tss->ip = ctxt->_eip;
2514 tss->flag = ctxt->eflags;
2515 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2516 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2517 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2518 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2519 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2520 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2521 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2522 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2524 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2525 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2526 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2527 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2528 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2531 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2532 struct tss_segment_16 *tss)
2537 ctxt->_eip = tss->ip;
2538 ctxt->eflags = tss->flag | 2;
2539 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2540 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2541 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2542 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2543 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2544 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2545 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2546 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2549 * SDM says that segment selectors are loaded before segment
2550 * descriptors.
2552 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2553 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2554 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2555 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2556 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2561 * Now load segment descriptors. If a fault happens at this stage,
2562 * it is handled in the context of the new task.
2564 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2566 if (ret != X86EMUL_CONTINUE)
2568 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2570 if (ret != X86EMUL_CONTINUE)
2572 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2574 if (ret != X86EMUL_CONTINUE)
2576 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2578 if (ret != X86EMUL_CONTINUE)
2580 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2582 if (ret != X86EMUL_CONTINUE)
2585 return X86EMUL_CONTINUE;
2588 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2589 u16 tss_selector, u16 old_tss_sel,
2590 ulong old_tss_base, struct desc_struct *new_desc)
2592 const struct x86_emulate_ops *ops = ctxt->ops;
2593 struct tss_segment_16 tss_seg;
2595 u32 new_tss_base = get_desc_base(new_desc);
2597 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2599 if (ret != X86EMUL_CONTINUE)
2600 /* FIXME: need to provide precise fault address */
2603 save_state_to_tss16(ctxt, &tss_seg);
2605 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2607 if (ret != X86EMUL_CONTINUE)
2608 /* FIXME: need to provide precise fault address */
2611 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2613 if (ret != X86EMUL_CONTINUE)
2614 /* FIXME: need to provide precise fault address */
2617 if (old_tss_sel != 0xffff) {
2618 tss_seg.prev_task_link = old_tss_sel;
2620 ret = ops->write_std(ctxt, new_tss_base,
2621 &tss_seg.prev_task_link,
2622 sizeof tss_seg.prev_task_link,
2624 if (ret != X86EMUL_CONTINUE)
2625 /* FIXME: need to provide precise fault address */
2629 return load_state_from_tss16(ctxt, &tss_seg);
2632 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2633 struct tss_segment_32 *tss)
2635 /* CR3 and the LDT selector are intentionally not saved */
2636 tss->eip = ctxt->_eip;
2637 tss->eflags = ctxt->eflags;
2638 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2639 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2640 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2641 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2642 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2643 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2644 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2645 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2647 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2648 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2649 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2650 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2651 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2652 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2655 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2656 struct tss_segment_32 *tss)
2661 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2662 return emulate_gp(ctxt, 0);
2663 ctxt->_eip = tss->eip;
2664 ctxt->eflags = tss->eflags | 2;
2666 /* General purpose registers */
2667 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2668 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2669 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2670 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2671 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2672 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2673 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2674 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2677 * SDM says that segment selectors are loaded before segment
2678 * descriptors. This is important because CPL checks will
2679 * use CS.RPL.
2681 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2682 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2683 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2684 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2685 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2686 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2687 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2690 * If we're switching between Protected Mode and VM86, we need to make
2691 * sure to update the mode before loading the segment descriptors so
2692 * that the selectors are interpreted correctly.
2694 if (ctxt->eflags & X86_EFLAGS_VM) {
2695 ctxt->mode = X86EMUL_MODE_VM86;
2698 ctxt->mode = X86EMUL_MODE_PROT32;
2703 * Now load segment descriptors. If a fault happens at this stage,
2704 * it is handled in the context of the new task.
2706 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2708 if (ret != X86EMUL_CONTINUE)
2710 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2712 if (ret != X86EMUL_CONTINUE)
2714 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2716 if (ret != X86EMUL_CONTINUE)
2718 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2720 if (ret != X86EMUL_CONTINUE)
2722 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2724 if (ret != X86EMUL_CONTINUE)
2726 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2728 if (ret != X86EMUL_CONTINUE)
2730 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2732 if (ret != X86EMUL_CONTINUE)
2735 return X86EMUL_CONTINUE;
2738 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2739 u16 tss_selector, u16 old_tss_sel,
2740 ulong old_tss_base, struct desc_struct *new_desc)
2742 const struct x86_emulate_ops *ops = ctxt->ops;
2743 struct tss_segment_32 tss_seg;
2745 u32 new_tss_base = get_desc_base(new_desc);
2746 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2747 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2749 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2751 if (ret != X86EMUL_CONTINUE)
2752 /* FIXME: need to provide precise fault address */
2755 save_state_to_tss32(ctxt, &tss_seg);
2757 /* Only GP registers and segment selectors are saved */
2758 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2759 ldt_sel_offset - eip_offset, &ctxt->exception);
2760 if (ret != X86EMUL_CONTINUE)
2761 /* FIXME: need to provide precise fault address */
2764 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2766 if (ret != X86EMUL_CONTINUE)
2767 /* FIXME: need to provide precise fault address */
2770 if (old_tss_sel != 0xffff) {
2771 tss_seg.prev_task_link = old_tss_sel;
2773 ret = ops->write_std(ctxt, new_tss_base,
2774 &tss_seg.prev_task_link,
2775 sizeof tss_seg.prev_task_link,
2777 if (ret != X86EMUL_CONTINUE)
2778 /* FIXME: need to provide precise fault address */
2782 return load_state_from_tss32(ctxt, &tss_seg);
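/*
 * The partial write above covers exactly the dynamic part of the TSS.
 * Assuming struct tss_segment_32 mirrors the hardware 32-bit TSS (eip
 * at offset 0x20, ldt_selector at 0x60), the write spans
 * 0x60 - 0x20 = 0x40 bytes: EIP, EFLAGS, the eight GPRs and the six
 * segment selectors.  CR3 and the static fields are left untouched.
 */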
2785 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2786 u16 tss_selector, int idt_index, int reason,
2787 bool has_error_code, u32 error_code)
2789 const struct x86_emulate_ops *ops = ctxt->ops;
2790 struct desc_struct curr_tss_desc, next_tss_desc;
2792 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2793 ulong old_tss_base =
2794 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2798 /* FIXME: old_tss_base == ~0 ? */
2800 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2801 if (ret != X86EMUL_CONTINUE)
2803 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2804 if (ret != X86EMUL_CONTINUE)
2807 /* FIXME: check that next_tss_desc is tss */
2810 * Check privileges. The three cases are task switch caused by...
2812 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2813 * 2. Exception/IRQ/iret: No check is performed
2814 * 3. jmp/call to TSS: Check against DPL of the TSS
2816 if (reason == TASK_SWITCH_GATE) {
2817 if (idt_index != -1) {
2818 /* Software interrupts */
2819 struct desc_struct task_gate_desc;
2822 ret = read_interrupt_descriptor(ctxt, idt_index,
2824 if (ret != X86EMUL_CONTINUE)
2827 dpl = task_gate_desc.dpl;
2828 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2829 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2831 } else if (reason != TASK_SWITCH_IRET) {
2832 int dpl = next_tss_desc.dpl;
2833 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2834 return emulate_gp(ctxt, tss_selector);
2838 desc_limit = desc_limit_scaled(&next_tss_desc);
2839 if (!next_tss_desc.p ||
2840 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2841 desc_limit < 0x2b)) {
2842 return emulate_ts(ctxt, tss_selector & 0xfffc);
2845 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2846 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2847 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2850 if (reason == TASK_SWITCH_IRET)
2851 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2853 /* Set the back link to the previous task only if the NT bit is set in
2854 eflags; note that old_tss_sel is not used after this point. */
2855 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2856 old_tss_sel = 0xffff;
2858 if (next_tss_desc.type & 8)
2859 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2860 old_tss_base, &next_tss_desc);
2862 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2863 old_tss_base, &next_tss_desc);
2864 if (ret != X86EMUL_CONTINUE)
2867 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2868 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2870 if (reason != TASK_SWITCH_IRET) {
2871 next_tss_desc.type |= (1 << 1); /* set busy flag */
2872 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2875 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2876 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2878 if (has_error_code) {
2879 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2880 ctxt->lock_prefix = 0;
2881 ctxt->src.val = (unsigned long) error_code;
2882 ret = em_push(ctxt);
2888 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2889 u16 tss_selector, int idt_index, int reason,
2890 bool has_error_code, u32 error_code)
2894 invalidate_registers(ctxt);
2895 ctxt->_eip = ctxt->eip;
2896 ctxt->dst.type = OP_NONE;
2898 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2899 has_error_code, error_code);
2901 if (rc == X86EMUL_CONTINUE) {
2902 ctxt->eip = ctxt->_eip;
2903 writeback_registers(ctxt);
2906 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2909 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2912 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2914 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2915 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2918 static int em_das(struct x86_emulate_ctxt *ctxt)
2921 bool af, cf, old_cf;
2923 cf = ctxt->eflags & X86_EFLAGS_CF;
2929 af = ctxt->eflags & X86_EFLAGS_AF;
2930 if ((al & 0x0f) > 9 || af) {
2931 al -= 6;
2932 cf = old_cf | (al >= 250);
2937 if (old_al > 0x99 || old_cf) {
2943 /* Set PF, ZF, SF */
2944 ctxt->src.type = OP_IMM;
2946 ctxt->src.bytes = 1;
2947 fastop(ctxt, em_or);
2948 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2950 ctxt->eflags |= X86_EFLAGS_CF;
2952 ctxt->eflags |= X86_EFLAGS_AF;
2953 return X86EMUL_CONTINUE;
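/*
 * DAS recap (architectural behaviour; example values assumed): the
 * instruction decimal-adjusts AL after a BCD subtraction.  With
 * AL = 0x2b and AF set, the low-nibble fixup gives AL = 0x25; the
 * high-nibble fixup (AL -= 0x60, CF = 1) happens only when the
 * pre-adjust AL was above 0x99 or CF was already set.
 */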
2956 static int em_aam(struct x86_emulate_ctxt *ctxt)
2960 if (ctxt->src.val == 0)
2961 return emulate_de(ctxt);
2963 al = ctxt->dst.val & 0xff;
2964 ah = al / ctxt->src.val;
2965 al %= ctxt->src.val;
2967 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2969 /* Set PF, ZF, SF */
2970 ctxt->src.type = OP_IMM;
2972 ctxt->src.bytes = 1;
2973 fastop(ctxt, em_or);
2975 return X86EMUL_CONTINUE;
2978 static int em_aad(struct x86_emulate_ctxt *ctxt)
2980 u8 al = ctxt->dst.val & 0xff;
2981 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2983 al = (al + (ah * ctxt->src.val)) & 0xff;
2985 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2987 /* Set PF, ZF, SF */
2988 ctxt->src.type = OP_IMM;
2990 ctxt->src.bytes = 1;
2991 fastop(ctxt, em_or);
2993 return X86EMUL_CONTINUE;
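/*
 * AAM/AAD worked examples (base 10; the immediate can override it):
 *
 *	AAM: AL = 123        ->  AH = 123 / 10 = 12, AL = 123 % 10 = 3
 *	AAD: AH = 12, AL = 3 ->  AL = (3 + 12 * 10) & 0xff = 123, AH = 0
 *
 * AAM with a zero immediate raises #DE, handled at the top of em_aam().
 */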
2996 static int em_call(struct x86_emulate_ctxt *ctxt)
2999 long rel = ctxt->src.val;
3001 ctxt->src.val = (unsigned long)ctxt->_eip;
3002 rc = jmp_rel(ctxt, rel);
3003 if (rc != X86EMUL_CONTINUE)
3005 return em_push(ctxt);
3008 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3013 struct desc_struct old_desc, new_desc;
3014 const struct x86_emulate_ops *ops = ctxt->ops;
3015 int cpl = ctxt->ops->cpl(ctxt);
3017 old_eip = ctxt->_eip;
3018 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3020 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3021 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3023 if (rc != X86EMUL_CONTINUE)
3024 return X86EMUL_CONTINUE;
3026 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3027 if (rc != X86EMUL_CONTINUE)
3030 ctxt->src.val = old_cs;
3032 if (rc != X86EMUL_CONTINUE)
3035 ctxt->src.val = old_eip;
3037 /* If we failed, we tainted the memory, but at the very least we should
3038 restore cs */
3039 if (rc != X86EMUL_CONTINUE)
3043 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3048 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3053 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3054 if (rc != X86EMUL_CONTINUE)
3056 rc = assign_eip_near(ctxt, eip);
3057 if (rc != X86EMUL_CONTINUE)
3059 rsp_increment(ctxt, ctxt->src.val);
3060 return X86EMUL_CONTINUE;
3063 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3065 /* Write back the register source. */
3066 ctxt->src.val = ctxt->dst.val;
3067 write_register_operand(&ctxt->src);
3069 /* Write back the memory destination with implicit LOCK prefix. */
3070 ctxt->dst.val = ctxt->src.orig_val;
3071 ctxt->lock_prefix = 1;
3072 return X86EMUL_CONTINUE;
3075 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3077 ctxt->dst.val = ctxt->src2.val;
3078 return fastop(ctxt, em_imul);
3081 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3083 ctxt->dst.type = OP_REG;
3084 ctxt->dst.bytes = ctxt->src.bytes;
3085 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3086 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3088 return X86EMUL_CONTINUE;
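/*
 * The expression above broadcasts the sign bit of the source into rDX:
 * (src >> (bytes * 8 - 1)) is 0 or 1, subtracting 1 yields ~0UL or 0,
 * and the outer ~ flips that.  Example for 16-bit CWD: AX = 0x8000
 * gives DX = 0xffff; AX = 0x7fff gives DX = 0x0000.
 */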
3091 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3095 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3096 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3097 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3098 return X86EMUL_CONTINUE;
3101 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3105 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3106 return emulate_gp(ctxt, 0);
3107 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3108 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3109 return X86EMUL_CONTINUE;
3112 static int em_mov(struct x86_emulate_ctxt *ctxt)
3114 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3115 return X86EMUL_CONTINUE;
3118 #define FFL(x) bit(X86_FEATURE_##x)
3120 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3122 u32 ebx, ecx, edx, eax = 1;
3126 * Check MOVBE is set in the guest-visible CPUID leaf.
3128 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3129 if (!(ecx & FFL(MOVBE)))
3130 return emulate_ud(ctxt);
3132 switch (ctxt->op_bytes) {
3135 * From MOVBE definition: "...When the operand size is 16 bits,
3136 * the upper word of the destination register remains unchanged
3139 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3140 * rules, so we have to do the operation almost by hand.
3142 tmp = (u16)ctxt->src.val;
3143 ctxt->dst.val &= ~0xffffUL;
3144 ctxt->dst.val |= (unsigned long)swab16(tmp);
3147 ctxt->dst.val = swab32((u32)ctxt->src.val);
3150 ctxt->dst.val = swab64(ctxt->src.val);
3155 return X86EMUL_CONTINUE;
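/*
 * MOVBE examples (illustrative): swab32(0x11223344) = 0x44332211.  For
 * the 16-bit form only the low word is byte-swapped and the upper bits
 * of the destination are preserved, hence the masking above.
 */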
3158 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3160 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3161 return emulate_gp(ctxt, 0);
3163 /* Disable writeback. */
3164 ctxt->dst.type = OP_NONE;
3165 return X86EMUL_CONTINUE;
3168 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3172 if (ctxt->mode == X86EMUL_MODE_PROT64)
3173 val = ctxt->src.val & ~0ULL;
3175 val = ctxt->src.val & ~0U;
3177 /* #UD condition is already handled. */
3178 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3179 return emulate_gp(ctxt, 0);
3181 /* Disable writeback. */
3182 ctxt->dst.type = OP_NONE;
3183 return X86EMUL_CONTINUE;
3186 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3190 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3191 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3192 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3193 return emulate_gp(ctxt, 0);
3195 return X86EMUL_CONTINUE;
3198 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3202 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3203 return emulate_gp(ctxt, 0);
3205 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3206 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3207 return X86EMUL_CONTINUE;
3210 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3212 if (ctxt->modrm_reg > VCPU_SREG_GS)
3213 return emulate_ud(ctxt);
3215 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3216 return X86EMUL_CONTINUE;
3219 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3221 u16 sel = ctxt->src.val;
3223 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3224 return emulate_ud(ctxt);
3226 if (ctxt->modrm_reg == VCPU_SREG_SS)
3227 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3229 /* Disable writeback. */
3230 ctxt->dst.type = OP_NONE;
3231 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3234 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3236 u16 sel = ctxt->src.val;
3238 /* Disable writeback. */
3239 ctxt->dst.type = OP_NONE;
3240 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3243 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3245 u16 sel = ctxt->src.val;
3247 /* Disable writeback. */
3248 ctxt->dst.type = OP_NONE;
3249 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3252 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3257 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3258 if (rc == X86EMUL_CONTINUE)
3259 ctxt->ops->invlpg(ctxt, linear);
3260 /* Disable writeback. */
3261 ctxt->dst.type = OP_NONE;
3262 return X86EMUL_CONTINUE;
3265 static int em_clts(struct x86_emulate_ctxt *ctxt)
3269 cr0 = ctxt->ops->get_cr(ctxt, 0);
3271 ctxt->ops->set_cr(ctxt, 0, cr0);
3272 return X86EMUL_CONTINUE;
3275 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3277 int rc = ctxt->ops->fix_hypercall(ctxt);
3279 if (rc != X86EMUL_CONTINUE)
3282 /* Let the processor re-execute the fixed hypercall */
3283 ctxt->_eip = ctxt->eip;
3284 /* Disable writeback. */
3285 ctxt->dst.type = OP_NONE;
3286 return X86EMUL_CONTINUE;
3289 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3290 void (*get)(struct x86_emulate_ctxt *ctxt,
3291 struct desc_ptr *ptr))
3293 struct desc_ptr desc_ptr;
3295 if (ctxt->mode == X86EMUL_MODE_PROT64)
3297 get(ctxt, &desc_ptr);
3298 if (ctxt->op_bytes == 2) {
3300 desc_ptr.address &= 0x00ffffff;
3302 /* Disable writeback. */
3303 ctxt->dst.type = OP_NONE;
3304 return segmented_write(ctxt, ctxt->dst.addr.mem,
3305 &desc_ptr, 2 + ctxt->op_bytes);
3308 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3310 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3313 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3315 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3318 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3320 struct desc_ptr desc_ptr;
3323 if (ctxt->mode == X86EMUL_MODE_PROT64)
3325 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3326 &desc_ptr.size, &desc_ptr.address,
3328 if (rc != X86EMUL_CONTINUE)
3330 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3331 /* Disable writeback. */
3332 ctxt->dst.type = OP_NONE;
3333 return X86EMUL_CONTINUE;
3336 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3340 rc = ctxt->ops->fix_hypercall(ctxt);
3342 /* Disable writeback. */
3343 ctxt->dst.type = OP_NONE;
3347 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3349 struct desc_ptr desc_ptr;
3352 if (ctxt->mode == X86EMUL_MODE_PROT64)
3354 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3355 &desc_ptr.size, &desc_ptr.address,
3357 if (rc != X86EMUL_CONTINUE)
3359 ctxt->ops->set_idt(ctxt, &desc_ptr);
3360 /* Disable writeback. */
3361 ctxt->dst.type = OP_NONE;
3362 return X86EMUL_CONTINUE;
3365 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3367 if (ctxt->dst.type == OP_MEM)
3368 ctxt->dst.bytes = 2;
3369 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3370 return X86EMUL_CONTINUE;
3373 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3375 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3376 | (ctxt->src.val & 0x0f));
3377 ctxt->dst.type = OP_NONE;
3378 return X86EMUL_CONTINUE;
3381 static int em_loop(struct x86_emulate_ctxt *ctxt)
3383 int rc = X86EMUL_CONTINUE;
3385 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3386 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3387 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3388 rc = jmp_rel(ctxt, ctxt->src.val);
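/*
 * Opcode trick above (0xe0..0xe2 are LOOPNE/LOOPE/LOOP): XORing the
 * opcode with 0x5 maps 0xe0 to condition code 5 (ZF clear) and 0xe1 to
 * condition code 4 (ZF set), so test_cc() supplies the extra ZF-based
 * termination test; plain LOOP (0xe2) checks only rCX.
 */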
3393 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3395 int rc = X86EMUL_CONTINUE;
3397 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3398 rc = jmp_rel(ctxt, ctxt->src.val);
3403 static int em_in(struct x86_emulate_ctxt *ctxt)
3405 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3407 return X86EMUL_IO_NEEDED;
3409 return X86EMUL_CONTINUE;
3412 static int em_out(struct x86_emulate_ctxt *ctxt)
3414 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3416 /* Disable writeback. */
3417 ctxt->dst.type = OP_NONE;
3418 return X86EMUL_CONTINUE;
3421 static int em_cli(struct x86_emulate_ctxt *ctxt)
3423 if (emulator_bad_iopl(ctxt))
3424 return emulate_gp(ctxt, 0);
3426 ctxt->eflags &= ~X86_EFLAGS_IF;
3427 return X86EMUL_CONTINUE;
3430 static int em_sti(struct x86_emulate_ctxt *ctxt)
3432 if (emulator_bad_iopl(ctxt))
3433 return emulate_gp(ctxt, 0);
3435 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3436 ctxt->eflags |= X86_EFLAGS_IF;
3437 return X86EMUL_CONTINUE;
3440 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3442 u32 eax, ebx, ecx, edx;
3444 eax = reg_read(ctxt, VCPU_REGS_RAX);
3445 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3446 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3447 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3448 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3449 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3450 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3451 return X86EMUL_CONTINUE;
3454 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3458 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3459 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3461 ctxt->eflags &= ~0xffUL;
3462 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3463 return X86EMUL_CONTINUE;
3466 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3468 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3469 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3470 return X86EMUL_CONTINUE;
3473 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3475 switch (ctxt->op_bytes) {
3476 #ifdef CONFIG_X86_64
3478 asm("bswap %0" : "+r"(ctxt->dst.val));
3482 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3485 return X86EMUL_CONTINUE;
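/*
 * Example: BSWAP %eax with EAX = 0x12345678 leaves 0x78563412.  Only
 * 4- and 8-byte operands are handled above; 16-bit BSWAP is
 * architecturally undefined.
 */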
3488 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3490 /* emulate clflush even when CPUID does not advertise it */
3491 return X86EMUL_CONTINUE;
3494 static bool valid_cr(int nr)
3506 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3508 if (!valid_cr(ctxt->modrm_reg))
3509 return emulate_ud(ctxt);
3511 return X86EMUL_CONTINUE;
3514 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3516 u64 new_val = ctxt->src.val64;
3517 int cr = ctxt->modrm_reg;
3520 static u64 cr_reserved_bits[] = {
3521 0xffffffff00000000ULL,
3522 0, 0, 0, /* CR3 checked later */
3529 return emulate_ud(ctxt);
3531 if (new_val & cr_reserved_bits[cr])
3532 return emulate_gp(ctxt, 0);
3537 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3538 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3539 return emulate_gp(ctxt, 0);
3541 cr4 = ctxt->ops->get_cr(ctxt, 4);
3542 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3544 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3545 !(cr4 & X86_CR4_PAE))
3546 return emulate_gp(ctxt, 0);
3553 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3554 if (efer & EFER_LMA)
3555 rsvd = CR3_L_MODE_RESERVED_BITS;
3558 return emulate_gp(ctxt, 0);
3563 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3565 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3566 return emulate_gp(ctxt, 0);
3572 return X86EMUL_CONTINUE;
3575 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3579 ctxt->ops->get_dr(ctxt, 7, &dr7);
3581 /* Check if DR7.GD (general detect enable, bit 13) is set */
3582 return dr7 & (1 << 13);
3585 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3587 int dr = ctxt->modrm_reg;
3591 return emulate_ud(ctxt);
3593 cr4 = ctxt->ops->get_cr(ctxt, 4);
3594 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3595 return emulate_ud(ctxt);
3597 if (check_dr7_gd(ctxt))
3598 return emulate_db(ctxt);
3600 return X86EMUL_CONTINUE;
3603 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3605 u64 new_val = ctxt->src.val64;
3606 int dr = ctxt->modrm_reg;
3608 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3609 return emulate_gp(ctxt, 0);
3611 return check_dr_read(ctxt);
3614 static int check_svme(struct x86_emulate_ctxt *ctxt)
3618 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3620 if (!(efer & EFER_SVME))
3621 return emulate_ud(ctxt);
3623 return X86EMUL_CONTINUE;
3626 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3628 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3630 /* Valid physical address? */
3631 if (rax & 0xffff000000000000ULL)
3632 return emulate_gp(ctxt, 0);
3634 return check_svme(ctxt);
3637 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3639 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3641 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3642 return emulate_ud(ctxt);
3644 return X86EMUL_CONTINUE;
3647 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3649 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3650 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3652 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3653 ctxt->ops->check_pmc(ctxt, rcx))
3654 return emulate_gp(ctxt, 0);
3656 return X86EMUL_CONTINUE;
3659 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3661 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3662 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3663 return emulate_gp(ctxt, 0);
3665 return X86EMUL_CONTINUE;
3668 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3670 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3671 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3672 return emulate_gp(ctxt, 0);
3674 return X86EMUL_CONTINUE;
3677 #define D(_y) { .flags = (_y) }
3678 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3679 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3680 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3681 #define N D(NotImpl)
3682 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3683 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3684 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3685 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3686 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3687 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3688 #define II(_f, _e, _i) \
3689 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3690 #define IIP(_f, _e, _i, _p) \
3691 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3692 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3693 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3695 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3696 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3697 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3698 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3699 #define I2bvIP(_f, _e, _i, _p) \
3700 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3702 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3703 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3704 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
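/*
 * F6ALU expands to the six classic encodings of a two-operand ALU op.
 * Example: F6ALU(Lock, em_add) below covers opcodes 0x00/0x01 (r/m,r),
 * 0x02/0x03 (r,r/m) and 0x04/0x05 (AL/eAX,imm), with Lock dropped on
 * the forms that cannot legally take a LOCK prefix.
 */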
3706 static const struct opcode group7_rm0[] = {
3708 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3712 static const struct opcode group7_rm1[] = {
3713 DI(SrcNone | Priv, monitor),
3714 DI(SrcNone | Priv, mwait),
3718 static const struct opcode group7_rm3[] = {
3719 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3720 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3721 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3722 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3723 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3724 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3725 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3726 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3729 static const struct opcode group7_rm7[] = {
3731 DIP(SrcNone, rdtscp, check_rdtsc),
3735 static const struct opcode group1[] = {
3737 F(Lock | PageTable, em_or),
3740 F(Lock | PageTable, em_and),
3746 static const struct opcode group1A[] = {
3747 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3750 static const struct opcode group2[] = {
3751 F(DstMem | ModRM, em_rol),
3752 F(DstMem | ModRM, em_ror),
3753 F(DstMem | ModRM, em_rcl),
3754 F(DstMem | ModRM, em_rcr),
3755 F(DstMem | ModRM, em_shl),
3756 F(DstMem | ModRM, em_shr),
3757 F(DstMem | ModRM, em_shl), /* /6 is SAL, which aliases SHL */
3758 F(DstMem | ModRM, em_sar),
3761 static const struct opcode group3[] = {
3762 F(DstMem | SrcImm | NoWrite, em_test),
3763 F(DstMem | SrcImm | NoWrite, em_test),
3764 F(DstMem | SrcNone | Lock, em_not),
3765 F(DstMem | SrcNone | Lock, em_neg),
3766 F(DstXacc | Src2Mem, em_mul_ex),
3767 F(DstXacc | Src2Mem, em_imul_ex),
3768 F(DstXacc | Src2Mem, em_div_ex),
3769 F(DstXacc | Src2Mem, em_idiv_ex),
3772 static const struct opcode group4[] = {
3773 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3774 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3778 static const struct opcode group5[] = {
3779 F(DstMem | SrcNone | Lock, em_inc),
3780 F(DstMem | SrcNone | Lock, em_dec),
3781 I(SrcMem | Stack, em_grp45),
3782 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3783 I(SrcMem | Stack, em_grp45),
3784 I(SrcMemFAddr | ImplicitOps, em_grp45),
3785 I(SrcMem | Stack, em_grp45), D(Undefined),
3788 static const struct opcode group6[] = {
3791 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3792 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3796 static const struct group_dual group7 = { {
3797 II(Mov | DstMem, em_sgdt, sgdt),
3798 II(Mov | DstMem, em_sidt, sidt),
3799 II(SrcMem | Priv, em_lgdt, lgdt),
3800 II(SrcMem | Priv, em_lidt, lidt),
3801 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3802 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3803 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3807 N, EXT(0, group7_rm3),
3808 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3809 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3813 static const struct opcode group8[] = {
3815 F(DstMem | SrcImmByte | NoWrite, em_bt),
3816 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3817 F(DstMem | SrcImmByte | Lock, em_btr),
3818 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3821 static const struct group_dual group9 = { {
3822 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3824 N, N, N, N, N, N, N, N,
3827 static const struct opcode group11[] = {
3828 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3832 static const struct gprefix pfx_0f_ae_7 = {
3833 I(SrcMem | ByteOp, em_clflush), N, N, N,
3836 static const struct group_dual group15 = { {
3837 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3839 N, N, N, N, N, N, N, N,
3842 static const struct gprefix pfx_0f_6f_0f_7f = {
3843 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3846 static const struct gprefix pfx_0f_2b = {
3847 I(0, em_mov), I(0, em_mov), N, N,
3850 static const struct gprefix pfx_0f_28_0f_29 = {
3851 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3854 static const struct gprefix pfx_0f_e7 = {
3855 N, I(Sse, em_mov), N, N,
3858 static const struct escape escape_d9 = { {
3859 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3862 N, N, N, N, N, N, N, N,
3864 N, N, N, N, N, N, N, N,
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3872 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3876 N, N, N, N, N, N, N, N,
3879 static const struct escape escape_db = { {
3880 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3885 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3893 N, N, N, N, N, N, N, N,
3895 N, N, N, N, N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3900 static const struct escape escape_dd = { {
3901 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3904 N, N, N, N, N, N, N, N,
3906 N, N, N, N, N, N, N, N,
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3914 N, N, N, N, N, N, N, N,
3916 N, N, N, N, N, N, N, N,
3918 N, N, N, N, N, N, N, N,
3921 static const struct opcode opcode_table[256] = {
3923 F6ALU(Lock, em_add),
3924 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3925 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3927 F6ALU(Lock | PageTable, em_or),
3928 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3931 F6ALU(Lock, em_adc),
3932 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3933 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3935 F6ALU(Lock, em_sbb),
3936 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3937 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3939 F6ALU(Lock | PageTable, em_and), N, N,
3941 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3943 F6ALU(Lock, em_xor), N, N,
3945 F6ALU(NoWrite, em_cmp), N, N,
3947 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3949 X8(I(SrcReg | Stack, em_push)),
3951 X8(I(DstReg | Stack, em_pop)),
3953 I(ImplicitOps | Stack | No64, em_pusha),
3954 I(ImplicitOps | Stack | No64, em_popa),
3955 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3958 I(SrcImm | Mov | Stack, em_push),
3959 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3960 I(SrcImmByte | Mov | Stack, em_push),
3961 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3962 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3963 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3967 G(ByteOp | DstMem | SrcImm, group1),
3968 G(DstMem | SrcImm, group1),
3969 G(ByteOp | DstMem | SrcImm | No64, group1),
3970 G(DstMem | SrcImmByte, group1),
3971 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3972 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3974 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3975 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3976 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3977 D(ModRM | SrcMem | NoAccess | DstReg),
3978 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3981 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3983 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3984 I(SrcImmFAddr | No64, em_call_far), N,
3985 II(ImplicitOps | Stack, em_pushf, pushf),
3986 II(ImplicitOps | Stack, em_popf, popf),
3987 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3989 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3990 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3991 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3992 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3994 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3995 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3996 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3997 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3999 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4001 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4003 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4004 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
4005 I(ImplicitOps | Stack, em_ret),
4006 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4007 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4008 G(ByteOp, group11), G(0, group11),
4010 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4011 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4012 I(ImplicitOps | Stack, em_ret_far),
4013 D(ImplicitOps), DI(SrcImmByte, intn),
4014 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4016 G(Src2One | ByteOp, group2), G(Src2One, group2),
4017 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4018 I(DstAcc | SrcImmUByte | No64, em_aam),
4019 I(DstAcc | SrcImmUByte | No64, em_aad),
4020 F(DstAcc | ByteOp | No64, em_salc),
4021 I(DstAcc | SrcXLat | ByteOp, em_mov),
4023 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4025 X3(I(SrcImmByte, em_loop)),
4026 I(SrcImmByte, em_jcxz),
4027 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4028 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4030 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
4031 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
4032 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4033 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4035 N, DI(ImplicitOps, icebp), N, N,
4036 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4037 G(ByteOp, group3), G(0, group3),
4039 D(ImplicitOps), D(ImplicitOps),
4040 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4041 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4044 static const struct opcode twobyte_table[256] = {
4046 G(0, group6), GD(0, &group7), N, N,
4047 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4048 II(ImplicitOps | Priv, em_clts, clts), N,
4049 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4050 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4052 N, N, N, N, N, N, N, N,
4053 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4054 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4056 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4057 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4058 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4060 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4063 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4064 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4065 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4068 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4069 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4070 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4071 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4072 I(ImplicitOps | EmulateOnUD, em_sysenter),
4073 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4075 N, N, N, N, N, N, N, N,
4077 X16(D(DstReg | SrcMem | ModRM)),
4079 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4084 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4089 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4093 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4095 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4096 II(ImplicitOps, em_cpuid, cpuid),
4097 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4098 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4099 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4101 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4102 DI(ImplicitOps, rsm),
4103 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4104 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4105 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4106 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4108 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4109 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4110 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4111 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4112 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4113 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4117 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4118 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4119 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4121 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4122 N, D(DstMem | SrcReg | ModRM | Mov),
4123 N, N, N, GD(0, &group9),
4125 X8(I(DstReg, em_bswap)),
4127 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4129 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4130 N, N, N, N, N, N, N, N,
4132 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4135 static const struct gprefix three_byte_0f_38_f0 = {
4136 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4139 static const struct gprefix three_byte_0f_38_f1 = {
4140 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4144 * Insns below are selected by the prefix, which is indexed by the
4145 * third opcode byte.
4147 static const struct opcode opcode_map_0f_38[256] = {
4149 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4151 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4153 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4154 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4173 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4177 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4183 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4184 unsigned size, bool sign_extension)
4186 int rc = X86EMUL_CONTINUE;
4190 op->addr.mem.ea = ctxt->_eip;
4191 /* NB. Immediates are sign-extended as necessary. */
4192 switch (op->bytes) {
4194 op->val = insn_fetch(s8, ctxt);
4197 op->val = insn_fetch(s16, ctxt);
4200 op->val = insn_fetch(s32, ctxt);
4203 op->val = insn_fetch(s64, ctxt);
4206 if (!sign_extension) {
4207 switch (op->bytes) {
4215 op->val &= 0xffffffff;
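/*
 * Example of the fixup above: a one-byte immediate 0x80 is fetched as
 * s8 and therefore arrives sign-extended; when sign_extension is false
 * the switch re-masks op->val to its declared width (0xff for a byte
 * operand, 0xffffffff for a dword one).
 */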
4223 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4226 int rc = X86EMUL_CONTINUE;
4230 decode_register_operand(ctxt, op);
4233 rc = decode_imm(ctxt, op, 1, false);
4236 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4240 if (ctxt->d & BitOp)
4241 fetch_bit_operand(ctxt);
4242 op->orig_val = op->val;
4245 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4249 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4250 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4251 fetch_register_operand(op);
4252 op->orig_val = op->val;
4256 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4257 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4258 fetch_register_operand(op);
4259 op->orig_val = op->val;
4262 if (ctxt->d & ByteOp) {
4267 op->bytes = ctxt->op_bytes;
4268 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4269 fetch_register_operand(op);
4270 op->orig_val = op->val;
4274 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4276 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4277 op->addr.mem.seg = VCPU_SREG_ES;
4284 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4285 fetch_register_operand(op);
4289 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4292 rc = decode_imm(ctxt, op, 1, true);
4299 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4302 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4305 ctxt->memop.bytes = 1;
4306 if (ctxt->memop.type == OP_REG) {
4307 ctxt->memop.addr.reg = decode_register(ctxt,
4308 ctxt->modrm_rm, true);
4309 fetch_register_operand(&ctxt->memop);
4313 ctxt->memop.bytes = 2;
4316 ctxt->memop.bytes = 4;
4319 rc = decode_imm(ctxt, op, 2, false);
4322 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4326 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4328 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4329 op->addr.mem.seg = ctxt->seg_override;
4335 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4337 register_address(ctxt,
4338 reg_read(ctxt, VCPU_REGS_RBX) +
4339 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4340 op->addr.mem.seg = ctxt->seg_override;
4345 op->addr.mem.ea = ctxt->_eip;
4346 op->bytes = ctxt->op_bytes + 2;
4347 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4350 ctxt->memop.bytes = ctxt->op_bytes + 2;
4353 op->val = VCPU_SREG_ES;
4356 op->val = VCPU_SREG_CS;
4359 op->val = VCPU_SREG_SS;
4362 op->val = VCPU_SREG_DS;
4365 op->val = VCPU_SREG_FS;
4368 op->val = VCPU_SREG_GS;
4371 /* Special instructions do their own operand decoding. */
4373 op->type = OP_NONE; /* Disable writeback. */
4381 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4383 int rc = X86EMUL_CONTINUE;
4384 int mode = ctxt->mode;
4385 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4386 bool op_prefix = false;
4387 bool has_seg_override = false;
4388 struct opcode opcode;
4390 ctxt->memop.type = OP_NONE;
4391 ctxt->memopp = NULL;
4392 ctxt->_eip = ctxt->eip;
4393 ctxt->fetch.ptr = ctxt->fetch.data;
4394 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4395 ctxt->opcode_len = 1;
4397 memcpy(ctxt->fetch.data, insn, insn_len);
4399 rc = __do_insn_fetch_bytes(ctxt, 1);
4400 if (rc != X86EMUL_CONTINUE)
4405 case X86EMUL_MODE_REAL:
4406 case X86EMUL_MODE_VM86:
4407 case X86EMUL_MODE_PROT16:
4408 def_op_bytes = def_ad_bytes = 2;
4410 case X86EMUL_MODE_PROT32:
4411 def_op_bytes = def_ad_bytes = 4;
4413 #ifdef CONFIG_X86_64
4414 case X86EMUL_MODE_PROT64:
4420 return EMULATION_FAILED;
4423 ctxt->op_bytes = def_op_bytes;
4424 ctxt->ad_bytes = def_ad_bytes;
4426 /* Legacy prefixes. */
4428 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4429 case 0x66: /* operand-size override */
4431 /* switch between 2/4 bytes */
4432 ctxt->op_bytes = def_op_bytes ^ 6;
4434 case 0x67: /* address-size override */
4435 if (mode == X86EMUL_MODE_PROT64)
4436 /* switch between 4/8 bytes */
4437 ctxt->ad_bytes = def_ad_bytes ^ 12;
4439 /* switch between 2/4 bytes */
4440 ctxt->ad_bytes = def_ad_bytes ^ 6;
4442 case 0x26: /* ES override */
4443 case 0x2e: /* CS override */
4444 case 0x36: /* SS override */
4445 case 0x3e: /* DS override */
4446 has_seg_override = true;
4447 ctxt->seg_override = (ctxt->b >> 3) & 3;
4449 case 0x64: /* FS override */
4450 case 0x65: /* GS override */
4451 has_seg_override = true;
4452 ctxt->seg_override = ctxt->b & 7;
4454 case 0x40 ... 0x4f: /* REX */
4455 if (mode != X86EMUL_MODE_PROT64)
4457 ctxt->rex_prefix = ctxt->b;
4459 case 0xf0: /* LOCK */
4460 ctxt->lock_prefix = 1;
4462 case 0xf2: /* REPNE/REPNZ */
4463 case 0xf3: /* REP/REPE/REPZ */
4464 ctxt->rep_prefix = ctxt->b;
4470 /* Any legacy prefix after a REX prefix nullifies its effect. */
4472 ctxt->rex_prefix = 0;
4478 if (ctxt->rex_prefix & 8)
4479 ctxt->op_bytes = 8; /* REX.W */
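/*
 * REX layout reminder: 0100WRXB.  Prefix 0x48 sets REX.W and forces
 * op_bytes to 8 above; 0x41 sets only REX.B, which decode_modrm()
 * later folds into the r/m register number.
 */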
4481 /* Opcode byte(s). */
4482 opcode = opcode_table[ctxt->b];
4483 /* Two-byte opcode? */
4484 if (ctxt->b == 0x0f) {
4485 ctxt->opcode_len = 2;
4486 ctxt->b = insn_fetch(u8, ctxt);
4487 opcode = twobyte_table[ctxt->b];
4489 /* 0F_38 opcode map */
4490 if (ctxt->b == 0x38) {
4491 ctxt->opcode_len = 3;
4492 ctxt->b = insn_fetch(u8, ctxt);
4493 opcode = opcode_map_0f_38[ctxt->b];
4496 ctxt->d = opcode.flags;
4498 if (ctxt->d & ModRM)
4499 ctxt->modrm = insn_fetch(u8, ctxt);
4501 /* VEX-prefix instructions are not implemented */
4502 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4503 (mode == X86EMUL_MODE_PROT64 ||
4504 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4508 while (ctxt->d & GroupMask) {
4509 switch (ctxt->d & GroupMask) {
4511 goffset = (ctxt->modrm >> 3) & 7;
4512 opcode = opcode.u.group[goffset];
4515 goffset = (ctxt->modrm >> 3) & 7;
4516 if ((ctxt->modrm >> 6) == 3)
4517 opcode = opcode.u.gdual->mod3[goffset];
4519 opcode = opcode.u.gdual->mod012[goffset];
4522 goffset = ctxt->modrm & 7;
4523 opcode = opcode.u.group[goffset];
4526 if (ctxt->rep_prefix && op_prefix)
4527 return EMULATION_FAILED;
4528 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4529 switch (simd_prefix) {
4530 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4531 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4532 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4533 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4537 if (ctxt->modrm > 0xbf)
4538 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4540 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4543 return EMULATION_FAILED;
4546 ctxt->d &= ~(u64)GroupMask;
4547 ctxt->d |= opcode.flags;
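/*
 * Example of the group dispatch above: opcode 0xff is G(0, group5), so
 * a ModRM reg field of 2 selects group5[2] (near indirect CALL via
 * em_grp45), and that entry's flags are merged into ctxt->d.
 */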
4552 return EMULATION_FAILED;
4554 ctxt->execute = opcode.u.execute;
4556 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4557 return EMULATION_FAILED;
4559 if (unlikely(ctxt->d &
4560 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4562 * These are copied unconditionally here, and checked unconditionally
4563 * in x86_emulate_insn.
4565 ctxt->check_perm = opcode.check_perm;
4566 ctxt->intercept = opcode.intercept;
4568 if (ctxt->d & NotImpl)
4569 return EMULATION_FAILED;
4571 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4574 if (ctxt->d & Op3264) {
4575 if (mode == X86EMUL_MODE_PROT64)
4582 ctxt->op_bytes = 16;
4583 else if (ctxt->d & Mmx)
4587 /* ModRM and SIB bytes. */
4588 if (ctxt->d & ModRM) {
4589 rc = decode_modrm(ctxt, &ctxt->memop);
4590 if (!has_seg_override) {
4591 has_seg_override = true;
4592 ctxt->seg_override = ctxt->modrm_seg;
4594 } else if (ctxt->d & MemAbs)
4595 rc = decode_abs(ctxt, &ctxt->memop);
4596 if (rc != X86EMUL_CONTINUE)
4599 if (!has_seg_override)
4600 ctxt->seg_override = VCPU_SREG_DS;
4602 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4605 * Decode and fetch the source operand: register, memory
4608 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4609 if (rc != X86EMUL_CONTINUE)
4613 * Decode and fetch the second source operand: register, memory
4616 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4617 if (rc != X86EMUL_CONTINUE)
4620 /* Decode and fetch the destination operand: register or memory. */
4621 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4623 if (ctxt->rip_relative)
4624 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4627 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4630 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4632 return ctxt->d & PageTable;
4635 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4637 /* The second termination condition only applies to REPE
4638 * and REPNE. Test whether the repeat string operation prefix is
4639 * REPE/REPZ or REPNE/REPNZ and, if so, test the
4640 * corresponding termination condition:
4641 * - if REPE/REPZ and ZF = 0 then done
4642 * - if REPNE/REPNZ and ZF = 1 then done
4644 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4645 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4646 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4647 ((ctxt->eflags & EFLG_ZF) == 0))
4648 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4649 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
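/*
 * Example: REPE CMPSB (opcode 0xa6 under prefix 0xf3) terminates either
 * when rCX reaches zero (checked in x86_emulate_insn()) or, per the
 * rule above, as soon as a comparison clears ZF.
 */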
4655 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4659 ctxt->ops->get_fpu(ctxt);
4660 asm volatile("1: fwait \n\t"
4662 ".pushsection .fixup,\"ax\" \n\t"
4664 "movb $1, %[fault] \n\t"
4667 _ASM_EXTABLE(1b, 3b)
4668 : [fault]"+qm"(fault));
4669 ctxt->ops->put_fpu(ctxt);
4671 if (unlikely(fault))
4672 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4674 return X86EMUL_CONTINUE;
4677 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4680 if (op->type == OP_MM)
4681 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4684 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4686 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4687 if (!(ctxt->d & ByteOp))
4688 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4689 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4690 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4692 : "c"(ctxt->src2.val));
4693 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4694 if (!fop) /* exception is returned in fop variable */
4695 return emulate_de(ctxt);
4696 return X86EMUL_CONTINUE;
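/*
 * fastop calling convention, as inferred from the asm constraints
 * above: dst travels in rAX, src in rDX, src2 in rCX, and the flags
 * image is round-tripped through rDI by the push/popf pair.  Divide
 * thunks report #DE by zeroing the fop pointer itself via exception
 * fixup, which the !fop check above converts into emulate_de().
 */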
4699 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4701 memset(&ctxt->rip_relative, 0,
4702 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4704 ctxt->io_read.pos = 0;
4705 ctxt->io_read.end = 0;
4706 ctxt->mem_read.end = 0;
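/*
 * The memset above depends on struct x86_emulate_ctxt field ordering:
 * everything from rip_relative up to (but excluding) modrm is
 * per-instruction decode state, cleared in one stroke; the read caches
 * are reset separately because they sit outside that range.
 */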
4709 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4711 const struct x86_emulate_ops *ops = ctxt->ops;
4712 int rc = X86EMUL_CONTINUE;
4713 int saved_dst_type = ctxt->dst.type;
4715 ctxt->mem_read.pos = 0;
4717 /* LOCK prefix is allowed only with some instructions */
4718 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4719 rc = emulate_ud(ctxt);
4723 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4724 rc = emulate_ud(ctxt);
4728 if (unlikely(ctxt->d &
4729 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4730 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4731 (ctxt->d & Undefined)) {
4732 rc = emulate_ud(ctxt);
4736 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4737 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4738 rc = emulate_ud(ctxt);
4742 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4743 rc = emulate_nm(ctxt);
4747 if (ctxt->d & Mmx) {
4748 rc = flush_pending_x87_faults(ctxt);
4749 if (rc != X86EMUL_CONTINUE)
4752 * Now that we know the fpu is exception safe, we can fetch
4753 * operands.
4755 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4756 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4757 if (!(ctxt->d & Mov))
4758 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4761 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4762 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4763 X86_ICPT_PRE_EXCEPT);
4764 if (rc != X86EMUL_CONTINUE)
4768 /* Privileged instruction can be executed only in CPL=0 */
4769 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4770 if (ctxt->d & PrivUD)
4771 rc = emulate_ud(ctxt);
4773 rc = emulate_gp(ctxt, 0);
4777 /* Instruction can only be executed in protected mode */
4778 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4779 rc = emulate_ud(ctxt);
4783 /* Do instruction specific permission checks */
4784 if (ctxt->d & CheckPerm) {
4785 rc = ctxt->check_perm(ctxt);
4786 if (rc != X86EMUL_CONTINUE)
4790 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4791 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4792 X86_ICPT_POST_EXCEPT);
4793 if (rc != X86EMUL_CONTINUE)
4797 if (ctxt->rep_prefix && (ctxt->d & String)) {
4798 /* All REP prefixes have the same first termination condition */
4799 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4800 ctxt->eip = ctxt->_eip;
4801 ctxt->eflags &= ~EFLG_RF;
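
	/*
	 * A REP-prefixed string instruction whose (address-size masked)
	 * count register is zero architecturally does nothing, which is
	 * why the check above commits the updated rip and clears RF
	 * without fetching any operands.
	 */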

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
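
	/*
	 * RF is kept set while a REP string instruction is being iterated
	 * so that an instruction breakpoint does not re-trigger when
	 * emulation is interrupted and restarted; every other instruction
	 * clears RF, matching the architectural end-of-instruction
	 * behaviour.
	 */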

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
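
	/*
	 * Opcodes with an ->execute or Fastop handler never reach this
	 * point; the switch below covers only the remaining one-byte
	 * opcodes that are still open-coded here.
	 */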
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;  /* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;
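
	/*
	 * Only now, once every side effect has been written back, is the
	 * advanced _eip committed to ctxt->eip; any fault taken earlier
	 * jumps straight to done and leaves eip at the start of the
	 * instruction, as exception semantics require.
	 */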

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
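
	/*
	 * Result-code mapping for the caller: a propagated fault is
	 * recorded in ctxt->exception yet still reports EMULATION_OK (the
	 * caller is expected to inject it), an intercept unwinds to the
	 * nested hypervisor, and only X86EMUL_UNHANDLEABLE becomes
	 * EMULATION_FAILED.
	 */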

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
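
/*
 * These two wrappers let the caller (KVM proper) keep the emulator's
 * register cache coherent with the vcpu: the cache is invalidated
 * before guest registers may have changed behind the emulator's back,
 * and written back once the emulator has mutated them.
 */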