3 #include "kvm/interrupt.h"
8 #include <asm/bootparam.h>
10 #include <sys/ioctl.h>
24 * Compatibility code. Remove this when we move to tools/kvm.
26 #ifndef KVM_EXIT_INTERNAL_ERROR
27 # define KVM_EXIT_INTERNAL_ERROR 17
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/*
 * Symbolic names for KVM_EXIT_* codes, used for diagnostics.  The
 * designated initializers index each string by the exit-reason value
 * itself, so the table stays correct regardless of entry order.
 */
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
/*
 * Convert a real-mode segment:offset pair into a flat (linear)
 * address: flat = segment * 16 + offset.
 */
static inline uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
{
	uint32_t base = (uint32_t) selector << 4;

	return base + (uint32_t) offset;
}
58 static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
60 return self->ram_start + offset;
/*
 * Translate a real-mode segment:offset pair into a host pointer, by
 * flattening the pair first and then mapping into guest RAM.
 */
static inline void *guest_real_to_host(struct kvm *self, uint16_t selector, uint16_t offset)
{
	return guest_flat_to_host(self, segment_to_flat(selector, offset));
}
70 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
74 ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
81 static struct kvm *kvm__new(void)
83 struct kvm *self = calloc(1, sizeof *self);
91 void kvm__delete(struct kvm *self)
93 free(self->ram_start);
97 struct kvm *kvm__init(void)
99 struct kvm_userspace_memory_region mem;
107 self->sys_fd = open("/dev/kvm", O_RDWR);
108 if (self->sys_fd < 0)
111 ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
112 if (ret != KVM_API_VERSION)
113 die_perror("KVM_API_VERSION ioctl");
115 self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
117 die_perror("KVM_CREATE_VM ioctl");
119 if (!kvm__supports_extension(self, KVM_CAP_USER_MEMORY))
120 die("KVM_CAP_USER_MEMORY is not supported");
122 self->ram_size = 64UL * 1024UL * 1024UL;
124 page_size = sysconf(_SC_PAGESIZE);
125 if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
126 die("out of memory");
128 mem = (struct kvm_userspace_memory_region) {
130 .guest_phys_addr = 0x0UL,
131 .memory_size = self->ram_size,
132 .userspace_addr = (unsigned long) self->ram_start,
135 ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem, 1);
137 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
139 if (!kvm__supports_extension(self, KVM_CAP_SET_TSS_ADDR))
140 die("KVM_CAP_SET_TSS_ADDR is not supported");
142 ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
144 die_perror("KVM_SET_TSS_ADDR ioctl");
146 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
147 if (self->vcpu_fd < 0)
148 die_perror("KVM_CREATE_VCPU ioctl");
150 mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
152 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
154 self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
155 if (self->kvm_run == MAP_FAILED)
156 die("unable to mmap vcpu fd");
161 void kvm__enable_singlestep(struct kvm *self)
163 struct kvm_guest_debug debug = {
164 .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
167 if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
168 warning("KVM_SET_GUEST_DEBUG failed");
/* Real-mode segment:offset where the boot image is loaded, and the
 * initial stack pointer handed to it (selector 0x1000 => flat 0x10000). */
#define BOOT_LOADER_SELECTOR 0x1000
#define BOOT_LOADER_IP 0x0000
#define BOOT_LOADER_SP 0x8000
175 static int load_flat_binary(struct kvm *self, int fd)
180 if (lseek(fd, 0, SEEK_SET) < 0)
183 p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
185 while ((nr = read(fd, p, 65536)) > 0)
188 self->boot_selector = BOOT_LOADER_SELECTOR;
189 self->boot_ip = BOOT_LOADER_IP;
190 self->boot_sp = BOOT_LOADER_SP;
196 * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
#define BZ_KERNEL_START 0x100000UL	/* protected-mode kernel lands at 1 MB */
static const char *BZIMAGE_MAGIC = "HdrS";	/* boot-protocol magic in the setup header */
#define BZ_DEFAULT_SETUP_SECTS 4	/* boot protocol: setup_sects == 0 means 4 */
205 static bool load_bzimage(struct kvm *self, int fd, const char *kernel_cmdline)
207 struct real_intr_desc intr;
208 struct boot_params boot;
209 unsigned long setup_sects;
210 unsigned int intr_addr;
211 size_t cmdline_size, cmdline_offset;
217 * See Documentation/x86/boot.txt for details no bzImage on-disk and
221 if (lseek(fd, 0, SEEK_SET) < 0)
224 read(fd, &boot, sizeof(boot));
226 if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)) != 0)
229 if (boot.hdr.version < 0x0200) {
230 warning("Too old kernel");
234 if (lseek(fd, 0, SEEK_SET) < 0)
237 if (!boot.hdr.setup_sects)
238 boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
239 setup_sects = boot.hdr.setup_sects + 1;
241 setup_size = setup_sects << 9;
242 p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
244 if (read(fd, p, setup_size) != setup_size)
247 p = guest_flat_to_host(self, BZ_KERNEL_START);
249 while ((nr = read(fd, p, 65536)) > 0)
252 if (boot.hdr.version < 0x0202 || !(boot.hdr.loadflags & 0x01))
253 cmdline_offset = (0x9ff0 - cmdline_size) & ~15;
255 cmdline_offset = 0x10000;
257 if (boot.hdr.version < 0x0206)
258 boot.hdr.cmdline_size = 256;
260 if (kernel_cmdline) {
261 cmdline_size = strlen(kernel_cmdline) + 1;
262 if (cmdline_size > boot.hdr.cmdline_size)
263 cmdline_size = boot.hdr.cmdline_size;
265 p = guest_flat_to_host(self, cmdline_offset);
266 memset(p, 0, cmdline_size);
267 strcpy(p, kernel_cmdline);
272 if (boot.hdr.version < 0x0202 || !(boot.hdr.loadflags & 0x01))
273 cmdline_offset = (0x9ff0 - cmdline_size) & ~15;
275 cmdline_offset = 0x10000;
277 if (boot.hdr.version >= 0x0200) {
278 if (boot.hdr.version >= 0x0202) {
279 boot.hdr.cmd_line_ptr =
280 (BOOT_LOADER_SELECTOR << 4) + cmdline_offset;
281 } else if (boot.hdr.version >= 0x0201) {
282 boot.hdr.heap_end_ptr = cmdline_offset - 0x0200;
283 boot.hdr.loadflags |= CAN_USE_HEAP;
288 self->boot_selector = BOOT_LOADER_SELECTOR;
290 * The real-mode setup code starts at offset 0x200 of a bzImage. See
291 * Documentation/x86/boot.txt for details.
293 self->boot_ip = BOOT_LOADER_IP + 0x200;
294 self->boot_sp = BOOT_LOADER_SP;
297 * Setup a *fake* real mode vector table, it has only
298 * one real hadler which does just iret
300 * This is where the BIOS lives -- BDA area
302 intr_addr = BIOS_INTR_NEXT(BDA_START + 0, 16);
303 p = guest_flat_to_host(self, intr_addr);
304 memcpy(p, intfake, intfake_size);
305 intr = (struct real_intr_desc) {
306 .segment = REAL_SEGMENT(intr_addr),
309 interrupt_table__setup(&self->interrupt_table, &intr);
311 intr_addr = BIOS_INTR_NEXT(BDA_START + intfake_size, 16);
312 p = guest_flat_to_host(self, intr_addr);
313 memcpy(p, int10, int10_size);
314 intr = (struct real_intr_desc) {
315 .segment = REAL_SEGMENT(intr_addr),
318 interrupt_table__set(&self->interrupt_table, &intr, 0x10);
320 p = guest_flat_to_host(self, 0);
321 interrupt_table__copy(&self->interrupt_table, p, REAL_INTR_SIZE);
/*
 * Load @kernel_filename into guest memory, trying the bzImage format
 * first and falling back to a raw flat binary.  Dies if the file
 * cannot be opened or matches neither format.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
			const char *kernel_cmdline)
{
	bool ret;
	int fd;

	fd = open(kernel_filename, O_RDONLY);
	if (fd < 0)
		die("unable to open kernel");

	ret = load_bzimage(kvm, fd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	ret = load_flat_binary(kvm, fd);
	if (ret)
		goto found_kernel;

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd);	/* fix: fd was leaked on success */
	return ret;
}
350 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
352 uint64_t cs = self->sregs.cs.selector;
354 return ip - (cs << 4);
357 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
362 * NOTE! We should take code segment base address into account here.
363 * Luckily it's usually zero because Linux uses flat memory model.
365 if (self->sregs.cr0 & 0x01)
368 cs = self->sregs.cs.selector;
370 return ip + (cs << 4);
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t) selector << 4;
}
/*
 * Allocate a zeroed struct kvm_msrs with room for @nmsrs entries in
 * its trailing flexible array.  Dies on allocation failure.
 */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	struct kvm_msrs *self = calloc(1, sizeof(*self) + (sizeof(struct kvm_msr_entry) * nmsrs));

	if (!self)
		die("out of memory");

	return self;
}
/* x86 MSR numbers reset by kvm__setup_msrs() below. */
#define MSR_IA32_TIME_STAMP_COUNTER 0x10
/* SYSENTER entry-point state. */
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
/* SYSCALL/SYSRET entry-point state and kernel GS base. */
#define MSR_IA32_STAR 0xc0000081
#define MSR_IA32_LSTAR 0xc0000082
#define MSR_IA32_CSTAR 0xc0000083
#define MSR_IA32_FMASK 0xc0000084
#define MSR_IA32_KERNEL_GS_BASE 0xc0000102
/* Convenience constructor for one kvm_msr_entry. */
#define KVM_MSR_ENTRY(_index, _data) \
(struct kvm_msr_entry) { .index = _index, .data = _data }
406 static void kvm__setup_msrs(struct kvm *self)
408 unsigned long ndx = 0;
410 self->msrs = kvm_msrs__new(100);
412 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
413 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
414 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
415 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR, 0x0);
416 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR, 0x0);
417 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE, 0x0);
418 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK, 0x0);
419 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR, 0x0);
420 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);
422 self->msrs->nmsrs = ndx;
424 if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
425 die_perror("KVM_SET_MSRS failed");
/* Program the vCPU's initial FPU state via KVM_SET_FPU; dies on failure. */
static void kvm__setup_fpu(struct kvm *self)
self->fpu = (struct kvm_fpu) {
/* NOTE(review): the initializer fields are elided in this view —
 * presumably the power-on defaults (e.g. .fcw/.mxcsr); confirm
 * against the full source before editing. */
if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
die_perror("KVM_SET_FPU failed");
439 static void kvm__setup_regs(struct kvm *self)
441 self->regs = (struct kvm_regs) {
442 /* We start the guest in 16-bit real mode */
443 .rflags = 0x0000000000000002ULL,
445 .rip = self->boot_ip,
446 .rsp = self->boot_sp,
447 .rbp = self->boot_sp,
450 if (self->regs.rip > USHRT_MAX)
451 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
453 if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
454 die_perror("KVM_SET_REGS failed");
/*
 * Program the initial special registers for real-mode boot: CR0 set to
 * its power-on reset value 0x60000010 (CD|NW|ET; paging and protection
 * off), and every segment register loaded with the boot selector and
 * the matching base (selector * 16), as KVM on Intel requires.
 *
 * NOTE(review): per-segment attribute fields (.limit/.type/.present
 * etc.) and the .tr/.ldt/.gdt/.idt initializer bodies are elided in
 * this view — confirm against the full source before editing.
 */
static void kvm__setup_sregs(struct kvm *self)
self->sregs = (struct kvm_sregs) {
.cr0 = 0x60000010ULL,
.cs = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.ss = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.ds = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.es = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.fs = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.gs = (struct kvm_segment) {
.selector = self->boot_selector,
.base = selector_to_base(self->boot_selector),
.tr = (struct kvm_segment) {
.ldt = (struct kvm_segment) {
.gdt = (struct kvm_dtable) {
.idt = (struct kvm_dtable) {
if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
die_perror("KVM_SET_SREGS failed");
/*
 * Bring the vCPU to its initial boot state: special registers,
 * general-purpose registers, FPU, then MSRs.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);
	kvm__setup_regs(self);
	kvm__setup_fpu(self);
	kvm__setup_msrs(self);
}
548 void kvm__run(struct kvm *self)
550 if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
551 die_perror("KVM_RUN failed");
/* Print one segment register in the tabular format used by
 * kvm__show_registers(). */
static void print_segment(const char *name, struct kvm_segment *seg)
{
	printf(" %s %04" PRIx16 " %016" PRIx64 " %08" PRIx32 " %02" PRIx8 " %x %x %x %x %x %x %x\n",
	       name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
	       (uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
561 void kvm__show_registers(struct kvm *self)
563 unsigned long cr0, cr2, cr3;
564 unsigned long cr4, cr8;
565 unsigned long rax, rbx, rcx;
566 unsigned long rdx, rsi, rdi;
567 unsigned long rbp, r8, r9;
568 unsigned long r10, r11, r12;
569 unsigned long r13, r14, r15;
570 unsigned long rip, rsp;
571 struct kvm_sregs sregs;
572 unsigned long rflags;
573 struct kvm_regs regs;
576 if (ioctl(self->vcpu_fd, KVM_GET_REGS, ®s) < 0)
577 die("KVM_GET_REGS failed");
579 rflags = regs.rflags;
581 rip = regs.rip; rsp = regs.rsp;
582 rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
583 rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
584 rbp = regs.rbp; r8 = regs.r8; r9 = regs.r9;
585 r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
586 r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
588 printf("Registers:\n");
589 printf(" rip: %016lx rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
590 printf(" rax: %016lx rbx: %016lx rcx: %016lx\n", rax, rbx, rcx);
591 printf(" rdx: %016lx rsi: %016lx rdi: %016lx\n", rdx, rsi, rdi);
592 printf(" rbp: %016lx r8: %016lx r9: %016lx\n", rbp, r8, r9);
593 printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
594 printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);
596 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
597 die("KVM_GET_REGS failed");
599 cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
600 cr4 = sregs.cr4; cr8 = sregs.cr8;
602 printf(" cr0: %016lx cr2: %016lx cr3: %016lx\n", cr0, cr2, cr3);
603 printf(" cr4: %016lx cr8: %016lx\n", cr4, cr8);
604 printf("Segment registers:\n");
605 printf(" register selector base limit type p dpl db s l g avl\n");
606 print_segment("cs ", &sregs.cs);
607 print_segment("ss ", &sregs.ss);
608 print_segment("ds ", &sregs.ds);
609 print_segment("es ", &sregs.es);
610 print_segment("fs ", &sregs.fs);
611 print_segment("gs ", &sregs.gs);
612 print_segment("tr ", &sregs.tr);
613 print_segment("ldt", &sregs.ldt);
614 printf(" [ efer: %016lx apic base: %016lx nmi: %s ]\n", (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
615 (self->nmi_disabled ? "disabled" : "enabled"));
616 printf("Interrupt bitmap:\n");
618 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
619 printf("%016lx ", (uint64_t) sregs.interrupt_bitmap[i]);
623 void kvm__show_code(struct kvm *self)
625 unsigned int code_bytes = 64;
626 unsigned int code_prologue = code_bytes * 43 / 64;
627 unsigned int code_len = code_bytes;
632 if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
633 die("KVM_GET_REGS failed");
635 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
636 die("KVM_GET_SREGS failed");
638 ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);
642 for (i = 0; i < code_len; i++, ip++) {
645 if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
646 printf("<%02x> ", c);
/*
 * Hex-dump @size bytes of guest memory starting at guest physical
 * address @addr, eight bytes per line.  @size is rounded down to a
 * multiple of 8.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */

	p = (unsigned char *)guest_flat_to_host(self, addr);

	printf("Guest memory dump:\n");

	for (n = 0; n < size; n += 8)
		printf("0x%08lx: %02x%02x%02x%02x %02x%02x%02x%02x\n",
		       addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
		       p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
}