3 #include "kvm/cpufeature.h"
4 #include "kvm/interrupt.h"
10 #include <asm/bootparam.h>
12 #include <sys/ioctl.h>
/*
 * Compatibility code. Remove this when we move to tools/kvm.
 */
#ifndef KVM_EXIT_INTERNAL_ERROR
# define KVM_EXIT_INTERNAL_ERROR	17
#endif

/* Expand to a designated initializer mapping an exit code to its own name. */
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/*
 * Human-readable names for KVM exit codes, indexed by the KVM_EXIT_*
 * value reported in kvm_run->exit_reason.
 */
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
/* Expand to the initializer pair for one required-extension table entry. */
#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

/*
 * KVM capabilities this hypervisor requires from the host kernel; probed
 * with KVM_CHECK_EXTENSION by kvm__check_extensions().
 */
static struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
77 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
81 ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
88 static int kvm__check_extensions(struct kvm *self)
92 for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
93 if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
94 error("Unsuppored KVM extension detected: %s",
103 static struct kvm *kvm__new(void)
105 struct kvm *self = calloc(1, sizeof *self);
108 die("out of memory");
113 void kvm__delete(struct kvm *self)
115 free(self->ram_start);
119 static bool kvm__cpu_supports_vm(void)
121 struct cpuid_regs regs;
125 regs = (struct cpuid_regs) {
131 case CPUID_VENDOR_INTEL_1:
133 feature = KVM__X86_FEATURE_VMX;
136 case CPUID_VENDOR_AMD_1:
137 eax_base = 0x80000000;
138 feature = KVM__X86_FEATURE_SVM;
145 regs = (struct cpuid_regs) {
150 if (regs.eax < eax_base + 0x01)
153 regs = (struct cpuid_regs) {
154 .eax = eax_base + 0x01
158 return regs.ecx & (1 << feature);
161 struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
163 struct kvm_userspace_memory_region mem;
164 struct kvm_pit_config pit_config = { .flags = 0, };
170 if (!kvm__cpu_supports_vm())
171 die("Your CPU does not support hardware virtualization");
175 self->sys_fd = open(kvm_dev, O_RDWR);
176 if (self->sys_fd < 0) {
178 die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
183 ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
184 if (ret != KVM_API_VERSION)
185 die_perror("KVM_API_VERSION ioctl");
187 self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
189 die_perror("KVM_CREATE_VM ioctl");
191 if (kvm__check_extensions(self))
192 die("A required KVM extention is not supported by OS");
194 ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
196 die_perror("KVM_SET_TSS_ADDR ioctl");
198 ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
200 die_perror("KVM_CREATE_PIT2 ioctl");
202 self->ram_size = ram_size;
204 page_size = sysconf(_SC_PAGESIZE);
205 if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
206 die("out of memory");
208 mem = (struct kvm_userspace_memory_region) {
210 .guest_phys_addr = 0x0UL,
211 .memory_size = self->ram_size,
212 .userspace_addr = (unsigned long) self->ram_start,
215 ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
217 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
219 ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
221 die_perror("KVM_CREATE_IRQCHIP ioctl");
223 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
224 if (self->vcpu_fd < 0)
225 die_perror("KVM_CREATE_VCPU ioctl");
227 mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
229 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
231 self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
232 if (self->kvm_run == MAP_FAILED)
233 die("unable to mmap vcpu fd");
238 void kvm__enable_singlestep(struct kvm *self)
240 struct kvm_guest_debug debug = {
241 .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
244 if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
245 warning("KVM_SET_GUEST_DEBUG failed");
248 #define BOOT_LOADER_SELECTOR 0x1000
249 #define BOOT_LOADER_IP 0x0000
250 #define BOOT_LOADER_SP 0x8000
251 #define BOOT_CMDLINE_OFFSET 0x20000
253 #define BOOT_PROTOCOL_REQUIRED 0x206
254 #define LOAD_HIGH 0x01
256 static int load_flat_binary(struct kvm *self, int fd)
261 if (lseek(fd, 0, SEEK_SET) < 0)
264 p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
266 while ((nr = read(fd, p, 65536)) > 0)
269 self->boot_selector = BOOT_LOADER_SELECTOR;
270 self->boot_ip = BOOT_LOADER_IP;
271 self->boot_sp = BOOT_LOADER_SP;
277 * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
280 #define BZ_KERNEL_START 0x100000UL
281 #define INITRD_START 0x1000000UL
282 #define BZ_DEFAULT_SETUP_SECTS 4
283 static const char *BZIMAGE_MAGIC = "HdrS";
285 static bool load_bzimage(struct kvm *self, int fd_kernel,
286 int fd_initrd, const char *kernel_cmdline)
288 struct boot_params *kern_boot;
289 unsigned long setup_sects;
290 struct boot_params boot;
297 * See Documentation/x86/boot.txt for details no bzImage on-disk and
301 if (lseek(fd_kernel, 0, SEEK_SET) < 0)
304 if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
307 if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
310 if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
311 die("Too old kernel");
314 if (lseek(fd_kernel, 0, SEEK_SET) < 0)
317 if (!boot.hdr.setup_sects)
318 boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
319 setup_sects = boot.hdr.setup_sects + 1;
321 setup_size = setup_sects << 9;
322 p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
324 /* copy setup.bin to mem*/
325 if (read(fd_kernel, p, setup_size) != setup_size)
328 /* copy vmlinux.bin to BZ_KERNEL_START*/
329 p = guest_flat_to_host(self, BZ_KERNEL_START);
331 while ((nr = read(fd_kernel, p, 65536)) > 0)
334 p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
335 if (kernel_cmdline) {
336 cmdline_size = strlen(kernel_cmdline) + 1;
337 if (cmdline_size > boot.hdr.cmdline_size)
338 cmdline_size = boot.hdr.cmdline_size;
340 memset(p, 0, boot.hdr.cmdline_size);
341 memcpy(p, kernel_cmdline, cmdline_size - 1);
344 kern_boot = guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);
346 kern_boot->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
347 kern_boot->hdr.type_of_loader = 0xff;
348 kern_boot->hdr.heap_end_ptr = 0xfe00;
349 kern_boot->hdr.loadflags |= CAN_USE_HEAP;
352 * Read initrd image into guest memory
354 if (fd_initrd >= 0) {
355 struct stat initrd_stat;
358 if (fstat(fd_initrd, &initrd_stat))
361 addr = boot.hdr.initrd_addr_max & ~0xfffff;
363 if (addr < BZ_KERNEL_START)
364 die("Not enough memory for initrd");
365 else if (addr < (self->ram_size - initrd_stat.st_size))
370 p = guest_flat_to_host(self, addr);
371 nr = read(fd_initrd, p, initrd_stat.st_size);
372 if (nr != initrd_stat.st_size)
373 die("Failed to read initrd");
375 kern_boot->hdr.ramdisk_image = addr;
376 kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
379 self->boot_selector = BOOT_LOADER_SELECTOR;
381 * The real-mode setup code starts at offset 0x200 of a bzImage. See
382 * Documentation/x86/boot.txt for details.
384 self->boot_ip = BOOT_LOADER_IP + 0x200;
385 self->boot_sp = BOOT_LOADER_SP;
/*
 * Load the guest kernel image: try bzImage first, fall back to a raw
 * flat binary.  Dies if neither format can be loaded or a file cannot
 * be opened.
 *
 * Fixes: fd_kernel and fd_initrd were never closed, leaking two
 * descriptors per boot.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
			const char *initrd_filename, const char *kernel_cmdline)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	if (fd_initrd >= 0)
		close(fd_initrd);
	close(fd_kernel);

	return ret;
}
426 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
428 uint64_t cs = self->sregs.cs.selector;
430 return ip - (cs << 4);
433 static inline bool is_in_protected_mode(struct kvm *self)
435 return self->sregs.cr0 & 0x01;
438 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
443 * NOTE! We should take code segment base address into account here.
444 * Luckily it's usually zero because Linux uses flat memory model.
446 if (is_in_protected_mode(self))
449 cs = self->sregs.cs.selector;
451 return ip + (cs << 4);
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t) selector << 4;
}
/*
 * Allocate a zeroed kvm_msrs header with room for @nmsrs trailing
 * kvm_msr_entry slots.  Dies on allocation failure; caller owns the
 * result.
 */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	size_t size = sizeof(struct kvm_msrs) + sizeof(struct kvm_msr_entry) * nmsrs;
	struct kvm_msrs *msrs = calloc(1, size);

	if (!msrs)
		die("out of memory");

	return msrs;
}
472 #define MSR_IA32_TIME_STAMP_COUNTER 0x10
474 #define MSR_IA32_SYSENTER_CS 0x174
475 #define MSR_IA32_SYSENTER_ESP 0x175
476 #define MSR_IA32_SYSENTER_EIP 0x176
478 #define MSR_IA32_STAR 0xc0000081
479 #define MSR_IA32_LSTAR 0xc0000082
480 #define MSR_IA32_CSTAR 0xc0000083
481 #define MSR_IA32_FMASK 0xc0000084
482 #define MSR_IA32_KERNEL_GS_BASE 0xc0000102
484 #define KVM_MSR_ENTRY(_index, _data) \
485 (struct kvm_msr_entry) { .index = _index, .data = _data }
487 static void kvm__setup_msrs(struct kvm *self)
489 unsigned long ndx = 0;
491 self->msrs = kvm_msrs__new(100);
493 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
494 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
495 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
497 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR, 0x0);
498 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR, 0x0);
499 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE, 0x0);
500 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK, 0x0);
501 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR, 0x0);
503 self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);
505 self->msrs->nmsrs = ndx;
507 if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
508 die_perror("KVM_SET_MSRS failed");
511 static void kvm__setup_fpu(struct kvm *self)
513 self->fpu = (struct kvm_fpu) {
518 if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
519 die_perror("KVM_SET_FPU failed");
522 static void kvm__setup_regs(struct kvm *self)
524 self->regs = (struct kvm_regs) {
525 /* We start the guest in 16-bit real mode */
526 .rflags = 0x0000000000000002ULL,
528 .rip = self->boot_ip,
529 .rsp = self->boot_sp,
530 .rbp = self->boot_sp,
533 if (self->regs.rip > USHRT_MAX)
534 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
536 if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
537 die_perror("KVM_SET_REGS failed");
540 static void kvm__setup_sregs(struct kvm *self)
543 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
544 die_perror("KVM_GET_SREGS failed");
546 self->sregs.cs.selector = self->boot_selector;
547 self->sregs.cs.base = selector_to_base(self->boot_selector);
548 self->sregs.ss.selector = self->boot_selector;
549 self->sregs.ss.base = selector_to_base(self->boot_selector);
550 self->sregs.ds.selector = self->boot_selector;
551 self->sregs.ds.base = selector_to_base(self->boot_selector);
552 self->sregs.es.selector = self->boot_selector;
553 self->sregs.es.base = selector_to_base(self->boot_selector);
554 self->sregs.fs.selector = self->boot_selector;
555 self->sregs.fs.base = selector_to_base(self->boot_selector);
556 self->sregs.gs.selector = self->boot_selector;
557 self->sregs.gs.base = selector_to_base(self->boot_selector);
559 if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
560 die_perror("KVM_SET_SREGS failed");
/*
 * kvm__reset_vcpu - reset virtual CPU to a known state
 *
 * Programs segment registers, general registers, FPU and MSRs for a
 * real-mode boot.  NOTE(review): the call order is preserved as-is from
 * the original.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);

	kvm__setup_regs(self);

	kvm__setup_fpu(self);

	kvm__setup_msrs(self);
}
574 void kvm__setup_mem(struct kvm *self)
576 struct e820_entry *mem_map;
579 size = guest_flat_to_host(self, E820_MAP_SIZE);
580 mem_map = guest_flat_to_host(self, E820_MAP_START);
584 mem_map[0] = (struct e820_entry) {
585 .addr = REAL_MODE_IVT_BEGIN,
586 .size = EBDA_START - REAL_MODE_IVT_BEGIN,
587 .type = E820_MEM_USABLE,
589 mem_map[1] = (struct e820_entry) {
591 .size = VGA_RAM_BEGIN - EBDA_START,
592 .type = E820_MEM_RESERVED,
594 mem_map[2] = (struct e820_entry) {
595 .addr = MB_BIOS_BEGIN,
596 .size = MB_BIOS_END - MB_BIOS_BEGIN,
597 .type = E820_MEM_RESERVED,
599 mem_map[3] = (struct e820_entry) {
600 .addr = BZ_KERNEL_START,
601 .size = self->ram_size - BZ_KERNEL_START,
602 .type = E820_MEM_USABLE,
#define TIMER_INTERVAL_NS 1000000	/* 1 msec */

/*
 * SIGALRM handler.  Intentionally empty: merely delivering the signal
 * interrupts KVM_RUN (see kvm__run, which tolerates EINTR), which is all
 * the periodic timer needs.
 */
static void alarm_handler(int sig)
{
}
613 * This function sets up a timer that's used to inject interrupts from the
614 * userspace hypervisor into the guest at periodical intervals. Please note
615 * that clock interrupt, for example, is not handled here.
617 void kvm__start_timer(struct kvm *self)
619 struct itimerspec its;
623 sigfillset(&sa.sa_mask);
625 sa.sa_handler = alarm_handler;
627 sigaction(SIGALRM, &sa, NULL);
629 memset(&sev, 0, sizeof(struct sigevent));
630 sev.sigev_value.sival_int = 0;
631 sev.sigev_notify = SIGEV_SIGNAL;
632 sev.sigev_signo = SIGALRM;
634 if (timer_create(CLOCK_REALTIME, &sev, &self->timerid) < 0)
635 die("timer_create()");
637 its.it_value.tv_sec = TIMER_INTERVAL_NS / 1000000000;
638 its.it_value.tv_nsec = TIMER_INTERVAL_NS % 1000000000;
639 its.it_interval.tv_sec = its.it_value.tv_sec;
640 its.it_interval.tv_nsec = its.it_value.tv_nsec;
642 if (timer_settime(self->timerid, 0, &its, NULL) < 0)
643 die("timer_settime()");
646 void kvm__run(struct kvm *self)
650 err = ioctl(self->vcpu_fd, KVM_RUN, 0);
651 if (err && (errno != EINTR && errno != EAGAIN))
652 die_perror("KVM_RUN failed");
655 void kvm__irq_line(struct kvm *self, int irq, int level)
657 struct kvm_irq_level irq_level;
659 irq_level = (struct kvm_irq_level) {
666 if (ioctl(self->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
667 die_perror("KVM_IRQ_LINE failed");
/* Print one descriptor-table register (base and limit) on a single line. */
static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	printf(" %s %016" PRIx64 " %08" PRIx16 "\n",
		name, (uint64_t) dtable->base, (uint16_t) dtable->limit);
}
/* Print one segment register: selector, base, limit and attribute bits. */
static void print_segment(const char *name, struct kvm_segment *seg)
{
	printf(" %s %04" PRIx16 " %016" PRIx64 " %08" PRIx32 " %02" PRIx8 " %x %x %x %x %x %x %x\n",
		name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
		(uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
683 void kvm__show_registers(struct kvm *self)
685 unsigned long cr0, cr2, cr3;
686 unsigned long cr4, cr8;
687 unsigned long rax, rbx, rcx;
688 unsigned long rdx, rsi, rdi;
689 unsigned long rbp, r8, r9;
690 unsigned long r10, r11, r12;
691 unsigned long r13, r14, r15;
692 unsigned long rip, rsp;
693 struct kvm_sregs sregs;
694 unsigned long rflags;
695 struct kvm_regs regs;
698 if (ioctl(self->vcpu_fd, KVM_GET_REGS, ®s) < 0)
699 die("KVM_GET_REGS failed");
701 rflags = regs.rflags;
703 rip = regs.rip; rsp = regs.rsp;
704 rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
705 rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
706 rbp = regs.rbp; r8 = regs.r8; r9 = regs.r9;
707 r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
708 r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
710 printf("Registers:\n");
711 printf(" rip: %016lx rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
712 printf(" rax: %016lx rbx: %016lx rcx: %016lx\n", rax, rbx, rcx);
713 printf(" rdx: %016lx rsi: %016lx rdi: %016lx\n", rdx, rsi, rdi);
714 printf(" rbp: %016lx r8: %016lx r9: %016lx\n", rbp, r8, r9);
715 printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
716 printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);
718 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
719 die("KVM_GET_REGS failed");
721 cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
722 cr4 = sregs.cr4; cr8 = sregs.cr8;
724 printf(" cr0: %016lx cr2: %016lx cr3: %016lx\n", cr0, cr2, cr3);
725 printf(" cr4: %016lx cr8: %016lx\n", cr4, cr8);
726 printf("Segment registers:\n");
727 printf(" register selector base limit type p dpl db s l g avl\n");
728 print_segment("cs ", &sregs.cs);
729 print_segment("ss ", &sregs.ss);
730 print_segment("ds ", &sregs.ds);
731 print_segment("es ", &sregs.es);
732 print_segment("fs ", &sregs.fs);
733 print_segment("gs ", &sregs.gs);
734 print_segment("tr ", &sregs.tr);
735 print_segment("ldt", &sregs.ldt);
736 print_dtable("gdt", &sregs.gdt);
737 print_dtable("idt", &sregs.idt);
738 printf(" [ efer: %016" PRIx64 " apic base: %016" PRIx64 " nmi: %s ]\n",
739 (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
740 (self->nmi_disabled ? "disabled" : "enabled"));
741 printf("Interrupt bitmap:\n");
743 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
744 printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
748 void kvm__show_code(struct kvm *self)
750 unsigned int code_bytes = 64;
751 unsigned int code_prologue = code_bytes * 43 / 64;
752 unsigned int code_len = code_bytes;
757 if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
758 die("KVM_GET_REGS failed");
760 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
761 die("KVM_GET_SREGS failed");
763 ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);
767 for (i = 0; i < code_len; i++, ip++) {
768 if (!host_ptr_in_ram(self, ip))
773 if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
774 printf("<%02x> ", c);
782 kvm__dump_mem(self, self->regs.rsp, 32);
785 void kvm__show_page_tables(struct kvm *self)
792 if (!is_in_protected_mode(self))
795 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
796 die("KVM_GET_SREGS failed");
798 pte4 = guest_flat_to_host(self, self->sregs.cr3);
799 if (!host_ptr_in_ram(self, pte4))
802 pte3 = guest_flat_to_host(self, (*pte4 & ~0xfff));
803 if (!host_ptr_in_ram(self, pte3))
806 pte2 = guest_flat_to_host(self, (*pte3 & ~0xfff));
807 if (!host_ptr_in_ram(self, pte2))
810 pte1 = guest_flat_to_host(self, (*pte2 & ~0xfff));
811 if (!host_ptr_in_ram(self, pte1))
814 printf("Page Tables:\n");
815 if (*pte2 & (1 << 7))
816 printf(" pte4: %016" PRIx64 " pte3: %016" PRIx64
817 " pte2: %016" PRIx64 "\n",
818 *pte4, *pte3, *pte2);
820 printf(" pte4: %016" PRIx64 " pte3: %016" PRIx64 " pte2: %016"
821 PRIx64 " pte1: %016" PRIx64 "\n",
822 *pte4, *pte3, *pte2, *pte1);
/*
 * Hex-dump @size bytes of guest memory starting at flat address @addr,
 * eight bytes per output line.  @size is rounded down to a multiple of
 * eight, and the dump stops early if it would run past guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */
	if (!size)
		return;

	p = guest_flat_to_host(self, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(self, p + n))
			break;

		printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
			p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}