]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - tools/kvm/kvm.c
kvm, bios: Make the e820 map look similar to the real one
[karo-tx-linux.git] / tools / kvm / kvm.c
1 #include "kvm/kvm.h"
2
3 #include "kvm/interrupt.h"
4 #include "kvm/cpufeature.h"
5 #include "kvm/e820.h"
6 #include "kvm/util.h"
7
8 #include <linux/kvm.h>
9
10 #include <asm/bootparam.h>
11
12 #include <sys/ioctl.h>
13 #include <inttypes.h>
14 #include <sys/mman.h>
15 #include <stdbool.h>
16 #include <assert.h>
17 #include <limits.h>
18 #include <stdarg.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <stdio.h>
23 #include <fcntl.h>
24
/*
 * Compatibility code. Remove this when we move to tools/kvm.
 */
#ifndef KVM_EXIT_INTERNAL_ERROR
# define KVM_EXIT_INTERNAL_ERROR                17
#endif

/* Designated initializer: index each name string by its KVM_EXIT_* code. */
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/*
 * Human-readable names for the KVM exit codes, indexed by the numeric
 * KVM_EXIT_* value.  Codes without an entry below yield a NULL pointer.
 */
const char *kvm_exit_reasons[] = {
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
54
/* Pair a KVM_CAP_* constant with its own name for diagnostics. */
#define DEFINE_KVM_EXT(ext)             \
        .name = #ext,                   \
        .code = ext

/*
 * KVM capabilities this tool requires.  kvm__check_extensions() probes
 * each entry via KVM_CHECK_EXTENSION and refuses to start a guest if
 * any is missing.
 */
struct {
        const char *name;
        int code;
} kvm_req_ext[] = {
        { DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
        { DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
        { DEFINE_KVM_EXT(KVM_CAP_PIT2) },
        { DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
        { DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
        { DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
        { DEFINE_KVM_EXT(KVM_CAP_HLT) },
        { DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
        { DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
73
74 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
75 {
76         int ret;
77
78         ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
79         if (ret < 0)
80                 return false;
81
82         return ret;
83 }
84
85 static int kvm__check_extensions(struct kvm *self)
86 {
87         unsigned int i;
88
89         for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
90                 if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
91                         error("Unsuppored KVM extension detected: %s",
92                                 kvm_req_ext[i].name);
93                         return (int)-i;
94                 }
95         }
96
97         return 0;
98 }
99
100 static struct kvm *kvm__new(void)
101 {
102         struct kvm *self = calloc(1, sizeof *self);
103
104         if (!self)
105                 die("out of memory");
106
107         return self;
108 }
109
/*
 * Tear down a kvm instance created by kvm__init(): frees the guest RAM
 * backing store, then the struct itself.
 *
 * NOTE(review): sys_fd/vm_fd/vcpu_fd are not closed and kvm_run is not
 * munmap'ed here, so they leak; harmless if the process exits right
 * after, but worth confirming against the callers.
 */
void kvm__delete(struct kvm *self)
{
        free(self->ram_start);
        free(self);
}
115
/*
 * Check whether the host CPU advertises hardware virtualization
 * support: VMX on Intel, SVM on AMD.  Returns false for any other
 * vendor or when the relevant CPUID leaf/bit is absent.
 */
static bool kvm__cpu_supports_vm(void)
{
        struct cpuid_regs regs;
        uint32_t eax_base;
        int feature;

        /* CPUID leaf 0: ebx carries the first word of the vendor string. */
        regs    = (struct cpuid_regs) {
                .eax            = 0x00,
        };
        host_cpuid(&regs);

        switch (regs.ebx) {
        case CPUID_VENDOR_INTEL_1:
                /* Intel: the VMX bit lives in standard leaf 1. */
                eax_base        = 0x00;
                feature         = KVM__X86_FEATURE_VMX;
                break;

        case CPUID_VENDOR_AMD_1:
                /* AMD: the SVM bit lives in extended leaf 0x80000001. */
                eax_base        = 0x80000000;
                feature         = KVM__X86_FEATURE_SVM;
                break;

        default:
                return false;
        }

        /* Make sure the feature leaf we need is actually implemented. */
        regs    = (struct cpuid_regs) {
                .eax            = eax_base,
        };
        host_cpuid(&regs);

        if (regs.eax < eax_base + 0x01)
                return false;

        /* Query the feature leaf and test the VMX/SVM bit in ecx. */
        regs    = (struct cpuid_regs) {
                .eax            = eax_base + 0x01
        };
        host_cpuid(&regs);

        return regs.ecx & (1 << feature);
}
157
158 struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
159 {
160         struct kvm_userspace_memory_region mem;
161         struct kvm_pit_config pit_config = { .flags = 0, };
162         struct kvm *self;
163         long page_size;
164         int mmap_size;
165         int ret;
166
167         if (!kvm__cpu_supports_vm())
168                 die("Your CPU does not support hardware virtualization");
169
170         self = kvm__new();
171
172         self->sys_fd = open(kvm_dev, O_RDWR);
173         if (self->sys_fd < 0) {
174                 if (errno == ENOENT)
175                         die("'%s' not found. Please make sure you have CONFIG_KVM enabled.", kvm_dev);
176
177                 die_perror("open");
178         }
179
180         ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
181         if (ret != KVM_API_VERSION)
182                 die_perror("KVM_API_VERSION ioctl");
183
184         self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
185         if (self->vm_fd < 0)
186                 die_perror("KVM_CREATE_VM ioctl");
187
188         if (kvm__check_extensions(self))
189                 die("A required KVM extention is not supported by OS");
190
191         ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
192         if (ret < 0)
193                 die_perror("KVM_SET_TSS_ADDR ioctl");
194
195         ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
196         if (ret < 0)
197                 die_perror("KVM_CREATE_PIT2 ioctl");
198
199         self->ram_size          = ram_size;
200
201         page_size       = sysconf(_SC_PAGESIZE);
202         if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
203                 die("out of memory");
204
205         mem = (struct kvm_userspace_memory_region) {
206                 .slot                   = 0,
207                 .guest_phys_addr        = 0x0UL,
208                 .memory_size            = self->ram_size,
209                 .userspace_addr         = (unsigned long) self->ram_start,
210         };
211
212         ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
213         if (ret < 0)
214                 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
215
216         ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
217         if (ret < 0)
218                 die_perror("KVM_CREATE_IRQCHIP ioctl");
219
220         self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
221         if (self->vcpu_fd < 0)
222                 die_perror("KVM_CREATE_VCPU ioctl");
223
224         mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
225         if (mmap_size < 0)
226                 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
227
228         self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
229         if (self->kvm_run == MAP_FAILED)
230                 die("unable to mmap vcpu fd");
231
232         return self;
233 }
234
235 void kvm__enable_singlestep(struct kvm *self)
236 {
237         struct kvm_guest_debug debug = {
238                 .control        = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
239         };
240
241         if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
242                 warning("KVM_SET_GUEST_DEBUG failed");
243 }
244
245 #define BOOT_LOADER_SELECTOR    0x1000
246 #define BOOT_LOADER_IP          0x0000
247 #define BOOT_LOADER_SP          0x8000
248 #define BOOT_CMDLINE_OFFSET     0x20000
249
250 #define BOOT_PROTOCOL_REQUIRED  0x202
251 #define LOAD_HIGH               0x01
252
253 static int load_flat_binary(struct kvm *self, int fd)
254 {
255         void *p;
256         int nr;
257
258         if (lseek(fd, 0, SEEK_SET) < 0)
259                 die_perror("lseek");
260
261         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
262
263         while ((nr = read(fd, p, 65536)) > 0)
264                 p += nr;
265
266         self->boot_selector     = BOOT_LOADER_SELECTOR;
267         self->boot_ip           = BOOT_LOADER_IP;
268         self->boot_sp           = BOOT_LOADER_SP;
269
270         return true;
271 }
272
273 /*
274  * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
275  * default.
276  */
277 #define BZ_KERNEL_START                 0x100000UL
278
279 static const char *BZIMAGE_MAGIC        = "HdrS";
280
281 #define BZ_DEFAULT_SETUP_SECTS          4
282
283 static bool load_bzimage(struct kvm *self, int fd, const char *kernel_cmdline)
284 {
285         struct boot_params *kern_boot;
286         unsigned long setup_sects;
287         struct boot_params boot;
288         size_t cmdline_size;
289         ssize_t setup_size;
290         void *p;
291         int nr;
292
293         /*
294          * See Documentation/x86/boot.txt for details no bzImage on-disk and
295          * memory layout.
296          */
297
298         if (lseek(fd, 0, SEEK_SET) < 0)
299                 die_perror("lseek");
300
301         if (read(fd, &boot, sizeof(boot)) != sizeof(boot))
302                 return false;
303
304         if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)) != 0)
305                 return false;
306
307         if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
308                 warning("Too old kernel");
309                 return false;
310         }
311
312         if (lseek(fd, 0, SEEK_SET) < 0)
313                 die_perror("lseek");
314
315         if (!boot.hdr.setup_sects)
316                 boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
317         setup_sects = boot.hdr.setup_sects + 1;
318
319         setup_size = setup_sects << 9;
320         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
321
322         if (read(fd, p, setup_size) != setup_size)
323                 die_perror("read");
324
325         p = guest_flat_to_host(self, BZ_KERNEL_START);
326
327         while ((nr = read(fd, p, 65536)) > 0)
328                 p += nr;
329
330         p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
331         if (kernel_cmdline) {
332                 cmdline_size = strlen(kernel_cmdline) + 1;
333                 if (cmdline_size > boot.hdr.cmdline_size)
334                         cmdline_size = boot.hdr.cmdline_size;
335
336                 memset(p, 0, boot.hdr.cmdline_size);
337                 memcpy(p, kernel_cmdline, cmdline_size - 1);
338         }
339
340         kern_boot       = guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);
341
342         kern_boot->hdr.cmd_line_ptr     = BOOT_CMDLINE_OFFSET;
343         kern_boot->hdr.type_of_loader   = 0xff;
344         kern_boot->hdr.heap_end_ptr     = 0xfe00;
345         kern_boot->hdr.loadflags        |= CAN_USE_HEAP;
346
347         self->boot_selector     = BOOT_LOADER_SELECTOR;
348         /*
349          * The real-mode setup code starts at offset 0x200 of a bzImage. See
350          * Documentation/x86/boot.txt for details.
351          */
352         self->boot_ip           = BOOT_LOADER_IP + 0x200;
353         self->boot_sp           = BOOT_LOADER_SP;
354
355         /*
356          * Drum roll, BIOS is coming to live, oh dear...
357          */
358         setup_bios(self);
359
360         return true;
361 }
362
/*
 * Load the guest kernel image: first try it as a bzImage, then fall
 * back to treating it as a flat binary.  Dies if the file cannot be
 * opened or matches neither format; returns true otherwise.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
                        const char *kernel_cmdline)
{
        int fd = open(kernel_filename, O_RDONLY);

        if (fd < 0)
                die("unable to open kernel");

        if (load_bzimage(kvm, fd, kernel_cmdline))
                return true;

        if (load_flat_binary(kvm, fd))
                return true;

        die("%s is not a valid bzImage or flat binary", kernel_filename);

        return false;   /* not reached */
}
386
387 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
388 {
389         uint64_t cs = self->sregs.cs.selector;
390
391         return ip - (cs << 4);
392 }
393
394 static inline bool is_in_protected_mode(struct kvm *self)
395 {
396         return self->sregs.cr0 & 0x01;
397 }
398
399 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
400 {
401         uint64_t cs;
402
403         /*
404          * NOTE! We should take code segment base address into account here.
405          * Luckily it's usually zero because Linux uses flat memory model.
406          */
407         if (is_in_protected_mode(self))
408                 return ip;
409
410         cs = self->sregs.cs.selector;
411
412         return ip + (cs << 4);
413 }
414
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
        return ((uint32_t) selector) << 4;
}
422
/* Allocate a zero-filled kvm_msrs buffer with room for nmsrs entries. */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
        size_t total = sizeof(struct kvm_msrs) + nmsrs * sizeof(struct kvm_msr_entry);
        struct kvm_msrs *msrs = calloc(1, total);

        if (!msrs)
                die("out of memory");

        return msrs;
}
432
#define MSR_IA32_TIME_STAMP_COUNTER     0x10

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_IA32_STAR                   0xc0000081
#define MSR_IA32_LSTAR                  0xc0000082
#define MSR_IA32_CSTAR                  0xc0000083
#define MSR_IA32_FMASK                  0xc0000084
#define MSR_IA32_KERNEL_GS_BASE         0xc0000102

/* Build one kvm_msr_entry compound literal for the setup table below. */
#define KVM_MSR_ENTRY(_index, _data)    \
        (struct kvm_msr_entry) { .index = _index, .data = _data }

/*
 * Zero the vcpu's boot-relevant MSRs (sysenter entry points, TSC, and —
 * on 64-bit builds — the syscall MSRs) via KVM_SET_MSRS.  Dies if the
 * ioctl fails.
 */
static void kvm__setup_msrs(struct kvm *self)
{
        unsigned long ndx = 0;

        /* 100 entries is far more than the handful written below. */
        self->msrs = kvm_msrs__new(100);

        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,        0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,       0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,       0x0);
#ifdef CONFIG_X86_64
        /* The long-mode syscall MSRs only exist on x86-64. */
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR,               0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR,              0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE,     0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK,              0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR,              0x0);
#endif
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);

        self->msrs->nmsrs       = ndx;

        if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
                die_perror("KVM_SET_MSRS failed");
}
471
472 static void kvm__setup_fpu(struct kvm *self)
473 {
474         self->fpu = (struct kvm_fpu) {
475                 .fcw            = 0x37f,
476                 .mxcsr          = 0x1f80,
477         };
478
479         if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
480                 die_perror("KVM_SET_FPU failed");
481 }
482
483 static void kvm__setup_regs(struct kvm *self)
484 {
485         self->regs = (struct kvm_regs) {
486                 /* We start the guest in 16-bit real mode  */
487                 .rflags         = 0x0000000000000002ULL,
488
489                 .rip            = self->boot_ip,
490                 .rsp            = self->boot_sp,
491                 .rbp            = self->boot_sp,
492         };
493
494         if (self->regs.rip > USHRT_MAX)
495                 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
496
497         if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
498                 die_perror("KVM_SET_REGS failed");
499 }
500
501 static void kvm__setup_sregs(struct kvm *self)
502 {
503
504         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
505                 die_perror("KVM_GET_SREGS failed");
506
507         self->sregs.cs.selector = self->boot_selector;
508         self->sregs.cs.base     = selector_to_base(self->boot_selector);
509         self->sregs.ss.selector = self->boot_selector;
510         self->sregs.ss.base     = selector_to_base(self->boot_selector);
511         self->sregs.ds.selector = self->boot_selector;
512         self->sregs.ds.base     = selector_to_base(self->boot_selector);
513         self->sregs.es.selector = self->boot_selector;
514         self->sregs.es.base     = selector_to_base(self->boot_selector);
515         self->sregs.fs.selector = self->boot_selector;
516         self->sregs.fs.base     = selector_to_base(self->boot_selector);
517         self->sregs.gs.selector = self->boot_selector;
518         self->sregs.gs.base     = selector_to_base(self->boot_selector);
519
520         if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
521                 die_perror("KVM_SET_SREGS failed");
522 }
523
/* Bring the vcpu to its initial boot state: segments, GPRs, FPU, MSRs. */
void kvm__reset_vcpu(struct kvm *self)
{
        kvm__setup_sregs(self);
        kvm__setup_regs(self);
        kvm__setup_fpu(self);
        kvm__setup_msrs(self);
}
534
535 void kvm__setup_mem(struct kvm *self)
536 {
537         struct e820_entry *mem_map;
538         unsigned char *size;
539
540         size            = guest_flat_to_host(self, E820_MAP_SIZE);
541         mem_map         = guest_flat_to_host(self, E820_MAP_START);
542
543         *size           = 4;
544
545         mem_map[0]      = (struct e820_entry) {
546                 .addr           = REAL_MODE_IVT_BEGIN,
547                 .size           = EBDA_START - REAL_MODE_IVT_BEGIN,
548                 .type           = E820_MEM_USABLE,
549         };
550         mem_map[1]      = (struct e820_entry) {
551                 .addr           = EBDA_START,
552                 .size           = VGA_RAM_BEGIN - EBDA_START,
553                 .type           = E820_MEM_RESERVED,
554         };
555         mem_map[2]      = (struct e820_entry) {
556                 .addr           = MB_BIOS_BEGIN,
557                 .size           = MB_BIOS_END - MB_BIOS_BEGIN,
558                 .type           = E820_MEM_RESERVED,
559         };
560         mem_map[3]      = (struct e820_entry) {
561                 .addr           = BZ_KERNEL_START,
562                 .size           = self->ram_size - BZ_KERNEL_START,
563                 .type           = E820_MEM_USABLE,
564         };
565 }
566
567 void kvm__run(struct kvm *self)
568 {
569         if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
570                 die_perror("KVM_RUN failed");
571 }
572
/* Print one descriptor-table register (GDT/IDT): base and limit. */
static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
        uint64_t base   = dtable->base;
        uint16_t limit  = dtable->limit;

        printf(" %s                 %016" PRIx64 "  %08" PRIx16 "\n", name, base, limit);
}
578
/* Print one segment register: selector, base, limit, type and flag bits. */
static void print_segment(const char *name, struct kvm_segment *seg)
{
        uint16_t selector       = seg->selector;
        uint64_t base           = seg->base;
        uint32_t limit          = seg->limit;
        uint8_t type            = seg->type;

        printf(" %s       %04" PRIx16 "      %016" PRIx64 "  %08" PRIx32 "  %02" PRIx8 "    %x %x   %x  %x %x %x %x\n",
                name, selector, base, limit, type,
                seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
585
586 void kvm__show_registers(struct kvm *self)
587 {
588         unsigned long cr0, cr2, cr3;
589         unsigned long cr4, cr8;
590         unsigned long rax, rbx, rcx;
591         unsigned long rdx, rsi, rdi;
592         unsigned long rbp,  r8,  r9;
593         unsigned long r10, r11, r12;
594         unsigned long r13, r14, r15;
595         unsigned long rip, rsp;
596         struct kvm_sregs sregs;
597         unsigned long rflags;
598         struct kvm_regs regs;
599         int i;
600
601         if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
602                 die("KVM_GET_REGS failed");
603
604         rflags = regs.rflags;
605
606         rip = regs.rip; rsp = regs.rsp;
607         rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
608         rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
609         rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
610         r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
611         r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
612
613         printf("Registers:\n");
614         printf(" rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
615         printf(" rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
616         printf(" rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
617         printf(" rbp: %016lx   r8:  %016lx   r9:  %016lx\n", rbp, r8,  r9);
618         printf(" r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
619         printf(" r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);
620
621         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
622                 die("KVM_GET_REGS failed");
623
624         cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
625         cr4 = sregs.cr4; cr8 = sregs.cr8;
626
627         printf(" cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
628         printf(" cr4: %016lx   cr8: %016lx\n", cr4, cr8);
629         printf("Segment registers:\n");
630         printf(" register  selector  base              limit     type  p dpl db s l g avl\n");
631         print_segment("cs ", &sregs.cs);
632         print_segment("ss ", &sregs.ss);
633         print_segment("ds ", &sregs.ds);
634         print_segment("es ", &sregs.es);
635         print_segment("fs ", &sregs.fs);
636         print_segment("gs ", &sregs.gs);
637         print_segment("tr ", &sregs.tr);
638         print_segment("ldt", &sregs.ldt);
639         print_dtable("gdt", &sregs.gdt);
640         print_dtable("idt", &sregs.idt);
641         printf(" [ efer: %016" PRIx64 "  apic base: %016" PRIx64 "  nmi: %s ]\n",
642                 (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
643                 (self->nmi_disabled ? "disabled" : "enabled"));
644         printf("Interrupt bitmap:\n");
645         printf(" ");
646         for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
647                 printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
648         printf("\n");
649 }
650
/*
 * Dump the 64 bytes of guest code around the current instruction
 * pointer (the byte at RIP is shown in <angle brackets>), followed by
 * a hex dump of the top of the stack.
 */
void kvm__show_code(struct kvm *self)
{
        unsigned int code_bytes = 64;
        /* Show roughly two thirds of the window before RIP, one third after. */
        unsigned int code_prologue = code_bytes * 43 / 64;
        unsigned int code_len = code_bytes;
        unsigned char c;
        unsigned int i;
        uint8_t *ip;

        if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
                die("KVM_GET_REGS failed");

        /* sregs are needed by ip_to_flat() to apply the real-mode CS base. */
        if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
                die("KVM_GET_SREGS failed");

        ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);

        printf("Code: ");

        for (i = 0; i < code_len; i++, ip++) {
                /* Stop rather than read host memory outside guest RAM. */
                if (!host_ptr_in_ram(self, ip))
                        break;

                c = *ip;

                /* Highlight the byte RIP currently points at. */
                if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
                        printf("<%02x> ", c);
                else
                        printf("%02x ", c);
        }

        printf("\n");

        printf("Stack:\n");
        kvm__dump_mem(self, self->regs.rsp, 32);
}
687
/*
 * Walk and print the first entry of each level of the guest's page
 * tables, starting from CR3.  Only meaningful in protected mode;
 * silently returns otherwise, or as soon as any level points outside
 * guest RAM.
 *
 * NOTE(review): only entry 0 of each table is followed, and the walk
 * assumes a 4-level layout — presumably a quick debugging aid rather
 * than a full dump; confirm before relying on it for 2-level guests.
 */
void kvm__show_page_tables(struct kvm *self)
{
        uint64_t *pte1;
        uint64_t *pte2;
        uint64_t *pte3;
        uint64_t *pte4;

        if (!is_in_protected_mode(self))
                return;

        if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
                die("KVM_GET_SREGS failed");

        /* CR3 holds the guest-physical address of the top-level table. */
        pte4    = guest_flat_to_host(self, self->sregs.cr3);
        if (!host_ptr_in_ram(self, pte4))
                return;

        /* Each entry's low 12 bits are flags; mask them to get the address. */
        pte3    = guest_flat_to_host(self, (*pte4 & ~0xfff));
        if (!host_ptr_in_ram(self, pte3))
                return;

        pte2    = guest_flat_to_host(self, (*pte3 & ~0xfff));
        if (!host_ptr_in_ram(self, pte2))
                return;

        pte1    = guest_flat_to_host(self, (*pte2 & ~0xfff));
        if (!host_ptr_in_ram(self, pte1))
                return;

        printf("Page Tables:\n");
        /* Bit 7 (PS) in the level-2 entry means a large page: no level 1. */
        if (*pte2 & (1 << 7))
                printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64
                        "   pte2: %016" PRIx64 "\n",
                        *pte4, *pte3, *pte2);
        else
                printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64 "   pte2: %016"
                        PRIx64 "   pte1: %016" PRIx64 "\n",
                        *pte4, *pte3, *pte2, *pte1);
}
727
/*
 * Hex-dump guest memory starting at flat address 'addr', eight bytes
 * per row.  'size' is rounded down to a multiple of 8; the dump stops
 * early if it would run past the end of guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
        unsigned char *base;
        unsigned long offset;

        size &= ~7UL;   /* whole 8-byte rows only */
        if (size == 0)
                return;

        base = guest_flat_to_host(self, addr);

        for (offset = 0; offset < size; offset += 8) {
                unsigned char *row = base + offset;

                if (!host_ptr_in_ram(self, row))
                        break;

                printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
                        addr + offset, row[0], row[1], row[2], row[3],
                                       row[4], row[5], row[6], row[7]);
        }
}
747 }