/*
 * tools/kvm/kvm.c — KVM virtualization API wrapper
 * (from karo-tx-linux.git; merge of git://github.com/cyrillos/vm)
 */
#include "kvm/kvm.h"

#include "kvm/interrupt.h"
#include "kvm/cpufeature.h"
#include "kvm/e820.h"
#include "kvm/util.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
25
26 /*
27  * Compatibility code. Remove this when we move to tools/kvm.
28  */
29 #ifndef KVM_EXIT_INTERNAL_ERROR
30 # define KVM_EXIT_INTERNAL_ERROR                17
31 #endif
32
/* Expands to a designated initializer: [KVM_EXIT_FOO] = "KVM_EXIT_FOO". */
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/*
 * Human-readable names for kvm_run->exit_reason codes, indexed by the
 * KVM_EXIT_* value itself.  Designated initializers leave any gaps NULL,
 * so callers should check for NULL before printing.
 */
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
55
/* Expands to the .name/.code initializers for one required extension. */
#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

/*
 * KVM extensions this tool cannot run without.  kvm__check_extensions()
 * verifies each entry against the host at startup.
 */
struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
74
75 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
76 {
77         int ret;
78
79         ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
80         if (ret < 0)
81                 return false;
82
83         return ret;
84 }
85
86 static int kvm__check_extensions(struct kvm *self)
87 {
88         unsigned int i;
89
90         for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
91                 if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
92                         error("Unsuppored KVM extension detected: %s",
93                                 kvm_req_ext[i].name);
94                         return (int)-i;
95                 }
96         }
97
98         return 0;
99 }
100
101 static struct kvm *kvm__new(void)
102 {
103         struct kvm *self = calloc(1, sizeof *self);
104
105         if (!self)
106                 die("out of memory");
107
108         return self;
109 }
110
111 void kvm__delete(struct kvm *self)
112 {
113         free(self->ram_start);
114         free(self);
115 }
116
117 static bool kvm__cpu_supports_vm(void)
118 {
119         struct cpuid_regs regs;
120         uint32_t eax_base;
121         int feature;
122
123         regs    = (struct cpuid_regs) {
124                 .eax            = 0x00,
125         };
126         host_cpuid(&regs);
127
128         switch (regs.ebx) {
129         case CPUID_VENDOR_INTEL_1:
130                 eax_base        = 0x00;
131                 feature         = KVM__X86_FEATURE_VMX;
132                 break;
133
134         case CPUID_VENDOR_AMD_1:
135                 eax_base        = 0x80000000;
136                 feature         = KVM__X86_FEATURE_SVM;
137                 break;
138
139         default:
140                 return false;
141         }
142
143         regs    = (struct cpuid_regs) {
144                 .eax            = eax_base,
145         };
146         host_cpuid(&regs);
147
148         if (regs.eax < eax_base + 0x01)
149                 return false;
150
151         regs    = (struct cpuid_regs) {
152                 .eax            = eax_base + 0x01
153         };
154         host_cpuid(&regs);
155
156         return regs.ecx & (1 << feature);
157 }
158
159 struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
160 {
161         struct kvm_userspace_memory_region mem;
162         struct kvm_pit_config pit_config = { .flags = 0, };
163         struct kvm *self;
164         long page_size;
165         int mmap_size;
166         int ret;
167
168         if (!kvm__cpu_supports_vm())
169                 die("Your CPU does not support hardware virtualization");
170
171         self = kvm__new();
172
173         self->sys_fd = open(kvm_dev, O_RDWR);
174         if (self->sys_fd < 0) {
175                 if (errno == ENOENT)
176                         die("'%s' not found. Please make sure you have CONFIG_KVM enabled.", kvm_dev);
177
178                 die_perror("open");
179         }
180
181         ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
182         if (ret != KVM_API_VERSION)
183                 die_perror("KVM_API_VERSION ioctl");
184
185         self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
186         if (self->vm_fd < 0)
187                 die_perror("KVM_CREATE_VM ioctl");
188
189         if (kvm__check_extensions(self))
190                 die("A required KVM extention is not supported by OS");
191
192         ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
193         if (ret < 0)
194                 die_perror("KVM_SET_TSS_ADDR ioctl");
195
196         ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
197         if (ret < 0)
198                 die_perror("KVM_CREATE_PIT2 ioctl");
199
200         self->ram_size          = ram_size;
201
202         page_size       = sysconf(_SC_PAGESIZE);
203         if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
204                 die("out of memory");
205
206         mem = (struct kvm_userspace_memory_region) {
207                 .slot                   = 0,
208                 .guest_phys_addr        = 0x0UL,
209                 .memory_size            = self->ram_size,
210                 .userspace_addr         = (unsigned long) self->ram_start,
211         };
212
213         ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
214         if (ret < 0)
215                 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
216
217         ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
218         if (ret < 0)
219                 die_perror("KVM_CREATE_IRQCHIP ioctl");
220
221         self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
222         if (self->vcpu_fd < 0)
223                 die_perror("KVM_CREATE_VCPU ioctl");
224
225         mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
226         if (mmap_size < 0)
227                 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
228
229         self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
230         if (self->kvm_run == MAP_FAILED)
231                 die("unable to mmap vcpu fd");
232
233         return self;
234 }
235
236 void kvm__enable_singlestep(struct kvm *self)
237 {
238         struct kvm_guest_debug debug = {
239                 .control        = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
240         };
241
242         if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
243                 warning("KVM_SET_GUEST_DEBUG failed");
244 }
245
246 #define BOOT_LOADER_SELECTOR    0x1000
247 #define BOOT_LOADER_IP          0x0000
248 #define BOOT_LOADER_SP          0x8000
249 #define BOOT_CMDLINE_OFFSET     0x20000
250
251 #define BOOT_PROTOCOL_REQUIRED  0x202
252 #define LOAD_HIGH               0x01
253
254 static int load_flat_binary(struct kvm *self, int fd)
255 {
256         void *p;
257         int nr;
258
259         if (lseek(fd, 0, SEEK_SET) < 0)
260                 die_perror("lseek");
261
262         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
263
264         while ((nr = read(fd, p, 65536)) > 0)
265                 p += nr;
266
267         self->boot_selector     = BOOT_LOADER_SELECTOR;
268         self->boot_ip           = BOOT_LOADER_IP;
269         self->boot_sp           = BOOT_LOADER_SP;
270
271         return true;
272 }
273
274 /*
275  * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
276  * default.
277  */
278 #define BZ_KERNEL_START                 0x100000UL
279 #define INITRD_START                    0x1000000UL
280 #define BZ_DEFAULT_SETUP_SECTS          4
281 static const char *BZIMAGE_MAGIC        = "HdrS";
282
/*
 * Load a Linux bzImage and optional initrd into guest memory and fill
 * in the boot-protocol header fields the kernel expects from a boot
 * loader.  Returns false (without dying) when the image does not look
 * like a usable bzImage, so the caller can try other formats.
 *
 * See Documentation/x86/boot.txt for details on bzImage on-disk and
 * memory layout.
 */
static bool load_bzimage(struct kvm *self, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	/* Read the setup header so we can validate and size the image. */
	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot)) {
		warning("Failed to read kernel boot area");
		return false;
	}

	/* A bzImage carries the "HdrS" magic in its setup header. */
	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC))) {
		warning("Kernel header corrupted");
		return false;
	}

	/* We depend on boot protocol >= 2.02 (cmd_line_ptr et al). */
	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
		warning("Too old kernel");
		return false;
	}

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	/* A zero setup_sects means the historical default of 4. */
	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	setup_sects = boot.hdr.setup_sects + 1;	/* +1 for the boot sector */

	setup_size = setup_sects << 9;		/* 512-byte sectors */
	p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to mem*/
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START*/
	p = guest_flat_to_host(self, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	/* Place the command line, truncated to what the kernel accepts. */
	p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	/*
	 * Patch the boot header that now lives in guest memory (it was
	 * copied in as the start of the setup code above).
	 */
	kern_boot	= guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;		/* "undefined" loader ID */
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;

	/*
	 * Read initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		/*
		 * Start at the highest megabyte-aligned address the kernel
		 * accepts and walk downward until the initrd fits below the
		 * end of guest RAM, never descending below the kernel.
		 */
		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (self->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

		p = guest_flat_to_host(self, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	self->boot_selector	= BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	self->boot_ip		= BOOT_LOADER_IP + 0x200;
	self->boot_sp		= BOOT_LOADER_SP;

	/*
	 * Drum roll, BIOS is coming to live, oh dear...
	 */
	setup_bios(self);

	return true;
}
397
/*
 * Load the guest kernel (bzImage, falling back to flat binary) and the
 * optional initrd into guest memory.  Dies if the files cannot be
 * opened or neither format loads.  Returns true on success.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("unable to open kernel");

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("unable to open initrd");
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	/* Both images are fully copied into guest RAM; the fds leaked before. */
	if (fd_initrd >= 0)
		close(fd_initrd);
	close(fd_kernel);

	return ret;
}
427
428 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
429 {
430         uint64_t cs = self->sregs.cs.selector;
431
432         return ip - (cs << 4);
433 }
434
435 static inline bool is_in_protected_mode(struct kvm *self)
436 {
437         return self->sregs.cr0 & 0x01;
438 }
439
440 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
441 {
442         uint64_t cs;
443
444         /*
445          * NOTE! We should take code segment base address into account here.
446          * Luckily it's usually zero because Linux uses flat memory model.
447          */
448         if (is_in_protected_mode(self))
449                 return ip;
450
451         cs = self->sregs.cs.selector;
452
453         return ip + (cs << 4);
454 }
455
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 * The shift form makes the 4-bit real-mode segment arithmetic obvious.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t)selector << 4;
}
463
/*
 * Allocate a zeroed kvm_msrs header with room for @nmsrs trailing
 * kvm_msr_entry records.  Dies on allocation failure.
 */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	size_t size = sizeof(struct kvm_msrs)
			+ nmsrs * sizeof(struct kvm_msr_entry);
	struct kvm_msrs *msrs = calloc(1, size);

	if (!msrs)
		die("out of memory");

	return msrs;
}
473
474 #define MSR_IA32_TIME_STAMP_COUNTER     0x10
475
476 #define MSR_IA32_SYSENTER_CS            0x174
477 #define MSR_IA32_SYSENTER_ESP           0x175
478 #define MSR_IA32_SYSENTER_EIP           0x176
479
480 #define MSR_IA32_STAR                   0xc0000081
481 #define MSR_IA32_LSTAR                  0xc0000082
482 #define MSR_IA32_CSTAR                  0xc0000083
483 #define MSR_IA32_FMASK                  0xc0000084
484 #define MSR_IA32_KERNEL_GS_BASE         0xc0000102
485
486 #define KVM_MSR_ENTRY(_index, _data)    \
487         (struct kvm_msr_entry) { .index = _index, .data = _data }
488
489 static void kvm__setup_msrs(struct kvm *self)
490 {
491         unsigned long ndx = 0;
492
493         self->msrs = kvm_msrs__new(100);
494
495         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,        0x0);
496         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,       0x0);
497         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,       0x0);
498 #ifdef CONFIG_X86_64
499         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR,               0x0);
500         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR,              0x0);
501         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE,     0x0);
502         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK,              0x0);
503         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR,              0x0);
504 #endif
505         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);
506
507         self->msrs->nmsrs       = ndx;
508
509         if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
510                 die_perror("KVM_SET_MSRS failed");
511 }
512
513 static void kvm__setup_fpu(struct kvm *self)
514 {
515         self->fpu = (struct kvm_fpu) {
516                 .fcw            = 0x37f,
517                 .mxcsr          = 0x1f80,
518         };
519
520         if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
521                 die_perror("KVM_SET_FPU failed");
522 }
523
524 static void kvm__setup_regs(struct kvm *self)
525 {
526         self->regs = (struct kvm_regs) {
527                 /* We start the guest in 16-bit real mode  */
528                 .rflags         = 0x0000000000000002ULL,
529
530                 .rip            = self->boot_ip,
531                 .rsp            = self->boot_sp,
532                 .rbp            = self->boot_sp,
533         };
534
535         if (self->regs.rip > USHRT_MAX)
536                 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
537
538         if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
539                 die_perror("KVM_SET_REGS failed");
540 }
541
542 static void kvm__setup_sregs(struct kvm *self)
543 {
544
545         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
546                 die_perror("KVM_GET_SREGS failed");
547
548         self->sregs.cs.selector = self->boot_selector;
549         self->sregs.cs.base     = selector_to_base(self->boot_selector);
550         self->sregs.ss.selector = self->boot_selector;
551         self->sregs.ss.base     = selector_to_base(self->boot_selector);
552         self->sregs.ds.selector = self->boot_selector;
553         self->sregs.ds.base     = selector_to_base(self->boot_selector);
554         self->sregs.es.selector = self->boot_selector;
555         self->sregs.es.base     = selector_to_base(self->boot_selector);
556         self->sregs.fs.selector = self->boot_selector;
557         self->sregs.fs.base     = selector_to_base(self->boot_selector);
558         self->sregs.gs.selector = self->boot_selector;
559         self->sregs.gs.base     = selector_to_base(self->boot_selector);
560
561         if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
562                 die_perror("KVM_SET_SREGS failed");
563 }
564
/*
 * Bring the vcpu to its initial boot state: segment registers first,
 * then general registers, FPU, and MSRs.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);
	kvm__setup_regs(self);
	kvm__setup_fpu(self);
	kvm__setup_msrs(self);
}
575
576 void kvm__setup_mem(struct kvm *self)
577 {
578         struct e820_entry *mem_map;
579         unsigned char *size;
580
581         size            = guest_flat_to_host(self, E820_MAP_SIZE);
582         mem_map         = guest_flat_to_host(self, E820_MAP_START);
583
584         *size           = 4;
585
586         mem_map[0]      = (struct e820_entry) {
587                 .addr           = REAL_MODE_IVT_BEGIN,
588                 .size           = EBDA_START - REAL_MODE_IVT_BEGIN,
589                 .type           = E820_MEM_USABLE,
590         };
591         mem_map[1]      = (struct e820_entry) {
592                 .addr           = EBDA_START,
593                 .size           = VGA_RAM_BEGIN - EBDA_START,
594                 .type           = E820_MEM_RESERVED,
595         };
596         mem_map[2]      = (struct e820_entry) {
597                 .addr           = MB_BIOS_BEGIN,
598                 .size           = MB_BIOS_END - MB_BIOS_BEGIN,
599                 .type           = E820_MEM_RESERVED,
600         };
601         mem_map[3]      = (struct e820_entry) {
602                 .addr           = BZ_KERNEL_START,
603                 .size           = self->ram_size - BZ_KERNEL_START,
604                 .type           = E820_MEM_USABLE,
605         };
606 }
607
608 void kvm__run(struct kvm *self)
609 {
610         if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
611                 die_perror("KVM_RUN failed");
612 }
613
/* Print one descriptor-table register (GDT/IDT): base address and limit. */
static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	printf(" %s                 %016" PRIx64 "  %08" PRIx16 "\n",
		name, (uint64_t) dtable->base, (uint16_t) dtable->limit);
}
619
/*
 * Print one segment register: selector, base, limit, type, and the
 * access flags (present, dpl, db, s, l, g, avl) in that order —
 * matching the column header printed by kvm__show_registers().
 */
static void print_segment(const char *name, struct kvm_segment *seg)
{
	printf(" %s       %04" PRIx16 "      %016" PRIx64 "  %08" PRIx32 "  %02" PRIx8 "    %x %x   %x  %x %x %x %x\n",
		name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
		(uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
626
627 void kvm__show_registers(struct kvm *self)
628 {
629         unsigned long cr0, cr2, cr3;
630         unsigned long cr4, cr8;
631         unsigned long rax, rbx, rcx;
632         unsigned long rdx, rsi, rdi;
633         unsigned long rbp,  r8,  r9;
634         unsigned long r10, r11, r12;
635         unsigned long r13, r14, r15;
636         unsigned long rip, rsp;
637         struct kvm_sregs sregs;
638         unsigned long rflags;
639         struct kvm_regs regs;
640         int i;
641
642         if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
643                 die("KVM_GET_REGS failed");
644
645         rflags = regs.rflags;
646
647         rip = regs.rip; rsp = regs.rsp;
648         rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
649         rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
650         rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
651         r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
652         r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
653
654         printf("Registers:\n");
655         printf(" rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
656         printf(" rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
657         printf(" rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
658         printf(" rbp: %016lx   r8:  %016lx   r9:  %016lx\n", rbp, r8,  r9);
659         printf(" r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
660         printf(" r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);
661
662         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
663                 die("KVM_GET_REGS failed");
664
665         cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
666         cr4 = sregs.cr4; cr8 = sregs.cr8;
667
668         printf(" cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
669         printf(" cr4: %016lx   cr8: %016lx\n", cr4, cr8);
670         printf("Segment registers:\n");
671         printf(" register  selector  base              limit     type  p dpl db s l g avl\n");
672         print_segment("cs ", &sregs.cs);
673         print_segment("ss ", &sregs.ss);
674         print_segment("ds ", &sregs.ds);
675         print_segment("es ", &sregs.es);
676         print_segment("fs ", &sregs.fs);
677         print_segment("gs ", &sregs.gs);
678         print_segment("tr ", &sregs.tr);
679         print_segment("ldt", &sregs.ldt);
680         print_dtable("gdt", &sregs.gdt);
681         print_dtable("idt", &sregs.idt);
682         printf(" [ efer: %016" PRIx64 "  apic base: %016" PRIx64 "  nmi: %s ]\n",
683                 (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
684                 (self->nmi_disabled ? "disabled" : "enabled"));
685         printf("Interrupt bitmap:\n");
686         printf(" ");
687         for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
688                 printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
689         printf("\n");
690 }
691
/*
 * Dump the guest code surrounding the current instruction pointer and
 * the top of the guest stack.  The byte RIP points at is printed in
 * angle brackets.
 */
void kvm__show_code(struct kvm *self)
{
	unsigned int code_bytes = 64;
	/* Roughly two thirds of the window is shown before RIP. */
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;
	unsigned char c;
	unsigned int i;
	uint8_t *ip;

	if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
		die("KVM_GET_REGS failed");

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	/* Translate RIP to a host pointer, backing up by the prologue. */
	ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);

	printf("Code: ");

	for (i = 0; i < code_len; i++, ip++) {
		/* Stop at the edge of guest RAM rather than fault. */
		if (!host_ptr_in_ram(self, ip))
			break;

		c = *ip;

		/* Highlight the byte RIP points at. */
		if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
			printf("<%02x> ", c);
		else
			printf("%02x ", c);
	}

	printf("\n");

	printf("Stack:\n");
	kvm__dump_mem(self, self->regs.rsp, 32);
}
728
/*
 * Walk and print the first entry of each page-table level starting at
 * CR3.  NOTE(review): assumes 4-level paging and only follows entry 0
 * at every level — confirm this matches the guests being debugged.
 */
void kvm__show_page_tables(struct kvm *self)
{
	uint64_t *pte1;
	uint64_t *pte2;
	uint64_t *pte3;
	uint64_t *pte4;

	/* Paging is meaningless before the guest enables protected mode. */
	if (!is_in_protected_mode(self))
		return;

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	pte4	= guest_flat_to_host(self, self->sregs.cr3);
	if (!host_ptr_in_ram(self, pte4))
		return;

	/* Each entry carries the next level's physical address above bit 11. */
	pte3	= guest_flat_to_host(self, (*pte4 & ~0xfff));
	if (!host_ptr_in_ram(self, pte3))
		return;

	pte2	= guest_flat_to_host(self, (*pte3 & ~0xfff));
	if (!host_ptr_in_ram(self, pte2))
		return;

	pte1	= guest_flat_to_host(self, (*pte2 & ~0xfff));
	if (!host_ptr_in_ram(self, pte1))
		return;

	printf("Page Tables:\n");
	/* Bit 7 (PS) set in the level-2 entry means a large page: no pte1. */
	if (*pte2 & (1 << 7))
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64
			"   pte2: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2);
	else
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64 "   pte2: %016"
			PRIx64 "   pte1: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2, *pte1);
}
768
/*
 * Hex-dump @size bytes of guest memory starting at guest-flat @addr,
 * eight bytes per row.  The size is rounded down to a multiple of 8
 * and the dump stops at the edge of guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned char *p;

	/* Whole 8-byte rows only. */
	size &= ~7;
	if (size == 0)
		return;

	p = guest_flat_to_host(self, addr);

	for (offset = 0; offset < size; offset += 8) {
		if (!host_ptr_in_ram(self, p + offset))
			return;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + offset,
			p[offset + 0], p[offset + 1], p[offset + 2], p[offset + 3],
			p[offset + 4], p[offset + 5], p[offset + 6], p[offset + 7]);
	}
}