/*
 * tools/kvm/kvm.c — KVM launcher core (from karo-tx-linux.git).
 * Commit subject: "kvm: Fix kernel loading error messages".
 */
1 #include "kvm/kvm.h"
2
3 #include "kvm/interrupt.h"
4 #include "kvm/cpufeature.h"
5 #include "kvm/e820.h"
6 #include "kvm/util.h"
7
8 #include <linux/kvm.h>
9
10 #include <asm/bootparam.h>
11
12 #include <sys/ioctl.h>
13 #include <inttypes.h>
14 #include <sys/mman.h>
15 #include <stdbool.h>
16 #include <assert.h>
17 #include <limits.h>
18 #include <stdarg.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <stdio.h>
23 #include <fcntl.h>
24 #include <sys/stat.h>
25
26 /*
27  * Compatibility code. Remove this when we move to tools/kvm.
28  */
29 #ifndef KVM_EXIT_INTERNAL_ERROR
30 # define KVM_EXIT_INTERNAL_ERROR                17
31 #endif
32
/* Expands to a designated initializer mapping an exit code to its name. */
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/*
 * Human-readable names for KVM_EXIT_* codes, indexed by the exit reason
 * value itself (holes in the numbering are NULL entries).
 */
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
55
/* Expands to both the stringified name and the numeric capability code. */
#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

/*
 * KVM capabilities this tool requires; each is probed with
 * KVM_CHECK_EXTENSION at startup (see kvm__check_extensions()).
 */
struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
74
75 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
76 {
77         int ret;
78
79         ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
80         if (ret < 0)
81                 return false;
82
83         return ret;
84 }
85
86 static int kvm__check_extensions(struct kvm *self)
87 {
88         unsigned int i;
89
90         for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
91                 if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
92                         error("Unsuppored KVM extension detected: %s",
93                                 kvm_req_ext[i].name);
94                         return (int)-i;
95                 }
96         }
97
98         return 0;
99 }
100
101 static struct kvm *kvm__new(void)
102 {
103         struct kvm *self = calloc(1, sizeof *self);
104
105         if (!self)
106                 die("out of memory");
107
108         return self;
109 }
110
111 void kvm__delete(struct kvm *self)
112 {
113         free(self->ram_start);
114         free(self);
115 }
116
/*
 * Check whether the host CPU advertises hardware virtualization
 * (Intel VMX or AMD SVM) via CPUID.
 *
 * Returns true only for recognized Intel/AMD vendor strings whose
 * feature leaf reports the respective VM extension bit in ECX.
 */
static bool kvm__cpu_supports_vm(void)
{
	struct cpuid_regs regs;
	uint32_t eax_base;
	int feature;

	/* Leaf 0: vendor identification (EBX holds the first dword). */
	regs	= (struct cpuid_regs) {
		.eax		= 0x00,
	};
	host_cpuid(&regs);

	switch (regs.ebx) {
	case CPUID_VENDOR_INTEL_1:
		/* Intel: VMX bit lives in the standard feature leaf 0x01. */
		eax_base	= 0x00;
		feature		= KVM__X86_FEATURE_VMX;
		break;

	case CPUID_VENDOR_AMD_1:
		/* AMD: SVM bit lives in the extended leaf 0x80000001. */
		eax_base	= 0x80000000;
		feature		= KVM__X86_FEATURE_SVM;
		break;

	default:
		/* Unknown vendor: assume no hardware VM support. */
		return false;
	}

	/* Leaf eax_base reports the highest supported leaf in EAX. */
	regs	= (struct cpuid_regs) {
		.eax		= eax_base,
	};
	host_cpuid(&regs);

	/* Bail out if the feature leaf (base + 1) does not exist. */
	if (regs.eax < eax_base + 0x01)
		return false;

	regs	= (struct cpuid_regs) {
		.eax		= eax_base + 0x01
	};
	host_cpuid(&regs);

	return regs.ecx & (1 << feature);
}
158
159 struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
160 {
161         struct kvm_userspace_memory_region mem;
162         struct kvm_pit_config pit_config = { .flags = 0, };
163         struct kvm *self;
164         long page_size;
165         int mmap_size;
166         int ret;
167
168         if (!kvm__cpu_supports_vm())
169                 die("Your CPU does not support hardware virtualization");
170
171         self = kvm__new();
172
173         self->sys_fd = open(kvm_dev, O_RDWR);
174         if (self->sys_fd < 0) {
175                 if (errno == ENOENT)
176                         die("'%s' not found. Please make sure you have CONFIG_KVM enabled.", kvm_dev);
177
178                 die_perror("open");
179         }
180
181         ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
182         if (ret != KVM_API_VERSION)
183                 die_perror("KVM_API_VERSION ioctl");
184
185         self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
186         if (self->vm_fd < 0)
187                 die_perror("KVM_CREATE_VM ioctl");
188
189         if (kvm__check_extensions(self))
190                 die("A required KVM extention is not supported by OS");
191
192         ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
193         if (ret < 0)
194                 die_perror("KVM_SET_TSS_ADDR ioctl");
195
196         ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
197         if (ret < 0)
198                 die_perror("KVM_CREATE_PIT2 ioctl");
199
200         self->ram_size          = ram_size;
201
202         page_size       = sysconf(_SC_PAGESIZE);
203         if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
204                 die("out of memory");
205
206         mem = (struct kvm_userspace_memory_region) {
207                 .slot                   = 0,
208                 .guest_phys_addr        = 0x0UL,
209                 .memory_size            = self->ram_size,
210                 .userspace_addr         = (unsigned long) self->ram_start,
211         };
212
213         ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
214         if (ret < 0)
215                 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
216
217         ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
218         if (ret < 0)
219                 die_perror("KVM_CREATE_IRQCHIP ioctl");
220
221         self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
222         if (self->vcpu_fd < 0)
223                 die_perror("KVM_CREATE_VCPU ioctl");
224
225         mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
226         if (mmap_size < 0)
227                 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
228
229         self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
230         if (self->kvm_run == MAP_FAILED)
231                 die("unable to mmap vcpu fd");
232
233         return self;
234 }
235
236 void kvm__enable_singlestep(struct kvm *self)
237 {
238         struct kvm_guest_debug debug = {
239                 .control        = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
240         };
241
242         if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
243                 warning("KVM_SET_GUEST_DEBUG failed");
244 }
245
246 #define BOOT_LOADER_SELECTOR    0x1000
247 #define BOOT_LOADER_IP          0x0000
248 #define BOOT_LOADER_SP          0x8000
249 #define BOOT_CMDLINE_OFFSET     0x20000
250
251 #define BOOT_PROTOCOL_REQUIRED  0x202
252 #define LOAD_HIGH               0x01
253
254 static int load_flat_binary(struct kvm *self, int fd)
255 {
256         void *p;
257         int nr;
258
259         if (lseek(fd, 0, SEEK_SET) < 0)
260                 die_perror("lseek");
261
262         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
263
264         while ((nr = read(fd, p, 65536)) > 0)
265                 p += nr;
266
267         self->boot_selector     = BOOT_LOADER_SELECTOR;
268         self->boot_ip           = BOOT_LOADER_IP;
269         self->boot_sp           = BOOT_LOADER_SP;
270
271         return true;
272 }
273
274 /*
275  * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
276  * default.
277  */
278 #define BZ_KERNEL_START                 0x100000UL
279 #define INITRD_START                    0x1000000UL
280 #define BZ_DEFAULT_SETUP_SECTS          4
281 static const char *BZIMAGE_MAGIC        = "HdrS";
282
/*
 * Load an x86 bzImage kernel (and optional initrd) into guest memory.
 *
 * Returns false if the file is not a bzImage (so the caller can try a
 * different format); dies on I/O errors or a too-old boot protocol.
 * On success fills in self->boot_{selector,ip,sp} and installs the BIOS.
 */
static bool load_bzimage(struct kvm *self, int fd_kernel,
			int fd_initrd, const char *kernel_cmdline)
{
	struct boot_params *kern_boot;
	unsigned long setup_sects;
	struct boot_params boot;
	size_t cmdline_size;
	ssize_t setup_size;
	void *p;
	int nr;

	/*
	 * See Documentation/x86/boot.txt for details no bzImage on-disk and
	 * memory layout.
	 */

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	/* Read the setup header; a short file cannot be a bzImage. */
	if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
		return false;

	/* The "HdrS" magic identifies a modern bzImage. */
	if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
		return false;

	if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
		die("Too old kernel");
	}

	if (lseek(fd_kernel, 0, SEEK_SET) < 0)
		die_perror("lseek");

	/* setup_sects == 0 means the legacy default of 4 sectors. */
	if (!boot.hdr.setup_sects)
		boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
	/* +1 for the boot sector itself, which precedes the setup code. */
	setup_sects = boot.hdr.setup_sects + 1;

	/* Sectors are 512 bytes. */
	setup_size = setup_sects << 9;
	p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

	/* copy setup.bin to mem*/
	if (read(fd_kernel, p, setup_size) != setup_size)
		die_perror("read");

	/* copy vmlinux.bin to BZ_KERNEL_START*/
	p = guest_flat_to_host(self, BZ_KERNEL_START);

	while ((nr = read(fd_kernel, p, 65536)) > 0)
		p += nr;

	/* Kernel command line, truncated to the kernel's advertised limit. */
	p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
	if (kernel_cmdline) {
		cmdline_size = strlen(kernel_cmdline) + 1;
		if (cmdline_size > boot.hdr.cmdline_size)
			cmdline_size = boot.hdr.cmdline_size;

		memset(p, 0, boot.hdr.cmdline_size);
		memcpy(p, kernel_cmdline, cmdline_size - 1);
	}

	/* Patch the in-guest copy of the setup header. */
	kern_boot	= guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);

	kern_boot->hdr.cmd_line_ptr	= BOOT_CMDLINE_OFFSET;
	kern_boot->hdr.type_of_loader	= 0xff;	/* "undefined" boot loader id */
	kern_boot->hdr.heap_end_ptr	= 0xfe00;
	kern_boot->hdr.loadflags	|= CAN_USE_HEAP;

	/*
	 * Read initrd image into guest memory
	 */
	if (fd_initrd >= 0) {
		struct stat initrd_stat;
		unsigned long addr;

		if (fstat(fd_initrd, &initrd_stat))
			die_perror("fstat");

		/*
		 * Place the initrd as high as the kernel allows, rounded
		 * down to a 1 MB boundary, stepping down until it fits
		 * inside guest RAM and above the kernel image.
		 */
		addr = boot.hdr.initrd_addr_max & ~0xfffff;
		for (;;) {
			if (addr < BZ_KERNEL_START)
				die("Not enough memory for initrd");
			else if (addr < (self->ram_size - initrd_stat.st_size))
				break;
			addr -= 0x100000;
		}

		p = guest_flat_to_host(self, addr);
		nr = read(fd_initrd, p, initrd_stat.st_size);
		if (nr != initrd_stat.st_size)
			die("Failed to read initrd");

		kern_boot->hdr.ramdisk_image	= addr;
		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
	}

	self->boot_selector	= BOOT_LOADER_SELECTOR;
	/*
	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
	 * Documentation/x86/boot.txt for details.
	 */
	self->boot_ip		= BOOT_LOADER_IP + 0x200;
	self->boot_sp		= BOOT_LOADER_SP;

	/*
	 * Drum roll, BIOS is coming to live, oh dear...
	 */
	setup_bios(self);

	return true;
}
392
/*
 * Load the guest kernel: try bzImage format first, then fall back to a
 * flat binary.  Dies if the kernel (or a requested initrd) cannot be
 * opened, or if neither loader accepts the image.
 *
 * Fixes: both file descriptors were leaked on the success path; they
 * are now closed once loading is complete (die() paths exit anyway).
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
		const char *initrd_filename, const char *kernel_cmdline)
{
	bool ret;
	int fd_kernel = -1, fd_initrd = -1;

	fd_kernel = open(kernel_filename, O_RDONLY);
	if (fd_kernel < 0)
		die("Unable to open kernel %s", kernel_filename);

	if (initrd_filename) {
		fd_initrd = open(initrd_filename, O_RDONLY);
		if (fd_initrd < 0)
			die("Unable to open initrd %s", initrd_filename);
	}

	ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);

	ret = load_flat_binary(kvm, fd_kernel);
	if (ret)
		goto found_kernel;

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	if (fd_initrd >= 0)
		close(fd_initrd);
	close(fd_kernel);

	return ret;
}
424
425 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
426 {
427         uint64_t cs = self->sregs.cs.selector;
428
429         return ip - (cs << 4);
430 }
431
432 static inline bool is_in_protected_mode(struct kvm *self)
433 {
434         return self->sregs.cr0 & 0x01;
435 }
436
437 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
438 {
439         uint64_t cs;
440
441         /*
442          * NOTE! We should take code segment base address into account here.
443          * Luckily it's usually zero because Linux uses flat memory model.
444          */
445         if (is_in_protected_mode(self))
446                 return ip;
447
448         cs = self->sregs.cs.selector;
449
450         return ip + (cs << 4);
451 }
452
/*
 * Real-mode segment base for a selector.
 *
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode,
 * i.e. selector << 4.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t) selector << 4;
}
460
/*
 * Allocate a zeroed kvm_msrs header with room for nmsrs trailing
 * kvm_msr_entry slots; dies on allocation failure.
 */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	size_t size;
	struct kvm_msrs *self;

	size = sizeof(*self) + (sizeof(struct kvm_msr_entry) * nmsrs);
	self = calloc(1, size);
	if (!self)
		die("out of memory");

	return self;
}
470
471 #define MSR_IA32_TIME_STAMP_COUNTER     0x10
472
473 #define MSR_IA32_SYSENTER_CS            0x174
474 #define MSR_IA32_SYSENTER_ESP           0x175
475 #define MSR_IA32_SYSENTER_EIP           0x176
476
477 #define MSR_IA32_STAR                   0xc0000081
478 #define MSR_IA32_LSTAR                  0xc0000082
479 #define MSR_IA32_CSTAR                  0xc0000083
480 #define MSR_IA32_FMASK                  0xc0000084
481 #define MSR_IA32_KERNEL_GS_BASE         0xc0000102
482
483 #define KVM_MSR_ENTRY(_index, _data)    \
484         (struct kvm_msr_entry) { .index = _index, .data = _data }
485
/*
 * Zero the MSRs the guest boot path relies on (SYSENTER, syscall MSRs
 * on 64-bit, and the TSC) and push them to the VCPU with KVM_SET_MSRS.
 */
static void kvm__setup_msrs(struct kvm *self)
{
	unsigned long ndx = 0;

	/* 100 entries is a generous upper bound for the handful set below. */
	self->msrs = kvm_msrs__new(100);

	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,	0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,	0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,	0x0);
#ifdef CONFIG_X86_64
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR,		0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR,		0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE,	0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK,		0x0);
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR,		0x0);
#endif
	self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);

	self->msrs->nmsrs	= ndx;

	if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
		die_perror("KVM_SET_MSRS failed");
}
509
510 static void kvm__setup_fpu(struct kvm *self)
511 {
512         self->fpu = (struct kvm_fpu) {
513                 .fcw            = 0x37f,
514                 .mxcsr          = 0x1f80,
515         };
516
517         if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
518                 die_perror("KVM_SET_FPU failed");
519 }
520
521 static void kvm__setup_regs(struct kvm *self)
522 {
523         self->regs = (struct kvm_regs) {
524                 /* We start the guest in 16-bit real mode  */
525                 .rflags         = 0x0000000000000002ULL,
526
527                 .rip            = self->boot_ip,
528                 .rsp            = self->boot_sp,
529                 .rbp            = self->boot_sp,
530         };
531
532         if (self->regs.rip > USHRT_MAX)
533                 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
534
535         if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
536                 die_perror("KVM_SET_REGS failed");
537 }
538
539 static void kvm__setup_sregs(struct kvm *self)
540 {
541
542         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
543                 die_perror("KVM_GET_SREGS failed");
544
545         self->sregs.cs.selector = self->boot_selector;
546         self->sregs.cs.base     = selector_to_base(self->boot_selector);
547         self->sregs.ss.selector = self->boot_selector;
548         self->sregs.ss.base     = selector_to_base(self->boot_selector);
549         self->sregs.ds.selector = self->boot_selector;
550         self->sregs.ds.base     = selector_to_base(self->boot_selector);
551         self->sregs.es.selector = self->boot_selector;
552         self->sregs.es.base     = selector_to_base(self->boot_selector);
553         self->sregs.fs.selector = self->boot_selector;
554         self->sregs.fs.base     = selector_to_base(self->boot_selector);
555         self->sregs.gs.selector = self->boot_selector;
556         self->sregs.gs.base     = selector_to_base(self->boot_selector);
557
558         if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
559                 die_perror("KVM_SET_SREGS failed");
560 }
561
/*
 * Bring the VCPU to its initial boot state: segment registers first,
 * then general-purpose registers, FPU, and MSRs.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);
	kvm__setup_regs(self);
	kvm__setup_fpu(self);
	kvm__setup_msrs(self);
}
572
/*
 * Build the four-entry E820 memory map the BIOS hands to the guest:
 * usable low memory up to the EBDA, reserved EBDA and BIOS regions,
 * and usable RAM from the kernel load address to the end of guest RAM.
 */
void kvm__setup_mem(struct kvm *self)
{
	struct e820_entry *mem_map;
	unsigned char *size;

	/* The entry count byte and the map itself live in guest memory. */
	size		= guest_flat_to_host(self, E820_MAP_SIZE);
	mem_map		= guest_flat_to_host(self, E820_MAP_START);

	*size		= 4;

	mem_map[0]	= (struct e820_entry) {
		.addr		= REAL_MODE_IVT_BEGIN,
		.size		= EBDA_START - REAL_MODE_IVT_BEGIN,
		.type		= E820_MEM_USABLE,
	};
	mem_map[1]	= (struct e820_entry) {
		.addr		= EBDA_START,
		.size		= VGA_RAM_BEGIN - EBDA_START,
		.type		= E820_MEM_RESERVED,
	};
	mem_map[2]	= (struct e820_entry) {
		.addr		= MB_BIOS_BEGIN,
		.size		= MB_BIOS_END - MB_BIOS_BEGIN,
		.type		= E820_MEM_RESERVED,
	};
	mem_map[3]	= (struct e820_entry) {
		.addr		= BZ_KERNEL_START,
		.size		= self->ram_size - BZ_KERNEL_START,
		.type		= E820_MEM_USABLE,
	};
}
604
605 void kvm__run(struct kvm *self)
606 {
607         if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
608                 die_perror("KVM_RUN failed");
609 }
610
611 void kvm__irq_line(struct kvm *self, int irq, int level)
612 {
613         struct kvm_irq_level irq_level;
614
615         irq_level       = (struct kvm_irq_level) {
616                 {
617                         .irq            = irq,
618                 },
619                 .level          = level,
620         };
621
622         if (ioctl(self->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
623                 die_perror("KVM_IRQ_LINE failed");
624 }
625
/* Print one descriptor-table register (GDT/IDT) as base and limit. */
static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	printf(" %s                 %016" PRIx64 "  %08" PRIx16 "\n",
		name, (uint64_t) dtable->base, (uint16_t) dtable->limit);
}
631
/*
 * Print one segment register: selector, base, limit, type, and the
 * p/dpl/db/s/l/g/avl attribute bits (matches the header printed by
 * kvm__show_registers()).
 */
static void print_segment(const char *name, struct kvm_segment *seg)
{
	printf(" %s       %04" PRIx16 "      %016" PRIx64 "  %08" PRIx32 "  %02" PRIx8 "    %x %x   %x  %x %x %x %x\n",
		name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
		(uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
638
639 void kvm__show_registers(struct kvm *self)
640 {
641         unsigned long cr0, cr2, cr3;
642         unsigned long cr4, cr8;
643         unsigned long rax, rbx, rcx;
644         unsigned long rdx, rsi, rdi;
645         unsigned long rbp,  r8,  r9;
646         unsigned long r10, r11, r12;
647         unsigned long r13, r14, r15;
648         unsigned long rip, rsp;
649         struct kvm_sregs sregs;
650         unsigned long rflags;
651         struct kvm_regs regs;
652         int i;
653
654         if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
655                 die("KVM_GET_REGS failed");
656
657         rflags = regs.rflags;
658
659         rip = regs.rip; rsp = regs.rsp;
660         rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
661         rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
662         rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
663         r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
664         r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
665
666         printf("Registers:\n");
667         printf(" rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
668         printf(" rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
669         printf(" rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
670         printf(" rbp: %016lx   r8:  %016lx   r9:  %016lx\n", rbp, r8,  r9);
671         printf(" r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
672         printf(" r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);
673
674         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
675                 die("KVM_GET_REGS failed");
676
677         cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
678         cr4 = sregs.cr4; cr8 = sregs.cr8;
679
680         printf(" cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
681         printf(" cr4: %016lx   cr8: %016lx\n", cr4, cr8);
682         printf("Segment registers:\n");
683         printf(" register  selector  base              limit     type  p dpl db s l g avl\n");
684         print_segment("cs ", &sregs.cs);
685         print_segment("ss ", &sregs.ss);
686         print_segment("ds ", &sregs.ds);
687         print_segment("es ", &sregs.es);
688         print_segment("fs ", &sregs.fs);
689         print_segment("gs ", &sregs.gs);
690         print_segment("tr ", &sregs.tr);
691         print_segment("ldt", &sregs.ldt);
692         print_dtable("gdt", &sregs.gdt);
693         print_dtable("idt", &sregs.idt);
694         printf(" [ efer: %016" PRIx64 "  apic base: %016" PRIx64 "  nmi: %s ]\n",
695                 (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
696                 (self->nmi_disabled ? "disabled" : "enabled"));
697         printf("Interrupt bitmap:\n");
698         printf(" ");
699         for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
700                 printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
701         printf("\n");
702 }
703
/*
 * Dump the guest code surrounding the current instruction pointer
 * (64 bytes, with the byte at RIP highlighted in <angle brackets>),
 * followed by 32 bytes of stack, for crash diagnostics.
 */
void kvm__show_code(struct kvm *self)
{
	unsigned int code_bytes = 64;
	/* Show roughly 43/64 of the window *before* RIP, rest after. */
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;
	unsigned char c;
	unsigned int i;
	uint8_t *ip;

	if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
		die("KVM_GET_REGS failed");

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);

	printf("Code: ");

	for (i = 0; i < code_len; i++, ip++) {
		/* Stop if the window runs off the end of guest RAM. */
		if (!host_ptr_in_ram(self, ip))
			break;

		c = *ip;

		/* Highlight the byte RIP currently points at. */
		if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
			printf("<%02x> ", c);
		else
			printf("%02x ", c);
	}

	printf("\n");

	printf("Stack:\n");
	kvm__dump_mem(self, self->regs.rsp, 32);
}
740
/*
 * Walk and print the page-table entries mapping guest address 0:
 * CR3 -> pte4 -> pte3 -> pte2 -> pte1 (entry 0 at each level, since
 * each table is read at its base).  Only meaningful in protected mode.
 *
 * NOTE(review): assumes a 4-level layout and always follows index 0;
 * appears intended for debugging the guest's identity-mapped low
 * memory — confirm against callers.
 */
void kvm__show_page_tables(struct kvm *self)
{
	uint64_t *pte1;
	uint64_t *pte2;
	uint64_t *pte3;
	uint64_t *pte4;

	/* Paging is off in real mode; nothing to show. */
	if (!is_in_protected_mode(self))
		return;

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	pte4	= guest_flat_to_host(self, self->sregs.cr3);
	if (!host_ptr_in_ram(self, pte4))
		return;

	/* Each entry's low 12 bits are flags; mask to get the next table. */
	pte3	= guest_flat_to_host(self, (*pte4 & ~0xfff));
	if (!host_ptr_in_ram(self, pte3))
		return;

	pte2	= guest_flat_to_host(self, (*pte3 & ~0xfff));
	if (!host_ptr_in_ram(self, pte2))
		return;

	pte1	= guest_flat_to_host(self, (*pte2 & ~0xfff));
	if (!host_ptr_in_ram(self, pte1))
		return;

	printf("Page Tables:\n");
	/* Bit 7 (PS) in pte2 marks a large page: no pte1 level exists. */
	if (*pte2 & (1 << 7))
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64
			"   pte2: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2);
	else
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64 "   pte2: %016"
			PRIx64 "   pte1: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2, *pte1);
}
780
/*
 * Hex-dump guest memory starting at flat address 'addr'.  The size is
 * rounded down to a multiple of 8 and the dump stops early if it would
 * run past the end of guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned char *base;

	size &= ~7; /* mod 8 */
	if (!size)
		return;

	base = guest_flat_to_host(self, addr);

	for (offset = 0; offset < size; offset += 8) {
		unsigned char *row = base + offset;

		if (!host_ptr_in_ram(self, row))
			break;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + offset, row[0], row[1], row[2], row[3],
				       row[4], row[5], row[6], row[7]);
	}
}