]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - tools/kvm/kvm.c
kvm: Introduce '--kvm-dev' command line option
[karo-tx-linux.git] / tools / kvm / kvm.c
1 #include "kvm/kvm.h"
2
3 #include "kvm/interrupt.h"
4 #include "kvm/util.h"
5
6 #include <linux/kvm.h>
7
8 #include <asm/bootparam.h>
9
10 #include <sys/ioctl.h>
11 #include <inttypes.h>
12 #include <sys/mman.h>
13 #include <stdbool.h>
14 #include <assert.h>
15 #include <limits.h>
16 #include <stdarg.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <unistd.h>
20 #include <stdio.h>
21 #include <fcntl.h>
22
23 /*
24  * Compatibility code. Remove this when we move to tools/kvm.
25  */
26 #ifndef KVM_EXIT_INTERNAL_ERROR
27 # define KVM_EXIT_INTERNAL_ERROR                17
28 #endif
29
/*
 * Expand a KVM_EXIT_* code into a designated initializer whose index is
 * the exit code and whose value is its symbolic name, so the array below
 * can be indexed directly by struct kvm_run::exit_reason.
 */
#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/* Human-readable names for KVM exit reasons, for diagnostics. */
const char *kvm_exit_reasons[] = {
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
	DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};
52
/* Expand a KVM_CAP_* constant into a name/code initializer pair. */
#define DEFINE_KVM_EXT(ext)		\
	.name = #ext,			\
	.code = ext

/*
 * KVM capabilities this tool requires; kvm__check_extensions() probes
 * each one via KVM_CHECK_EXTENSION and refuses to run if any is missing.
 */
struct {
	const char *name;
	int code;
} kvm_req_ext[] = {
	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
	{ DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
	{ DEFINE_KVM_EXT(KVM_CAP_PIT2) },
	{ DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
	{ DEFINE_KVM_EXT(KVM_CAP_HLT) },
	{ DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
	{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
71
72 static inline bool host_ptr_in_ram(struct kvm *self, void *p)
73 {
74         return self->ram_start <= p && p < (self->ram_start + self->ram_size);
75 }
76
/*
 * Convert a real-mode segment:offset pair into a flat (linear) address:
 * segment bases are the selector shifted left by 4 bits.
 */
static inline uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
{
	uint32_t base = (uint32_t) selector << 4;

	return base + offset;
}
81
82 static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
83 {
84         return self->ram_start + offset;
85 }
86
/*
 * Translate a real-mode segment:offset address into a host pointer by
 * first flattening it and then mapping it into guest RAM.
 */
static inline void *guest_real_to_host(struct kvm *self, uint16_t selector, uint16_t offset)
{
	return guest_flat_to_host(self, segment_to_flat(selector, offset));
}
93
94 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
95 {
96         int ret;
97
98         ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
99         if (ret < 0)
100                 return false;
101
102         return ret;
103 }
104
105 static int kvm__check_extensions(struct kvm *self)
106 {
107         unsigned int i;
108
109         for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
110                 if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
111                         error("Unsuppored KVM extension detected: %s",
112                                 kvm_req_ext[i].name);
113                         return (int)-i;
114                 }
115         }
116
117         return 0;
118 }
119
120 static struct kvm *kvm__new(void)
121 {
122         struct kvm *self = calloc(1, sizeof *self);
123
124         if (!self)
125                 die("out of memory");
126
127         return self;
128 }
129
/*
 * Free a VM instance created by kvm__init(): releases the guest RAM
 * buffer and the struct kvm itself.
 *
 * NOTE(review): the sys/vm/vcpu file descriptors and the mmap'ed
 * kvm_run area from kvm__init() are not released here — presumably the
 * process exits right after; confirm before using this for mid-run
 * teardown.
 */
void kvm__delete(struct kvm *self)
{
	free(self->ram_start);
	free(self);
}
135
/*
 * Bring up a complete VM: open the KVM device, create a VM with an
 * in-kernel PIT and IRQ chip, 64 MB of guest RAM mapped at guest
 * physical 0, a single VCPU, and the mmap'ed kvm_run communication
 * area.
 *
 * kvm_dev: path of the KVM character device (e.g. "/dev/kvm").
 *
 * Dies with a diagnostic on any failure; on success returns a fully
 * initialized struct kvm owned by the caller (free with kvm__delete()).
 * The sequence of ioctls below is order-dependent (VM before TSS/PIT/
 * memory/IRQCHIP, VCPU before the kvm_run mmap).
 */
struct kvm *kvm__init(const char *kvm_dev)
{
	struct kvm_userspace_memory_region mem;
	struct kvm_pit_config pit_config = { .flags = 0, };
	struct kvm *self;
	long page_size;
	int mmap_size;
	int ret;

	self = kvm__new();

	self->sys_fd = open(kvm_dev, O_RDWR);
	if (self->sys_fd < 0) {
		/* ENOENT almost always means no kvm module/device node. */
		if (errno == ENOENT)
			die("'%s' not found. Please make sure you have CONFIG_KVM enabled.", kvm_dev);

		die_perror("open");
	}

	/* We only speak one stable API version; anything else is fatal. */
	ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
	if (ret != KVM_API_VERSION)
		die_perror("KVM_API_VERSION ioctl");

	self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
	if (self->vm_fd < 0)
		die_perror("KVM_CREATE_VM ioctl");

	if (kvm__check_extensions(self))
		die("A required KVM extention is not supported by OS");

	/* Guest-physical address reserved for the TSS pages KVM needs. */
	ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
	if (ret < 0)
		die_perror("KVM_SET_TSS_ADDR ioctl");

	/* In-kernel i8254 PIT with default flags. */
	ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
	if (ret < 0)
		die_perror("KVM_CREATE_PIT2 ioctl");

	self->ram_size		= 64UL * 1024UL * 1024UL;

	/* Guest RAM must be page-aligned for KVM_SET_USER_MEMORY_REGION. */
	page_size	= sysconf(_SC_PAGESIZE);
	if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
		die("out of memory");

	/* Map the whole RAM buffer at guest physical address 0, slot 0. */
	mem = (struct kvm_userspace_memory_region) {
		.slot			= 0,
		.guest_phys_addr	= 0x0UL,
		.memory_size		= self->ram_size,
		.userspace_addr		= (unsigned long) self->ram_start,
	};

	ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
	if (ret < 0)
		die_perror("KVM_SET_USER_MEMORY_REGION ioctl");

	ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
	if (ret < 0)
		die_perror("KVM_CREATE_IRQCHIP ioctl");

	self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
	if (self->vcpu_fd < 0)
		die_perror("KVM_CREATE_VCPU ioctl");

	/* The kvm_run structure size is kernel-defined; ask, then mmap it. */
	mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	if (mmap_size < 0)
		die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

	self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
	if (self->kvm_run == MAP_FAILED)
		die("unable to mmap vcpu fd");

	return self;
}
209
210 void kvm__enable_singlestep(struct kvm *self)
211 {
212         struct kvm_guest_debug debug = {
213                 .control        = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
214         };
215
216         if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
217                 warning("KVM_SET_GUEST_DEBUG failed");
218 }
219
220 #define BOOT_LOADER_SELECTOR    0x1000
221 #define BOOT_LOADER_IP          0x0000
222 #define BOOT_LOADER_SP          0x8000
223 #define BOOT_CMDLINE_OFFSET     0x20000
224
225 #define BOOT_PROTOCOL_REQUIRED  0x202
226 #define LOAD_HIGH               0x01
227
228 static int load_flat_binary(struct kvm *self, int fd)
229 {
230         void *p;
231         int nr;
232
233         if (lseek(fd, 0, SEEK_SET) < 0)
234                 die_perror("lseek");
235
236         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
237
238         while ((nr = read(fd, p, 65536)) > 0)
239                 p += nr;
240
241         self->boot_selector     = BOOT_LOADER_SELECTOR;
242         self->boot_ip           = BOOT_LOADER_IP;
243         self->boot_sp           = BOOT_LOADER_SP;
244
245         return true;
246 }
247
248 /*
249  * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
250  * default.
251  */
252 #define BZ_KERNEL_START                 0x100000UL
253
254 static const char *BZIMAGE_MAGIC        = "HdrS";
255
256 #define BZ_DEFAULT_SETUP_SECTS          4
257
258 static bool load_bzimage(struct kvm *self, int fd, const char *kernel_cmdline)
259 {
260         struct real_intr_desc intr;
261         struct boot_params boot;
262         unsigned long setup_sects;
263         unsigned int intr_addr;
264         size_t cmdline_size;
265         ssize_t setup_size;
266         void *p;
267         int nr;
268
269         /*
270          * See Documentation/x86/boot.txt for details no bzImage on-disk and
271          * memory layout.
272          */
273
274         if (lseek(fd, 0, SEEK_SET) < 0)
275                 die_perror("lseek");
276
277         read(fd, &boot, sizeof(boot));
278
279         if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)) != 0)
280                 return false;
281
282         if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
283                 warning("Too old kernel");
284                 return false;
285         }
286
287         if (lseek(fd, 0, SEEK_SET) < 0)
288                 die_perror("lseek");
289
290         if (!boot.hdr.setup_sects)
291                 boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
292         setup_sects = boot.hdr.setup_sects + 1;
293
294         setup_size = setup_sects << 9;
295         p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
296
297         if (read(fd, p, setup_size) != setup_size)
298                 die_perror("read");
299
300         p = guest_flat_to_host(self, BZ_KERNEL_START);
301
302         while ((nr = read(fd, p, 65536)) > 0)
303                 p += nr;
304
305         p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
306         if (kernel_cmdline) {
307                 cmdline_size = strlen(kernel_cmdline) + 1;
308                 if (cmdline_size > boot.hdr.cmdline_size)
309                         cmdline_size = boot.hdr.cmdline_size;
310
311                 memset(p, 0, boot.hdr.cmdline_size);
312                 memcpy(p, kernel_cmdline, cmdline_size - 1);
313         }
314
315 #define hdr_offset(member)                      \
316         offsetof(struct boot_params, hdr) +     \
317         offsetof(struct setup_header, member)
318 #define guest_hdr(kvm, member)                  \
319         guest_real_to_host(kvm,                 \
320                 BOOT_LOADER_SELECTOR,           \
321                 hdr_offset(member))
322
323         /* some fields in guest header have to be updated */
324         p = guest_hdr(self, cmd_line_ptr);
325         *(uint32_t *)p = BOOT_CMDLINE_OFFSET;
326
327         p = guest_hdr(self, type_of_loader);
328         *(uint8_t *)p = 0xff;
329
330         p = guest_hdr(self, heap_end_ptr);
331         *(uint16_t *)p = 0xfe00;
332
333         p = guest_hdr(self, loadflags);
334         *(uint8_t *)p |= CAN_USE_HEAP;
335
336         self->boot_selector     = BOOT_LOADER_SELECTOR;
337         /*
338          * The real-mode setup code starts at offset 0x200 of a bzImage. See
339          * Documentation/x86/boot.txt for details.
340          */
341         self->boot_ip           = BOOT_LOADER_IP + 0x200;
342         self->boot_sp           = BOOT_LOADER_SP;
343
344         /*
345          * Setup a *fake* real mode vector table, it has only
346          * one real hadler which does just iret
347          *
348          * This is where the BIOS lives -- BDA area
349          */
350         intr_addr = BIOS_INTR_NEXT(BDA_START + 0, 16);
351         p = guest_flat_to_host(self, intr_addr);
352         memcpy(p, intfake, intfake_end - intfake);
353         intr = (struct real_intr_desc) {
354                 .segment        = REAL_SEGMENT(intr_addr),
355                 .offset         = 0,
356         };
357         interrupt_table__setup(&self->interrupt_table, &intr);
358
359         intr_addr = BIOS_INTR_NEXT(BDA_START + (intfake_end - intfake), 16);
360         p = guest_flat_to_host(self, intr_addr);
361         memcpy(p, int10, int10_end - int10);
362         intr = (struct real_intr_desc) {
363                 .segment        = REAL_SEGMENT(intr_addr),
364                 .offset         = 0,
365         };
366         interrupt_table__set(&self->interrupt_table, &intr, 0x10);
367
368         p = guest_flat_to_host(self, 0);
369         interrupt_table__copy(&self->interrupt_table, p, REAL_INTR_SIZE);
370
371         return true;
372 }
373
/*
 * Load a kernel image into guest memory, trying the bzImage loader
 * first and falling back to a flat real-mode binary.
 *
 * Returns true on success; dies if the file cannot be opened or is
 * neither format.
 *
 * Bug fix: the kernel file descriptor was never closed, leaking one fd
 * per call.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
			const char *kernel_cmdline)
{
	bool ret;
	int fd;

	fd = open(kernel_filename, O_RDONLY);
	if (fd < 0)
		die("unable to open kernel");

	ret = load_bzimage(kvm, fd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	ret = load_flat_binary(kvm, fd);
	if (ret)
		goto found_kernel;

	close(fd);

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd);
	return ret;
}
397
398 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
399 {
400         uint64_t cs = self->sregs.cs.selector;
401
402         return ip - (cs << 4);
403 }
404
405 static inline bool is_in_protected_mode(struct kvm *self)
406 {
407         return self->sregs.cr0 & 0x01;
408 }
409
410 static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
411 {
412         uint64_t cs;
413
414         /*
415          * NOTE! We should take code segment base address into account here.
416          * Luckily it's usually zero because Linux uses flat memory model.
417          */
418         if (is_in_protected_mode(self))
419                 return ip;
420
421         cs = self->sregs.cs.selector;
422
423         return ip + (cs << 4);
424 }
425
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t) selector << 4;
}
433
/*
 * Allocate a zeroed struct kvm_msrs with room for 'nmsrs' trailing
 * kvm_msr_entry records.  Dies on allocation failure; caller owns and
 * must free() the result.
 */
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
	size_t size = sizeof(struct kvm_msrs) + nmsrs * sizeof(struct kvm_msr_entry);
	struct kvm_msrs *self = calloc(1, size);

	if (!self)
		die("out of memory");

	return self;
}
443
444 #define MSR_IA32_TIME_STAMP_COUNTER     0x10
445
446 #define MSR_IA32_SYSENTER_CS            0x174
447 #define MSR_IA32_SYSENTER_ESP           0x175
448 #define MSR_IA32_SYSENTER_EIP           0x176
449
450 #define MSR_IA32_STAR                   0xc0000081
451 #define MSR_IA32_LSTAR                  0xc0000082
452 #define MSR_IA32_CSTAR                  0xc0000083
453 #define MSR_IA32_FMASK                  0xc0000084
454 #define MSR_IA32_KERNEL_GS_BASE         0xc0000102
455
456 #define KVM_MSR_ENTRY(_index, _data)    \
457         (struct kvm_msr_entry) { .index = _index, .data = _data }
458
459 static void kvm__setup_msrs(struct kvm *self)
460 {
461         unsigned long ndx = 0;
462
463         self->msrs = kvm_msrs__new(100);
464
465         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,        0x0);
466         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,       0x0);
467         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,       0x0);
468 #ifdef CONFIG_X86_64
469         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR,               0x0);
470         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR,              0x0);
471         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE,     0x0);
472         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK,              0x0);
473         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR,              0x0);
474 #endif
475         self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);
476
477         self->msrs->nmsrs       = ndx;
478
479         if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
480                 die_perror("KVM_SET_MSRS failed");
481 }
482
483 static void kvm__setup_fpu(struct kvm *self)
484 {
485         self->fpu = (struct kvm_fpu) {
486                 .fcw            = 0x37f,
487                 .mxcsr          = 0x1f80,
488         };
489
490         if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
491                 die_perror("KVM_SET_FPU failed");
492 }
493
494 static void kvm__setup_regs(struct kvm *self)
495 {
496         self->regs = (struct kvm_regs) {
497                 /* We start the guest in 16-bit real mode  */
498                 .rflags         = 0x0000000000000002ULL,
499
500                 .rip            = self->boot_ip,
501                 .rsp            = self->boot_sp,
502                 .rbp            = self->boot_sp,
503         };
504
505         if (self->regs.rip > USHRT_MAX)
506                 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
507
508         if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
509                 die_perror("KVM_SET_REGS failed");
510 }
511
512 static void kvm__setup_sregs(struct kvm *self)
513 {
514
515         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
516                 die_perror("KVM_GET_SREGS failed");
517
518         self->sregs.cs.selector = self->boot_selector;
519         self->sregs.cs.base     = selector_to_base(self->boot_selector);
520         self->sregs.ss.selector = self->boot_selector;
521         self->sregs.ss.base     = selector_to_base(self->boot_selector);
522         self->sregs.ds.selector = self->boot_selector;
523         self->sregs.ds.base     = selector_to_base(self->boot_selector);
524         self->sregs.es.selector = self->boot_selector;
525         self->sregs.es.base     = selector_to_base(self->boot_selector);
526         self->sregs.fs.selector = self->boot_selector;
527         self->sregs.fs.base     = selector_to_base(self->boot_selector);
528         self->sregs.gs.selector = self->boot_selector;
529         self->sregs.gs.base     = selector_to_base(self->boot_selector);
530
531         if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
532                 die_perror("KVM_SET_SREGS failed");
533 }
534
/*
 * Reset the VCPU's full register state for a fresh boot: segment
 * registers, general-purpose registers, FPU, and MSRs.  Each helper
 * issues its own KVM_SET_* ioctl and dies on failure.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);

	kvm__setup_regs(self);

	kvm__setup_fpu(self);

	kvm__setup_msrs(self);
}
545
/*
 * Enter guest execution via the KVM_RUN ioctl; per the KVM API this
 * returns to userspace on the next VM exit.  Dies if the ioctl itself
 * fails.
 */
void kvm__run(struct kvm *self)
{
	if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
		die_perror("KVM_RUN failed");
}
551
/* Print one descriptor-table register (gdt/idt) as "name base limit". */
static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
	printf(" %s                 %016" PRIx64 "  %08" PRIx16 "\n",
		name, (uint64_t) dtable->base, (uint16_t) dtable->limit);
}
557
/*
 * Print one segment register's selector, base, limit, type, and
 * attribute bits; columns match the header printed by
 * kvm__show_registers().
 */
static void print_segment(const char *name, struct kvm_segment *seg)
{
	printf(" %s       %04" PRIx16 "      %016" PRIx64 "  %08" PRIx32 "  %02" PRIx8 "    %x %x   %x  %x %x %x %x\n",
		name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
		(uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
564
565 void kvm__show_registers(struct kvm *self)
566 {
567         unsigned long cr0, cr2, cr3;
568         unsigned long cr4, cr8;
569         unsigned long rax, rbx, rcx;
570         unsigned long rdx, rsi, rdi;
571         unsigned long rbp,  r8,  r9;
572         unsigned long r10, r11, r12;
573         unsigned long r13, r14, r15;
574         unsigned long rip, rsp;
575         struct kvm_sregs sregs;
576         unsigned long rflags;
577         struct kvm_regs regs;
578         int i;
579
580         if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
581                 die("KVM_GET_REGS failed");
582
583         rflags = regs.rflags;
584
585         rip = regs.rip; rsp = regs.rsp;
586         rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
587         rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
588         rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
589         r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
590         r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
591
592         printf("Registers:\n");
593         printf(" rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
594         printf(" rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
595         printf(" rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
596         printf(" rbp: %016lx   r8:  %016lx   r9:  %016lx\n", rbp, r8,  r9);
597         printf(" r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
598         printf(" r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);
599
600         if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
601                 die("KVM_GET_REGS failed");
602
603         cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
604         cr4 = sregs.cr4; cr8 = sregs.cr8;
605
606         printf(" cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
607         printf(" cr4: %016lx   cr8: %016lx\n", cr4, cr8);
608         printf("Segment registers:\n");
609         printf(" register  selector  base              limit     type  p dpl db s l g avl\n");
610         print_segment("cs ", &sregs.cs);
611         print_segment("ss ", &sregs.ss);
612         print_segment("ds ", &sregs.ds);
613         print_segment("es ", &sregs.es);
614         print_segment("fs ", &sregs.fs);
615         print_segment("gs ", &sregs.gs);
616         print_segment("tr ", &sregs.tr);
617         print_segment("ldt", &sregs.ldt);
618         print_dtable("gdt", &sregs.gdt);
619         print_dtable("idt", &sregs.idt);
620         printf(" [ efer: %016" PRIx64 "  apic base: %016" PRIx64 "  nmi: %s ]\n",
621                 (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
622                 (self->nmi_disabled ? "disabled" : "enabled"));
623         printf("Interrupt bitmap:\n");
624         printf(" ");
625         for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
626                 printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
627         printf("\n");
628 }
629
/*
 * Dump a 64-byte hex window of guest code around the current
 * instruction pointer (the byte at rip is marked with <..>), followed
 * by the top 32 bytes of the guest stack.
 */
void kvm__show_code(struct kvm *self)
{
	unsigned int code_bytes = 64;
	/* Show 43/64 of the window before rip, the remainder after it. */
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;
	unsigned char c;
	unsigned int i;
	uint8_t *ip;

	if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
		die("KVM_GET_REGS failed");

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);

	printf("Code: ");

	for (i = 0; i < code_len; i++, ip++) {
		/* Stop at the edge of guest RAM rather than read past it. */
		if (!host_ptr_in_ram(self, ip))
			break;

		c = *ip;

		/* Highlight the byte the guest will execute next. */
		if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
			printf("<%02x> ", c);
		else
			printf("%02x ", c);
	}

	printf("\n");

	printf("Stack:\n");
	kvm__dump_mem(self, self->regs.rsp, 32);
}
666
/*
 * Walk and print the guest page-table entries that map the address
 * currently in CR3's chain, one entry per level (pte4..pte1).
 *
 * Does nothing outside protected mode or whenever a table pointer
 * leaves guest RAM.  NOTE(review): this assumes a 4-level walk with
 * 4 KB-aligned tables and treats bit 7 of the level-2 entry as a
 * large-page flag (so the walk stops there) — presumably matching
 * x86-64 long-mode paging; confirm for other paging modes.
 */
void kvm__show_page_tables(struct kvm *self)
{
	uint64_t *pte1;
	uint64_t *pte2;
	uint64_t *pte3;
	uint64_t *pte4;

	if (!is_in_protected_mode(self))
		return;

	if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
		die("KVM_GET_SREGS failed");

	/* CR3 holds the guest-physical address of the top-level table. */
	pte4	= guest_flat_to_host(self, self->sregs.cr3);
	if (!host_ptr_in_ram(self, pte4))
		return;

	/* Each entry's low 12 bits are flags; mask them to follow it. */
	pte3	= guest_flat_to_host(self, (*pte4 & ~0xfff));
	if (!host_ptr_in_ram(self, pte3))
		return;

	pte2	= guest_flat_to_host(self, (*pte3 & ~0xfff));
	if (!host_ptr_in_ram(self, pte2))
		return;

	pte1	= guest_flat_to_host(self, (*pte2 & ~0xfff));
	if (!host_ptr_in_ram(self, pte1))
		return;

	printf("Page Tables:\n");
	if (*pte2 & (1 << 7))
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64
			"   pte2: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2);
	else
		printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64 "   pte2: %016"
			PRIx64 "   pte1: %016" PRIx64 "\n",
			*pte4, *pte3, *pte2, *pte1);
}
706
/*
 * Hex-dump 'size' bytes of guest memory starting at guest flat address
 * 'addr', eight bytes per row.  The size is rounded down to a multiple
 * of 8, and the dump stops early if it would run past guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned long row;
	unsigned char *p;

	/* Only whole 8-byte rows are printed. */
	size &= ~7;
	if (size == 0)
		return;

	p = guest_flat_to_host(self, addr);

	for (row = 0; row < size; row += 8) {
		if (!host_ptr_in_ram(self, p + row))
			break;

		printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
			addr + row, p[row + 0], p[row + 1], p[row + 2], p[row + 3],
				  p[row + 4], p[row + 5], p[row + 6], p[row + 7]);
	}
}