kvm: Use PRIx format macros for printf

tools/kvm/kvm.c
#include "kvm/kvm.h"

#include "kvm/interrupt.h"
#include "kvm/util.h"

#include <linux/kvm.h>

#include <asm/bootparam.h>

#include <sys/ioctl.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>

/*
 * Compatibility code. Remove this when we move to tools/kvm.
 */
#ifndef KVM_EXIT_INTERNAL_ERROR
# define KVM_EXIT_INTERNAL_ERROR                17
#endif

#define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
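/*
 * The macro expands to a C99 designated initializer, so each KVM_EXIT_*
 * code indexes its own name string, e.g. [KVM_EXIT_HLT] = "KVM_EXIT_HLT".
 */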

const char *kvm_exit_reasons[] = {
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
        DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
};

static inline bool host_ptr_in_ram(struct kvm *self, void *p)
{
        return self->ram_start <= p && p < (self->ram_start + self->ram_size);
}

static inline uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
{
        return ((uint32_t)selector << 4) + (uint32_t) offset;
}
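
/*
 * Illustrative example: the boot selector:offset pair 0x1000:0x0000 used
 * below maps to flat address (0x1000 << 4) + 0x0 = 0x10000.
 */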

static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
{
        return self->ram_start + offset;
}

static inline void *guest_real_to_host(struct kvm *self, uint16_t selector, uint16_t offset)
{
        unsigned long flat = segment_to_flat(selector, offset);

        return guest_flat_to_host(self, flat);
}

static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
{
        int ret;

        ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
        if (ret < 0)
                return false;

        return ret;
}
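
/*
 * Note: KVM_CHECK_EXTENSION returns 0 when the extension is unsupported,
 * a positive value when it is supported, and a negative value on error,
 * hence the conversion to bool above.
 */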

static struct kvm *kvm__new(void)
{
        struct kvm *self = calloc(1, sizeof *self);

        if (!self)
                die("out of memory");

        return self;
}

void kvm__delete(struct kvm *self)
{
        free(self->ram_start);
        free(self);
}

struct kvm *kvm__init(void)
{
        struct kvm_userspace_memory_region mem;
        struct kvm *self;
        long page_size;
        int mmap_size;
        int ret;

        self = kvm__new();

        self->sys_fd = open("/dev/kvm", O_RDWR);
        if (self->sys_fd < 0)
                die_perror("open");

        ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
        if (ret != KVM_API_VERSION)
                die("KVM_GET_API_VERSION ioctl mismatch (got %d, expected %d)", ret, KVM_API_VERSION);

        self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
        if (self->vm_fd < 0)
                die_perror("KVM_CREATE_VM ioctl");

        if (!kvm__supports_extension(self, KVM_CAP_USER_MEMORY))
                die("KVM_CAP_USER_MEMORY is not supported");

        self->ram_size          = 64UL * 1024UL * 1024UL;

        page_size       = sysconf(_SC_PAGESIZE);
        if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
                die("out of memory");

        mem = (struct kvm_userspace_memory_region) {
                .slot                   = 0,
                .guest_phys_addr        = 0x0UL,
                .memory_size            = self->ram_size,
                .userspace_addr         = (unsigned long) self->ram_start,
        };

        ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0)
                die_perror("KVM_SET_USER_MEMORY_REGION ioctl");

        if (!kvm__supports_extension(self, KVM_CAP_SET_TSS_ADDR))
                die("KVM_CAP_SET_TSS_ADDR is not supported");

        ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
        if (ret < 0)
                die_perror("KVM_SET_TSS_ADDR ioctl");

        self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
        if (self->vcpu_fd < 0)
                die_perror("KVM_CREATE_VCPU ioctl");

        mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        if (mmap_size < 0)
                die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");

        self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
        if (self->kvm_run == MAP_FAILED)
                die("unable to mmap vcpu fd");

        return self;
}
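
/*
 * Rough call sequence as used by the tool's main loop (illustrative
 * sketch only; the actual wiring lives outside this file, and the
 * kernel path and command line are just examples):
 *
 *	struct kvm *kvm = kvm__init();
 *
 *	kvm__load_kernel(kvm, "bzImage", "console=ttyS0");
 *	kvm__reset_vcpu(kvm);
 *	kvm__run(kvm);
 */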

void kvm__enable_singlestep(struct kvm *self)
{
        struct kvm_guest_debug debug = {
                .control        = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
        };

        if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
                warning("KVM_SET_GUEST_DEBUG failed");
}

#define BOOT_LOADER_SELECTOR    0x1000
#define BOOT_LOADER_IP          0x0000
#define BOOT_LOADER_SP          0x8000
#define BOOT_CMDLINE_OFFSET     0x20000

#define BOOT_PROTOCOL_REQUIRED  0x202
#define LOAD_HIGH               0x01

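/*
 * With the constants above, the kernel image is loaded at 0x1000:0x0000
 * (flat address 0x10000) and the command line is placed at flat address
 * 0x20000; BOOT_PROTOCOL_REQUIRED (0x202) corresponds to boot protocol
 * version 2.02.
 */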
static bool load_flat_binary(struct kvm *self, int fd)
{
        void *p;
        int nr;

        if (lseek(fd, 0, SEEK_SET) < 0)
                die_perror("lseek");

        p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

        while ((nr = read(fd, p, 65536)) > 0)
                p += nr;

        self->boot_selector     = BOOT_LOADER_SELECTOR;
        self->boot_ip           = BOOT_LOADER_IP;
        self->boot_sp           = BOOT_LOADER_SP;

        return true;
}

/*
 * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
 * default.
 */
#define BZ_KERNEL_START                 0x100000UL

static const char *BZIMAGE_MAGIC        = "HdrS";

#define BZ_DEFAULT_SETUP_SECTS          4
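/*
 * Per the boot protocol, a setup_sects value of 0 means 4. The real-mode
 * setup code occupies (setup_sects + 1) 512-byte sectors, including the
 * legacy boot sector, which is why the loader below reads
 * (setup_sects + 1) << 9 bytes from the start of the image.
 */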

static bool load_bzimage(struct kvm *self, int fd, const char *kernel_cmdline)
{
        struct real_intr_desc intr;
        struct boot_params boot;
        unsigned long setup_sects;
        unsigned int intr_addr;
        size_t cmdline_size;
        ssize_t setup_size;
        void *p;
        int nr;

        /*
         * See Documentation/x86/boot.txt for details on the bzImage on-disk
         * and in-memory layout.
         */

        if (lseek(fd, 0, SEEK_SET) < 0)
                die_perror("lseek");

        if (read(fd, &boot, sizeof(boot)) != (ssize_t) sizeof(boot))
                return false;

        if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)) != 0)
                return false;

        if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED) {
                warning("kernel boot protocol version is too old");
                return false;
        }

        if (lseek(fd, 0, SEEK_SET) < 0)
                die_perror("lseek");

        if (!boot.hdr.setup_sects)
                boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
        setup_sects = boot.hdr.setup_sects + 1;

        setup_size = setup_sects << 9;
        p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);

        if (read(fd, p, setup_size) != setup_size)
                die_perror("read");

        p = guest_flat_to_host(self, BZ_KERNEL_START);

        while ((nr = read(fd, p, 65536)) > 0)
                p += nr;

        p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
        if (kernel_cmdline) {
                cmdline_size = strlen(kernel_cmdline) + 1;
                if (cmdline_size > boot.hdr.cmdline_size)
                        cmdline_size = boot.hdr.cmdline_size;

                memset(p, 0, boot.hdr.cmdline_size);
                memcpy(p, kernel_cmdline, cmdline_size - 1);
        }

#define hdr_offset(member)                      \
        offsetof(struct boot_params, hdr) +     \
        offsetof(struct setup_header, member)
#define guest_hdr(kvm, member)                  \
        guest_real_to_host(kvm,                 \
                BOOT_LOADER_SELECTOR,           \
                hdr_offset(member))
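
/*
 * hdr_offset() gives the offset of a setup_header field within struct
 * boot_params; guest_hdr() turns that into a host pointer to the copy of
 * the header that now lives in guest memory, so it can be patched in
 * place below.
 */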

        /* some fields in guest header have to be updated */
        p = guest_hdr(self, cmd_line_ptr);
        *(uint32_t *)p = BOOT_CMDLINE_OFFSET;

        p = guest_hdr(self, type_of_loader);
        *(uint8_t *)p = 0xff;

        p = guest_hdr(self, heap_end_ptr);
        *(uint16_t *)p = 0xfe00;

        p = guest_hdr(self, loadflags);
        *(uint8_t *)p |= CAN_USE_HEAP;

        self->boot_selector     = BOOT_LOADER_SELECTOR;
        /*
         * The real-mode setup code starts at offset 0x200 of a bzImage. See
         * Documentation/x86/boot.txt for details.
         */
        self->boot_ip           = BOOT_LOADER_IP + 0x200;
        self->boot_sp           = BOOT_LOADER_SP;

        /*
         * Set up a *fake* real-mode interrupt vector table with a single
         * default handler that does nothing but an iret.
         *
         * The handler code is placed in the BDA area, which is where the
         * BIOS normally lives.
         */
        intr_addr = BIOS_INTR_NEXT(BDA_START + 0, 16);
        p = guest_flat_to_host(self, intr_addr);
        memcpy(p, intfake, intfake_end - intfake);
        intr = (struct real_intr_desc) {
                .segment        = REAL_SEGMENT(intr_addr),
                .offset         = 0,
        };
        interrupt_table__setup(&self->interrupt_table, &intr);

        intr_addr = BIOS_INTR_NEXT(BDA_START + (intfake_end - intfake), 16);
        p = guest_flat_to_host(self, intr_addr);
        memcpy(p, int10, int10_end - int10);
        intr = (struct real_intr_desc) {
                .segment        = REAL_SEGMENT(intr_addr),
                .offset         = 0,
        };
        interrupt_table__set(&self->interrupt_table, &intr, 0x10);

        p = guest_flat_to_host(self, 0);
        interrupt_table__copy(&self->interrupt_table, p, REAL_INTR_SIZE);

        return true;
}

bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
                        const char *kernel_cmdline)
{
        bool ret;
        int fd;

        fd = open(kernel_filename, O_RDONLY);
        if (fd < 0)
                die("unable to open kernel");

        ret = load_bzimage(kvm, fd, kernel_cmdline);
        if (ret)
                goto found_kernel;

        ret = load_flat_binary(kvm, fd);
        if (ret)
                goto found_kernel;

        die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
        return ret;
}

static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
{
        uint64_t cs = self->sregs.cs.selector;

        return ip - (cs << 4);
}

static inline bool is_in_protected_mode(struct kvm *self)
{
        return self->sregs.cr0 & 0x01;
}

static inline uint64_t ip_to_flat(struct kvm *self, uint64_t ip)
{
        uint64_t cs;

        /*
         * NOTE! We should take code segment base address into account here.
         * Luckily it's usually zero because Linux uses flat memory model.
         */
        if (is_in_protected_mode(self))
                return ip;

        cs = self->sregs.cs.selector;

        return ip + (cs << 4);
}

static inline uint32_t selector_to_base(uint16_t selector)
{
        /*
         * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
         */
        return (uint32_t)selector * 16;
}

static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
        struct kvm_msrs *self = calloc(1, sizeof(*self) + (sizeof(struct kvm_msr_entry) * nmsrs));

        if (!self)
                die("out of memory");

        return self;
}

#define MSR_IA32_TIME_STAMP_COUNTER     0x10

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_IA32_STAR                   0xc0000081
#define MSR_IA32_LSTAR                  0xc0000082
#define MSR_IA32_CSTAR                  0xc0000083
#define MSR_IA32_FMASK                  0xc0000084
#define MSR_IA32_KERNEL_GS_BASE         0xc0000102

#define KVM_MSR_ENTRY(_index, _data)    \
        (struct kvm_msr_entry) { .index = _index, .data = _data }
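/*
 * KVM_MSR_ENTRY() builds one struct kvm_msr_entry as a compound literal;
 * the entries are collected in the flexible array at the end of struct
 * kvm_msrs and handed to the kernel with a single KVM_SET_MSRS ioctl.
 */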

static void kvm__setup_msrs(struct kvm *self)
{
        unsigned long ndx = 0;

        self->msrs = kvm_msrs__new(100);

        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS,        0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP,       0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP,       0x0);
#ifdef CONFIG_X86_64
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_STAR,               0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR,              0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE,     0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_FMASK,              0x0);
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR,              0x0);
#endif
        self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER, 0x0);

        self->msrs->nmsrs       = ndx;

        if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
                die_perror("KVM_SET_MSRS failed");
}

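/*
 * Put the FPU into its architectural power-on state: FCW 0x37f masks all
 * x87 exceptions and MXCSR 0x1f80 masks all SSE exceptions.
 */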
static void kvm__setup_fpu(struct kvm *self)
{
        self->fpu = (struct kvm_fpu) {
                .fcw            = 0x37f,
                .mxcsr          = 0x1f80,
        };

        if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
                die_perror("KVM_SET_FPU failed");
}

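/*
 * Only bit 1 of RFLAGS, which is reserved and always reads as one, is
 * set here; RIP/RSP/RBP come from whichever loader set up the boot state.
 */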
static void kvm__setup_regs(struct kvm *self)
{
        self->regs = (struct kvm_regs) {
                /* We start the guest in 16-bit real mode  */
                .rflags         = 0x0000000000000002ULL,

                .rip            = self->boot_ip,
                .rsp            = self->boot_sp,
                .rbp            = self->boot_sp,
        };

        if (self->regs.rip > USHRT_MAX)
                die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);

        if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
                die_perror("KVM_SET_REGS failed");
}

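/*
 * 0x60000010 is the architectural reset value of CR0 (PE clear, so the
 * vCPU starts in real mode); every segment base is selector * 16 as
 * required for real-mode execution.
 */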
static void kvm__setup_sregs(struct kvm *self)
{
        self->sregs = (struct kvm_sregs) {
                .cr0            = 0x60000010ULL,
                .cs             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x0bU,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .ss             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x03U,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .ds             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x03U,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .es             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x03U,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .fs             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x03U,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .gs             = (struct kvm_segment) {
                        .selector       = self->boot_selector,
                        .base           = selector_to_base(self->boot_selector),
                        .limit          = 0xffffU,
                        .type           = 0x03U,
                        .present        = 1,
                        .dpl            = 0x03,
                        .s              = 1,
                },
                .tr             = (struct kvm_segment) {
                        .limit          = 0xffffU,
                        .present        = 1,
                        .type           = 0x03U,
                },
                .ldt            = (struct kvm_segment) {
                        .limit          = 0xffffU,
                        .present        = 1,
                        .type           = 0x02U,
                },
                .gdt            = (struct kvm_dtable) {
                        .limit          = 0xffffU,
                },
                .idt            = (struct kvm_dtable) {
                        .limit          = 0xffffU,
                },
        };

        if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
                die_perror("KVM_SET_SREGS failed");
}

void kvm__reset_vcpu(struct kvm *self)
{
        kvm__setup_sregs(self);

        kvm__setup_regs(self);

        kvm__setup_fpu(self);

        kvm__setup_msrs(self);
}

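/*
 * KVM_RUN enters guest mode and returns on the next VM exit; the exit
 * reason ends up in self->kvm_run->exit_reason and can be decoded with
 * the kvm_exit_reasons[] table above.
 */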
void kvm__run(struct kvm *self)
{
        if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
                die_perror("KVM_RUN failed");
}

static void print_dtable(const char *name, struct kvm_dtable *dtable)
{
        printf(" %s                 %016" PRIx64 "  %08" PRIx16 "\n",
                name, (uint64_t) dtable->base, (uint16_t) dtable->limit);
}

static void print_segment(const char *name, struct kvm_segment *seg)
{
        printf(" %s       %04" PRIx16 "      %016" PRIx64 "  %08" PRIx32 "  %02" PRIx8 "    %x %x   %x  %x %x %x %x\n",
                name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
                (uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}

void kvm__show_registers(struct kvm *self)
{
        unsigned long cr0, cr2, cr3;
        unsigned long cr4, cr8;
        unsigned long rax, rbx, rcx;
        unsigned long rdx, rsi, rdi;
        unsigned long rbp,  r8,  r9;
        unsigned long r10, r11, r12;
        unsigned long r13, r14, r15;
        unsigned long rip, rsp;
        struct kvm_sregs sregs;
        unsigned long rflags;
        struct kvm_regs regs;
        int i;

        if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
                die("KVM_GET_REGS failed");

        rflags = regs.rflags;

        rip = regs.rip; rsp = regs.rsp;
        rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
        rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
        rbp = regs.rbp; r8  = regs.r8;  r9  = regs.r9;
        r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
        r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;

        printf("Registers:\n");
        printf(" rip: %016lx   rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
        printf(" rax: %016lx   rbx: %016lx   rcx: %016lx\n", rax, rbx, rcx);
        printf(" rdx: %016lx   rsi: %016lx   rdi: %016lx\n", rdx, rsi, rdi);
        printf(" rbp: %016lx   r8:  %016lx   r9:  %016lx\n", rbp, r8,  r9);
        printf(" r10: %016lx   r11: %016lx   r12: %016lx\n", r10, r11, r12);
        printf(" r13: %016lx   r14: %016lx   r15: %016lx\n", r13, r14, r15);

        if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
                die("KVM_GET_SREGS failed");

        cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
        cr4 = sregs.cr4; cr8 = sregs.cr8;

        printf(" cr0: %016lx   cr2: %016lx   cr3: %016lx\n", cr0, cr2, cr3);
        printf(" cr4: %016lx   cr8: %016lx\n", cr4, cr8);
        printf("Segment registers:\n");
        printf(" register  selector  base              limit     type  p dpl db s l g avl\n");
        print_segment("cs ", &sregs.cs);
        print_segment("ss ", &sregs.ss);
        print_segment("ds ", &sregs.ds);
        print_segment("es ", &sregs.es);
        print_segment("fs ", &sregs.fs);
        print_segment("gs ", &sregs.gs);
        print_segment("tr ", &sregs.tr);
        print_segment("ldt", &sregs.ldt);
        print_dtable("gdt", &sregs.gdt);
        print_dtable("idt", &sregs.idt);
        printf(" [ efer: %016" PRIx64 "  apic base: %016" PRIx64 "  nmi: %s ]\n",
                (uint64_t) sregs.efer, (uint64_t) sregs.apic_base,
                (self->nmi_disabled ? "disabled" : "enabled"));
        printf("Interrupt bitmap:\n");
        printf(" ");
        for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
                printf("%016" PRIx64 " ", (uint64_t) sregs.interrupt_bitmap[i]);
        printf("\n");
}

void kvm__show_code(struct kvm *self)
{
        unsigned int code_bytes = 64;
        unsigned int code_prologue = code_bytes * 43 / 64;
        unsigned int code_len = code_bytes;
        unsigned char c;
        unsigned int i;
        uint8_t *ip;

        if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
                die("KVM_GET_REGS failed");

        if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
                die("KVM_GET_SREGS failed");

        ip = guest_flat_to_host(self, ip_to_flat(self, self->regs.rip) - code_prologue);

        printf("Code: ");

        for (i = 0; i < code_len; i++, ip++) {
                if (!host_ptr_in_ram(self, ip))
                        break;

                c = *ip;

                if (ip == guest_flat_to_host(self, ip_to_flat(self, self->regs.rip)))
                        printf("<%02x> ", c);
                else
                        printf("%02x ", c);
        }

        printf("\n");

        printf("Stack:\n");
        kvm__dump_mem(self, self->regs.rsp, 32);
}

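/*
 * Follow the first entry of each level of the 4-level long-mode page
 * tables: PML4 (pte4) -> PDPT (pte3) -> PD (pte2) -> PT (pte1). Bit 7
 * (PS) in the PD entry marks a 2 MB large page, in which case there is
 * no PT level to print.
 */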
void kvm__show_page_tables(struct kvm *self)
{
        uint64_t *pte1;
        uint64_t *pte2;
        uint64_t *pte3;
        uint64_t *pte4;

        if (!is_in_protected_mode(self))
                return;

        if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
                die("KVM_GET_SREGS failed");

        pte4    = guest_flat_to_host(self, self->sregs.cr3);
        if (!host_ptr_in_ram(self, pte4))
                return;

        pte3    = guest_flat_to_host(self, (*pte4 & ~0xfff));
        if (!host_ptr_in_ram(self, pte3))
                return;

        pte2    = guest_flat_to_host(self, (*pte3 & ~0xfff));
        if (!host_ptr_in_ram(self, pte2))
                return;

        pte1    = guest_flat_to_host(self, (*pte2 & ~0xfff));
        if (!host_ptr_in_ram(self, pte1))
                return;

        printf("Page Tables:\n");
        if (*pte2 & (1 << 7))
                printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64
                        "   pte2: %016" PRIx64 "\n",
                        *pte4, *pte3, *pte2);
        else
                printf(" pte4: %016" PRIx64 "   pte3: %016" PRIx64 "   pte2: %016"
                        PRIx64 "   pte1: %016" PRIx64 "\n",
                        *pte4, *pte3, *pte2, *pte1);
}

void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
        unsigned char *p;
        unsigned long n;

        size &= ~7; /* round down to a multiple of 8 bytes */
        if (!size)
                return;

        p = guest_flat_to_host(self, addr);

        for (n = 0; n < size; n+=8) {
                if (!host_ptr_in_ram(self, p + n))
                        break;

                printf("  0x%08lx: %02x %02x %02x %02x  %02x %02x %02x %02x\n",
                        addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
                                  p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
        }
}