5 #include <asm/bootparam.h>
22 * Compatibility code. Remove this when we move to tools/kvm.
24 #ifndef KVM_EXIT_INTERNAL_ERROR
25 # define KVM_EXIT_INTERNAL_ERROR 17
28 #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
/*
 * Human-readable names for KVM exit codes, indexed by the KVM_EXIT_* value
 * itself (designated initializers), for diagnostic printing.
 */
30 const char *kvm_exit_reasons[] = {
31 DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
32 DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
33 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
34 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
35 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
36 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
37 DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
38 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
39 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
40 DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
41 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
42 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
43 DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
44 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
45 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
46 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
47 DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
48 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
/*
 * Translate a guest physical address into a host pointer.  Valid because
 * guest RAM is a single flat region mapped at guest physical address 0
 * (see the KVM_SET_USER_MEMORY_REGION setup in kvm__init()).
 * No bounds check: caller must pass offset < self->ram_size.
 */
51 static inline void *guest_addr_to_host(struct kvm *self, unsigned long offset)
53 return self->ram_start + offset;
/*
 * Query the KVM subsystem (via KVM_CHECK_EXTENSION on the /dev/kvm fd)
 * for capability @extension; returns true when supported.
 */
56 static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
60 ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
/*
 * Allocate a zero-initialized struct kvm.  Ownership passes to the caller
 * (kvm__init()); zeroing matters so unset fds/pointers read as 0/NULL.
 */
67 static struct kvm *kvm__new(void)
69 struct kvm *self = calloc(1, sizeof *self);
/*
 * Create and return a ready-to-use VM: open /dev/kvm, verify the API
 * version, create the VM, register 64 MB of page-aligned guest RAM at
 * guest physical address 0, set the TSS address, create vcpu 0 and mmap
 * its shared kvm_run area.  Dies (die/die_perror) on any failure, so a
 * non-NULL return is fully initialized.
 */
77 struct kvm *kvm__init(void)
79 struct kvm_userspace_memory_region mem;
87 self->sys_fd = open("/dev/kvm", O_RDWR);
/* Refuse to run against a kernel speaking a different KVM ABI. */
91 ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
92 if (ret != KVM_API_VERSION)
93 die_perror("KVM_API_VERSION ioctl");
95 self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
97 die_perror("KVM_CREATE_VM ioctl");
99 if (!kvm__supports_extension(self, KVM_CAP_USER_MEMORY))
100 die("KVM_CAP_USER_MEMORY is not supported");
/* Guest RAM size is hard-coded to 64 MB for now. */
102 self->ram_size = 64UL * 1024UL * 1024UL;
/* KVM requires the userspace backing to be page-aligned. */
104 page_size = sysconf(_SC_PAGESIZE);
105 if (posix_memalign(&self->ram_start, page_size, self->ram_size) != 0)
106 die("out of memory");
/* Map all of host allocation as guest slot 0 at guest physical 0. */
108 mem = (struct kvm_userspace_memory_region) {
110 .guest_phys_addr = 0x0UL,
111 .memory_size = self->ram_size,
112 .userspace_addr = (unsigned long) self->ram_start,
115 ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem, 1);
117 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
119 if (!kvm__supports_extension(self, KVM_CAP_SET_TSS_ADDR))
120 die("KVM_CAP_SET_TSS_ADDR is not supported");
/* 3 pages for the TSS just below the BIOS ROM area (Intel VMX quirk). */
122 ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
124 die_perror("KVM_SET_TSS_ADDR ioctl");
126 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
127 if (self->vcpu_fd < 0)
128 die_perror("KVM_CREATE_VCPU ioctl");
/* The kvm_run communication area must be mmap'ed from the vcpu fd. */
130 mmap_size = ioctl(self->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
132 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
134 self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
135 if (self->kvm_run == MAP_FAILED)
136 die("unable to mmap vcpu fd");
/*
 * Ask KVM to single-step the vcpu (one guest instruction per KVM_RUN,
 * exiting with KVM_EXIT_DEBUG).  Failure is non-fatal: only a warning
 * is printed, since debugging support is optional.
 */
141 void kvm__enable_singlestep(struct kvm *self)
143 struct kvm_guest_debug debug = {
144 .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
147 if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
148 warning("KVM_SET_GUEST_DEBUG failed");
/*
 * Convert a real-mode segment:offset pair into a flat (linear) address:
 * linear = selector * 16 + offset.
 */
151 static inline uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
153 return ((uint32_t)selector << 4) + (uint32_t) offset;
156 #define BOOT_LOADER_CS 0x0000
157 #define BOOT_LOADER_IP 0x7c00
/*
 * Fallback kernel loader: copy the whole file, unmodified, to the
 * real-mode boot-loader entry address (BOOT_LOADER_CS:BOOT_LOADER_IP),
 * 64 KB at a time.  Used when the image lacks a bzImage header.
 */
159 static int load_flat_binary(struct kvm *kvm, int fd)
164 if (lseek(fd, 0, SEEK_SET) < 0)
167 p = guest_addr_to_host(kvm, segment_to_flat(BOOT_LOADER_CS, BOOT_LOADER_IP));
169 while ((nr = read(fd, p, 65536)) > 0)
176 * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
179 #define BZ_KERNEL_START 0x100000UL
181 static const char *BZIMAGE_MAGIC = "HdrS";
183 #define BZ_DEFAULT_SETUP_SECTS 4
185 static bool load_bzimage(struct kvm *kvm, int fd)
187 unsigned long setup_sects;
188 struct boot_params boot;
194 * See Documentation/x86/boot.txt for details no bzImage on-disk and
198 if (lseek(fd, 0, SEEK_SET) < 0)
201 read(fd, &boot, sizeof(boot));
203 if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)) != 0)
206 if (lseek(fd, 0, SEEK_SET) < 0)
209 setup_sects = boot.hdr.setup_sects + 1;
210 if (setup_sects == 0)
211 setup_sects = BZ_DEFAULT_SETUP_SECTS;
213 setup_size = setup_sects << 9;
214 p = guest_addr_to_host(kvm, segment_to_flat(BOOT_LOADER_CS, BOOT_LOADER_IP));
216 if (read(fd, p, setup_size) != setup_size)
219 p = guest_addr_to_host(kvm, BZ_KERNEL_START);
221 while ((nr = read(fd, p, 65536)) > 0)
/*
 * Open @kernel_filename and load it into guest RAM: try bzImage format
 * first, then fall back to a flat real-mode binary.  Dies if the file
 * cannot be opened or matches neither format.
 */
227 bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename)
232 fd = open(kernel_filename, O_RDONLY);
234 die("unable to open kernel");
236 ret = load_bzimage(kvm, fd);
240 ret = load_flat_binary(kvm, fd);
244 die("%s is not a valid bzImage or flat binary", kernel_filename);
/*
 * Convert a flat (linear) address into a real-mode IP relative to the
 * current CS selector: ip = flat - cs * 16.  Inverse of ip_real_to_flat().
 */
250 static inline uint64_t ip_flat_to_real(struct kvm *self, uint64_t ip)
252 uint64_t cs = self->sregs.cs.selector;
254 return ip - (cs << 4);
/*
 * Convert a real-mode IP (relative to the current CS selector) into a
 * flat linear address: flat = ip + cs * 16.  Inverse of ip_flat_to_real().
 */
257 static inline uint64_t ip_real_to_flat(struct kvm *self, uint64_t ip)
259 uint64_t cs = self->sregs.cs.selector;
261 return ip + (cs << 4);
/*
 * Put the vcpu into 16-bit real mode, poised to execute the loaded kernel
 * at BOOT_LOADER_CS:BOOT_LOADER_IP.  Sets sregs (CR0 = 0x60000010, the
 * x86 reset value: protected mode off) and regs, then pushes both to KVM.
 * Dies if either KVM_SET_SREGS or KVM_SET_REGS fails.
 */
264 void kvm__reset_vcpu(struct kvm *self)
266 self->sregs = (struct kvm_sregs) {
267 .cr0 = 0x60000010ULL,
268 .cs = (struct kvm_segment) {
270 * KVM on Intel requires 'base' to be 'selector * 16' in
273 .selector = BOOT_LOADER_CS,
274 .base = BOOT_LOADER_CS * 16,
281 .ss = (struct kvm_segment) {
288 .ds = (struct kvm_segment) {
295 .es = (struct kvm_segment) {
302 .fs = (struct kvm_segment) {
309 .gs = (struct kvm_segment) {
316 .tr = (struct kvm_segment) {
321 .ldt = (struct kvm_segment) {
326 .gdt = (struct kvm_dtable) {
329 .idt = (struct kvm_dtable) {
334 if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
335 die_perror("KVM_SET_SREGS failed");
337 self->regs = (struct kvm_regs) {
338 .rip = BOOT_LOADER_IP,
339 /* We start the guest in 16-bit real mode */
340 .rflags = 0x0000000000000002ULL,
/* Real-mode IP is 16 bits; a larger entry point cannot be expressed. */
343 if (self->regs.rip > USHRT_MAX)
344 die("ip 0x%" PRIx64 " is too high for real mode", (uint64_t) self->regs.rip);
346 if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
347 die_perror("KVM_SET_REGS failed");
/*
 * Enter the guest via KVM_RUN; returns when the guest exits back to
 * userspace (exit reason is in self->kvm_run).  Dies if the ioctl fails.
 */
351 void kvm__run(struct kvm *self)
353 if (ioctl(self->vcpu_fd, KVM_RUN, 0) < 0)
354 die_perror("KVM_RUN failed");
/*
 * Stub OUT-port emulation: just logs the access to stderr.
 * @data is unused until real device emulation is wired up.
 */
357 static void kvm__emulate_io_out(struct kvm *self, uint16_t port, void *data, int size, uint32_t count)
359 fprintf(stderr, "%s port=%x, size=%d, count=%" PRIu32 "\n", __func__, port, size, count);
/*
 * Stub IN-port emulation: just logs the access to stderr.
 * @data is unused until real device emulation is wired up.
 */
362 static void kvm__emulate_io_in(struct kvm *self, uint16_t port, void *data, int size, uint32_t count)
364 fprintf(stderr, "%s port=%x, size=%d, count=%" PRIu32 "\n", __func__, port, size, count);
/*
 * Dispatch a KVM_EXIT_IO request to the IN or OUT handler based on
 * @direction (KVM_EXIT_IO_IN / KVM_EXIT_IO_OUT).
 */
367 void kvm__emulate_io(struct kvm *self, uint16_t port, void *data, int direction, int size, uint32_t count)
369 if (direction == KVM_EXIT_IO_IN)
370 kvm__emulate_io_in(self, port, data, size, count);
372 kvm__emulate_io_out(self, port, data, size, count);
/*
 * Print one segment register row (selector, base, limit, type and the
 * descriptor attribute bits) for kvm__show_registers()'s table.
 */
375 static void print_segment(const char *name, struct kvm_segment *seg)
377 printf(" %s %04" PRIx16 " %016" PRIx64 " %08" PRIx32 " %02" PRIx8 " %x %x %x %x %x %x %x\n",
378 name, (uint16_t) seg->selector, (uint64_t) seg->base, (uint32_t) seg->limit,
379 (uint8_t) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
382 void kvm__show_registers(struct kvm *self)
384 unsigned long cr0, cr2, cr3;
385 unsigned long cr4, cr8;
386 unsigned long rax, rbx, rcx;
387 unsigned long rdx, rsi, rdi;
388 unsigned long rbp, r8, r9;
389 unsigned long r10, r11, r12;
390 unsigned long r13, r14, r15;
391 unsigned long rip, rsp;
392 struct kvm_sregs sregs;
393 unsigned long rflags;
394 struct kvm_regs regs;
397 if (ioctl(self->vcpu_fd, KVM_GET_REGS, ®s) < 0)
398 die("KVM_GET_REGS failed");
400 rflags = regs.rflags;
402 rip = regs.rip; rsp = regs.rsp;
403 rax = regs.rax; rbx = regs.rbx; rcx = regs.rcx;
404 rdx = regs.rdx; rsi = regs.rsi; rdi = regs.rdi;
405 rbp = regs.rbp; r8 = regs.r8; r9 = regs.r9;
406 r10 = regs.r10; r11 = regs.r11; r12 = regs.r12;
407 r13 = regs.r13; r14 = regs.r14; r15 = regs.r15;
409 printf("Registers:\n");
410 printf(" rip: %016lx rsp: %016lx flags: %016lx\n", rip, rsp, rflags);
411 printf(" rax: %016lx ebx: %016lx ecx: %016lx\n", rax, rbx, rcx);
412 printf(" rdx: %016lx rsi: %016lx rdi: %016lx\n", rdx, rsi, rdi);
413 printf(" rbp: %016lx r8: %016lx r9: %016lx\n", rbp, r8, r9);
414 printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
415 printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);
417 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
418 die("KVM_GET_REGS failed");
420 cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
421 cr4 = sregs.cr4; cr8 = sregs.cr8;
423 printf(" cr0: %016lx cr2: %016lx cr3: %016lx\n", cr0, cr2, cr3);
424 printf(" cr4: %016lx cr8: %016lx\n", cr4, cr8);
425 printf("Segment registers:\n");
426 printf(" register selector base limit type p dpl db s l g avl\n");
427 print_segment("cs ", &sregs.cs);
428 print_segment("ss ", &sregs.ss);
429 print_segment("ds ", &sregs.ds);
430 print_segment("es ", &sregs.es);
431 print_segment("fs ", &sregs.fs);
432 print_segment("gs ", &sregs.gs);
433 print_segment("tr ", &sregs.tr);
434 print_segment("ldt", &sregs.ldt);
435 printf(" [ efer: %016lx apic base: %016lx ]\n", (uint64_t) sregs.efer, (uint64_t) sregs.apic_base);
436 printf("Interrupt bitmap:\n");
438 for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++)
439 printf("%016lx ", (uint64_t) sregs.interrupt_bitmap[i]);
443 void kvm__show_code(struct kvm *self)
445 unsigned int code_bytes = 64;
446 unsigned int code_prologue = code_bytes * 43 / 64;
447 unsigned int code_len = code_bytes;
452 if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
453 die("KVM_GET_REGS failed");
455 if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
456 die("KVM_GET_SREGS failed");
458 ip = guest_addr_to_host(self, ip_real_to_flat(self, self->regs.rip) - code_prologue);
462 for (i = 0; i < code_len; i++, ip++) {
465 if (ip == guest_addr_to_host(self, ip_real_to_flat(self, self->regs.rip)))
466 printf("<%02x> ", c);