3 #include "kvm/cpufeature.h"
4 #include "kvm/interrupt.h"
5 #include "kvm/boot-protocol.h"
7 #include "kvm/mptable.h"
11 #include <asm/bootparam.h>
13 #include <sys/ioctl.h>
/* Expand to a designated initializer mapping a KVM_EXIT_* value to its own name string. */
28 #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason

/* Printable names for KVM exit reasons, indexed by the exit-reason code. */
30 const char *kvm_exit_reasons[] = {
31 DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN),
32 DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION),
33 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO),
34 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL),
35 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG),
36 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT),
37 DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO),
38 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN),
39 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN),
40 DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY),
41 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR),
42 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR),
43 DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS),
44 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC),
45 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET),
46 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR),
47 DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI),
48 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR),
/*
 * Capabilities the hypervisor requires from the host KVM module; checked by
 * kvm__check_extensions(). NOTE(review): the macro continuation line and the
 * kvm_req_ext[] declaration are not visible in this chunk — presumably each
 * entry pairs the capability code with its name string; verify upstream.
 */
51 #define DEFINE_KVM_EXT(ext) \
59 { DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
60 { DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR) },
61 { DEFINE_KVM_EXT(KVM_CAP_PIT2) },
62 { DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY) },
63 { DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING) },
64 { DEFINE_KVM_EXT(KVM_CAP_IRQCHIP) },
65 { DEFINE_KVM_EXT(KVM_CAP_HLT) },
66 { DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS) },
67 { DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
/* Ask the host KVM module (via KVM_CHECK_EXTENSION) whether it supports @extension. */
70 static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
74 ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
/*
 * Verify that every capability listed in kvm_req_ext[] is supported by the
 * host; reports the first missing one via error().
 */
81 static int kvm__check_extensions(struct kvm *kvm)
85 for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
86 if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
/* NOTE(review): "Unsuppored" is a typo in the message text — should read "Unsupported". */
87 error("Unsuppored KVM extension detected: %s",
/* Allocate a zero-initialized struct kvm; dies on allocation failure (never returns NULL). */
96 static struct kvm *kvm__new(void)
98 struct kvm *kvm = calloc(1, sizeof *kvm);
101 die("out of memory");
/* Tear down a guest instance: stop the injection timer and unmap guest RAM. */
106 void kvm__delete(struct kvm *kvm)
108 kvm__stop_timer(kvm);
110 munmap(kvm->ram_start, kvm->ram_size);
/*
 * Detect hardware virtualization support via CPUID: the VMX bit on Intel
 * CPUs, the SVM bit on AMD CPUs.
 */
114 static bool kvm__cpu_supports_vm(void)
116 struct cpuid_regs regs;
120 regs = (struct cpuid_regs) {
/* The CPU vendor string selects which feature bit and CPUID leaf base to probe. */
126 case CPUID_VENDOR_INTEL_1:
128 feature = KVM__X86_FEATURE_VMX;
131 case CPUID_VENDOR_AMD_1:
/* AMD reports SVM in the extended CPUID range starting at 0x80000000. */
132 eax_base = 0x80000000;
133 feature = KVM__X86_FEATURE_SVM;
140 regs = (struct cpuid_regs) {
/* Bail out if the CPU does not implement the feature leaf at all. */
145 if (regs.eax < eax_base + 0x01)
148 regs = (struct cpuid_regs) {
149 .eax = eax_base + 0x01
/* The virtualization feature bit lives in ECX of leaf eax_base + 1. */
153 return regs.ecx & (1 << feature);
/*
 * Register a userspace-backed guest physical memory region (@slot) with KVM
 * via KVM_SET_USER_MEMORY_REGION; dies if the ioctl fails.
 */
156 static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
158 struct kvm_userspace_memory_region mem;
161 mem = (struct kvm_userspace_memory_region) {
163 .guest_phys_addr = guest_phys,
165 .userspace_addr = (unsigned long)userspace_addr,
168 ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
170 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
174 * Allocating RAM size bigger than 4GB requires us to leave a gap
175 * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
176 * devices (see documentation of e820_setup_gap() for details).
178 * If we're required to initialize RAM bigger than 4GB, we will create
179 * a gap between 0xe0000000 and 0x100000000 in the guest virtual mem space.
182 void kvm__init_ram(struct kvm *kvm)
184 u64 phys_start, phys_size;
187 if (kvm->ram_size < KVM_32BIT_GAP_START) {
188 /* Use a single block of RAM for 32bit RAM */
191 phys_size = kvm->ram_size;
192 host_mem = kvm->ram_start;
194 kvm_register_mem_slot(kvm, 0, 0, kvm->ram_size, kvm->ram_start);
196 /* First RAM range from zero to the PCI gap: */
199 phys_size = KVM_32BIT_GAP_START;
200 host_mem = kvm->ram_start;
202 kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
204 /* Second RAM range from 4GB to the end of RAM: */
/* Host backing for the >4G range sits at the same offset within the mmap area. */
206 phys_start = 0x100000000ULL;
207 phys_size = kvm->ram_size - phys_size;
208 host_mem = kvm->ram_start + phys_start;
210 kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
/* Query the number of VCPUs the host supports (KVM_CAP_NR_VCPUS); dies on ioctl failure. */
214 int kvm__max_cpus(struct kvm *kvm)
218 ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
220 die_perror("KVM_CAP_NR_VCPUS");
/*
 * kvm__init - open the KVM device, create the VM, and set up guest RAM,
 * the PIT, and the in-kernel irqchip. Dies on any unrecoverable error.
 * @kvm_dev:  path to the KVM device node (e.g. /dev/kvm)
 * @ram_size: guest RAM size in bytes
 */
225 struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
227 struct kvm_pit_config pit_config = { .flags = 0, };
231 if (!kvm__cpu_supports_vm())
232 die("Your CPU does not support hardware virtualization")
236 kvm->sys_fd = open(kvm_dev, O_RDWR);
237 if (kvm->sys_fd < 0) {
/* Distinguish "KVM not built/loaded" from "device exists but will not open". */
239 die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
241 die("'%s' KVM driver not available.\n # (If the KVM module is loaded then 'dmesg' may offer further clues about the failure.)", kvm_dev);
243 fprintf(stderr, " Fatal, could not open %s: ", kvm_dev);
/* Refuse to run against a kernel speaking a different KVM ABI version. */
248 ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
249 if (ret != KVM_API_VERSION)
250 die_perror("KVM_API_VERSION ioctl");
252 kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
254 die_perror("KVM_CREATE_VM ioctl");
256 if (kvm__check_extensions(kvm))
/* NOTE(review): "extention" is a typo in this message — should be "extension". */
257 die("A required KVM extention is not supported by OS");
/* x86 requirement: give KVM a scratch address range for the TSS (KVM_SET_TSS_ADDR). */
259 ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
261 die_perror("KVM_SET_TSS_ADDR ioctl");
263 ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
265 die_perror("KVM_CREATE_PIT2 ioctl");
267 kvm->ram_size = ram_size;
/* RAM below the 32-bit gap fits one mapping; otherwise over-allocate by the gap size. */
269 if (kvm->ram_size < KVM_32BIT_GAP_START) {
270 kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
272 kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
273 if (kvm->ram_start != MAP_FAILED) {
275 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
276 * if we accidentally write to it, we will know.
278 mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
281 if (kvm->ram_start == MAP_FAILED)
282 die("out of memory");
284 ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
286 die_perror("KVM_CREATE_IRQCHIP ioctl");
/* Real-mode address (selector:IP) where kernel setup code is loaded. */
291 #define BOOT_LOADER_SELECTOR 0x1000
292 #define BOOT_LOADER_IP 0x0000
/* Initial real-mode stack pointer handed to the guest. */
293 #define BOOT_LOADER_SP 0x8000
/* Guest-flat offset where the kernel command line is copied. */
294 #define BOOT_CMDLINE_OFFSET 0x20000
/* Minimum Linux boot protocol version we accept (2.06). */
296 #define BOOT_PROTOCOL_REQUIRED 0x206
297 #define LOAD_HIGH 0x01
/*
 * Load a raw flat binary at BOOT_LOADER_SELECTOR:BOOT_LOADER_IP and point the
 * guest's initial CS:IP and SP at it.
 */
299 static int load_flat_binary(struct kvm *kvm, int fd)
304 if (lseek(fd, 0, SEEK_SET) < 0)
307 p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
/* Copy the image into guest memory in 64 KiB chunks. */
309 while ((nr = read(fd, p, 65536)) > 0)
312 kvm->boot_selector = BOOT_LOADER_SELECTOR;
313 kvm->boot_ip = BOOT_LOADER_IP;
314 kvm->boot_sp = BOOT_LOADER_SP;
/* "HdrS" signature present in every bzImage boot header. */
319 static const char *BZIMAGE_MAGIC = "HdrS";

/*
 * Load a Linux bzImage (plus optional initrd) into guest memory per the x86
 * boot protocol: real-mode setup at BOOT_LOADER_SELECTOR:BOOT_LOADER_IP, the
 * protected-mode kernel at BZ_KERNEL_START, the command line at
 * BOOT_CMDLINE_OFFSET. Returns false if the file is not a bzImage.
 */
321 static bool load_bzimage(struct kvm *kvm, int fd_kernel,
322 int fd_initrd, const char *kernel_cmdline)
324 struct boot_params *kern_boot;
325 unsigned long setup_sects;
326 struct boot_params boot;
333 * See Documentation/x86/boot.txt for details on bzImage on-disk and
337 if (lseek(fd_kernel, 0, SEEK_SET) < 0)
340 if (read(fd_kernel, &boot, sizeof(boot)) != sizeof(boot))
/* Not a bzImage if the "HdrS" magic is missing from the header. */
343 if (memcmp(&boot.hdr.header, BZIMAGE_MAGIC, strlen(BZIMAGE_MAGIC)))
346 if (boot.hdr.version < BOOT_PROTOCOL_REQUIRED)
347 die("Too old kernel");
349 if (lseek(fd_kernel, 0, SEEK_SET) < 0)
/* A setup_sects of 0 means the protocol's legacy default sector count. */
352 if (!boot.hdr.setup_sects)
353 boot.hdr.setup_sects = BZ_DEFAULT_SETUP_SECTS;
354 setup_sects = boot.hdr.setup_sects + 1;
/* Sectors are 512 bytes, hence the shift by 9. */
356 setup_size = setup_sects << 9;
357 p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
359 /* copy setup.bin to mem*/
360 if (read(fd_kernel, p, setup_size) != setup_size)
363 /* copy vmlinux.bin to BZ_KERNEL_START*/
364 p = guest_flat_to_host(kvm, BZ_KERNEL_START);
366 while ((nr = read(fd_kernel, p, 65536)) > 0)
369 p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
370 if (kernel_cmdline) {
/* Clamp the command line to the maximum length the kernel header advertises. */
371 cmdline_size = strlen(kernel_cmdline) + 1;
372 if (cmdline_size > boot.hdr.cmdline_size)
373 cmdline_size = boot.hdr.cmdline_size;
375 memset(p, 0, boot.hdr.cmdline_size);
376 memcpy(p, kernel_cmdline, cmdline_size - 1);
/* Fill in the boot header the real-mode setup code will read. */
379 kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);
381 kern_boot->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
/* 0xff = "undefined" boot loader ID per the boot protocol. */
382 kern_boot->hdr.type_of_loader = 0xff;
383 kern_boot->hdr.heap_end_ptr = 0xfe00;
384 kern_boot->hdr.loadflags |= CAN_USE_HEAP;
387 * Read initrd image into guest memory
389 if (fd_initrd >= 0) {
390 struct stat initrd_stat;
393 if (fstat(fd_initrd, &initrd_stat))
/* Start from the highest address the kernel allows, rounded down to 1 MiB. */
396 addr = boot.hdr.initrd_addr_max & ~0xfffff;
398 if (addr < BZ_KERNEL_START)
399 die("Not enough memory for initrd");
400 else if (addr < (kvm->ram_size - initrd_stat.st_size))
405 p = guest_flat_to_host(kvm, addr);
406 nr = read(fd_initrd, p, initrd_stat.st_size);
407 if (nr != initrd_stat.st_size)
408 die("Failed to read initrd");
/* Tell the kernel where the initrd lives and how big it is. */
410 kern_boot->hdr.ramdisk_image = addr;
411 kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
414 kvm->boot_selector = BOOT_LOADER_SELECTOR;
416 * The real-mode setup code starts at offset 0x200 of a bzImage. See
417 * Documentation/x86/boot.txt for details.
419 kvm->boot_ip = BOOT_LOADER_IP + 0x200;
420 kvm->boot_sp = BOOT_LOADER_SP;
/*
 * kvm__load_kernel - load a kernel image into the guest, trying bzImage first
 * and falling back to a raw flat binary. Dies if neither format loads.
 */
425 bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
426 const char *initrd_filename, const char *kernel_cmdline)
429 int fd_kernel = -1, fd_initrd = -1;
431 fd_kernel = open(kernel_filename, O_RDONLY);
433 die("Unable to open kernel %s", kernel_filename);
435 if (initrd_filename) {
436 fd_initrd = open(initrd_filename, O_RDONLY);
438 die("Unable to open initrd %s", initrd_filename);
441 ret = load_bzimage(kvm, fd_kernel, fd_initrd, kernel_cmdline);
/* Not a bzImage: retry the same file as a flat binary. */
449 warning("%s is not a bzImage. Trying to load it as a flat binary...", kernel_filename);
451 ret = load_flat_binary(kvm, fd_kernel);
457 die("%s is not a valid bzImage or flat binary", kernel_filename);
466 * kvm__setup_bios - inject BIOS into guest system memory
467 * @kvm - guest system descriptor
469 * This function is a main routine where we poke guest memory
470 * and install BIOS there.
472 void kvm__setup_bios(struct kvm *kvm)
474 /* standard minimal configuration */
477 /* FIXME: SMP, ACPI and friends here */
/* Build the MP table describing the configured number of CPUs. */
480 mptable_setup(kvm, kvm->nrcpus);
483 #define TIMER_INTERVAL_NS 1000000 /* 1 msec */

486 * This function sets up a timer that's used to inject interrupts from the
487 * userspace hypervisor into the guest at periodical intervals. Please note
488 * that clock interrupt, for example, is not handled here.
490 void kvm__start_timer(struct kvm *kvm)
492 struct itimerspec its;
/* Deliver SIGALRM to the process on each timer expiry. */
495 memset(&sev, 0, sizeof(struct sigevent));
496 sev.sigev_value.sival_int = 0;
497 sev.sigev_notify = SIGEV_SIGNAL;
498 sev.sigev_signo = SIGALRM;
500 if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
501 die("timer_create()");
/* Arm a periodic timer: first expiry and repeat interval are both 1 ms. */
503 its.it_value.tv_sec = TIMER_INTERVAL_NS / 1000000000;
504 its.it_value.tv_nsec = TIMER_INTERVAL_NS % 1000000000;
505 its.it_interval.tv_sec = its.it_value.tv_sec;
506 its.it_interval.tv_nsec = its.it_value.tv_nsec;
508 if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
509 die("timer_settime()");
/* Disarm and delete the periodic injection timer created by kvm__start_timer(). */
512 void kvm__stop_timer(struct kvm *kvm)
515 if (timer_delete(kvm->timerid) < 0)
516 die("timer_delete()");
/* Drive an in-kernel irqchip interrupt line via KVM_IRQ_LINE; dies if the ioctl fails. */
521 void kvm__irq_line(struct kvm *kvm, int irq, int level)
523 struct kvm_irq_level irq_level;
525 irq_level = (struct kvm_irq_level) {
532 if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
533 die_perror("KVM_IRQ_LINE failed");
/*
 * Hex-dump @size bytes of guest memory starting at guest-flat address @addr,
 * eight bytes per row; @size is rounded down to a multiple of 8.
 */
536 void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
541 size &= ~7; /* mod 8 */
545 p = guest_flat_to_host(kvm, addr);
547 for (n = 0; n < size; n += 8) {
/* Stop before reading past the end of mapped guest RAM. */
548 if (!host_ptr_in_ram(kvm, p + n))
551 printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
552 addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
553 p[n + 4], p[n + 5], p[n + 6], p[n + 7]);