KVM: Make EFER reads safe when EFER does not exist
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cfff3d93b4cf8fe1f64349768183742..662cf12d1db9436ea06e844ae7fce19219ba98d3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -752,6 +752,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
+       case MSR_IA32_MC0_MISC+20:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
                data = 0;
@@ -883,6 +884,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
+       case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -981,9 +983,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 static int is_efer_nx(void)
 {
-       u64 efer;
+       unsigned long long efer = 0;
 
-       rdmsrl(MSR_EFER, efer);
+       rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
 }
 
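This hunk is the change named in the subject line. On CPUs that do not implement EFER (32-bit processors without NX/LM support), a plain rdmsrl() faults; rdmsrl_safe() catches the fault through the exception tables and returns nonzero instead, and since the destination cannot be trusted on failure, efer is pre-initialized to 0. A minimal sketch of the pattern, with the return value checked explicitly for illustration (the code above simply ignores it):

	unsigned long long efer = 0;

	/* rdmsrl_safe() returns 0 on success, nonzero if the RDMSR
	 * faulted; in the latter case fall back to the zero default. */
	if (rdmsrl_safe(MSR_EFER, &efer))
		return 0;		/* no EFER -> no NX support */
	return !!(efer & EFER_NX);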
@@ -1302,28 +1304,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
+       struct kvm_lapic_state *lapic = NULL;
 
        switch (ioctl) {
        case KVM_GET_LAPIC: {
-               struct kvm_lapic_state lapic;
+               lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
 
-               memset(&lapic, 0, sizeof lapic);
-               r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
+               r = -ENOMEM;
+               if (!lapic)
+                       goto out;
+               r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = -EFAULT;
-               if (copy_to_user(argp, &lapic, sizeof lapic))
+               if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_LAPIC: {
-               struct kvm_lapic_state lapic;
-
+               lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
+               r = -ENOMEM;
+               if (!lapic)
+                       goto out;
                r = -EFAULT;
-               if (copy_from_user(&lapic, argp, sizeof lapic))
+               if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
                        goto out;
-               r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
+               r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = 0;
@@ -1421,6 +1428,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
        }
 out:
+       if (lapic)
+               kfree(lapic);
        return r;
 }
 
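struct kvm_lapic_state is about 1 KiB of APIC register state, so the two hunks above move it off the kernel stack onto the heap: a single NULL-initialized pointer serves both cases and is freed once at the common out: label (kfree(NULL) is a no-op, so the if (lapic) guard is redundant but harmless). Stripped to a skeleton, with the hypothetical names big_state and fill_state standing in for the real type and helper:

	struct big_state *s = kzalloc(sizeof(*s), GFP_KERNEL);	/* off-stack */

	if (!s)
		return -ENOMEM;
	r = fill_state(vcpu, s);			/* hypothetical helper */
	if (!r && copy_to_user(argp, s, sizeof(*s)))
		r = -EFAULT;
	kfree(s);					/* freed on every path */
	return r;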
@@ -1495,6 +1504,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                goto out;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1506,6 +1516,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                        break;
        kvm->arch.naliases = n;
 
+       spin_unlock(&kvm->mmu_lock);
        kvm_mmu_zap_all(kvm);
 
        up_write(&kvm->slots_lock);
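The alias-table rewrite is now bracketed by kvm->mmu_lock in addition to slots_lock: slots_lock only serializes against other ioctls, while MMU-side readers of kvm->arch.aliases (such as the rmap lookup named in the final hunk of this diff) run under mmu_lock and must never observe a half-written entry. The resulting ordering, sketched:

	down_write(&kvm->slots_lock);	/* exclude other ioctls */
	spin_lock(&kvm->mmu_lock);	/* exclude MMU-side readers */
	/* ... rewrite kvm->arch.aliases[] and naliases ... */
	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);		/* drop mappings built on old aliases */
	up_write(&kvm->slots_lock);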
@@ -1627,6 +1638,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;
+       /*
+        * This union makes it completely explicit to gcc-3.x
+        * that these two variables' stack usage should be
+        * combined, not added together.
+        */
+       union {
+               struct kvm_pit_state ps;
+               struct kvm_memory_alias alias;
+       } u;
 
        switch (ioctl) {
        case KVM_SET_TSS_ADDR:
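The union declared above relies on the fact that union members share storage, so the frame grows by the largest member rather than the sum of both; modern compilers usually overlap the stack slots of locals scoped to different switch cases anyway, but as the comment notes, gcc-3.x did not. In outline:

	union {
		struct kvm_pit_state ps;	/* KVM_{GET,SET}_PIT */
		struct kvm_memory_alias alias;	/* KVM_SET_MEMORY_ALIAS */
	} u;
	/* sizeof(u) == max of the member sizes, not their sum */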
@@ -1658,17 +1678,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
        case KVM_GET_NR_MMU_PAGES:
                r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
                break;
-       case KVM_SET_MEMORY_ALIAS: {
-               struct kvm_memory_alias alias;
-
+       case KVM_SET_MEMORY_ALIAS:
                r = -EFAULT;
-               if (copy_from_user(&alias, argp, sizeof alias))
+               if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
                        goto out;
-               r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+               r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
                if (r)
                        goto out;
                break;
-       }
        case KVM_CREATE_IRQCHIP:
                r = -ENOMEM;
                kvm->arch.vpic = kvm_create_pic(kvm);
@@ -1710,65 +1727,77 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip chip;
+               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
 
-               r = -EFAULT;
-               if (copy_from_user(&chip, argp, sizeof chip))
+               r = -ENOMEM;
+               if (!chip)
                        goto out;
+               r = -EFAULT;
+               if (copy_from_user(chip, argp, sizeof *chip))
+                       goto get_irqchip_out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
-                       goto out;
-               r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+                       goto get_irqchip_out;
+               r = kvm_vm_ioctl_get_irqchip(kvm, chip);
                if (r)
-                       goto out;
+                       goto get_irqchip_out;
                r = -EFAULT;
-               if (copy_to_user(argp, &chip, sizeof chip))
-                       goto out;
+               if (copy_to_user(argp, chip, sizeof *chip))
+                       goto get_irqchip_out;
                r = 0;
+       get_irqchip_out:
+               kfree(chip);
+               if (r)
+                       goto out;
                break;
        }
        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-               struct kvm_irqchip chip;
+               struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
 
-               r = -EFAULT;
-               if (copy_from_user(&chip, argp, sizeof chip))
+               r = -ENOMEM;
+               if (!chip)
                        goto out;
+               r = -EFAULT;
+               if (copy_from_user(chip, argp, sizeof *chip))
+                       goto set_irqchip_out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
-                       goto out;
-               r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+                       goto set_irqchip_out;
+               r = kvm_vm_ioctl_set_irqchip(kvm, chip);
                if (r)
-                       goto out;
+                       goto set_irqchip_out;
                r = 0;
+       set_irqchip_out:
+               kfree(chip);
+               if (r)
+                       goto out;
                break;
        }
        case KVM_GET_PIT: {
-               struct kvm_pit_state ps;
                r = -EFAULT;
-               if (copy_from_user(&ps, argp, sizeof ps))
+               if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
                        goto out;
                r = -ENXIO;
                if (!kvm->arch.vpit)
                        goto out;
-               r = kvm_vm_ioctl_get_pit(kvm, &ps);
+               r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
                if (r)
                        goto out;
                r = -EFAULT;
-               if (copy_to_user(argp, &ps, sizeof ps))
+               if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_PIT: {
-               struct kvm_pit_state ps;
                r = -EFAULT;
-               if (copy_from_user(&ps, argp, sizeof ps))
+               if (copy_from_user(&u.ps, argp, sizeof u.ps))
                        goto out;
                r = -ENXIO;
                if (!kvm->arch.vpit)
                        goto out;
-               r = kvm_vm_ioctl_set_pit(kvm, &ps);
+               r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
                if (r)
                        goto out;
                r = 0;
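KVM_GET_IRQCHIP and KVM_SET_IRQCHIP get the same off-stack treatment as the LAPIC ioctls, but because the buffer is only live within one case, each case frees it at a local label before deciding whether to jump to the common out: exit. The control flow, reduced to a skeleton with a hypothetical do_work() helper:

	case SOME_IOCTL: {
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;			/* nothing to free yet */
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof(*chip)))
			goto some_ioctl_out;
		r = do_work(kvm, chip);			/* hypothetical helper */
	some_ioctl_out:
		kfree(chip);				/* freed on every path */
		if (r)
			goto out;
		break;
	}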
@@ -2810,10 +2839,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        down_read(&vcpu->kvm->slots_lock);
        vapic_enter(vcpu);
 
-preempted:
-       if (vcpu->guest_debug.enabled)
-               kvm_x86_ops->guest_debug_pre(vcpu);
-
 again:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -2867,6 +2892,9 @@ again:
                goto out;
        }
 
+       if (vcpu->guest_debug.enabled)
+               kvm_x86_ops->guest_debug_pre(vcpu);
+
        vcpu->guest_mode = 1;
        /*
         * Make sure that guest_mode assignment won't happen after
@@ -2941,7 +2969,7 @@ out:
        if (r > 0) {
                kvm_resched(vcpu);
                down_read(&vcpu->kvm->slots_lock);
-               goto preempted;
+               goto again;
        }
 
        post_kvm_run_save(vcpu, kvm_run);
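These three hunks retire the preempted: label: guest debug state was previously re-established only when re-entering after a reschedule, whereas guest_debug_pre() now runs on every pass through the loop, immediately before guest entry. After the change the loop is shaped like:

	again:
		/* ... process vcpu->requests, inject pending events ... */
		if (vcpu->guest_debug.enabled)
			kvm_x86_ops->guest_debug_pre(vcpu);	/* every iteration */
		vcpu->guest_mode = 1;
		/* ... enter guest, handle the exit ... */
		if (r > 0)
			goto again;		/* was: goto preempted */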
@@ -3184,6 +3212,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
        kvm_desct->base |= seg_desc->base2 << 24;
        kvm_desct->limit = seg_desc->limit0;
        kvm_desct->limit |= seg_desc->limit << 16;
+       if (seg_desc->g) {
+               kvm_desct->limit <<= 12;
+               kvm_desct->limit |= 0xfff;
+       }
        kvm_desct->selector = selector;
        kvm_desct->type = seg_desc->type;
        kvm_desct->present = seg_desc->p;
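The added block applies the descriptor's granularity bit: when G=1 the 20-bit limit field counts 4 KiB pages rather than bytes, so it is shifted up by 12 and the low 12 bits are filled with ones. Worked example:

	/* G=1 with the maximum 20-bit limit field:
	 *   (0xFFFFF << 12) | 0xFFF == 0xFFFFFFFF
	 * i.e. the segment limit covers the full 4 GiB address space. */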
@@ -3223,6 +3255,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
+       gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;
 
@@ -3232,13 +3265,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return 1;
        }
-       return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+       gpa += index * 8;
+       return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
+       gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;
 
@@ -3246,7 +3282,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        if (dtable.limit < index * 8 + 7)
                return 1;
-       return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+       gpa += index * 8;
+       return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
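Both descriptor accessors previously handed dtable.base straight to kvm_read_guest()/kvm_write_guest(), which operate on guest physical addresses; once the guest enables paging, the GDT/LDT base held in the descriptor-table register is a guest virtual address, so it must be translated through the guest MMU first. In outline:

	/* dtable.base is a gva; the guest-memory accessors want a gpa */
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;		/* descriptors are 8 bytes each */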
@@ -3258,55 +3296,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
        base_addr |= (seg_desc->base1 << 16);
        base_addr |= (seg_desc->base2 << 24);
 
-       return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_32 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_read_guest(vcpu->kvm, base_addr, tss,
-                             sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_32 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_write_guest(vcpu->kvm, base_addr, tss,
-                              sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_16 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_read_guest(vcpu->kvm, base_addr, tss,
-                             sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-                             struct desc_struct *seg_desc,
-                             struct tss_segment_16 *tss)
-{
-       u32 base_addr;
-
-       base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-       return kvm_write_guest(vcpu->kvm, base_addr, tss,
-                              sizeof(struct tss_segment_16));
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3329,11 +3319,33 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
+{
+       struct kvm_segment segvar = {
+               .base = selector << 4,
+               .limit = 0xffff,
+               .selector = selector,
+               .type = 3,
+               .present = 1,
+               .dpl = 3,
+               .db = 0,
+               .s = 1,
+               .l = 0,
+               .g = 0,
+               .avl = 0,
+               .unusable = 0,
+       };
+       kvm_x86_ops->set_segment(vcpu, &segvar, seg);
+       return 0;
+}
+
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
 
+       if (!(vcpu->arch.cr0 & X86_CR0_PE))
+               return kvm_load_realmode_segment(vcpu, selector, seg);
        if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
                return 1;
        kvm_seg.type |= type_bits;
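kvm_load_realmode_segment() is new, and kvm_load_segment_descriptor() now diverts to it when CR0.PE is clear: in real mode a selector is not a descriptor-table index but a paragraph number, so the segment is synthesized directly instead of being fetched from the GDT/LDT. Worked example:

	/* selector 0x1234 -> base 0x1234 << 4 == 0x12340,
	 * limit 0xffff (every real-mode segment is 64 KiB),
	 * byte granularity (g = 0). */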
@@ -3466,20 +3478,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      struct desc_struct *cseg_desc,
+                      u32 old_tss_base,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_16 tss_segment_16;
        int ret = 0;
 
-       if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+                          sizeof tss_segment_16))
                goto out;
 
        save_state_to_tss16(vcpu, &tss_segment_16);
-       save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-       if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+                           sizeof tss_segment_16))
                goto out;
+
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+                          &tss_segment_16, sizeof tss_segment_16))
+               goto out;
+
        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;
 
@@ -3489,20 +3507,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      struct desc_struct *cseg_desc,
+                      u32 old_tss_base,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_32 tss_segment_32;
        int ret = 0;
 
-       if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+       if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+                          sizeof tss_segment_32))
                goto out;
 
        save_state_to_tss32(vcpu, &tss_segment_32);
-       save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-       if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+       if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+                           sizeof tss_segment_32))
                goto out;
+
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+                          &tss_segment_32, sizeof tss_segment_32))
+               goto out;
+
        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;
 
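Both task-switch helpers now receive the outgoing TSS's guest-physical base instead of its descriptor, which is what allowed the four load/save_tss_segment{16,32}() wrappers to be deleted above in favor of direct kvm_read_guest()/kvm_write_guest() calls. The sequence is identical in both widths:

	/* 1. read the outgoing TSS image at old_tss_base (already a gpa)
	 * 2. save current register state into that image
	 * 3. write the image back to old_tss_base
	 * 4. read the incoming TSS at get_tss_base_addr(nseg_desc),
	 *    which now also returns a gpa
	 * 5. load register state from the incoming image */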
@@ -3517,16 +3541,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        struct desc_struct cseg_desc;
        struct desc_struct nseg_desc;
        int ret = 0;
+       u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+       u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-       kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+       old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+       /* FIXME: Handle errors. Failure to read either TSS or their
+        * descriptors should generate a pagefault.
+        */
        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;
 
-       if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+       if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
                goto out;
 
-
        if (reason != TASK_SWITCH_IRET) {
                int cpl;
 
@@ -3544,8 +3572,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                cseg_desc.type &= ~(1 << 1); //clear the B flag
-               save_guest_segment_descriptor(vcpu, tr_seg.selector,
-                                             &cseg_desc);
+               save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
        }
 
        if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3584,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        kvm_x86_ops->cache_regs(vcpu);
 
        if (nseg_desc.type & 8)
-               ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+               ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
                                         &nseg_desc);
        else
-               ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+               ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
                                         &nseg_desc);
 
        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
@@ -3995,16 +4022,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
+                       unsigned long userspace_addr;
+
                        down_write(&current->mm->mmap_sem);
-                       memslot->userspace_addr = do_mmap(NULL, 0,
-                                                    npages * PAGE_SIZE,
-                                                    PROT_READ | PROT_WRITE,
-                                                    MAP_SHARED | MAP_ANONYMOUS,
-                                                    0);
+                       userspace_addr = do_mmap(NULL, 0,
+                                                npages * PAGE_SIZE,
+                                                PROT_READ | PROT_WRITE,
+                                                MAP_PRIVATE | MAP_ANONYMOUS,
+                                                0);
                        up_write(&current->mm->mmap_sem);
 
-                       if (IS_ERR((void *)memslot->userspace_addr))
-                               return PTR_ERR((void *)memslot->userspace_addr);
+                       if (IS_ERR((void *)userspace_addr))
+                               return PTR_ERR((void *)userspace_addr);
+
+                       /* set userspace_addr atomically for kvm_hva_to_rmapp */
+                       spin_lock(&kvm->mmu_lock);
+                       memslot->userspace_addr = userspace_addr;
+                       spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;
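Two fixes close the diff: the anonymous mapping backing a kernel-allocated slot switches from MAP_SHARED to MAP_PRIVATE, and the mmap result is now staged in a local so the error check runs before anything is published; only a verified address is stored into memslot->userspace_addr, under mmu_lock, so the kvm_hva_to_rmapp lookup named in the comment never sees a half-updated value. The publish pattern, sketched:

	unsigned long addr = do_mmap(...);		/* fallible work first */

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);		/* slot left untouched */
	spin_lock(&kvm->mmu_lock);
	memslot->userspace_addr = addr;			/* publish under the lock */
	spin_unlock(&kvm->mmu_lock);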