KVM: x86: Clear DR6[0:3] on #DB during handle_dr
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8b76c4c95e2807c73fbe2a14606cb9b9929ebdd..ad5e0806e1e12f90a93463275762f949dfbbe5ad 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -105,7 +105,7 @@ module_param(nested, bool, S_IRUGO);
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS                                     \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT)
+        | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
 
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -720,12 +720,15 @@ static const unsigned short vmcs_field_to_offset_table[] = {
        FIELD(HOST_RSP, host_rsp),
        FIELD(HOST_RIP, host_rip),
 };
-static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
 
 static inline short vmcs_field_to_offset(unsigned long field)
 {
-       if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
-               return -1;
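+       /* Offsets (and -ENOENT below) must fit in the short return type. */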
+       BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
+
+       if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
+           vmcs_field_to_offset_table[field] == 0)
+               return -ENOENT;
+
        return vmcs_field_to_offset_table[field];
 }
 
@@ -3110,76 +3113,6 @@ static __init int alloc_kvm_area(void)
        return 0;
 }
 
-static __init int hardware_setup(void)
-{
-       if (setup_vmcs_config(&vmcs_config) < 0)
-               return -EIO;
-
-       if (boot_cpu_has(X86_FEATURE_NX))
-               kvm_enable_efer_bits(EFER_NX);
-
-       if (!cpu_has_vmx_vpid())
-               enable_vpid = 0;
-       if (!cpu_has_vmx_shadow_vmcs())
-               enable_shadow_vmcs = 0;
-       if (enable_shadow_vmcs)
-               init_vmcs_shadow_fields();
-
-       if (!cpu_has_vmx_ept() ||
-           !cpu_has_vmx_ept_4levels()) {
-               enable_ept = 0;
-               enable_unrestricted_guest = 0;
-               enable_ept_ad_bits = 0;
-       }
-
-       if (!cpu_has_vmx_ept_ad_bits())
-               enable_ept_ad_bits = 0;
-
-       if (!cpu_has_vmx_unrestricted_guest())
-               enable_unrestricted_guest = 0;
-
-       if (!cpu_has_vmx_flexpriority()) {
-               flexpriority_enabled = 0;
-
-               /*
-                * set_apic_access_page_addr() is used to reload apic access
-                * page upon invalidation.  No need to do anything if the
-                * processor does not have the APIC_ACCESS_ADDR VMCS field.
-                */
-               kvm_x86_ops->set_apic_access_page_addr = NULL;
-       }
-
-       if (!cpu_has_vmx_tpr_shadow())
-               kvm_x86_ops->update_cr8_intercept = NULL;
-
-       if (enable_ept && !cpu_has_vmx_ept_2m_page())
-               kvm_disable_largepages();
-
-       if (!cpu_has_vmx_ple())
-               ple_gap = 0;
-
-       if (!cpu_has_vmx_apicv())
-               enable_apicv = 0;
-
-       if (enable_apicv)
-               kvm_x86_ops->update_cr8_intercept = NULL;
-       else {
-               kvm_x86_ops->hwapic_irr_update = NULL;
-               kvm_x86_ops->deliver_posted_interrupt = NULL;
-               kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
-       }
-
-       if (nested)
-               nested_vmx_setup_ctls_msrs();
-
-       return alloc_kvm_area();
-}
-
-static __exit void hardware_unsetup(void)
-{
-       free_kvm_area();
-}
-
 static bool emulation_required(struct kvm_vcpu *vcpu)
 {
        return emulate_invalid_guest_state && !guest_state_valid(vcpu);
@@ -4579,7 +4512,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write32(TPR_THRESHOLD, 0);
        }
 
-       kvm_vcpu_reload_apic_access_page(vcpu);
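+       /* Defer the APIC access page reload to the next vcpu entry. */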
+       kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
        if (vmx_vm_has_apicv(vcpu->kvm))
                memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -5163,13 +5096,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
-       int dr, reg;
+       int dr, dr7, reg;
+
+       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+       dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
+
+       /* First, if DR does not exist, trigger UD */
+       if (!kvm_require_dr(vcpu, dr))
+               return 1;
 
        /* Do not handle if the CPL > 0, will trigger GP on re-entry */
        if (!kvm_require_cpl(vcpu, 0))
                return 1;
-       dr = vmcs_readl(GUEST_DR7);
-       if (dr & DR7_GD) {
+       dr7 = vmcs_readl(GUEST_DR7);
+       if (dr7 & DR7_GD) {
                /*
                 * As the vm-exit takes precedence over the debug trap, we
                 * need to emulate the latter, either for the host or the
@@ -5177,7 +5117,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
                        vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
-                       vcpu->run->debug.arch.dr7 = dr;
+                       vcpu->run->debug.arch.dr7 = dr7;
                        vcpu->run->debug.arch.pc =
                                vmcs_readl(GUEST_CS_BASE) +
                                vmcs_readl(GUEST_RIP);
@@ -5185,9 +5125,8 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        return 0;
                } else {
-                       vcpu->arch.dr7 &= ~DR7_GD;
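+                       /*
+                        * Clear the stale B0-B3 status bits before
+                        * reporting the general-detect #DB via DR6.BD.
+                        */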
+                       vcpu->arch.dr6 &= ~15;
                        vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-                       vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
                        kvm_queue_exception(vcpu, DB_VECTOR);
                        return 1;
                }
@@ -5209,8 +5148,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-       dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
        reg = DEBUG_REG_ACCESS_REG(exit_qualification);
        if (exit_qualification & TYPE_MOV_FROM_DR) {
                unsigned long val;
@@ -5492,7 +5429,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
        }
 
        /* clear all local breakpoint enable flags */
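+       /* The 0x155 mask clears DR7.LE (bit 8) in addition to L0-L3. */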
-       vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
+       vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x155);
 
        /*
         * TODO: What about debug traps on tss switch?
@@ -5785,6 +5722,204 @@ static void update_ple_window_actual_max(void)
                                            ple_window_grow, INT_MIN);
 }
 
+static __init int hardware_setup(void)
+{
+       int r = -ENOMEM, i, msr;
+
+       rdmsrl_safe(MSR_EFER, &host_efer);
+
+       for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
+               kvm_define_shared_msr(i, vmx_msr_index[i]);
+
+       vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_io_bitmap_a)
+               return r;
+
+       vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_io_bitmap_b)
+               goto out;
+
+       vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_msr_bitmap_legacy)
+               goto out1;
+
+       vmx_msr_bitmap_legacy_x2apic =
+                               (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_msr_bitmap_legacy_x2apic)
+               goto out2;
+
+       vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_msr_bitmap_longmode)
+               goto out3;
+
+       vmx_msr_bitmap_longmode_x2apic =
+                               (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_msr_bitmap_longmode_x2apic)
+               goto out4;
+       vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_vmread_bitmap)
+               goto out5;
+
+       vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+       if (!vmx_vmwrite_bitmap)
+               goto out6;
+
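+       /*
+        * Intercept all VMREAD/VMWRITEs by default; init_vmcs_shadow_fields()
+        * later clears the bits for fields that may be shadowed.
+        */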
+       memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
+       memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+
+       /*
+        * Allow direct access to the PC debug port (it is often used for I/O
+        * delays, but the vmexits simply slow things down).
+        */
+       memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
+       clear_bit(0x80, vmx_io_bitmap_a);
+
+       memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
+
+       memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
+       memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+
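+       /* Pass frequently accessed MSRs straight through to the guest. */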
+       vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+       vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+       vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+       memcpy(vmx_msr_bitmap_legacy_x2apic,
+                       vmx_msr_bitmap_legacy, PAGE_SIZE);
+       memcpy(vmx_msr_bitmap_longmode_x2apic,
+                       vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+       if (enable_apicv) {
+               for (msr = 0x800; msr <= 0x8ff; msr++)
+                       vmx_disable_intercept_msr_read_x2apic(msr);
+
+               /*
+                * According to the SDM, in x2apic mode the whole ID register
+                * is used, but KVM uses only the highest eight bits, so it
+                * must still be intercepted.
+                */
+               vmx_enable_intercept_msr_read_x2apic(0x802);
+               /* TMCCT */
+               vmx_enable_intercept_msr_read_x2apic(0x839);
+               /* TPR */
+               vmx_disable_intercept_msr_write_x2apic(0x808);
+               /* EOI */
+               vmx_disable_intercept_msr_write_x2apic(0x80b);
+               /* SELF-IPI */
+               vmx_disable_intercept_msr_write_x2apic(0x83f);
+       }
+
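+       /* With EPT, have the MMU use EPT's accessed/dirty bits when available. */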
+       if (enable_ept) {
+               kvm_mmu_set_mask_ptes(0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+                       0ull, VMX_EPT_EXECUTABLE_MASK);
+               ept_set_mmio_spte_mask();
+               kvm_enable_tdp();
+       } else
+               kvm_disable_tdp();
+
+       update_ple_window_actual_max();
+
+       if (setup_vmcs_config(&vmcs_config) < 0) {
+               r = -EIO;
+               goto out7;
+    }
+
+       if (boot_cpu_has(X86_FEATURE_NX))
+               kvm_enable_efer_bits(EFER_NX);
+
+       if (!cpu_has_vmx_vpid())
+               enable_vpid = 0;
+       if (!cpu_has_vmx_shadow_vmcs())
+               enable_shadow_vmcs = 0;
+       if (enable_shadow_vmcs)
+               init_vmcs_shadow_fields();
+
+       if (!cpu_has_vmx_ept() ||
+           !cpu_has_vmx_ept_4levels()) {
+               enable_ept = 0;
+               enable_unrestricted_guest = 0;
+               enable_ept_ad_bits = 0;
+       }
+
+       if (!cpu_has_vmx_ept_ad_bits())
+               enable_ept_ad_bits = 0;
+
+       if (!cpu_has_vmx_unrestricted_guest())
+               enable_unrestricted_guest = 0;
+
+       if (!cpu_has_vmx_flexpriority()) {
+               flexpriority_enabled = 0;
+
+               /*
+                * set_apic_access_page_addr() is used to reload apic access
+                * page upon invalidation.  No need to do anything if the
+                * processor does not have the APIC_ACCESS_ADDR VMCS field.
+                */
+               kvm_x86_ops->set_apic_access_page_addr = NULL;
+       }
+
+       if (!cpu_has_vmx_tpr_shadow())
+               kvm_x86_ops->update_cr8_intercept = NULL;
+
+       if (enable_ept && !cpu_has_vmx_ept_2m_page())
+               kvm_disable_largepages();
+
+       if (!cpu_has_vmx_ple())
+               ple_gap = 0;
+
+       if (!cpu_has_vmx_apicv())
+               enable_apicv = 0;
+
+       if (enable_apicv)
+               kvm_x86_ops->update_cr8_intercept = NULL;
+       else {
+               kvm_x86_ops->hwapic_irr_update = NULL;
+               kvm_x86_ops->deliver_posted_interrupt = NULL;
+               kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+       }
+
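+       /* Precompute the VMX capability MSR values exposed to L1. */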
+       if (nested)
+               nested_vmx_setup_ctls_msrs();
+
+       return alloc_kvm_area();
+
+out7:
+       free_page((unsigned long)vmx_vmwrite_bitmap);
+out6:
+       free_page((unsigned long)vmx_vmread_bitmap);
+out5:
+       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+out4:
+       free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+out2:
+       free_page((unsigned long)vmx_msr_bitmap_legacy);
+out1:
+       free_page((unsigned long)vmx_io_bitmap_b);
+out:
+       free_page((unsigned long)vmx_io_bitmap_a);
+
+       return r;
+}
+
+static __exit void hardware_unsetup(void)
+{
+       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+       free_page((unsigned long)vmx_msr_bitmap_legacy);
+       free_page((unsigned long)vmx_msr_bitmap_longmode);
+       free_page((unsigned long)vmx_io_bitmap_b);
+       free_page((unsigned long)vmx_io_bitmap_a);
+       free_page((unsigned long)vmx_vmwrite_bitmap);
+       free_page((unsigned long)vmx_vmread_bitmap);
+
+       free_kvm_area();
+}
+
 /*
  * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
  * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
@@ -6361,58 +6496,60 @@ static inline int vmcs_field_readonly(unsigned long field)
  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
  * 64-bit fields are to be returned).
  */
-static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
-                                       unsigned long field, u64 *ret)
+static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
+                                 unsigned long field, u64 *ret)
 {
        short offset = vmcs_field_to_offset(field);
        char *p;
 
        if (offset < 0)
-               return 0;
+               return offset;
 
        p = ((char *)(get_vmcs12(vcpu))) + offset;
 
        switch (vmcs_field_type(field)) {
        case VMCS_FIELD_TYPE_NATURAL_WIDTH:
                *ret = *((natural_width *)p);
-               return 1;
+               return 0;
        case VMCS_FIELD_TYPE_U16:
                *ret = *((u16 *)p);
-               return 1;
+               return 0;
        case VMCS_FIELD_TYPE_U32:
                *ret = *((u32 *)p);
-               return 1;
+               return 0;
        case VMCS_FIELD_TYPE_U64:
                *ret = *((u64 *)p);
-               return 1;
+               return 0;
        default:
-               return 0; /* can never happen. */
+               WARN_ON(1);
+               return -ENOENT;
        }
 }
 
 
-static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
-                                   unsigned long field, u64 field_value){
+static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
+                                  unsigned long field, u64 field_value){
        short offset = vmcs_field_to_offset(field);
        char *p = ((char *) get_vmcs12(vcpu)) + offset;
        if (offset < 0)
-               return false;
+               return offset;
 
        switch (vmcs_field_type(field)) {
        case VMCS_FIELD_TYPE_U16:
                *(u16 *)p = field_value;
-               return true;
+               return 0;
        case VMCS_FIELD_TYPE_U32:
                *(u32 *)p = field_value;
-               return true;
+               return 0;
        case VMCS_FIELD_TYPE_U64:
                *(u64 *)p = field_value;
-               return true;
+               return 0;
        case VMCS_FIELD_TYPE_NATURAL_WIDTH:
                *(natural_width *)p = field_value;
-               return true;
+               return 0;
        default:
-               return false; /* can never happen. */
+               WARN_ON(1);
+               return -ENOENT;
        }
 
 }
@@ -6426,6 +6563,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
        const unsigned long *fields = shadow_read_write_fields;
        const int num_fields = max_shadow_read_write_fields;
 
+       preempt_disable();
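+       /*
+        * vmcs_load()/vmcs_clear() act on the current CPU's working VMCS,
+        * so preemption stays disabled until this vcpu's own vmcs is
+        * reloaded below.
+        */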
+
        vmcs_load(shadow_vmcs);
 
        for (i = 0; i < num_fields; i++) {
@@ -6443,12 +6582,17 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
                case VMCS_FIELD_TYPE_NATURAL_WIDTH:
                        field_value = vmcs_readl(field);
                        break;
+               default:
+                       WARN_ON(1);
+                       continue;
                }
                vmcs12_write_any(&vmx->vcpu, field, field_value);
        }
 
        vmcs_clear(shadow_vmcs);
        vmcs_load(vmx->loaded_vmcs->vmcs);
+
+       preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
@@ -6486,6 +6630,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
                        case VMCS_FIELD_TYPE_NATURAL_WIDTH:
                                vmcs_writel(field, (long)field_value);
                                break;
+                       default:
+                               WARN_ON(1);
+                               break;
                        }
                }
        }
@@ -6524,7 +6671,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        /* Decode instruction info and find the field to read */
        field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
        /* Read the field, zero-extended to a u64 field_value */
-       if (!vmcs12_read_any(vcpu, field, &field_value)) {
+       if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
                skip_emulated_instruction(vcpu);
                return 1;
@@ -6594,7 +6741,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (!vmcs12_write_any(vcpu, field, field_value)) {
+       if (vmcs12_write_any(vcpu, field, field_value) < 0) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
                skip_emulated_instruction(vcpu);
                return 1;
@@ -7273,6 +7420,9 @@ static void vmx_set_rvi(int vector)
        u16 status;
        u8 old;
 
+       if (vector == -1)
+               vector = 0;
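+       /* A vector of -1 means no interrupt is pending, so RVI is cleared. */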
+
        status = vmcs_read16(GUEST_INTR_STATUS);
        old = (u8)status & 0xff;
        if ((u8)vector != old) {
@@ -7284,22 +7434,23 @@ static void vmx_set_rvi(int vector)
 
 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 {
+       if (!is_guest_mode(vcpu)) {
+               vmx_set_rvi(max_irr);
+               return;
+       }
+
        if (max_irr == -1)
                return;
 
        /*
-        * If a vmexit is needed, vmx_check_nested_events handles it.
+        * We are in guest mode.  If a vmexit is needed,
+        * vmx_check_nested_events handles it.
         */
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               return;
-
-       if (!is_guest_mode(vcpu)) {
-               vmx_set_rvi(max_irr);
+       if (nested_exit_on_intr(vcpu))
                return;
-       }
 
        /*
-        * Fall back to pre-APICv interrupt injection since L2
+        * Else, fall back to pre-APICv interrupt injection since L2
         * is run without virtual interrupt delivery.
         */
        if (!kvm_event_needs_reinjection(vcpu) &&
@@ -9180,150 +9331,21 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-       int r, i, msr;
-
-       rdmsrl_safe(MSR_EFER, &host_efer);
-
-       for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
-               kvm_define_shared_msr(i, vmx_msr_index[i]);
-
-       vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_io_bitmap_a)
-               return -ENOMEM;
-
-       r = -ENOMEM;
-
-       vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_io_bitmap_b)
-               goto out;
-
-       vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy)
-               goto out1;
-
-       vmx_msr_bitmap_legacy_x2apic =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy_x2apic)
-               goto out2;
-
-       vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode)
-               goto out3;
-
-       vmx_msr_bitmap_longmode_x2apic =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode_x2apic)
-               goto out4;
-       vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_vmread_bitmap)
-               goto out5;
-
-       vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_vmwrite_bitmap)
-               goto out6;
-
-       memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
-       memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-
-       /*
-        * Allow direct access to the PC debug port (it is often used for I/O
-        * delays, but the vmexits simply slow things down).
-        */
-       memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-       clear_bit(0x80, vmx_io_bitmap_a);
-
-       memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
-
-       memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
-       memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-
-       set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
-
-       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
-                    __alignof__(struct vcpu_vmx), THIS_MODULE);
+       int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+                     __alignof__(struct vcpu_vmx), THIS_MODULE);
        if (r)
-               goto out7;
+               return r;
 
 #ifdef CONFIG_KEXEC
        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
                           crash_vmclear_local_loaded_vmcss);
 #endif
 
-       vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
-       vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-       vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
-       memcpy(vmx_msr_bitmap_legacy_x2apic,
-                       vmx_msr_bitmap_legacy, PAGE_SIZE);
-       memcpy(vmx_msr_bitmap_longmode_x2apic,
-                       vmx_msr_bitmap_longmode, PAGE_SIZE);
-
-       if (enable_apicv) {
-               for (msr = 0x800; msr <= 0x8ff; msr++)
-                       vmx_disable_intercept_msr_read_x2apic(msr);
-
-               /* According SDM, in x2apic mode, the whole id reg is used.
-                * But in KVM, it only use the highest eight bits. Need to
-                * intercept it */
-               vmx_enable_intercept_msr_read_x2apic(0x802);
-               /* TMCCT */
-               vmx_enable_intercept_msr_read_x2apic(0x839);
-               /* TPR */
-               vmx_disable_intercept_msr_write_x2apic(0x808);
-               /* EOI */
-               vmx_disable_intercept_msr_write_x2apic(0x80b);
-               /* SELF-IPI */
-               vmx_disable_intercept_msr_write_x2apic(0x83f);
-       }
-
-       if (enable_ept) {
-               kvm_mmu_set_mask_ptes(0ull,
-                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
-                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-                       0ull, VMX_EPT_EXECUTABLE_MASK);
-               ept_set_mmio_spte_mask();
-               kvm_enable_tdp();
-       } else
-               kvm_disable_tdp();
-
-       update_ple_window_actual_max();
-
        return 0;
-
-out7:
-       free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
-       free_page((unsigned long)vmx_vmread_bitmap);
-out5:
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
-       free_page((unsigned long)vmx_msr_bitmap_longmode);
-out3:
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
-       free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
-       free_page((unsigned long)vmx_io_bitmap_b);
-out:
-       free_page((unsigned long)vmx_io_bitmap_a);
-       return r;
 }
 
 static void __exit vmx_exit(void)
 {
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-       free_page((unsigned long)vmx_msr_bitmap_legacy);
-       free_page((unsigned long)vmx_msr_bitmap_longmode);
-       free_page((unsigned long)vmx_io_bitmap_b);
-       free_page((unsigned long)vmx_io_bitmap_a);
-       free_page((unsigned long)vmx_vmwrite_bitmap);
-       free_page((unsigned long)vmx_vmread_bitmap);
-
 #ifdef CONFIG_KEXEC
        RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
        synchronize_rcu();