Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f271276a025ea76759189cddfbe967ae1ed..7758680db20b78abbf35f1b263c5f9d8da33b5e1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4979,6 +4979,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        if (vmx_xsaves_supported())
                vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+       if (enable_pml) {
+               ASSERT(vmx->pml_pg);
+               vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+               vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+       }
+
        return 0;
 }
 
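With this change the VMCS half of PML setup happens in vmx_vcpu_setup(), which runs while the vcpu's vmcs01 is the current VMCS on the logical processor, so the PML_ADDRESS and GUEST_PML_INDEX writes are guaranteed to land in the right VMCS; only the page allocation remains in vmx_create_vcpu() (see the later hunks). For context, a hedged sketch of how these two fields are consumed when the log is drained, modeled on the flush logic elsewhere in this file; the function name and details are illustrative, not part of this patch:

/*
 * Illustrative sketch only (not part of this patch): hardware logs each
 * dirtied guest-physical address into the page at PML_ADDRESS and
 * decrements GUEST_PML_INDEX from PML_ENTITY_NUM - 1 toward 0, so the
 * valid entries occupy slots (pml_idx + 1) .. PML_ENTITY_NUM - 1 when a
 * VM-exit occurs.
 */
static void example_drain_pml(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u16 pml_idx = vmcs_read16(GUEST_PML_INDEX);
	u64 *pml_buf;

	if (pml_idx == PML_ENTITY_NUM - 1)
		return;		/* buffer is empty */

	/* The index points at the next free slot, or wraps to 0xffff
	 * when the buffer has filled up completely. */
	pml_idx = (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++)
		kvm_vcpu_mark_page_dirty(vcpu, pml_buf[pml_idx] >> PAGE_SHIFT);

	/* Mark the buffer empty again for the next guest run. */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}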
@@ -7937,22 +7943,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
        *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
-       struct page *pml_pg;
-
-       pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!pml_pg)
-               return -ENOMEM;
-
-       vmx->pml_pg = pml_pg;
-
-       vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-       vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
-       return 0;
-}
-
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
        if (vmx->pml_pg) {
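The removed helper mixed two jobs: allocating the backing page and programming the VMCS. The patch splits them, moving the allocation into vmx_create_vcpu() and the VMCS writes into vmx_vcpu_setup() above. The destructor's body is cut off by the hunk; a hedged sketch of what it presumably does, which is what makes the new free_pml error label (last hunk) safe even when the page was never allocated:

/*
 * Hedged sketch of the (unchanged) destructor, whose body the hunk
 * truncates: freeing is conditional on the page existing, so the call
 * is a no-op when PML is disabled or the allocation never happened.
 */
static void example_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
	if (vmx->pml_pg) {
		__free_page(vmx->pml_pg);
		vmx->pml_pg = NULL;
	}
}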
@@ -8224,6 +8214,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
                        (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
                        exit_reason != EXIT_REASON_EPT_VIOLATION &&
+                       exit_reason != EXIT_REASON_PML_FULL &&
                        exit_reason != EXIT_REASON_TASK_SWITCH)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
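EXIT_REASON_PML_FULL joins the short list of exit reasons that may legitimately occur while event delivery is in progress, since a page dirtied during injection can fill the log. No special recovery is needed: the buffer is drained on every VM-exit, so a handler only has to resume the guest. A hedged sketch of such a handler (name and details illustrative, not part of this patch):

/*
 * Illustrative sketch of a PML-full exit handler: the log was already
 * flushed on VM-exit, so just re-enter the guest.  If the exit
 * interrupted an IRET that was unblocking NMIs, re-assert "blocked by
 * NMI" first, as is done for EPT violations.
 */
static int example_handle_pml_full(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);

	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    (exit_qual & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
			      GUEST_INTR_STATE_NMI);

	return 1;	/* nothing for userspace to do; resume the guest */
}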
@@ -8854,6 +8845,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
        put_cpu();
 }
 
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int r;
+
+       r = vcpu_load(vcpu);
+       BUG_ON(r);
+       vmx_load_vmcs01(vcpu);
+       free_nested(vmx);
+       vcpu_put(vcpu);
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8862,8 +8869,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
                vmx_destroy_pml_buffer(vmx);
        free_vpid(vmx->vpid);
        leave_guest_mode(vcpu);
-       vmx_load_vmcs01(vcpu);
-       free_nested(vmx);
+       vmx_free_vcpu_nested(vcpu);
        free_loaded_vmcs(vmx->loaded_vmcs);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
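The new helper exists because free_nested() tears down the vmcs02 used while the vcpu is in guest mode; if that VMCS were still current on the logical processor, later VMREAD/VMWRITE or VMCLEAR operations could touch freed memory. Loading the vcpu and switching to vmcs01 first rules that out. A minimal sketch of the invariant it establishes (check name assumed, not part of this patch):

/*
 * Illustrative sketch: after vmx_load_vmcs01(), the loaded VMCS must be
 * vmcs01, never a vmcs02 that free_nested() is about to release.
 */
static void example_check_vmcs01_current(struct vcpu_vmx *vmx)
{
	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
}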
@@ -8885,14 +8891,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;
 
+       err = -ENOMEM;
+
+       /*
+        * If PML is turned on, failure to enable PML just results in failure
+        * to create the vcpu, therefore we can simplify the PML logic (by
+        * avoiding cases such as enabling PML partially on vcpus for the
+        * guest, etc.).
+        */
+       if (enable_pml) {
+               vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vmx->pml_pg)
+                       goto uninit_vcpu;
+       }
+
        vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
                     > PAGE_SIZE);
 
-       err = -ENOMEM;
-       if (!vmx->guest_msrs) {
-               goto uninit_vcpu;
-       }
+       if (!vmx->guest_msrs)
+               goto free_pml;
 
        vmx->loaded_vmcs = &vmx->vmcs01;
        vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8936,18 +8954,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        vmx->nested.current_vmptr = -1ull;
        vmx->nested.current_vmcs12 = NULL;
 
-       /*
-        * If PML is turned on, failure on enabling PML just results in failure
-        * of creating the vcpu, therefore we can simplify PML logic (by
-        * avoiding dealing with cases, such as enabling PML partially on vcpus
-        * for the guest, etc.
-        */
-       if (enable_pml) {
-               err = vmx_create_pml_buffer(vmx);
-               if (err)
-                       goto free_vmcs;
-       }
-
        return &vmx->vcpu;
 
@@ -8955,6 +8961,8 @@ free_vmcs:
        free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
+free_pml:
+       vmx_destroy_pml_buffer(vmx);
 uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
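With the PML allocation hoisted to the top of vmx_create_vcpu(), the error labels now unwind in exact reverse order of allocation. The hunk cuts off after free_vcpu:; a hedged sketch of the full chain as it presumably reads after the patch (the lines under free_vcpu: are assumed, not shown in the hunks above):

free_vmcs:
	free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
	kfree(vmx->guest_msrs);
free_pml:
	vmx_destroy_pml_buffer(vmx);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	free_vpid(vmx->vpid);
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);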