Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e7d929103f4af6297f47d093017c3ead20c44c36..c5fd459c404367405f36d728951982322bc5ada6 100644
@@ -926,7 +926,6 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
  */
 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 
 /*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
@@ -2043,14 +2042,13 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
  */
 static unsigned long segment_base(u16 selector)
 {
-       struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
        struct desc_struct *table;
        unsigned long v;
 
        if (!(selector & ~SEGMENT_RPL_MASK))
                return 0;
 
-       table = (struct desc_struct *)gdt->address;
+       table = get_current_gdt_ro();
 
        if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
                u16 ldt_selector = kvm_read_ldt();
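
Note: get_current_gdt_ro() returns the read-only fixmap alias of the current
CPU's GDT as a struct desc_struct *, so segment_base() no longer needs a
locally saved desc_ptr. A minimal sketch of the GDT half of this lookup, under
the same assumptions as the hunk above (example_segment_base() is a
hypothetical name; get_desc_base() is the stock <asm/desc_defs.h> accessor):

static unsigned long example_segment_base(u16 selector)
{
	/* Read-only fixmap alias of this CPU's GDT. */
	struct desc_struct *table = get_current_gdt_ro();

	/* Selector bits 15:3 index the descriptor table. */
	return get_desc_base(&table[selector >> 3]);
}
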
@@ -2155,7 +2153,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #endif
        if (vmx->host_state.msr_host_bndcfgs)
                wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
-       load_gdt(this_cpu_ptr(&host_gdt));
+       load_fixmap_gdt(raw_smp_processor_id());
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
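
The GDTR restore now goes through load_fixmap_gdt() instead of reloading the
descriptor saved in the removed host_gdt per-CPU variable. The reload matters
because a VM exit restores GDTR.base from HOST_GDTR_BASE but forces the limit
to 0xffff, so the host must execute LGDT again to reinstate the proper limit.
As a sketch, the helper added by the same series in
arch/x86/include/asm/desc.h looks roughly like:

/* Load a fixmap remapping of the per-cpu GDT. */
static inline void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
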
@@ -2252,7 +2250,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
 
        if (!already_loaded) {
-               struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
+               void *gdt = get_current_gdt_ro();
                unsigned long sysenter_esp;
 
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2263,7 +2261,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 */
                vmcs_writel(HOST_TR_BASE,
                            (unsigned long)this_cpu_ptr(&cpu_tss));
-               vmcs_writel(HOST_GDTR_BASE, gdt->address);
+               vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
                /*
                 * VM exits change the host TR limit to 0x67 after a VM
@@ -3447,8 +3445,6 @@ static int hardware_enable(void)
        kvm_cpu_vmxon(phys_addr);
        ept_sync_global();
 
-       native_store_gdt(this_cpu_ptr(&host_gdt));
-
        return 0;
 }
 
@@ -8117,6 +8113,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
                return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
        case EXIT_REASON_PREEMPTION_TIMER:
                return false;
+       case EXIT_REASON_PML_FULL:
+               /* We don't expose PML support to L1. */
+               return false;
        default:
                return true;
        }
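
Because L1 is never offered the PML secondary execution control, a PML-full
event that fires while L2 runs must be handled by L0 rather than reflected.
Returning false here keeps the exit in L0; the surrounding dispatch in
vmx_handle_exit() is, in simplified form (existing code shown for context,
not part of this hunk):

	if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
		/* true: reflect the exit into L1 as a nested vmexit */
		nested_vmx_vmexit(vcpu, exit_reason,
				  vmcs_read32(VM_EXIT_INTR_INFO),
				  vmcs_readl(EXIT_QUALIFICATION));
		return 1;
	}
	/* false: L0 handles it, e.g. handle_pml_full() for EXIT_REASON_PML_FULL */
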
@@ -10163,6 +10162,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        }
 
+       if (enable_pml) {
+               /*
+                * Conceptually we want to copy the PML address and index from
+                * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+                * since we always flush the log on each vmexit, this happens
+                * to be equivalent to simply resetting the fields in vmcs02.
+                */
+               ASSERT(vmx->pml_pg);
+               vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+               vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+       }
+
        if (nested_cpu_has_ept(vmcs12)) {
                if (nested_ept_init_mmu_context(vcpu)) {
                        *entry_failure_code = ENTRY_FAIL_DEFAULT;
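
The equivalence claimed in the comment above rests on the PML log being
drained on every exit, so GUEST_PML_INDEX is always back at
PML_ENTITY_NUM - 1 (the empty mark of the 512-entry log) by the time vmcs02
is prepared. The drain side lives in vmx_flush_pml_buffer() elsewhere in this
file; a simplified sketch:

static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u16 pml_idx = vmcs_read16(GUEST_PML_INDEX);
	u64 *pml_buf;

	/* PML_ENTITY_NUM - 1 means the log is still empty. */
	if (pml_idx == PML_ENTITY_NUM - 1)
		return;

	/* The index points at the next free slot, counting down. */
	pml_idx = (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;

	/* Mark every logged GPA dirty, then declare the log empty again. */
	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++)
		kvm_vcpu_mark_page_dirty(vcpu, pml_buf[pml_idx] >> PAGE_SHIFT);

	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}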