KVM: x86: fix maintenance of guest/host xcr0 state
author Marcelo Tosatti <mtosatti@redhat.com>
Tue, 16 Apr 2013 02:30:13 +0000 (23:30 -0300)
committer Gleb Natapov <gleb@redhat.com>
Wed, 8 May 2013 09:47:43 +0000 (12:47 +0300)
Emulation of xcr0 writes zeroes the guest_xcr0_loaded variable so that a
subsequent VM-entry reloads the CPU's xcr0 with the guest's xcr0 value.

However, this is incorrect because the guest_xcr0_loaded variable is
read to decide whether to reload the host's xcr0.

If the vcpu thread is scheduled out after the guest_xcr0_loaded = 0
assignment and the scheduler decides to preload the FPU state:

switch_to
{
  __switch_to
    __math_state_restore
      restore_fpu_checking
        fpu_restore_checking
          if (use_xsave())
              fpu_xrstor_checking
xrstor64 with the CPU's xcr0 == guest's xcr0

Fix this by properly restoring the host's xcr0 during emulation of xcr0 writes.
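
The effect of the reordering can be illustrated with a small user-space
model (hypothetical names and example values only, not kernel code): the
"hardware" XCR0 is a plain variable, and xsetbv()/the context switch are
stand-ins for the real instructions. Clearing the flag alone leaves the
guest value live across a context switch; restoring host_xcr0 first, as
the kvm_put_guest_xcr0() call added below does, avoids that.

#include <stdio.h>
#include <stdint.h>

static uint64_t hw_xcr0;                 /* models the CPU's XCR0 register  */
static const uint64_t host_xcr0  = 0x7;  /* example host mask (x87|SSE|AVX) */
static const uint64_t guest_xcr0 = 0x3;  /* example guest mask (x87|SSE)    */
static int guest_xcr0_loaded;

static void xsetbv(uint64_t val) { hw_xcr0 = val; }

/* Mirrors kvm_put_guest_xcr0() from the hunk below. */
static void put_guest_xcr0(void)
{
        if (guest_xcr0_loaded) {
                if (guest_xcr0 != host_xcr0)
                        xsetbv(host_xcr0);
                guest_xcr0_loaded = 0;
        }
}

/* The scheduler preloading FPU state via xrstor at a context switch. */
static void context_switch(void)
{
        printf("xrstor with XCR0=%#llx (host expects %#llx)\n",
               (unsigned long long)hw_xcr0, (unsigned long long)host_xcr0);
}

int main(void)
{
        /* Guest xcr0 is live while the vcpu runs. */
        xsetbv(guest_xcr0);
        guest_xcr0_loaded = 1;

        /* Old behaviour: xcr0 write emulation only cleared the flag ...  */
        guest_xcr0_loaded = 0;
        context_switch();       /* ... so xrstor runs with the guest's XCR0 */

        /* Fixed behaviour: restore host_xcr0 before clearing the flag.   */
        xsetbv(guest_xcr0);
        guest_xcr0_loaded = 1;
        put_guest_xcr0();
        context_switch();       /* xrstor now runs with the host's XCR0    */
        return 0;
}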

Analyzed-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/x86.c

index 05a8b1a2300df0997d116e753506bf6e6ffa1fe2..094b5d96ab1468c1875a2a2a5e682245f2909db9 100644 (file)
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+                       !vcpu->guest_xcr0_loaded) {
+               /* kvm_set_xcr() also depends on this */
+               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+               vcpu->guest_xcr0_loaded = 1;
+       }
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->guest_xcr0_loaded) {
+               if (vcpu->arch.xcr0 != host_xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+               vcpu->guest_xcr0_loaded = 0;
+       }
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
        u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
+       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
-       vcpu->guest_xcr0_loaded = 0;
        return 0;
 }
 
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-                       !vcpu->guest_xcr0_loaded) {
-               /* kvm_set_xcr() also depends on this */
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-               vcpu->guest_xcr0_loaded = 1;
-       }
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->guest_xcr0_loaded) {
-               if (vcpu->arch.xcr0 != host_xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-               vcpu->guest_xcr0_loaded = 0;
-       }
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
        unsigned limit = 2;