Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a9a198303219b6430159af03d4d1e1d898ec6f7..bda65690788ebb5ec27bab1b934532cfd1034a25 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -663,9 +663,9 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
-       if (!(xcr0 & XSTATE_FP))
+       if (!(xcr0 & XFEATURE_MASK_FP))
                return 1;
-       if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+       if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return 1;
 
        /*
@@ -673,23 +673,24 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see fx_init).
         */
-       valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
+       valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;
 
-       if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
+       if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
+           (!(xcr0 & XFEATURE_MASK_BNDCSR)))
                return 1;
 
-       if (xcr0 & XSTATE_AVX512) {
-               if (!(xcr0 & XSTATE_YMM))
+       if (xcr0 & XFEATURE_MASK_AVX512) {
+               if (!(xcr0 & XFEATURE_MASK_YMM))
                        return 1;
-               if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+               if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }
        kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
 
-       if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
+       if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                kvm_update_cpuid(vcpu);
        return 0;
 }
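
The checks above encode the architectural XCR0 dependency rules: x87 state can never be disabled, AVX (YMM) builds on SSE, the two MPX components (BNDREGS/BNDCSR) enable together or not at all, and the three AVX-512 components are all-or-nothing on top of YMM. For reference, a stand-alone sketch of the same checker; the literal bit values are assumed to mirror the XFEATURE_MASK_* definitions in arch/x86/include/asm/fpu/types.h:

#include <stdbool.h>
#include <stdint.h>

#define MASK_FP        (1ULL << 0)
#define MASK_SSE       (1ULL << 1)
#define MASK_YMM       (1ULL << 2)
#define MASK_BNDREGS   (1ULL << 3)
#define MASK_BNDCSR    (1ULL << 4)
#define MASK_OPMASK    (1ULL << 5)
#define MASK_ZMM_Hi256 (1ULL << 6)
#define MASK_Hi16_ZMM  (1ULL << 7)
#define MASK_AVX512    (MASK_OPMASK | MASK_ZMM_Hi256 | MASK_Hi16_ZMM)

static bool xcr0_is_valid(uint64_t xcr0)
{
	if (!(xcr0 & MASK_FP))		/* x87 state is architecturally mandatory */
		return false;
	if ((xcr0 & MASK_YMM) && !(xcr0 & MASK_SSE))	/* AVX depends on SSE */
		return false;
	/* MPX: bound registers and BNDCSR state must be enabled together */
	if (!(xcr0 & MASK_BNDREGS) != !(xcr0 & MASK_BNDCSR))
		return false;
	if (xcr0 & MASK_AVX512) {
		if (!(xcr0 & MASK_YMM))		/* AVX-512 depends on AVX */
			return false;
		if ((xcr0 & MASK_AVX512) != MASK_AVX512)	/* all three or none */
			return false;
	}
	return true;
}
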
@@ -2905,7 +2906,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
         * Copy each region from the possibly compacted offset to the
         * non-compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
@@ -2943,7 +2944,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
         * Copy each region from the non-compacted offset to the
         * possibly compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
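
Both copy loops above rely on the same bit-iteration idiom: valid & -valid isolates the lowest set bit of the remaining feature mask, fls64() converts that single bit into a component index, and subtracting the bit retires it. A minimal user-space sketch of the pattern, substituting the GCC/Clang __builtin_clzll() for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* e.g. YMM plus the three AVX-512 components, FP/SSE already masked off */
	uint64_t valid = 0xE4;

	while (valid) {
		uint64_t feature = valid & -valid;		/* lowest set bit */
		int index = 63 - __builtin_clzll(feature);	/* fls64(feature) - 1 */

		printf("copying xstate component %d\n", index);	/* memcpy() in the real code */
		valid -= feature;				/* retire this bit */
	}
	return 0;
}
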
@@ -2971,7 +2972,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                        &vcpu->arch.guest_fpu.state.fxsave,
                        sizeof(struct fxregs_state));
                *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
-                       XSTATE_FPSSE;
+                       XFEATURE_MASK_FPSSE;
        }
 }
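
On hosts without XSAVE support, KVM_GET_XSAVE falls back to returning a raw FXSAVE image, so the handler fabricates the xstate header by hand: XSAVE_HDR_OFFSET is the 512-byte legacy FXSAVE area size, and the first u64 of the header (xstate_bv) is forced to FP|SSE. A sketch of that layout (the helper name here is illustrative, and the literal offsets are assumed to match the kernel headers):

#include <stdint.h>
#include <string.h>

#define FXSAVE_SIZE	512	/* legacy x87/SSE save area; the header follows it */
#define MASK_FPSSE	0x3ULL	/* XFEATURE_MASK_FP | XFEATURE_MASK_SSE */

/* Mirrors struct kvm_xsave from <linux/kvm.h>: a 4 KiB array of u32s. */
struct xsave_image {
	uint32_t region[1024];
};

static void fill_fxsave_only_image(struct xsave_image *img,
				   const void *fxregs /* 512-byte FXSAVE dump */)
{
	memset(img, 0, sizeof(*img));
	memcpy(img->region, fxregs, FXSAVE_SIZE);
	/* xstate_bv is the first u64 of the 64-byte xstate header, which
	 * starts immediately after the legacy area */
	*(uint64_t *)&img->region[FXSAVE_SIZE / sizeof(uint32_t)] = MASK_FPSSE;
}
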
 
@@ -2991,7 +2992,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
                        return -EINVAL;
                load_xsave(vcpu, (u8 *)guest_xsave->region);
        } else {
-               if (xstate_bv & ~XSTATE_FPSSE)
+               if (xstate_bv & ~XFEATURE_MASK_FPSSE)
                        return -EINVAL;
                memcpy(&vcpu->arch.guest_fpu.state.fxsave,
                        guest_xsave->region, sizeof(struct fxregs_state));
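
The -EINVAL paths above surface directly through the KVM_SET_XSAVE ioctl: an image whose xstate_bv carries bits the host cannot restore is rejected, and on non-XSAVE hosts anything beyond FP|SSE is refused. A minimal usage sketch from userspace, assuming vcpu_fd is an already-created KVM vCPU descriptor (error handling trimmed):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Round-trip a vCPU's XSAVE image through KVM; KVM_SET_XSAVE fails
 * with -EINVAL if the image's xstate_bv is not restorable here. */
static int roundtrip_xsave(int vcpu_fd)
{
	struct kvm_xsave xsave;

	memset(&xsave, 0, sizeof(xsave));
	if (ioctl(vcpu_fd, KVM_GET_XSAVE, &xsave) < 0)
		return -1;
	return ioctl(vcpu_fd, KVM_SET_XSAVE, &xsave);
}
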
@@ -7005,7 +7006,7 @@ static void fx_init(struct kvm_vcpu *vcpu)
        /*
         * Ensure guest xcr0 is valid for loading
         */
-       vcpu->arch.xcr0 = XSTATE_FP;
+       vcpu->arch.xcr0 = XFEATURE_MASK_FP;
 
        vcpu->arch.cr0 |= X86_CR0_ET;
 }