KVM: x86: advertise KVM_CAP_X86_SMM
author    Paolo Bonzini <pbonzini@redhat.com>
          Wed, 1 Apr 2015 12:25:33 +0000 (14:25 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 5 Jun 2015 15:26:38 +0000 (17:26 +0200)
... and we're done. :)

Because SMBASE is usually relocated above 1M on modern chipsets, and
SMM handlers might indeed rely on 4G segment limits, we only expose it
if KVM is able to run the guest in big real mode.  This includes any
of VMX+emulate_invalid_guest_state, VMX+unrestricted_guest, or SVM.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
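
The capability is visible to userspace through KVM_CHECK_EXTENSION. A minimal sketch of that probe (not part of this patch; it assumes a <linux/kvm.h> recent enough to define KVM_CAP_X86_SMM):

/*
 * Userspace sketch: probe the new capability on /dev/kvm.
 * Assumes <linux/kvm.h> already defines KVM_CAP_X86_SMM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm, smm;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* 1 when KVM can run the guest in big real mode, 0 otherwise. */
	smm = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM);
	printf("KVM_CAP_X86_SMM: %d\n", smm);
	return 0;
}

A result of 0 on an Intel host simply means kvm_intel was loaded with neither unrestricted_guest nor emulate_invalid_guest_state, as the vmx.c hunk below spells out.
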
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 47006683f2fef4994aac9430bc7a5a3c3850097c..8ca32cfbcbd8a8ed55c4996cc490334a34ae4489 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -709,6 +709,7 @@ struct kvm_x86_ops {
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
+       bool (*cpu_has_high_real_mode_segbase)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
        /* Create, but do not attach this VCPU */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6ff1faf4a2e8bb1e6378a1d0b3f5c7a7eea85bb6..68075318648974b0d56c6f64ad296dd253ca26c6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4080,6 +4080,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
        return false;
 }
 
+static bool svm_has_high_real_mode_segbase(void)
+{
+       return true;
+}
+
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
        return 0;
@@ -4353,6 +4358,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
 
        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 862fa8f2c61d93a5f2a315d5bec852232ec323d6..06a186b07631d8774e1b9a407f006b686b2783be 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8139,6 +8139,11 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                local_irq_enable();
 }
 
+static bool vmx_has_high_real_mode_segbase(void)
+{
+       return enable_unrestricted_guest || emulate_invalid_guest_state;
+}
+
 static bool vmx_mpx_supported(void)
 {
        return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
@@ -10296,6 +10301,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,
+       .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
 
        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
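
On the VMX side the new hook returns true only when unrestricted_guest or emulate_invalid_guest_state is enabled. Both are ordinary kvm_intel module parameters, so whether the capability will be advertised on a given host can be checked from sysfs before even opening /dev/kvm. A rough sketch, assuming the parameters are exported read-only under /sys/module/kvm_intel/parameters/ as they normally are:

/*
 * Sketch only: read the kvm_intel module parameters that feed
 * vmx_has_high_real_mode_segbase().  Assumes the usual read-only
 * sysfs export of these bool parameters (shown as 'Y'/'N').
 */
#include <stdio.h>

static int read_bool_param(const char *name)
{
	char path[128], c = 'N';
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/module/kvm_intel/parameters/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* parameter (or kvm_intel) not present */
	if (fscanf(f, "%c", &c) != 1)
		c = 'N';
	fclose(f);
	return c == 'Y' || c == '1';
}

int main(void)
{
	int unres = read_bool_param("unrestricted_guest");
	int emul  = read_bool_param("emulate_invalid_guest_state");

	printf("unrestricted_guest=%d emulate_invalid_guest_state=%d\n",
	       unres, emul);
	printf("KVM_CAP_X86_SMM expected on this VMX host: %d\n",
	       unres > 0 || emul > 0);
	return 0;
}

On AMD hosts the question does not arise: svm_has_high_real_mode_segbase() above unconditionally returns true.
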
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7489871b63dfd7819a1bb666e0b2fb57cf30ab05..43f0df7ddc9ccd19d963ca5e383fc019aa33ec42 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2900,6 +2900,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
                r = 1;
                break;
+       case KVM_CAP_X86_SMM:
+               /* SMBASE is usually relocated above 1M on modern chipsets,
+                * and SMM handlers might indeed rely on 4G segment limits,
+                * so do not report SMM to be available if real mode is
+                * emulated via vm86 mode.  Still, do not go to great lengths
+                * to avoid userspace's usage of the feature, because it is a
+                * fringe case that is not enabled except via specific settings
+                * of the module parameters.
+                */
+               r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+               break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
@@ -4299,6 +4310,10 @@ static void kvm_init_msr_list(void)
 
        for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
                switch (emulated_msrs[i]) {
+               case MSR_IA32_SMBASE:
+                       if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+                               continue;
+                       break;
                default:
                        break;
                }
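
The final hunk keeps MSR_IA32_SMBASE out of the emulated MSR list when the hook returns false, so userspace that enumerates MSRs through KVM_GET_MSR_INDEX_LIST only sees SMBASE on hosts that also report KVM_CAP_X86_SMM. A hedged sketch of that enumeration, using the usual two-call pattern (the MSR_IA32_SMBASE value below is the one from msr-index.h):

/*
 * Sketch only: list the MSR indices KVM reports via
 * KVM_GET_MSR_INDEX_LIST and look for MSR_IA32_SMBASE.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_SMBASE 0x0000009e	/* as defined in msr-index.h */

int main(void)
{
	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;
	unsigned int i;
	int kvm;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0)
		return 1;

	/* First call fails with E2BIG but reports how many MSRs there are. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0)
		return 1;

	for (i = 0; i < list->nmsrs; i++)
		if (list->indices[i] == MSR_IA32_SMBASE)
			printf("MSR_IA32_SMBASE is saved/restored by KVM\n");

	free(list);
	return 0;
}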