KVM: MMU: introduce the framework to check zero bits on sptes
author		Xiao Guangrong <guangrong.xiao@linux.intel.com>
		Wed, 5 Aug 2015 04:04:24 +0000 (12:04 +0800)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Wed, 5 Aug 2015 10:47:24 +0000 (12:47 +0200)
We have abstracted the data struct and functions used to check
reserved bits on guest page tables; now we extend the logic to check
zero bits on shadow page tables.

The zero bits on sptes include not only the bits reserved by hardware
but also the bits that SPTEs will never use.  For example, shadow pages
will never use GB pages unless the guest uses them too.
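
As an illustration (a sketch only, not code from this patch; rsvd_bits(),
boot_cpu_data and guest_cpuid_has_gbpages() are the helpers used in the
diff below), the zero bits for a PDPTE-level spte could be built like this:

	/* hardware reserved bits: [MAXPHYADDR, 51] */
	u64 zero_bits = rsvd_bits(boot_cpu_data.x86_phys_bits, 51);

	/*
	 * If the guest cannot use 1GB pages, the PS bit (bit 7) is a
	 * zero bit too: shadow pages will never map a GB page then.
	 */
	if (!guest_cpuid_has_gbpages(vcpu))
		zero_bits |= 1ull << 7;

	WARN_ON(spte & zero_bits);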

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/svm.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 847b37cbf211f0abb62c3c54d3c9c88d7625c1cf..c12e845f59e6b40c5afb62d8ecf1e08f22dce4db 100644
@@ -294,6 +294,14 @@ struct kvm_mmu {
 
        u64 *pae_root;
        u64 *lm_root;
+
+       /*
+        * Check zero bits on shadow page table entries: these
+        * bits include not only hardware reserved bits but also
+        * bits that sptes never use.
+        */
+       struct rsvd_bits_validate shadow_zero_check;
+
        struct rsvd_bits_validate guest_rsvd_check;
 
        /*
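
For reference, this reuses the rsvd_bits_validate struct that the earlier
patches in this series introduced for guest_rsvd_check; its shape is roughly
the following (a sketch, not part of this diff):

	struct rsvd_bits_validate {
		/*
		 * Indexed by [bit 7 of the pte][level - 1]: the bits
		 * that must be zero in a pte at that page table level;
		 * index 0/1 distinguishes the non-large/large page
		 * format of the entry.
		 */
		u64 rsvd_bits_mask[2][4];

		/* EPT only: bitmap of bad memtype/XWR combinations */
		u64 bad_mt_xwr;
	};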
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a965f3f58586b4fd40d2455974da5be9c27db920..2e3e6454d9777eddd755ce6a2e8001665f8603af 100644
@@ -3699,6 +3699,53 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
                                    cpuid_maxphyaddr(vcpu), execonly);
 }
 
+/*
+ * The page table on the host is the shadow page table for the page
+ * table in the guest or the AMD nested guest: its mmu features
+ * completely follow the features of the guest.
+ */
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+{
+       __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+                               boot_cpu_data.x86_phys_bits,
+                               context->shadow_root_level, context->nx,
+                               guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+}
+EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+
+/*
+ * The direct page table on the host uses as many mmu features as
+ * possible; however, kvm currently does not do execution-protection.
+ */
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+                               struct kvm_mmu *context)
+{
+       if (guest_cpuid_is_amd(vcpu))
+               __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+                                       boot_cpu_data.x86_phys_bits,
+                                       context->shadow_root_level, false,
+                                       cpu_has_gbpages, true);
+       else
+               __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+                                           boot_cpu_data.x86_phys_bits,
+                                           false);
+}
+
+/*
+ * As the comment in reset_shadow_zero_bits_mask() describes, except
+ * that this is the shadow page table for the Intel nested guest.
+ */
+static void
+reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+                               struct kvm_mmu *context, bool execonly)
+{
+       __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+                                   boot_cpu_data.x86_phys_bits, execonly);
+}
+
 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *mmu, bool ept)
 {
@@ -3877,6 +3924,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
        update_permission_bitmask(vcpu, context, false);
        update_last_pte_bitmap(vcpu, context);
+       reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3904,6 +3952,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
        context->base_role.smap_andnot_wp
                = smap && !is_write_protection(vcpu);
        context->base_role.smm = is_smm(vcpu);
+       reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -3927,6 +3976,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
 
        update_permission_bitmask(vcpu, context, true);
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+       reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
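Nothing in this patch consumes shadow_zero_check yet; a follow-up change is
expected to reject sptes that set any of the computed zero bits.  A minimal
sketch of such a checker (hypothetical name, not from this commit):

	static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte,
					    int level)
	{
		/* bit 7 selects the large-page variant of the mask */
		int bit7 = (spte >> 7) & 1;

		return spte &
		       mmu->shadow_zero_check.rsvd_bits_mask[bit7][level - 1];
	}
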
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 398d21c0f6dd05273fea48598bef016e1c26b51c..2299d15c91dcfba7d3dc2ea0c32ab172436fdd4d 100644
@@ -53,6 +53,9 @@ static inline u64 rsvd_bits(int s, int e)
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
 
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+
 /*
  * Return values of handle_mmio_page_fault_common:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1a114d8d22b7f2c797883784f8211c202e5dafe..74d825716f4fc158af4d52cd860bf63bbb8ccdf3 100644
@@ -2107,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu.shadow_root_level = get_npt_level();
+       reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
        vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
 }