KVM: MMU: allow more pages to become unsync at getting sp time
author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
          Mon, 24 May 2010 07:41:33 +0000 (15:41 +0800)
committer Avi Kivity <avi@redhat.com>
          Sun, 1 Aug 2010 07:35:52 +0000 (10:35 +0300)
Allow more pages to become unsync at getting sp time: if we need to create a
new shadow page for a gfn at a level that does not allow unsync (level > 1),
we should first sync all of that gfn's unsync pages.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
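
Before the diff, a quick illustration. The sketch below is a stand-alone
user-space model of the change, not kernel code; every name in it (toy_sp,
toy_sync_pages, toy_get_page, MAX_SP) is invented for illustration. It shows
the shape of the new logic: note whether the gfn already has unsync shadow
pages while scanning the hash bucket, and if a new page above
PT_PAGE_TABLE_LEVEL must be created, sync all of the gfn's unsync pages first.

/*
 * Stand-alone model of the idea in this patch -- purely illustrative,
 * not kernel code; all names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SP			8
#define PT_PAGE_TABLE_LEVEL	1

struct toy_sp {
	unsigned long gfn;	/* guest frame this page shadows */
	int level;		/* 1 = last-level page table */
	bool unsync;		/* guest may write it without a fault */
};

static struct toy_sp pages[MAX_SP];
static int nr_pages;

/* Mirrors kvm_sync_pages(): resync every unsync page for @gfn. */
static void toy_sync_pages(unsigned long gfn)
{
	bool flush = false;
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i].gfn != gfn || !pages[i].unsync)
			continue;
		pages[i].unsync = false;	/* kvm_unlink_unsync_page() */
		flush = true;
	}
	if (flush)
		printf("flush TLB for gfn %lu\n", gfn);	/* kvm_mmu_flush_tlb() */
}

/* Mirrors the kvm_mmu_get_page() hunk: sync before creating a high-level sp. */
static struct toy_sp *toy_get_page(unsigned long gfn, int level)
{
	bool need_sync = false;
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pages[i].gfn == gfn && pages[i].unsync)
			need_sync = true;

	/* Pages above level 1 may not coexist with unsync pages for @gfn. */
	if (level > PT_PAGE_TABLE_LEVEL && need_sync)
		toy_sync_pages(gfn);

	pages[nr_pages] = (struct toy_sp){ .gfn = gfn, .level = level };
	return &pages[nr_pages++];
}

int main(void)
{
	/* gfn 42 already has an unsync last-level shadow page ... */
	pages[nr_pages++] = (struct toy_sp){ .gfn = 42, .level = 1, .unsync = true };
	/* ... so creating a level-2 page for it forces a sync + flush. */
	toy_get_page(42, 2);
	return 0;
}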
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ba119dae890ebf75f40bc63a479e18804a572bc8..07673487fd5d5daf723195cd7ad1f4aaf61bdf22 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1220,6 +1220,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        return __kvm_sync_page(vcpu, sp, true);
 }
 
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
+{
+       struct hlist_head *bucket;
+       struct kvm_mmu_page *s;
+       struct hlist_node *node, *n;
+       unsigned index;
+       bool flush = false;
+
+       index = kvm_page_table_hashfn(gfn);
+       bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+       hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+               if (s->gfn != gfn || !s->unsync || s->role.invalid)
+                       continue;
+
+               WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+               if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+                       (vcpu->arch.mmu.sync_page(vcpu, s))) {
+                       kvm_mmu_zap_page(vcpu->kvm, s);
+                       continue;
+               }
+               kvm_unlink_unsync_page(vcpu->kvm, s);
+               flush = true;
+       }
+
+       if (flush)
+               kvm_mmu_flush_tlb(vcpu);
+}
+
 struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
        unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1318,8 +1347,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
-       struct kvm_mmu_page *sp, *unsync_sp = NULL;
+       struct kvm_mmu_page *sp;
        struct hlist_node *node, *tmp;
+       bool need_sync = false;
 
        role = vcpu->arch.mmu.base_role;
        role.level = level;
@@ -1336,17 +1366,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
                if (sp->gfn == gfn) {
-                       if (sp->unsync)
-                               unsync_sp = sp;
+                       if (!need_sync && sp->unsync)
+                               need_sync = true;
 
                        if (sp->role.word != role.word)
                                continue;
 
-                       if (!direct && unsync_sp &&
-                             kvm_sync_page_transient(vcpu, unsync_sp)) {
-                               unsync_sp = NULL;
+                       if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
                                break;
-                       }
 
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        if (sp->unsync_children) {
@@ -1358,9 +1385,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        trace_kvm_mmu_get_page(sp, false);
                        return sp;
                }
-       if (!direct && unsync_sp)
-               kvm_sync_page(vcpu, unsync_sp);
-
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!sp)
@@ -1371,6 +1395,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
+               if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+                       kvm_sync_pages(vcpu, gfn);
+
                account_shadowed(vcpu->kvm, gfn);
        }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
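
A note on the invariant this relies on: the WARN_ON() in kvm_sync_pages()
reflects that only last-level (PT_PAGE_TABLE_LEVEL) shadow pages are ever
marked unsync; a higher-level page's sptes point at other shadow pages, so it
can never be left writable by the guest. That is why the sync in
kvm_mmu_get_page() is only needed when creating a page with
level > PT_PAGE_TABLE_LEVEL.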