KVM: Drop FOLL_GET in GUP when doing async page fault
author    chai wen <chaiw.fnst@cn.fujitsu.com>
          Mon, 14 Oct 2013 14:22:33 +0000 (22:22 +0800)
committer Gleb Natapov <gleb@redhat.com>
          Tue, 15 Oct 2013 10:43:37 +0000 (13:43 +0300)
Page pinning is not mandatory in kvm async page fault processing: after
the async page fault event is delivered to the guest, the guest accesses
the page again and does its own GUP.  Drop the FOLL_GET flag from the GUP
call in the async_pf code, and simplify the check/clear processing
accordingly.

Suggested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/x86.c
include/linux/kvm_host.h
include/trace/events/kvm.h
virt/kvm/async_pf.c
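
Why passing a NULL pages array is enough to drop FOLL_GET: in kernels of
this vintage, get_user_pages() only adds FOLL_GET when the caller actually
asks for the struct page pointers back. A paraphrased sketch of that flag
selection (simplified, not the verbatim mm code of the time):

    /*
     * Paraphrased sketch of get_user_pages() flag selection circa v3.12
     * (not the exact kernel source): the page reference (FOLL_GET) is
     * only taken when the caller passes a non-NULL pages array, which is
     * why async_pf_execute() can drop the pin simply by passing NULL.
     */
    long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        unsigned long start, unsigned long nr_pages,
                        int write, int force, struct page **pages,
                        struct vm_area_struct **vmas)
    {
            int flags = FOLL_TOUCH;

            if (pages)
                    flags |= FOLL_GET;   /* pin only if caller wants the pages back */
            if (write)
                    flags |= FOLL_WRITE;
            if (force)
                    flags |= FOLL_FORCE;

            return __get_user_pages(tsk, mm, start, nr_pages, flags,
                                    pages, vmas, NULL);
    }

With pages == NULL the fault is still resolved and the page tables are
populated, but no reference is taken, so async_pf no longer has to release
pages in the check/clear paths -- which is what the virt/kvm/async_pf.c
hunks below remove.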

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c951c71dc80bda7b4ff03c9d526e19d4c6151144..edf2a07df3a3e01ba42b85d2e2dc1d1bc146d11e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7298,7 +7298,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
        int r;
 
        if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
-             is_error_page(work->page))
+             work->wakeup_all)
                return;
 
        r = kvm_mmu_reload(vcpu);
@@ -7408,7 +7408,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
        struct x86_exception fault;
 
        trace_kvm_async_pf_ready(work->arch.token, work->gva);
-       if (is_error_page(work->page))
+       if (work->wakeup_all)
                work->arch.token = ~0; /* broadcast wakeup */
        else
                kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f6dccde755f675672cb915f620679f62876eaaed..c9d4236ab442d0ba90f3b96dd520d87f1ec59a7c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -189,7 +189,7 @@ struct kvm_async_pf {
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
-       struct page *page;
+       bool   wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7005d1109ec94c8c2839bbff6984395e20e8a873..131a0bda7aecec634b61ac72078d07d31eb1602c 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
 TRACE_EVENT(
        kvm_async_pf_completed,
-       TP_PROTO(unsigned long address, struct page *page, u64 gva),
-       TP_ARGS(address, page, gva),
+       TP_PROTO(unsigned long address, u64 gva),
+       TP_ARGS(address, gva),
 
        TP_STRUCT__entry(
                __field(unsigned long, address)
-               __field(pfn_t, pfn)
                __field(u64, gva)
                ),
 
        TP_fast_assign(
                __entry->address = address;
-               __entry->pfn = page ? page_to_pfn(page) : 0;
                __entry->gva = gva;
                ),
 
-       TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
-                 __entry->address, __entry->pfn)
+       TP_printk("gva %#llx address %#lx",  __entry->gva,
+                 __entry->address)
 );
 
 #endif
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index b197950ac4d5893c146590de3e1b13ab60cc631d..8631d9c14320bea69b4e9713013ab54b2e3752be 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-       struct page *page = NULL;
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
        use_mm(mm);
        down_read(&mm->mmap_sem);
-       get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+       get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        unuse_mm(mm);
 
        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
-       apf->page = page;
        spin_unlock(&vcpu->async_pf.lock);
 
        /*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
         * this point
         */
 
-       trace_kvm_async_pf_completed(addr, page, gva);
+       trace_kvm_async_pf_completed(addr, gva);
 
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                        list_entry(vcpu->async_pf.done.next,
                                   typeof(*work), link);
                list_del(&work->link);
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);
 
-               if (work->page)
-                       kvm_arch_async_page_ready(vcpu, work);
+               kvm_arch_async_page_ready(vcpu, work);
                kvm_arch_async_page_present(vcpu, work);
 
                list_del(&work->queue);
                vcpu->async_pf.queued--;
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
        if (!work)
                return 0;
 
-       work->page = NULL;
+       work->wakeup_all = false;
        work->vcpu = vcpu;
        work->gva = gva;
        work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
        if (!work)
                return -ENOMEM;
 
-       work->page = KVM_ERR_PTR_BAD_PAGE;
+       work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
        spin_lock(&vcpu->async_pf.lock);