Merge remote-tracking branch 'kvm/linux-next'
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8a39dda7a3254677df5996c07c965a2f9f5ef8a8..8631d9c14320bea69b4e9713013ab54b2e3752be 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-       struct page *page = NULL;
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
@@ -68,14 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
        use_mm(mm);
        down_read(&mm->mmap_sem);
-       get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+       get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        unuse_mm(mm);
 
        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
-       apf->page = page;
-       apf->done = true;
        spin_unlock(&vcpu->async_pf.lock);
 
        /*
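
Note: the worker no longer keeps the page it faults in. With a NULL pages array, get_user_pages() drops FOLL_GET, so the fault is resolved in the guest's mm but no referenced struct page is handed back, and the apf->page/apf->done bookkeeping above becomes unnecessary; queuing on vcpu->async_pf.done is now the completion signal. A sketch of the call with its (pre-4.6) parameters labelled; the labels are annotation, not part of the diff:

	get_user_pages(current, mm,	/* task, mm to fault in */
		       addr, 1,		/* start address, nr_pages = 1 */
		       1, 0,		/* write = 1, force = 0 */
		       NULL, NULL);	/* no pages[] (=> no FOLL_GET), no vmas[] */
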
@@ -83,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
         * this point
         */
 
-       trace_kvm_async_pf_completed(addr, page, gva);
+       trace_kvm_async_pf_completed(addr, gva);
 
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
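
Note: the tracepoint loses its page argument along with the rest of the page tracking. A sketch of the matching definition in include/trace/events/kvm.h, reconstructed from the new call site; that header is not part of this blobdiff, so treat the exact form as an assumption:

	TRACE_EVENT(kvm_async_pf_completed,
		TP_PROTO(unsigned long address, u64 gva),
		TP_ARGS(address, gva),

		TP_STRUCT__entry(
			__field(unsigned long, address)
			__field(u64, gva)
		),

		TP_fast_assign(
			__entry->address = address;
			__entry->gva = gva;
		),

		TP_printk("gva %#llx address %#lx",
			  __entry->gva, __entry->address)
	);
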
@@ -99,9 +96,8 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                struct kvm_async_pf *work =
                        list_entry(vcpu->async_pf.queue.next,
                                   typeof(*work), queue);
-               cancel_work_sync(&work->work);
                list_del(&work->queue);
-               if (!work->done) { /* work was canceled */
+               if (cancel_work_sync(&work->work)) {
                        mmdrop(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
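
Note: the separate work->done flag is replaced by the return value of cancel_work_sync() (linux/workqueue.h), which already encodes the same information:

	bool cancel_work_sync(struct work_struct *work);
	/* true  - @work was pending and is now cancelled; the handler
	 *         (here async_pf_execute()) never ran
	 * false - @work already ran, or was idle */

A true return therefore means the worker's own cleanup (the mmdrop()/kvm_put_kvm() it performs on completion, plus freeing via the done list) never happened, which is exactly the case where this branch must release those references itself.
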
@@ -114,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                        list_entry(vcpu->async_pf.done.next,
                                   typeof(*work), link);
                list_del(&work->link);
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);
@@ -135,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);
 
-               if (work->page)
-                       kvm_arch_async_page_ready(vcpu, work);
+               kvm_arch_async_page_ready(vcpu, work);
                kvm_arch_async_page_present(vcpu, work);
 
                list_del(&work->queue);
                vcpu->async_pf.queued--;
-               if (!is_error_page(work->page))
-                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
 }
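
Note: kvm_arch_async_page_ready() is now called unconditionally; telling a real completion apart from the broadcast wakeup moves into the arch hooks via the new flag. A hedged sketch of the shape this takes, loosely modelled on the x86 handler; the body is illustrative and not part of this blobdiff:

	void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
				       struct kvm_async_pf *work)
	{
		if (work->wakeup_all)	/* was: is_error_page(work->page) */
			return;		/* bare wakeup, no page to map */
		/* ... otherwise prefault work->gva so the guest can retry ... */
	}
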
@@ -165,8 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
        if (!work)
                return 0;
 
-       work->page = NULL;
-       work->done = false;
+       work->wakeup_all = false;
        work->vcpu = vcpu;
        work->gva = gva;
        work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -206,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
        if (!work)
                return -ENOMEM;
 
-       work->page = KVM_ERR_PTR_BAD_PAGE;
+       work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
        spin_lock(&vcpu->async_pf.lock);
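
Note: taken together, the two producers (kvm_setup_async_pf() setting false, kvm_async_pf_wakeup_all() setting true) show what replaced the page/done pair in struct kvm_async_pf: a single wakeup_all flag standing in for the old KVM_ERR_PTR_BAD_PAGE sentinel page. A sketch of the resulting struct in include/linux/kvm_host.h; the surrounding fields follow the mainline header of the period and are not shown in this blobdiff, so treat the exact layout as an assumption:

	struct kvm_async_pf {
		struct work_struct work;
		struct list_head link;
		struct list_head queue;
		struct kvm_vcpu *vcpu;
		struct mm_struct *mm;
		gva_t gva;
		unsigned long addr;
		struct kvm_arch_async_pf arch;
		bool wakeup_all;	/* replaces struct page *page + bool done */
	};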