Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f5416934932b17078fb60fdd4b0302df9e7c0e62..9de24f8e03c71b44e0407b65bbe137c3506650e3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+       int me;
+       int cpu = vcpu->cpu;
+       wait_queue_head_t *wqp;
+
+       wqp = kvm_arch_vcpu_wq(vcpu);
+       if (waitqueue_active(wqp)) {
+               wake_up_interruptible(wqp);
+               ++vcpu->stat.halt_wakeup;
+       }
+
+       me = get_cpu();
+
+       /* CPU points to the first thread of the core */
+       if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+               int real_cpu = cpu + vcpu->arch.ptid;
+               if (paca[real_cpu].kvm_hstate.xics_phys)
+                       xics_wake_cpu(real_cpu);
+               else if (cpu_online(cpu))
+                       smp_send_reschedule(cpu);
+       }
+       put_cpu();
+}
+
 /*
  * We use the vcpu_load/put functions to measure stolen time.
  * Stolen time is counted as time when either the vcpu is able to
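The new kvmppc_fast_vcpu_kick() first wakes the vcpu if it is sleeping on its wait queue, then prods the hardware thread it is running on: vcpu->cpu names the first thread of the core, so the vcpu's ptid (its thread offset within the core) is added to find the physical target, which is poked over the XICS IPI path when available and with an ordinary reschedule IPI otherwise. A minimal userspace sketch of that targeting logic, with the kernel primitives stubbed out and the cpu_online() check omitted (cpu_has_xics, xics_wake_cpu and smp_send_reschedule here are stand-ins, not the real interfaces):

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPU_IDS 64

    /* Stand-ins for the kernel primitives the kick path uses. */
    static bool cpu_has_xics(int cpu)        { return cpu < 32; }  /* fake paca[].kvm_hstate.xics_phys test */
    static void xics_wake_cpu(int cpu)       { printf("XICS IPI    -> hw thread %d\n", cpu); }
    static void smp_send_reschedule(int cpu) { printf("resched IPI -> cpu %d\n", cpu); }

    /*
     * vcpu->cpu names the first hardware thread of the core the virtual
     * core runs on; ptid is the vcpu's thread offset within that core,
     * so the physical target is their sum.
     */
    static void fast_kick(int vcpu_cpu, int ptid, int me)
    {
            if (vcpu_cpu != me && vcpu_cpu >= 0 && vcpu_cpu < NR_CPU_IDS) {
                    int real_cpu = vcpu_cpu + ptid;

                    if (cpu_has_xics(real_cpu))
                            xics_wake_cpu(real_cpu);        /* poke the exact hw thread */
                    else
                            smp_send_reschedule(vcpu_cpu);  /* generic fallback IPI */
            }
    }

    int main(void)
    {
            fast_kick(8, 3, 0);    /* XICS available: wake hw thread 11 */
            fast_kick(40, 2, 0);   /* no XICS: reschedule IPI to cpu 40 */
            fast_kick(0, 0, 0);    /* target is the calling cpu: nothing to do */
            return 0;
    }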
@@ -259,7 +284,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                        len = ((struct reg_vpa *)va)->length.hword;
                else
                        len = ((struct reg_vpa *)va)->length.word;
-               kvmppc_unpin_guest_page(kvm, va);
+               kvmppc_unpin_guest_page(kvm, va, vpa, false);
 
                /* Check length */
                if (len > nb || len < sizeof(struct reg_vpa))
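kvmppc_unpin_guest_page() now takes the guest physical address and a dirty flag, so the unpin path can set the page's bit in the memslot dirty bitmap and live migration will recopy pages the host wrote behind the guest's back. A toy sketch of that pin/write/unpin-dirty pattern (pin_page, unpin_page and the single-word bitmap are invented for illustration):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define NPAGES     64

    static uint64_t dirty_bitmap;   /* one bit per guest page, as a memslot would keep */
    static char     guest_ram[NPAGES << PAGE_SHIFT];

    /* Toy pin: translate a guest physical address to a host pointer. */
    static void *pin_page(uint64_t gpa)
    {
            return &guest_ram[gpa];
    }

    /*
     * Toy unpin: the caller says whether it may have written the page;
     * only then is the page flagged for the next dirty-log harvest.
     */
    static void unpin_page(void *va, uint64_t gpa, bool dirty)
    {
            (void)va;
            if (dirty)
                    dirty_bitmap |= 1ULL << (gpa >> PAGE_SHIFT);
    }

    int main(void)
    {
            uint64_t gpa = 5 << PAGE_SHIFT;
            char *va = pin_page(gpa);

            va[0] = 42;                   /* host-side write to guest memory */
            unpin_page(va, gpa, true);    /* report it so migration recopies the page */

            printf("dirty bitmap: %#llx\n", (unsigned long long)dirty_bitmap);
            return 0;
    }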
@@ -359,13 +384,13 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
                va = NULL;
                nb = 0;
                if (gpa)
-                       va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+                       va = kvmppc_pin_guest_page(kvm, gpa, &nb);
                spin_lock(&vcpu->arch.vpa_update_lock);
                if (gpa == vpap->next_gpa)
                        break;
                /* sigh... unpin that one and try again */
                if (va)
-                       kvmppc_unpin_guest_page(kvm, va);
+                       kvmppc_unpin_guest_page(kvm, va, gpa, false);
        }
 
        vpap->update_pending = 0;
@@ -375,12 +400,15 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
                 * has changed the mappings underlying guest memory,
                 * so unregister the region.
                 */
-               kvmppc_unpin_guest_page(kvm, va);
+               kvmppc_unpin_guest_page(kvm, va, gpa, false);
                va = NULL;
        }
        if (vpap->pinned_addr)
-               kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+               kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
+                                       vpap->dirty);
+       vpap->gpa = gpa;
        vpap->pinned_addr = va;
+       vpap->dirty = false;
        if (va)
                vpap->pinned_end = va + vpap->len;
 }
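kvmppc_update_vpa() cannot call kvmppc_pin_guest_page() while holding vpa_update_lock (pinning may fault and sleep), so it drops the lock, pins, retakes the lock, and checks that next_gpa is still the address it pinned; if another update raced in, it unpins and retries. A compressed sketch of that drop-lock/revalidate pattern, with a pthread mutex standing in for the spinlock and pin_page/unpin_page as assumed helpers:

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t next_gpa;                /* other threads may change this */

    void *pin_page(uint64_t gpa);            /* assumed helper: may sleep, call unlocked */
    void unpin_page(void *va, uint64_t gpa, int dirty);

    /* Called with update_lock held; returns with it held and the pin stable. */
    void *pin_stable(void)
    {
            for (;;) {
                    uint64_t gpa = next_gpa;
                    void *va;

                    pthread_mutex_unlock(&update_lock);   /* can't pin under the lock */
                    va = gpa ? pin_page(gpa) : NULL;
                    pthread_mutex_lock(&update_lock);
                    if (gpa == next_gpa)                  /* no race: keep this pin */
                            return va;
                    if (va)                               /* raced: drop it and retry */
                            unpin_page(va, gpa, 0);
            }
    }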
@@ -472,6 +500,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        /* order writing *dt vs. writing vpa->dtl_idx */
        smp_wmb();
        vpa->dtl_idx = ++vcpu->arch.dtl_index;
+       vcpu->arch.dtl.dirty = true;
 }
 
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
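The dispatch trace log is a single-producer ring read by the guest: the entry body must become visible before the incremented dtl_idx that publishes it, which is what the smp_wmb() enforces, and the new dirty mark ensures migration recopies the DTL page. In portable C11 the same publish step is a release store of the index; a minimal sketch (the entry layout here is invented):

    #include <stdatomic.h>
    #include <stdint.h>

    struct dtl_entry { uint64_t timebase; uint64_t enqueue_to_dispatch; };  /* invented layout */

    #define DTL_SLOTS 16

    static struct dtl_entry dtl_ring[DTL_SLOTS];
    static _Atomic uint64_t dtl_idx;         /* the consumer polls this index */

    void dtl_publish(const struct dtl_entry *e)
    {
            uint64_t idx = atomic_load_explicit(&dtl_idx, memory_order_relaxed);

            dtl_ring[idx % DTL_SLOTS] = *e;  /* write the entry body first */

            /*
             * Release ordering plays the role of smp_wmb(): a reader that
             * observes the new index is guaranteed to see the entry body.
             */
            atomic_store_explicit(&dtl_idx, idx + 1, memory_order_release);
    }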
@@ -479,7 +508,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
        struct kvm_vcpu *tvcpu;
-       int idx;
+       int idx, rc;
 
        switch (req) {
        case H_ENTER:
@@ -515,6 +544,28 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                                        kvmppc_get_gpr(vcpu, 5),
                                        kvmppc_get_gpr(vcpu, 6));
                break;
+       case H_RTAS:
+               if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+                       return RESUME_HOST;
+
+               rc = kvmppc_rtas_hcall(vcpu);
+
+               if (rc == -ENOENT)
+                       return RESUME_HOST;
+               else if (rc == 0)
+                       break;
+
+               /* Send the error out to userspace via KVM_RUN */
+               return rc;
+
+       case H_XIRR:
+       case H_CPPR:
+       case H_EOI:
+       case H_IPI:
+               if (kvmppc_xics_enabled(vcpu)) {
+                       ret = kvmppc_xics_hcall(vcpu, req);
+                       break;
+               } /* fallthrough */
        default:
                return RESUME_HOST;
        }
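The new cases follow kvmppc_pseries_do_hcall()'s existing contract: handle the hypercall in the kernel when possible (RTAS once userspace has registered tokens, the XICS interrupt-controller calls when the in-kernel XICS is enabled) and return RESUME_HOST to bounce everything else out to userspace via KVM_RUN. A sketch of that three-way dispatch shape, with toy hcall numbers and assumed helpers in place of the real ones:

    #include <stdbool.h>

    enum { RESUME_GUEST = 1, RESUME_HOST = 2 };
    enum { HC_RTAS = 1, HC_XIRR, HC_CPPR, HC_EOI, HC_IPI };  /* toy hcall numbers */

    bool rtas_tokens_registered(void);       /* assumed helpers */
    bool xics_in_kernel(void);
    int  handle_rtas(void);                  /* 0 on success, <0 on error */
    long handle_xics(int req);

    long guest_r3_ret;   /* stands in for kvmppc_set_gpr(vcpu, 3, ret) */

    int do_hcall(int req)
    {
            long ret = 0;   /* H_SUCCESS */

            switch (req) {
            case HC_RTAS:
                    if (!rtas_tokens_registered())
                            return RESUME_HOST;      /* let userspace emulate it */
                    if (handle_rtas() != 0)
                            return RESUME_HOST;      /* unknown token or error */
                    break;
            case HC_XIRR:
            case HC_CPPR:
            case HC_EOI:
            case HC_IPI:
                    if (xics_in_kernel()) {
                            ret = handle_xics(req);
                            break;
                    }
                    /* no in-kernel XICS: fall through and punt to userspace */
            default:
                    return RESUME_HOST;
            }
            guest_r3_ret = ret;
            return RESUME_GUEST;
    }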
@@ -913,15 +964,19 @@ out:
        return ERR_PTR(err);
 }
 
+static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
+{
+       if (vpa->pinned_addr)
+               kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
+                                       vpa->dirty);
+}
+
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->arch.vpa_update_lock);
-       if (vcpu->arch.dtl.pinned_addr)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
-       if (vcpu->arch.slb_shadow.pinned_addr)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
-       if (vcpu->arch.vpa.pinned_addr)
-               kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+       unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
+       unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
+       unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
        spin_unlock(&vcpu->arch.vpa_update_lock);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -955,7 +1010,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
 }
 
 extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern void xics_wake_cpu(int cpu);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
                                   struct kvm_vcpu *vcpu)
@@ -1330,9 +1384,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                        break;
                vc->runner = vcpu;
                n_ceded = 0;
-               list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
+               list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
                        if (!v->arch.pending_exceptions)
                                n_ceded += v->arch.ceded;
+                       else
+                               v->arch.ceded = 0;
+               }
                if (n_ceded == vc->n_runnable)
                        kvmppc_vcore_blocked(vc);
                else
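The reworked loop fixes a lost-wakeup case: a vcpu that has ceded but has an exception pending must run so it can take the interrupt, so its ceded flag is cleared and it no longer counts toward the all-threads-idle total that lets the vcore block. A simplified sketch of that decision (the structures are pared down):

    #include <stdbool.h>
    #include <stddef.h>

    struct thread { bool ceded; bool pending_exceptions; };

    /* Returns true when the whole virtual core may sleep. */
    bool vcore_may_block(struct thread *t, size_t n_runnable)
    {
            size_t n_ceded = 0;

            for (size_t i = 0; i < n_runnable; i++) {
                    if (!t[i].pending_exceptions)
                            n_ceded += t[i].ceded;
                    else
                            t[i].ceded = false;  /* must run to take the interrupt */
            }
            return n_ceded == n_runnable;
    }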
@@ -1645,12 +1702,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
-                                     struct kvm_memory_slot old)
+                                     const struct kvm_memory_slot *old)
 {
        unsigned long npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;
 
-       if (npages && old.npages) {
+       if (npages && old->npages) {
                /*
                 * If modifying a memslot, reset all the rmap dirty bits.
                 * If this is a new memslot, we don't need to do anything
@@ -1827,6 +1884,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
        cpumask_setall(&kvm->arch.need_tlb_flush);
 
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+       INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 
        kvm->arch.rma = NULL;
 
@@ -1872,6 +1930,8 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
                kvm->arch.rma = NULL;
        }
 
+       kvmppc_rtas_tokens_free(kvm);
+
        kvmppc_free_hpt(kvm);
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
 }
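arch.rtas_tokens is a per-VM list mapping RTAS service names registered by userspace to token values; it is initialised at VM creation above and emptied by kvmppc_rtas_tokens_free() at teardown, before the HPT is freed. A sketch of that register/teardown lifecycle, with a plain singly linked list standing in for the kernel's list_head machinery (names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct rtas_token {
            struct rtas_token *next;
            char name[32];
            int token;
    };

    static struct rtas_token *rtas_tokens;   /* per-VM in the real code */

    int rtas_token_register(const char *name, int token)
    {
            struct rtas_token *t = calloc(1, sizeof(*t));

            if (!t)
                    return -1;
            strncpy(t->name, name, sizeof(t->name) - 1);
            t->token = token;
            t->next = rtas_tokens;           /* push onto the per-VM list */
            rtas_tokens = t;
            return 0;
    }

    /* Mirrors kvmppc_rtas_tokens_free(): empty the list at VM teardown. */
    void rtas_tokens_free(void)
    {
            while (rtas_tokens) {
                    struct rtas_token *t = rtas_tokens;

                    rtas_tokens = t->next;
                    free(t);
            }
    }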