git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'kvm-ppc/kvm-ppc-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 16 Jan 2014 04:28:13 +0000 (15:28 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 16 Jan 2014 04:28:13 +0000 (15:28 +1100)
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S

index 3818bd95327c5f1cf14e200d04a468bb46da6c59,93203bbe571441bbf05c312470ec728fa969cc70..5bac13c71c8d2d508f72077b4a7b4ec40c509bb6
@@@ -131,9 -131,8 +131,9 @@@ static void kvmppc_fast_vcpu_kick_hv(st
  static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
  {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 +      unsigned long flags;
  
 -      spin_lock(&vcpu->arch.tbacct_lock);
 +      spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
            vc->preempt_tb != TB_NIL) {
                vc->stolen_tb += mftb() - vc->preempt_tb;
                vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
                vcpu->arch.busy_preempt = TB_NIL;
        }
 -      spin_unlock(&vcpu->arch.tbacct_lock);
 +      spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
  }
  
  static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
  {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 +      unsigned long flags;
  
 -      spin_lock(&vcpu->arch.tbacct_lock);
 +      spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
                vc->preempt_tb = mftb();
        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
                vcpu->arch.busy_preempt = mftb();
 -      spin_unlock(&vcpu->arch.tbacct_lock);
 +      spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
  }
  
  static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
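
The hunks in this file switch vcpu->arch.tbacct_lock from plain spin_lock()/spin_unlock() to the interrupt-safe variants: spin_lock_irqsave() where the caller's interrupt state is not known (vcpu load/put), and spin_lock_irq() where interrupts are known to be enabled. A minimal sketch of the two patterns follows; struct acct and the acct_add_*() helpers are illustrative, not part of the patch.

#include <linux/spinlock.h>
#include <linux/types.h>

struct acct {
        spinlock_t lock;
        u64 stolen;
};

/* Caller context unknown (IRQs may already be off): save and restore flags. */
static void acct_add_any_context(struct acct *a, u64 delta)
{
        unsigned long flags;

        spin_lock_irqsave(&a->lock, flags);
        a->stolen += delta;
        spin_unlock_irqrestore(&a->lock, flags);
}

/* Caller known to run with IRQs enabled: the _irq variant is sufficient. */
static void acct_add_process_context(struct acct *a, u64 delta)
{
        spin_lock_irq(&a->lock);
        a->stolen += delta;
        spin_unlock_irq(&a->lock);
}
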
@@@ -488,11 -486,11 +488,11 @@@ static u64 vcore_stolen_time(struct kvm
         */
        if (vc->vcore_state != VCORE_INACTIVE &&
            vc->runner->arch.run_task != current) {
 -              spin_lock(&vc->runner->arch.tbacct_lock);
 +              spin_lock_irq(&vc->runner->arch.tbacct_lock);
                p = vc->stolen_tb;
                if (vc->preempt_tb != TB_NIL)
                        p += now - vc->preempt_tb;
 -              spin_unlock(&vc->runner->arch.tbacct_lock);
 +              spin_unlock_irq(&vc->runner->arch.tbacct_lock);
        } else {
                p = vc->stolen_tb;
        }
@@@ -514,10 -512,10 +514,10 @@@ static void kvmppc_create_dtl_entry(str
        core_stolen = vcore_stolen_time(vc, now);
        stolen = core_stolen - vcpu->arch.stolen_logged;
        vcpu->arch.stolen_logged = core_stolen;
 -      spin_lock(&vcpu->arch.tbacct_lock);
 +      spin_lock_irq(&vcpu->arch.tbacct_lock);
        stolen += vcpu->arch.busy_stolen;
        vcpu->arch.busy_stolen = 0;
 -      spin_unlock(&vcpu->arch.tbacct_lock);
 +      spin_unlock_irq(&vcpu->arch.tbacct_lock);
        if (!dt || !vpa)
                return;
        memset(dt, 0, sizeof(struct dtl_entry));
@@@ -591,9 -589,7 +591,9 @@@ int kvmppc_pseries_do_hcall(struct kvm_
                if (list_empty(&vcpu->kvm->arch.rtas_tokens))
                        return RESUME_HOST;
  
 +              idx = srcu_read_lock(&vcpu->kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
 +              srcu_read_unlock(&vcpu->kvm->srcu, idx);
  
                if (rc == -ENOENT)
                        return RESUME_HOST;
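
The added srcu_read_lock()/srcu_read_unlock() pair brackets kvmppc_rtas_hcall(), which ends up reading guest memory and therefore needs the kvm->srcu read lock held across the memslot access. A self-contained sketch of the SRCU read-side pattern; my_srcu and lookup_protected() are made-up names.

#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

/* Stand-in for code that dereferences SRCU-protected pointers. */
static int lookup_protected(void)
{
        return 0;
}

static int read_under_srcu(void)
{
        int idx, ret;

        idx = srcu_read_lock(&my_srcu);         /* enter read-side critical section */
        ret = lookup_protected();
        srcu_read_unlock(&my_srcu, idx);        /* must be handed back the same idx */
        return ret;
}
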
@@@ -673,12 -669,10 +673,10 @@@ static int kvmppc_handle_exit_hv(struc
                /* hcall - punt to userspace */
                int i;
  
-               if (vcpu->arch.shregs.msr & MSR_PR) {
-                       /* sc 1 from userspace - reflect to guest syscall */
-                       kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
-                       r = RESUME_GUEST;
-                       break;
-               }
+               /* hypercall with MSR_PR has already been handled in rmode,
+                * and never reaches here.
+                */
                run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
                for (i = 0; i < 9; ++i)
                        run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@@ -1119,13 -1113,13 +1117,13 @@@ static void kvmppc_remove_runnable(stru
  
        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                return;
 -      spin_lock(&vcpu->arch.tbacct_lock);
 +      spin_lock_irq(&vcpu->arch.tbacct_lock);
        now = mftb();
        vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
                vcpu->arch.stolen_logged;
        vcpu->arch.busy_preempt = now;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 -      spin_unlock(&vcpu->arch.tbacct_lock);
 +      spin_unlock_irq(&vcpu->arch.tbacct_lock);
        --vc->n_runnable;
        list_del(&vcpu->arch.run_list);
  }
@@@ -1352,7 -1346,7 +1350,7 @@@ static void kvmppc_run_core(struct kvmp
        kvm_guest_exit();
  
        preempt_enable();
 -      kvm_resched(vcpu);
 +      cond_resched();
  
        spin_lock(&vc->lock);
        now = get_tb();
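
The last hunk in this file replaces the kvm_resched(vcpu) helper with a direct cond_resched() call once the core has finished running the guest. A tiny sketch of that primitive as a voluntary preemption point; the loop below is illustrative only.

#include <linux/sched.h>

static void long_running_work(void)
{
        int i;

        for (i = 0; i < 1000000; i++) {
                /* ... one unit of work ... */
                cond_resched();         /* give up the CPU here if someone else needs it */
        }
}
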
index be4fa04a37c96d56d5f07d241395afe22e1627dd,d5ddc2d10748443d4237f03de10987bae6bff070..2c71780f8f01d4dade64c8d9e8a1c6e43f34b3c2
@@@ -153,6 -153,7 +153,6 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206
  
  13:   b       machine_check_fwnmi
  
 -
  /*
   * We come in here when wakened from nap mode on a secondary hw thread.
   * Relocation is off and most register values are lost.
@@@ -223,11 -224,6 +223,11 @@@ kvm_start_guest
        /* Clear our vcpu pointer so we don't come back in early */
        li      r0, 0
        std     r0, HSTATE_KVM_VCPU(r13)
 +      /*
 +       * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
 +       * the nap_count, because once the increment to nap_count is
 +       * visible we could be given another vcpu.
 +       */
        lwsync
        /* Clear any pending IPI - we're an offline thread */
        ld      r5, HSTATE_XICS_PHYS(r13)
        /* increment the nap count and then go to nap mode */
        ld      r4, HSTATE_KVM_VCORE(r13)
        addi    r4, r4, VCORE_NAP_COUNT
 -      lwsync                          /* make previous updates visible */
  51:   lwarx   r3, 0, r4
        addi    r3, r3, 1
        stwcx.  r3, 0, r4
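
The hunk above documents that HSTATE_KVM_VCPU(r13) must be cleared, and the clear made visible by the lwsync, before the nap_count increment: once the increment is seen, this thread can be handed another vcpu. It also drops the lwsync that used to sit in front of the lwarx loop. A C-level analogue of that publish-after-clear ordering, with made-up names (struct thread_slot, nap_count) and the generic smp_wmb() barrier standing in for the lwsync.

#include <linux/atomic.h>
#include <asm/barrier.h>

struct thread_slot {
        void *vcpu;             /* NULL means "free to be given a new vcpu" */
};

static atomic_t nap_count = ATOMIC_INIT(0);

static void go_idle(struct thread_slot *slot)
{
        slot->vcpu = NULL;      /* clear our slot first */
        smp_wmb();              /* order the clear before the increment below */
        atomic_inc(&nap_count); /* once this is visible, we may be handed work */
}
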
@@@ -689,6 -686,13 +689,13 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206
  5:    mtspr   SPRN_SRR0, r6
        mtspr   SPRN_SRR1, r7
  
+ /*
+  * Required state:
+  * R4 = vcpu
+  * R10: value for HSRR0
+  * R11: value for HSRR1
+  * R13 = PACA
+  */
  fast_guest_return:
        li      r0,0
        stb     r0,VCPU_CEDED(r4)       /* cancel cede */
@@@ -754,14 -758,15 +761,14 @@@ kvmppc_interrupt_hv
         * guest CR, R12 saved in shadow VCPU SCRATCH1/0
         * guest R13 saved in SPRN_SCRATCH0
         */
 -      /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
 -      std     r9, HSTATE_HOST_R2(r13)
 +      std     r9, HSTATE_SCRATCH2(r13)
  
        lbz     r9, HSTATE_IN_GUEST(r13)
        cmpwi   r9, KVM_GUEST_MODE_HOST_HV
        beq     kvmppc_bad_host_intr
  #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        cmpwi   r9, KVM_GUEST_MODE_GUEST
 -      ld      r9, HSTATE_HOST_R2(r13)
 +      ld      r9, HSTATE_SCRATCH2(r13)
        beq     kvmppc_interrupt_pr
  #endif
        /* We're now back in the host but in guest MMU context */
        std     r6, VCPU_GPR(R6)(r9)
        std     r7, VCPU_GPR(R7)(r9)
        std     r8, VCPU_GPR(R8)(r9)
 -      ld      r0, HSTATE_HOST_R2(r13)
 +      ld      r0, HSTATE_SCRATCH2(r13)
        std     r0, VCPU_GPR(R9)(r9)
        std     r10, VCPU_GPR(R10)(r9)
        std     r11, VCPU_GPR(R11)(r9)
@@@ -992,13 -997,14 +999,13 @@@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201
         */
        /* Increment the threads-exiting-guest count in the 0xff00
           bits of vcore->entry_exit_count */
 -      lwsync
        ld      r5,HSTATE_KVM_VCORE(r13)
        addi    r6,r5,VCORE_ENTRY_EXIT
  41:   lwarx   r3,0,r6
        addi    r0,r3,0x100
        stwcx.  r0,0,r6
        bne     41b
 -      lwsync
 +      isync           /* order stwcx. vs. reading napping_threads */
  
        /*
         * At this point we have an interrupt that we have to pass
        sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
 +      /* Order entry/exit update vs. IPIs */
 +      sync
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
        subf    r6,r4,r13
  42:   andi.   r0,r3,1
@@@ -1474,7 -1478,8 +1481,8 @@@ kvmppc_hisi
  hcall_try_real_mode:
        ld      r3,VCPU_GPR(R3)(r9)
        andi.   r0,r11,MSR_PR
-       bne     guest_exit_cont
+       /* sc 1 from userspace - reflect to guest syscall */
+       bne     sc_1_fast_return
        clrrdi  r3,r3,2
        cmpldi  r3,hcall_real_table_end - hcall_real_table
        bge     guest_exit_cont
        ld      r11,VCPU_MSR(r4)
        b       fast_guest_return
  
+ sc_1_fast_return:
+       mtspr   SPRN_SRR0,r10
+       mtspr   SPRN_SRR1,r11
+       li      r10, BOOK3S_INTERRUPT_SYSCALL
+       li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
+       rotldi  r11, r11, 63
+       mr      r4,r9
+       b       fast_guest_return
        /* We've attempted a real mode hcall, but it's punted it back
         * to userspace.  We need to restore some clobbered volatiles
         * before resuming the pass-it-to-qemu path */
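
sc_1_fast_return loads (MSR_ME << 1) | 1 and rotates it left by 63 bits, i.e. right by one: the low 1 bit lands in bit 63 (MSR_SF) and MSR_ME drops back into place, so the guest takes the syscall interrupt with exactly MSR_SF | MSR_ME set, without needing a 64-bit immediate. A small userspace check of that arithmetic, assuming the usual Linux bit numbering for the 64-bit MSR (SF = bit 63, ME = bit 12).

#include <assert.h>
#include <stdint.h>

#define MSR_SF (1ULL << 63)     /* 64-bit mode */
#define MSR_ME (1ULL << 12)     /* machine check enable */

int main(void)
{
        uint64_t r11 = (MSR_ME << 1) | 1;       /* li     r11,(MSR_ME << 1) | 1 */

        r11 = (r11 << 63) | (r11 >> 1);         /* rotldi r11,r11,63 == rotate right by 1 */
        assert(r11 == (MSR_SF | MSR_ME));       /* the synthesized guest MSR */
        return 0;
}
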
@@@ -1641,10 -1655,10 +1658,10 @@@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206
        bge     kvm_cede_exit
        stwcx.  r4,0,r6
        bne     31b
 +      /* order napping_threads update vs testing entry_exit_count */
 +      isync
        li      r0,1
        stb     r0,HSTATE_NAPPING(r13)
 -      /* order napping_threads update vs testing entry_exit_count */
 -      lwsync
        mr      r4,r3
        lwz     r7,VCORE_ENTRY_EXIT(r5)
        cmpwi   r7,0x100
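
This last hunk moves the barrier to an isync placed right after the stwcx. loop that publishes the thread in napping_threads, so the read of entry_exit_count below it cannot be performed before that update is done. A generic C analogue of the store-then-load ordering; the names and the smp_mb() are illustrative, while the real code relies on the PowerPC lwarx/stwcx./isync sequence and keeps both words in the vcore.

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/barrier.h>

static atomic_t napping_threads = ATOMIC_INIT(0);
static atomic_t entry_exit_count = ATOMIC_INIT(0);

/* Returns true if it is still safe to nap after advertising ourselves. */
static bool publish_napping_and_check(int my_thread)
{
        atomic_or(BIT(my_thread), &napping_threads);    /* advertise that we will nap */
        smp_mb();               /* order the update above before the read below */
        return atomic_read(&entry_exit_count) < 0x100;  /* nobody has begun exiting yet */
}
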