Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm into next
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 02f1defd8bb9aa092dca510f4c80edeb45fafce6..8eef1e5190773c9c290bf46581e7fd025f7dfaeb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -53,6 +53,7 @@
 
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
+static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
 /* Some compatibility defines */
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -89,6 +90,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #endif
 
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+       kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        vcpu->cpu = -1;
 }
 
@@ -115,6 +117,9 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
+#ifdef CONFIG_PPC_BOOK3S_64
+       svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
+#endif
        svcpu->in_use = true;
 }
 
@@ -158,6 +163,9 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
+#ifdef CONFIG_PPC_BOOK3S_64
+       vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
+#endif
        svcpu->in_use = false;
 
 out:
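
Note: the new shadow_fscr copies make the shadow FSCR travel with the vcpu
into and out of the shadow state, so a later facility-unavailable intercept
can inspect the FSCR value the guest was actually running with.
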
@@ -246,14 +254,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-       ulong smsr = vcpu->arch.shared->msr;
+       ulong guest_msr = kvmppc_get_msr(vcpu);
+       ulong smsr = guest_msr;
 
        /* Guest MSR values */
-       smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
+       smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
-       smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+       smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
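
Two things happen in this hunk: the guest's MSR_LE bit is now allowed
through to the shadow MSR so the guest can run little-endian, and the direct
vcpu->arch.shared->msr reads are replaced by kvmppc_get_msr(), which hides
the endianness of the shared page. A minimal sketch of the accessor idea
(the real helpers in arch/powerpc/include/asm/kvm_book3s.h are generated by
a wrapper macro; this hand-expanded form is for illustration only):

	static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu)
	{
		/* Pick the byte order the guest mapped the shared page in */
		if (kvmppc_shared_big_endian(vcpu))
			return be64_to_cpu(vcpu->arch.shared->msr);
		else
			return le64_to_cpu(vcpu->arch.shared->msr);
	}

kvmppc_set_msr_fast() is the store-side counterpart: it updates the shared
page in the right byte order without triggering the full side effects of
kvmppc_set_msr().
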
@@ -263,14 +272,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
-       ulong old_msr = vcpu->arch.shared->msr;
+       ulong old_msr = kvmppc_get_msr(vcpu);
 
 #ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
        msr &= to_book3s(vcpu)->msr_mask;
-       vcpu->arch.shared->msr = msr;
+       kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);
 
        if (msr & MSR_POW) {
@@ -281,11 +290,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 
                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
-                       vcpu->arch.shared->msr = msr;
+                       kvmppc_set_msr_fast(vcpu, msr);
                }
        }
 
-       if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+       if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
@@ -317,7 +326,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
        }
 
        /* Preload FPU if it's enabled */
-       if (vcpu->arch.shared->msr & MSR_FP)
+       if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
@@ -427,8 +436,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 
        /* patch dcbz into reserved instruction, so we trap */
        for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
-               if ((page[i] & 0xff0007ff) == INS_DCBZ)
-                       page[i] &= 0xfffffff7;
+               if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
+                       page[i] &= cpu_to_be32(0xfffffff7);
 
        kunmap_atomic(page);
        put_page(hpage);
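
The dcbz patching now byte-swaps before comparing because the guest's
instruction stream is big-endian in memory regardless of host endianness.
A worked example with illustrative values, assuming a little-endian host:
dcbz r0,r3 encodes as 0x7c001fec and sits in guest memory as the bytes
7c 00 1f ec; a raw 32-bit load on an LE host yields 0xec1f007c, which can
never match INS_DCBZ (0x7c0007ec) under the 0xff0007ff opcode mask, so
be32_to_cpu() restores the instruction image first. The patch itself, by
contrast, clears a bit in the in-memory (big-endian) image, so the constant
0xfffffff7 is converted the other way with cpu_to_be32().
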
@@ -438,7 +447,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        ulong mp_pa = vcpu->arch.magic_page_pa;
 
-       if (!(vcpu->arch.shared->msr & MSR_SF))
+       if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;
 
        if (unlikely(mp_pa) &&
@@ -459,8 +468,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
-       bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
-       bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+       bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
+       bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;
 
        relocated = data ? dr : ir;
@@ -480,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte.page_size = MMU_PAGE_64K;
        }
 
-       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+       switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
@@ -488,7 +497,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-               if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+               if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -511,22 +520,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
-               vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-               vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
-               vcpu->arch.shared->msr |=
-                       vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+               u64 ssrr1 = vcpu->arch.shadow_srr1;
+               u64 msr = kvmppc_get_msr(vcpu);
+               kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+               kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
+               kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
-               vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-               vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
-               vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
-               vcpu->arch.shared->msr |=
-                       vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+               u32 dsisr = vcpu->arch.fault_dsisr;
+               u64 ssrr1 = vcpu->arch.shadow_srr1;
+               u64 msr = kvmppc_get_msr(vcpu);
+               kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+               dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
+               kvmppc_set_dsisr(vcpu, dsisr);
+               kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
-               vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+               kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -606,6 +618,25 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
        kvmppc_recalc_shadow_msr(vcpu);
 }
 
+/* Give up facility (TAR / EBB / DSCR) */
+static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
+               /* Facility not available to the guest, ignore giveup request */
+               return;
+       }
+
+       switch (fac) {
+       case FSCR_TAR_LG:
+               vcpu->arch.tar = mfspr(SPRN_TAR);
+               mtspr(SPRN_TAR, current->thread.tar);
+               vcpu->arch.shadow_fscr &= ~FSCR_TAR;
+               break;
+       }
+#endif
+}
+
 static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 {
        ulong srr0 = kvmppc_get_pc(vcpu);
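
A note on the FSCR constants used in kvmppc_giveup_fac(): the _LG names
from arch/powerpc/include/asm/reg.h are bit numbers (logarithms), while the
plain names are the corresponding masks, which is why the availability test
checks (1ULL << fac) but the giveup path clears FSCR_TAR. Abridged from
reg.h:

	#define FSCR_TAR_LG	8	/* Enable Target Address Register */
	#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
	#define FSCR_TAR	__MASK(FSCR_TAR_LG)
	#define FSCR_EBB	__MASK(FSCR_EBB_LG)
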
@@ -614,11 +645,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
-               ulong msr = vcpu->arch.shared->msr;
+               ulong msr = kvmppc_get_msr(vcpu);
 
                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
-               vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+               msr = kvmppc_set_field(msr, 42, 47, 0);
+               kvmppc_set_msr_fast(vcpu, msr);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }
@@ -651,7 +683,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;
 
-       if (!(vcpu->arch.shared->msr & msr)) {
+       if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }
@@ -683,16 +715,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
 
        if (msr & MSR_FP) {
+               preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                t->fp_save_area = &vcpu->arch.fp;
+               preempt_enable();
        }
 
        if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
+               preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                t->vr_save_area = &vcpu->arch.vr;
+               preempt_enable();
 #endif
        }
 
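
The preempt_disable()/preempt_enable() pairs are needed because
enable_kernel_fp() and load_fp_state() (and their AltiVec counterparts)
must run on the same CPU without an intervening context switch: getting
preempted between the two calls could let another task claim the FPU and
leave the guest resuming with stale register contents.
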
@@ -716,18 +752,90 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                return;
 
        if (lost_ext & MSR_FP) {
+               preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               preempt_enable();
        }
 #ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
+               preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               preempt_enable();
        }
 #endif
        current->thread.regs->msr |= lost_ext;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
+{
+       /* Inject the Interrupt Cause field and trigger a guest interrupt */
+       vcpu->arch.fscr &= ~(0xffULL << 56);
+       vcpu->arch.fscr |= (fac << 56);
+       kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
+}
+
+static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+       enum emulation_result er = EMULATE_FAIL;
+
+       if (!(kvmppc_get_msr(vcpu) & MSR_PR))
+               er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+
+       if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
+               /* Couldn't emulate, trigger interrupt in guest */
+               kvmppc_trigger_fac_interrupt(vcpu, fac);
+       }
+}
+
+/* Enable facilities (TAR, EBB, DSCR) for the guest */
+static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+       bool guest_fac_enabled;
+       BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));
+
+       /*
+        * Not every facility is enabled by FSCR bits, check whether the
+        * guest has this facility enabled at all.
+        */
+       switch (fac) {
+       case FSCR_TAR_LG:
+       case FSCR_EBB_LG:
+               guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
+               break;
+       case FSCR_TM_LG:
+               guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
+               break;
+       default:
+               guest_fac_enabled = false;
+               break;
+       }
+
+       if (!guest_fac_enabled) {
+               /* Facility not enabled by the guest */
+               kvmppc_trigger_fac_interrupt(vcpu, fac);
+               return RESUME_GUEST;
+       }
+
+       switch (fac) {
+       case FSCR_TAR_LG:
+               /* TAR switching isn't lazy in Linux yet */
+               current->thread.tar = mfspr(SPRN_TAR);
+               mtspr(SPRN_TAR, vcpu->arch.tar);
+               vcpu->arch.shadow_fscr |= FSCR_TAR;
+               break;
+       default:
+               kvmppc_emulate_fac(vcpu, fac);
+               break;
+       }
+
+       return RESUME_GUEST;
+}
+#endif
+
 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
 {
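
For reference, the Interrupt Cause handling above relies on the ISA 2.07
FSCR layout: the top byte (bits 56-63, counting from the LSB) holds the IC
field, which hardware sets to the _LG number of the facility that caused a
Facility Unavailable interrupt. kvmppc_trigger_fac_interrupt() injects the
cause with (fac << 56), and the exit handler recovers it with
(vcpu->arch.shadow_fscr >> 56), as seen later in this patch.
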
@@ -784,7 +892,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
-                       vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
+                       u64 msr = kvmppc_get_msr(vcpu);
+                       msr |= shadow_srr1 & 0x58000000;
+                       kvmppc_set_msr_fast(vcpu, msr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
@@ -824,8 +934,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
-                       vcpu->arch.shared->dar = dar;
-                       vcpu->arch.shared->dsisr = fault_dsisr;
+                       kvmppc_set_dar(vcpu, dar);
+                       kvmppc_set_dsisr(vcpu, fault_dsisr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
@@ -833,7 +943,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-                       vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+                       kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
@@ -871,7 +981,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
-               if (vcpu->arch.shared->msr & MSR_PR) {
+               if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -913,7 +1023,7 @@ program_interrupt:
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
-                   !(vcpu->arch.shared->msr & MSR_PR)) {
+                   !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;
@@ -945,7 +1055,7 @@ program_interrupt:
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
-               } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+               } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
@@ -986,14 +1096,26 @@ program_interrupt:
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-                       vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
-                               kvmppc_get_last_inst(vcpu));
-                       vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
-                               kvmppc_get_last_inst(vcpu));
+                       u32 last_inst = kvmppc_get_last_inst(vcpu);
+                       u32 dsisr;
+                       u64 dar;
+
+                       dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
+                       dar = kvmppc_alignment_dar(vcpu, last_inst);
+
+                       kvmppc_set_dsisr(vcpu, dsisr);
+                       kvmppc_set_dar(vcpu, dar);
+
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
+#ifdef CONFIG_PPC_BOOK3S_64
+       case BOOK3S_INTERRUPT_FAC_UNAVAIL:
+               kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
+               r = RESUME_GUEST;
+               break;
+#endif
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
@@ -1054,7 +1176,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                }
        } else {
                for (i = 0; i < 16; i++)
-                       sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+                       sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
 
                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
@@ -1110,6 +1232,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
+       case KVM_REG_PPC_LPCR:
+               /*
+                * We are only interested in the LPCR_ILE bit
+                */
+               if (vcpu->arch.intr_msr & MSR_LE)
+                       *val = get_reg_val(id, LPCR_ILE);
+               else
+                       *val = get_reg_val(id, 0);
+               break;
        default:
                r = -EINVAL;
                break;
@@ -1118,6 +1249,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
        return r;
 }
 
+static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+       if (new_lpcr & LPCR_ILE)
+               vcpu->arch.intr_msr |= MSR_LE;
+       else
+               vcpu->arch.intr_msr &= ~MSR_LE;
+}
+
 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
 {
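
LPCR_ILE controls the endianness a guest takes interrupts in. PR KVM has no
real partition LPCR, so the bit is folded into vcpu->arch.intr_msr, the MSR
image installed whenever an interrupt is delivered to the guest. A
hypothetical userspace sketch of driving this through the ONE_REG interface
(error handling elided; "vcpu_fd" is assumed to be an open KVM vcpu file
descriptor, and the LPCR_ILE value mirrors asm/reg.h, which is not exported
to userspace):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdint.h>

	#define LPCR_ILE	0x02000000	/* assumed; mirrors asm/reg.h */

	static int set_guest_ile(int vcpu_fd, int on)
	{
		/* KVM_REG_PPC_LPCR is a 32-bit ONE_REG id */
		uint32_t lpcr = on ? LPCR_ILE : 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_LPCR,
			.addr = (uintptr_t)&lpcr,
		};

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}
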
@@ -1128,6 +1267,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
+       case KVM_REG_PPC_LPCR:
+               kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+               break;
        default:
                r = -EINVAL;
                break;
@@ -1170,8 +1312,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
-
 #ifdef CONFIG_PPC_BOOK3S_64
+       /* Always start the shared struct in native endian mode */
+#ifdef __BIG_ENDIAN__
+        vcpu->arch.shared_big_endian = true;
+#else
+        vcpu->arch.shared_big_endian = false;
+#endif
+
        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
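
The shared struct starts out host-endian because nothing has negotiated
anything else yet; when the guest later maps the magic page through the KVM
PV interface, its endianness at that moment determines the byte order used
for the shared page from then on (handled in the generic hypercall code,
outside this file).
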
@@ -1180,6 +1328,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
+       vcpu->arch.intr_msr = MSR_SF;
 #else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
@@ -1187,7 +1336,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;
 
-       vcpu->arch.shadow_msr = MSR_USER64;
+       vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
 
        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
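
Masking MSR_LE out of the initial shadow MSR matters on little-endian
hosts, where MSR_USER64 includes MSR_LE: the shadow MSR is the MSR the
guest really runs under, and its LE bit must track the guest MSR (now
propagated by kvmppc_recalc_shadow_msr()) rather than the host default,
since the guest starts out big-endian.
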
@@ -1264,7 +1413,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
        /* Preload FPU if it's enabled */
-       if (vcpu->arch.shared->msr & MSR_FP)
+       if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
        kvmppc_fix_ee_before_entry();
@@ -1277,6 +1426,9 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 
+       /* Make sure we save the guest TAR/EBB/DSCR state */
+       kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+
 out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;