git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
KVM: MIPS/Emulate: Properly implement TLBR for T&E
author: James Hogan <james.hogan@imgtec.com>
	Tue, 14 Mar 2017 17:00:08 +0000 (17:00 +0000)
committer: James Hogan <james.hogan@imgtec.com>
	Tue, 28 Mar 2017 15:31:37 +0000 (16:31 +0100)
Properly implement emulation of the TLBR instruction for Trap & Emulate.
This instruction reads the TLB entry pointed at by the CP0_Index
register into the other TLB registers, which may have the side effect of
changing the current ASID. Therefore abstract the CP0_EntryHi and ASID
changing code into a common function in the process.

A comment indicated that Linux doesn't use TLBR, which is true during
normal use, however dumping of the TLB does use it (for example with the
relatively recent 'x' magic sysrq key), as does a wired TLB entries test
case in my KVM tests.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
arch/mips/kvm/emulate.c

index 4833ebad89d9b875971c19e47256fb599f605a99..34e78a3ee9d79ece862574274f4ce11fcf3e6d7a 100644 (file)
@@ -990,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
-/*
- * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
- * we can catch this, if things ever change
- */
+/*
+ * kvm_mips_change_entryhi() - Write a new value to the guest CP0_EntryHi.
+ * @vcpu:	VCPU whose guest CP0 state is to be updated.
+ * @entryhi:	New CP0_EntryHi value (its low bits carry the ASID).
+ *
+ * If the ASID field of @entryhi differs from the current guest ASID, flush
+ * the kernel-mode GVA page tables and invalidate the kernel MMU context on
+ * every possible CPU; the guest-user side is deliberately left to be flushed
+ * and regenerated lazily on re-entry to guest user mode. The new @entryhi is
+ * then stored into the guest CP0_EntryHi register unconditionally.
+ */
+static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
+                                   unsigned long entryhi)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+       int cpu, i;
+       u32 nasid = entryhi & KVM_ENTRYHI_ASID;
+
+       if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
+               trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
+                                     KVM_ENTRYHI_ASID, nasid);
+
+               /*
+                * Flush entries from the GVA page tables.
+                * Guest user page table will get flushed lazily on re-entry to
+                * guest user if the guest ASID actually changes.
+                */
+               kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
+
+               /*
+                * Regenerate/invalidate kernel MMU context.
+                * The user MMU context will be regenerated lazily on re-entry
+                * to guest user if the guest ASID actually changes.
+                */
+               preempt_disable();
+               cpu = smp_processor_id();
+               get_new_mmu_context(kern_mm, cpu);
+               for_each_possible_cpu(i)
+                       if (i != cpu)
+                               cpu_context(i, kern_mm) = 0;
+               preempt_enable();
+       }
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+}
+
+/*
+ * kvm_mips_emul_tlbr() - Emulate the TLBR instruction for Trap & Emulate.
+ * @vcpu:	VCPU executing the TLBR.
+ *
+ * Read the guest TLB entry selected by the guest CP0_Index register into the
+ * guest CP0_PageMask, CP0_EntryLo0, CP0_EntryLo1 and CP0_EntryHi registers.
+ * An out-of-range index is architecturally UNDEFINED; here it is masked into
+ * range so the read still hits a valid guest_tlb[] slot.
+ *
+ * Returns: EMULATE_DONE.
+ */
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb *tlb;
        unsigned long pc = vcpu->arch.pc;
+       int index;
 
-       kvm_err("[%#lx] COP0_TLBR [%d]\n", pc, kvm_read_c0_guest_index(cop0));
-       return EMULATE_FAIL;
+       index = kvm_read_c0_guest_index(cop0);
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               /* UNDEFINED */
+               kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
+               index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+       kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
+       kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
+       kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
+       /* May change the guest ASID as a side effect (flushes kernel GVA) */
+       kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
+
+       return EMULATE_DONE;
 }
 
 /**
@@ -1222,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           struct kvm_vcpu *vcpu)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
-       int cpu, i;
 
        /*
         * Update PC and hold onto current PC in case there is
@@ -1328,44 +1371,8 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
                                                          vcpu->arch.gprs[rt]);
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                               u32 nasid =
-                                       vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
-                               if (((kvm_read_c0_guest_entryhi(cop0) &
-                                     KVM_ENTRYHI_ASID) != nasid)) {
-                                       trace_kvm_asid_change(vcpu,
-                                               kvm_read_c0_guest_entryhi(cop0)
-                                                       & KVM_ENTRYHI_ASID,
-                                               nasid);
-
-                                       /*
-                                        * Flush entries from the GVA page
-                                        * tables.
-                                        * Guest user page table will get
-                                        * flushed lazily on re-entry to guest
-                                        * user if the guest ASID actually
-                                        * changes.
-                                        */
-                                       kvm_mips_flush_gva_pt(kern_mm->pgd,
-                                                             KMF_KERN);
-
-                                       /*
-                                        * Regenerate/invalidate kernel MMU
-                                        * context.
-                                        * The user MMU context will be
-                                        * regenerated lazily on re-entry to
-                                        * guest user if the guest ASID actually
-                                        * changes.
-                                        */
-                                       preempt_disable();
-                                       cpu = smp_processor_id();
-                                       get_new_mmu_context(kern_mm, cpu);
-                                       for_each_possible_cpu(i)
-                                               if (i != cpu)
-                                                       cpu_context(i, kern_mm) = 0;
-                                       preempt_enable();
-                               }
-                               kvm_write_c0_guest_entryhi(cop0,
-                                                          vcpu->arch.gprs[rt]);
+                               kvm_mips_change_entryhi(vcpu,
+                                                       vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {