/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbdebug.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1
#ifdef CONFIG_KVM_MIPS_VZ
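/*
 * GuestID allocation follows the same versioning scheme as host ASIDs: the
 * low bits (GUESTID_MASK, sized by the VZ support code from the GuestCtl1 ID
 * field) hold the hardware GuestID, and the remaining upper bits
 * (GUESTID_VERSION_MASK, starting at GUESTID_FIRST_VERSION) carry a
 * generation count so stale GuestIDs can be detected without flushing the
 * root TLB on every allocation.
 */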
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
        struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

        return cpu_asid(smp_processor_id(), gpa_mm);
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, kern_mm);
}
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, user_mm);
}
/* Structure defining a TLB entry data set. */
void kvm_mips_dump_host_tlbs(void)
        local_irq_save(flags);

        kvm_info("HOST TLBs:\n");

        local_irq_restore(flags);

EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
99 kvm_info("TLB%c%3d Hi 0x%08lx ",
100 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
103 kvm_info("Lo0=0x%09llx %c%c attr %lx ",
104 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
105 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
106 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
107 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
108 kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
109 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
110 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
111 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
112 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
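        /*
         * Linear scan of the software-maintained guest TLB array for an
         * entry whose VPN2 and ASID fields match @entryhi.
         */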
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {

        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);

EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
        write_c0_entryhi(entryhi);

        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
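        /*
         * Overwrite the entry found by the probe (a negative index means no
         * match) with a unique EntryHi and zeroed EntryLo values so it can
         * never match again.
         */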
        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
        int idx_user, idx_kernel;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
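        /*
         * Probe for and invalidate any host TLB entry mapping @va under the
         * guest user and/or guest kernel ASID, as selected by @user and
         * @kernel.
         */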
        idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                          kvm_mips_get_user_asid(vcpu));

        idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                            kvm_mips_get_kernel_asid(vcpu));

        write_c0_entryhi(old_entryhi);

        local_irq_restore(flags);
        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (user && idx_user >= 0)
                kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                          kvm_mips_get_user_asid(vcpu), idx_user);
        if (kernel && idx_kernel >= 0)
                kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                          kvm_mips_get_kernel_asid(vcpu), idx_kernel);
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */
/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
        if (cpu_has_guestid) {
                clear_c0_guestctl1(MIPS_GCTL1_RID);
/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
        unsigned int guestctl1;

        if (cpu_has_guestid) {
                back_to_back_c0_hazard();
                guestctl1 = read_c0_guestctl1();
                guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
                            ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
                                    << MIPS_GCTL1_RID_SHIFT;
                write_c0_guestctl1(guestctl1);
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        old_entryhi = read_c0_entryhi();

        idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                     kvm_mips_get_root_asid(vcpu));

        write_c0_entryhi(old_entryhi);

        local_irq_restore(flags);
        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
                  __func__, (va & VPN2_MASK) |
                  kvm_mips_get_root_asid(vcpu), idx);
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu: KVM VCPU pointer.
 * @gva: Guest virtual address in a TLB mapped guest segment.
 * @gpa: Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
 *          written.
 *          -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *          have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
                            unsigned long *gpa)
        unsigned long o_entryhi, o_entrylo[2], o_pagemask;
        unsigned int o_index;
        unsigned long entrylo[2], pagemask, pagemaskbit, pa;

        /* Probe the guest TLB for a mapping */
        local_irq_save(flags);
        /* Set root GuestID for root probe of guest TLB entry */
        set_root_gid_to_guest_gid();

        o_entryhi = read_gc0_entryhi();
        o_index = read_gc0_index();
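        /* Probe with the VPN2 of gva, keeping the currently loaded ASID bits */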
        write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));

        index = read_gc0_index();

        write_gc0_entryhi(o_entryhi);
        write_gc0_index(o_index);

        local_irq_restore(flags);
        /* Match! read the TLB entry */
        o_entrylo[0] = read_gc0_entrylo0();
        o_entrylo[1] = read_gc0_entrylo1();
        o_pagemask = read_gc0_pagemask();

        entrylo[0] = read_gc0_entrylo0();
        entrylo[1] = read_gc0_entrylo1();
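        /*
         * Invert PageMask and clear the low 13 bits so that the variable
         * holds the GVA bits above the even/odd page pair mapped by this
         * entry.
         */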
        pagemask = ~read_gc0_pagemask() & ~0x1fffl;

        write_gc0_entryhi(o_entryhi);
        write_gc0_index(o_index);
        write_gc0_entrylo0(o_entrylo[0]);
        write_gc0_entrylo1(o_entrylo[1]);
        write_gc0_pagemask(o_pagemask);

        local_irq_restore(flags);
        /* Select one of the EntryLo values and interpret the GPA */
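        /*
         * pagemask ^ (pagemask & (pagemask - 1)) isolates the lowest set bit
         * of the inverted page mask; shifting it right once yields the GVA
         * bit that selects the even (EntryLo0) or odd (EntryLo1) page.
         */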
        pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
        pa = entrylo[!!(gva & pagemaskbit)];
        /*
         * TLB entry may have become invalid since TLB probe if physical FTLB
         * entries are shared between threads (e.g. I6400).
         */
        if (!(pa & ENTRYLO_V))
                return -EFAULT;

        /*
         * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
         * split with XI/RI in the middle.
         */
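        /*
         * The EntryLo PFN field starts at bit 6 and holds the physical
         * address shifted right by 12, so shifting the register left by 6
         * lines the frame address up at bit 12; the in-page offset bits of
         * the GVA are then merged back in below.
         */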
        pa = (pa << 6) & ~0xfffl;
        pa |= gva & ~(pagemask | pagemaskbit);

EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 *                                           guests.
 *
 * Invalidate all entries in root TLB which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
        unsigned long old_entryhi, old_pagemask, old_guestctl1;

        if (WARN_ON(!cpu_has_guestid))
                return;

        local_irq_save(flags);

        /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        old_guestctl1 = read_c0_guestctl1();

        /*
         * Invalidate guest entries in root TLB while leaving root entries
         * intact when possible.
         */
        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);

                /* Don't invalidate non-guest (RVA) mappings in the root TLB */
                if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
                        continue;

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_guestctl1(0);

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        write_c0_guestctl1(old_guestctl1);

        local_irq_restore(flags);
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest TLB irrespective of GuestID.
 */
void kvm_vz_local_flush_guesttlb_all(void)
        unsigned long old_index;
        unsigned long old_entryhi;
        unsigned long old_entrylo[2];
        unsigned long old_pagemask;

        local_irq_save(flags);

        /* Preserve all clobbered guest registers */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo[0] = read_gc0_entrylo0();
        old_entrylo[1] = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();
        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
                /* Inhibit machine check due to multiple matching TLB entries */
                cvmmemctl2 = read_c0_cvmmemctl2();
                cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
        /* Invalidate guest entries in guest TLB */
        write_gc0_entrylo0(0);
        write_gc0_entrylo1(0);
        write_gc0_pagemask(0);
        for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
                /* Make sure all entries differ. */
                write_gc0_index(entry);
                write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));

                guest_tlb_write_indexed();

        cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
        write_c0_cvmmemctl2(cvmmemctl2);
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo[0]);
        write_gc0_entrylo1(old_entrylo[1]);
        write_gc0_pagemask(old_pagemask);

        local_irq_restore(flags);
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf: Buffer to write TLB entries into.
 * @index: Start index.
 * @count: Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        unsigned int guestctl1 = 0;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        set_root_gid_to_guest_gid();
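        /*
         * Snapshot GuestCtl1 (with RID set to our GuestID above) so that the
         * loop below can tell from GuestCtl1.RID, after reading each entry,
         * whether the entry belongs to this guest.
         */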
        guestctl1 = read_c0_guestctl1();
        /* Read each entry from guest TLB */
        for (i = index; i < end; ++i, ++buf) {

                if (cpu_has_guestid &&
                    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
                        /* Entry invalid or belongs to another guest */
                        buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);

                        /* Entry belongs to the right guest */
                        buf->tlb_hi = read_gc0_entryhi();
                        buf->tlb_lo[0] = read_gc0_entrylo0();
                        buf->tlb_lo[1] = read_gc0_entrylo1();
                        buf->tlb_mask = read_gc0_pagemask();
        /* Clear root GuestID again */
        clear_root_gid();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf: Buffer to read TLB entries from.
 * @index: Start index.
 * @count: Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        set_root_gid_to_guest_gid();
        /* Write each entry to guest TLB */
        for (i = index; i < end; ++i, ++buf) {

                write_gc0_entryhi(buf->tlb_hi);
                write_gc0_entrylo0(buf->tlb_lo[0]);
                write_gc0_entrylo1(buf->tlb_lo[1]);
                write_gc0_pagemask(buf->tlb_mask);

                guest_tlb_write_indexed();
        /* Clear root GuestID again */
        clear_root_gid();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu: The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
        cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
        current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu: The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(current->mm));
        current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);