/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

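/*
 * Look up the Linux PTE for a host virtual address, updating the
 * accessed (and, for a write, dirty) bits, and return the backing
 * page size via *pte_sizep.  Returns an empty PTE if the Linux page
 * is smaller than the size the caller asked for.
 */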
static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int hugepage_shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
	if (!ptep)
		return __pte(0);
	if (hugepage_shift)
		*pte_sizep = 1ul << hugepage_shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

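/* Release an HPTE by rewriting its first dword after a release barrier */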
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}

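/*
 * Core of the H_ENTER hypercall: validate the proposed HPTE, translate
 * the guest physical address to a host real address, find and lock a
 * slot in the hashed page table, and link the new entry into the
 * reverse-map chain for the page.
 */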
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
		pa |= gpa & ~PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
						  &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

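/* Try once to take the per-guest tlbie lock; returns true on success */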
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
			  long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbie %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbie %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbiel %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbiel %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

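/*
 * Invalidate a batch of TLB entries for this guest, using global
 * tlbie or local tlbiel as requested, dispatching to the PPC970
 * variant when running on such a CPU.
 */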
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970 tlbie instruction is a bit different */
		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
		return;
	}
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

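/*
 * H_BULK_REMOVE: process up to four HPTE-removal requests passed in
 * the guest's registers, batching the TLB invalidations.
 */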
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

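/*
 * H_PROTECT: update the protection and key bits of an existing HPTE
 * and flush any stale translation from the TLB.
 */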
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte_and_update(pgdir, hva,
								  1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

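/*
 * H_READ: return one HPTE (or four, with H_READ_4) as the guest sees
 * it, with the current R and C bits folded in from the real HPTE.
 */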
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

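/* Clear the valid bit of an HPTE and flush the translation from the TLB */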
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

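/* Clear the reference (R) bit of an HPTE and invalidate the TLB entry */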
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

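/* Base page shift for each SLB_VSID_LP large-page encoding */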
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/*
 * When called from virtual mode, this function must be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK across a
 * preemption can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value to pass to the
 * guest if the fault should be reflected to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	int valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}