arch/powerpc/kvm/book3s_64_mmu_hv.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16  */
17
18 #include <linux/types.h>
19 #include <linux/string.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/highmem.h>
23 #include <linux/gfp.h>
24 #include <linux/slab.h>
25 #include <linux/hugetlb.h>
26 #include <linux/vmalloc.h>
27 #include <linux/srcu.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/debugfs.h>
31
32 #include <asm/tlbflush.h>
33 #include <asm/kvm_ppc.h>
34 #include <asm/kvm_book3s.h>
35 #include <asm/mmu-hash64.h>
36 #include <asm/hvcall.h>
37 #include <asm/synch.h>
38 #include <asm/ppc-opcode.h>
39 #include <asm/cputable.h>
40
41 #include "trace_hv.h"
42
43 /* The Power architecture requires the HPT to be at least 256 kB */
44 #define PPC_MIN_HPT_ORDER       18
45
46 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
47                                 long pte_index, unsigned long pteh,
48                                 unsigned long ptel, unsigned long *pte_idx_ret);
49 static void kvmppc_rmap_reset(struct kvm *kvm);
50
51 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
52 {
53         unsigned long hpt = 0;
54         struct revmap_entry *rev;
55         struct page *page = NULL;
56         long order = KVM_DEFAULT_HPT_ORDER;
57
58         if (htab_orderp) {
59                 order = *htab_orderp;
60                 if (order < PPC_MIN_HPT_ORDER)
61                         order = PPC_MIN_HPT_ORDER;
62         }
63
64         kvm->arch.hpt_cma_alloc = 0;
65         page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
66         if (page) {
67                 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
68                 memset((void *)hpt, 0, (1ul << order));
69                 kvm->arch.hpt_cma_alloc = 1;
70         }
71
72         /* Lastly try successively smaller sizes from the page allocator */
73         /* Only do this if userspace didn't specify a size via ioctl */
74         while (!hpt && order > PPC_MIN_HPT_ORDER && !htab_orderp) {
75                 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
76                                        __GFP_NOWARN, order - PAGE_SHIFT);
77                 if (!hpt)
78                         --order;
79         }
80
81         if (!hpt)
82                 return -ENOMEM;
83
84         kvm->arch.hpt_virt = hpt;
85         kvm->arch.hpt_order = order;
86         /* HPTEs are 2**4 bytes long */
87         kvm->arch.hpt_npte = 1ul << (order - 4);
88         /* 128 (2**7) bytes in each HPTEG */
89         kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
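        /*
         * Worked example (a sketch, assuming the default HPT order of 24,
         * i.e. a 16 MB table): hpt_npte = 2^20 HPTEs and hpt_mask = 2^17 - 1,
         * the mask applied to hash values to select one of the 2^17 HPTEGs.
         */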
90
91         /* Allocate reverse map array */
92         rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
93         if (!rev) {
94                 pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
95                 goto out_freehpt;
96         }
97         kvm->arch.revmap = rev;
98         kvm->arch.sdr1 = __pa(hpt) | (order - 18);
99
100         pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
101                 hpt, order, kvm->arch.lpid);
102
103         if (htab_orderp)
104                 *htab_orderp = order;
105         return 0;
106
107  out_freehpt:
108         if (kvm->arch.hpt_cma_alloc)
109                 kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
110         else
111                 free_pages(hpt, order - PAGE_SHIFT);
112         return -ENOMEM;
113 }
114
115 long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
116 {
117         long err = -EBUSY;
118         long order;
119
120         mutex_lock(&kvm->lock);
121         if (kvm->arch.hpte_setup_done) {
122                 kvm->arch.hpte_setup_done = 0;
123                 /* order hpte_setup_done vs. vcpus_running */
124                 smp_mb();
125                 if (atomic_read(&kvm->arch.vcpus_running)) {
126                         kvm->arch.hpte_setup_done = 1;
127                         goto out;
128                 }
129         }
130         if (kvm->arch.hpt_virt) {
131                 order = kvm->arch.hpt_order;
132                 /* Set the entire HPT to 0, i.e. invalid HPTEs */
133                 memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
134                 /*
135                  * Reset all the reverse-mapping chains for all memslots
136                  */
137                 kvmppc_rmap_reset(kvm);
138                 /* Ensure that each vcpu will flush its TLB on next entry. */
139                 cpumask_setall(&kvm->arch.need_tlb_flush);
140                 *htab_orderp = order;
141                 err = 0;
142         } else {
143                 err = kvmppc_alloc_hpt(kvm, htab_orderp);
144                 order = *htab_orderp;
145         }
146  out:
147         mutex_unlock(&kvm->lock);
148         return err;
149 }
150
151 void kvmppc_free_hpt(struct kvm *kvm)
152 {
153         kvmppc_free_lpid(kvm->arch.lpid);
154         vfree(kvm->arch.revmap);
155         if (kvm->arch.hpt_cma_alloc)
156                 kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
157                                 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
158         else
159                 free_pages(kvm->arch.hpt_virt,
160                            kvm->arch.hpt_order - PAGE_SHIFT);
161 }
162
163 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
164 static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
165 {
166         return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
167 }
168
169 /* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
170 static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
171 {
172         return (pgsize == 0x10000) ? 0x1000 : 0;
173 }
174
175 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
176                      unsigned long porder)
177 {
178         unsigned long i;
179         unsigned long npages;
180         unsigned long hp_v, hp_r;
181         unsigned long addr, hash;
182         unsigned long psize;
183         unsigned long hp0, hp1;
184         unsigned long idx_ret;
185         long ret;
186         struct kvm *kvm = vcpu->kvm;
187
188         psize = 1ul << porder;
189         npages = memslot->npages >> (porder - PAGE_SHIFT);
190
191         /* VRMA can't be > 1TB */
192         if (npages > 1ul << (40 - porder))
193                 npages = 1ul << (40 - porder);
194         /* Can't use more than 1 HPTE per HPTEG */
195         if (npages > kvm->arch.hpt_mask + 1)
196                 npages = kvm->arch.hpt_mask + 1;
197
198         hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
199                 HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
200         hp1 = hpte1_pgsize_encoding(psize) |
201                 HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
202
203         for (i = 0; i < npages; ++i) {
204                 addr = i << porder;
205                 /* can't use hpt_hash since va > 64 bits */
206                 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
207                 /*
208                  * We assume that the hash table is empty and no
209                  * vcpus are using it at this stage.  Since we create
210                  * at most one HPTE per HPTEG, we just assume entry 7
211                  * is available and use it.
212                  */
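                /*
                 * Each 128-byte HPTEG holds 8 HPTEs of 16 bytes each, so the
                 * global HPTE index passed to H_ENTER below is
                 * (HPTEG number << 3) + slot, with slot 7 used here.
                 */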
213                 hash = (hash << 3) + 7;
214                 hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
215                 hp_r = hp1 | addr;
216                 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
217                                                  &idx_ret);
218                 if (ret != H_SUCCESS) {
219                         pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
220                                addr, ret);
221                         break;
222                 }
223         }
224 }
225
226 int kvmppc_mmu_hv_init(void)
227 {
228         unsigned long host_lpid, rsvd_lpid;
229
230         if (!cpu_has_feature(CPU_FTR_HVMODE))
231                 return -EINVAL;
232
233         /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
234         host_lpid = mfspr(SPRN_LPID);
235         rsvd_lpid = LPID_RSVD;
236
237         kvmppc_init_lpid(rsvd_lpid + 1);
238
239         kvmppc_claim_lpid(host_lpid);
240         /* rsvd_lpid is reserved for use in partition switching */
241         kvmppc_claim_lpid(rsvd_lpid);
242
243         return 0;
244 }
245
246 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
247 {
248         unsigned long msr = vcpu->arch.intr_msr;
249
250         /* If transactional, change to suspend mode on IRQ delivery */
251         if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
252                 msr |= MSR_TS_S;
253         else
254                 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
255         kvmppc_set_msr(vcpu, msr);
256 }
257
258 long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
259                                 long pte_index, unsigned long pteh,
260                                 unsigned long ptel, unsigned long *pte_idx_ret)
261 {
262         long ret;
263
264         /* Protect linux PTE lookup from page table destruction */
265         rcu_read_lock_sched();  /* this disables preemption too */
266         ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
267                                 current->mm->pgd, false, pte_idx_ret);
268         rcu_read_unlock_sched();
269         if (ret == H_TOO_HARD) {
270                 /* this can't happen */
271                 pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
272                 ret = H_RESOURCE;       /* or something */
273         }
274         return ret;
275
276 }
277
278 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
279                                                          gva_t eaddr)
280 {
281         u64 mask;
282         int i;
283
284         for (i = 0; i < vcpu->arch.slb_nr; i++) {
285                 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
286                         continue;
287
288                 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
289                         mask = ESID_MASK_1T;
290                 else
291                         mask = ESID_MASK;
292
293                 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
294                         return &vcpu->arch.slb[i];
295         }
296         return NULL;
297 }
298
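/*
 * Combine the real page number from the second HPTE doubleword with the
 * byte offset taken from the effective address; e.g. for a 64 kB page,
 * ra_mask is 0xffff and the low 16 bits of the result come from ea.
 */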
299 static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
300                         unsigned long ea)
301 {
302         unsigned long ra_mask;
303
304         ra_mask = hpte_page_size(v, r) - 1;
305         return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
306 }
307
308 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
309                         struct kvmppc_pte *gpte, bool data, bool iswrite)
310 {
311         struct kvm *kvm = vcpu->kvm;
312         struct kvmppc_slb *slbe;
313         unsigned long slb_v;
314         unsigned long pp, key;
315         unsigned long v, gr;
316         __be64 *hptep;
317         int index;
318         int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
319
320         /* Get SLB entry */
321         if (virtmode) {
322                 slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
323                 if (!slbe)
324                         return -EINVAL;
325                 slb_v = slbe->origv;
326         } else {
327                 /* real mode access */
328                 slb_v = vcpu->kvm->arch.vrma_slb_v;
329         }
330
331         preempt_disable();
332         /* Find the HPTE in the hash table */
333         index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
334                                          HPTE_V_VALID | HPTE_V_ABSENT);
335         if (index < 0) {
336                 preempt_enable();
337                 return -ENOENT;
338         }
339         hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
340         v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
341         gr = kvm->arch.revmap[index].guest_rpte;
342
343         unlock_hpte(hptep, v);
344         preempt_enable();
345
346         gpte->eaddr = eaddr;
347         gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
348
349         /* Get PP bits and key for permission check */
350         pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
351         key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
352         key &= slb_v;
353
354         /* Calculate permissions */
355         gpte->may_read = hpte_read_permission(pp, key);
356         gpte->may_write = hpte_write_permission(pp, key);
357         gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
358
359         /* Storage key permission check for POWER7 */
360         if (data && virtmode) {
361                 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
362                 if (amrfield & 1)
363                         gpte->may_read = 0;
364                 if (amrfield & 2)
365                         gpte->may_write = 0;
366         }
367
368         /* Get the guest physical address */
369         gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
370         return 0;
371 }
372
373 /*
374  * Quick test for whether an instruction is a load or a store.
375  * If the instruction is a load or a store, then this will indicate
376  * which it is, at least on server processors.  (Embedded processors
377  * have some external PID instructions that don't follow the rule
378  * embodied here.)  If the instruction isn't a load or store, then
379  * this doesn't return anything useful.
380  */
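/*
 * For example, lwz (primary opcode 32) and stw (primary opcode 36) differ
 * in the 0x10000000 bit, while among the opcode-31 indexed forms, lwzx
 * (extended opcode 23) and stwx (151) differ in the 0x100 bit tested below.
 */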
381 static int instruction_is_store(unsigned int instr)
382 {
383         unsigned int mask;
384
385         mask = 0x10000000;
386         if ((instr & 0xfc000000) == 0x7c000000)
387                 mask = 0x100;           /* major opcode 31 */
388         return (instr & mask) != 0;
389 }
390
391 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
392                                   unsigned long gpa, gva_t ea, int is_store)
393 {
394         u32 last_inst;
395
396         /*
397          * If we fail, we just return to the guest and try executing it again.
398          */
399         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
400                 EMULATE_DONE)
401                 return RESUME_GUEST;
402
403         /*
404          * WARNING: We do not know for sure whether the instruction we just
405          * read from memory is the same one that caused the fault in the first
406          * place.  If the instruction we read is neither a load nor a store,
407          * then it can't access memory, so we don't need to worry about
408          * enforcing access permissions.  So, assuming it is a load or
409          * store, we just check that its direction (load or store) is
410          * consistent with the original fault, since that's what we
411          * checked the access permissions against.  If there is a mismatch
412          * we just return and retry the instruction.
413          */
414
415         if (instruction_is_store(last_inst) != !!is_store)
416                 return RESUME_GUEST;
417
418         /*
419          * Emulated accesses are handled by looking up the hash for
420          * translation once, then performing the access later. The
421          * translation could be invalidated in the meantime, at which
422          * point performing the subsequent memory access on the old
423          * physical address could possibly be a security hole for the
424          * guest (but not the host).
425          *
426          * This is less of an issue for MMIO stores since they aren't
427          * globally visible. It could be an issue for MMIO loads to
428          * a certain extent but we'll ignore it for now.
429          */
430
431         vcpu->arch.paddr_accessed = gpa;
432         vcpu->arch.vaddr_accessed = ea;
433         return kvmppc_emulate_mmio(run, vcpu);
434 }
435
436 int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
437                                 unsigned long ea, unsigned long dsisr)
438 {
439         struct kvm *kvm = vcpu->kvm;
440         unsigned long hpte[3], r;
441         __be64 *hptep;
442         unsigned long mmu_seq, psize, pte_size;
443         unsigned long gpa_base, gfn_base;
444         unsigned long gpa, gfn, hva, pfn;
445         struct kvm_memory_slot *memslot;
446         unsigned long *rmap;
447         struct revmap_entry *rev;
448         struct page *page, *pages[1];
449         long index, ret, npages;
450         unsigned long is_io;
451         unsigned int writing, write_ok;
452         struct vm_area_struct *vma;
453         unsigned long rcbits;
454
455         /*
456          * Real-mode code has already searched the HPT and found the
457          * entry we're interested in.  Lock the entry and check that
458          * it hasn't changed.  If it has, just return and re-execute the
459          * instruction.
460          */
461         if (ea != vcpu->arch.pgfault_addr)
462                 return RESUME_GUEST;
463         index = vcpu->arch.pgfault_index;
464         hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
465         rev = &kvm->arch.revmap[index];
466         preempt_disable();
467         while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
468                 cpu_relax();
469         hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
470         hpte[1] = be64_to_cpu(hptep[1]);
471         hpte[2] = r = rev->guest_rpte;
472         unlock_hpte(hptep, hpte[0]);
473         preempt_enable();
474
475         if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
476             hpte[1] != vcpu->arch.pgfault_hpte[1])
477                 return RESUME_GUEST;
478
479         /* Translate the logical address and get the page */
480         psize = hpte_page_size(hpte[0], r);
481         gpa_base = r & HPTE_R_RPN & ~(psize - 1);
482         gfn_base = gpa_base >> PAGE_SHIFT;
483         gpa = gpa_base | (ea & (psize - 1));
484         gfn = gpa >> PAGE_SHIFT;
485         memslot = gfn_to_memslot(kvm, gfn);
486
487         trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
488
489         /* No memslot means it's an emulated MMIO region */
490         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
491                 return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
492                                               dsisr & DSISR_ISSTORE);
493
494         /*
495          * This should never happen, because of the slot_is_aligned()
496          * check in kvmppc_do_h_enter().
497          */
498         if (gfn_base < memslot->base_gfn)
499                 return -EFAULT;
500
501         /* used to check for invalidations in progress */
502         mmu_seq = kvm->mmu_notifier_seq;
503         smp_rmb();
504
505         ret = -EFAULT;
506         is_io = 0;
507         pfn = 0;
508         page = NULL;
509         pte_size = PAGE_SIZE;
510         writing = (dsisr & DSISR_ISSTORE) != 0;
511         /* If writing != 0, then the HPTE must allow writing, if we get here */
512         write_ok = writing;
513         hva = gfn_to_hva_memslot(memslot, gfn);
514         npages = get_user_pages_fast(hva, 1, writing, pages);
515         if (npages < 1) {
516                 /* Check if it's an I/O mapping */
517                 down_read(&current->mm->mmap_sem);
518                 vma = find_vma(current->mm, hva);
519                 if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
520                     (vma->vm_flags & VM_PFNMAP)) {
521                         pfn = vma->vm_pgoff +
522                                 ((hva - vma->vm_start) >> PAGE_SHIFT);
523                         pte_size = psize;
524                         is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
525                         write_ok = vma->vm_flags & VM_WRITE;
526                 }
527                 up_read(&current->mm->mmap_sem);
528                 if (!pfn)
529                         goto out_put;
530         } else {
531                 page = pages[0];
532                 pfn = page_to_pfn(page);
533                 if (PageHuge(page)) {
534                         page = compound_head(page);
535                         pte_size <<= compound_order(page);
536                 }
537                 /* if the guest wants write access, see if that is OK */
538                 if (!writing && hpte_is_writable(r)) {
539                         pte_t *ptep, pte;
540                         unsigned long flags;
541                         /*
542                          * We need to protect against page table destruction
543                          * as well as against hugepage split and collapse.
544                          */
545                         local_irq_save(flags);
546                         ptep = find_linux_pte_or_hugepte(current->mm->pgd,
547                                                          hva, NULL, NULL);
548                         if (ptep) {
549                                 pte = kvmppc_read_update_linux_pte(ptep, 1);
550                                 if (pte_write(pte))
551                                         write_ok = 1;
552                         }
553                         local_irq_restore(flags);
554                 }
555         }
556
557         if (psize > pte_size)
558                 goto out_put;
559
560         /* Check WIMG vs. the actual page we're accessing */
561         if (!hpte_cache_flags_ok(r, is_io)) {
562                 if (is_io)
563                         goto out_put;
564
565                 /*
566                  * Allow guest to map emulated device memory as
567                  * uncacheable, but actually make it cacheable.
568                  */
569                 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
570         }
571
572         /*
573          * Set the HPTE to point to pfn.
574          * Since the pfn is at PAGE_SIZE granularity, make sure we
575          * don't mask out lower-order bits if psize < PAGE_SIZE.
576          */
577         if (psize < PAGE_SIZE)
578                 psize = PAGE_SIZE;
579         r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
580         if (hpte_is_writable(r) && !write_ok)
581                 r = hpte_make_readonly(r);
582         ret = RESUME_GUEST;
583         preempt_disable();
584         while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
585                 cpu_relax();
586         if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
587                 be64_to_cpu(hptep[1]) != hpte[1] ||
588                 rev->guest_rpte != hpte[2])
589                 /* HPTE has been changed under us; let the guest retry */
590                 goto out_unlock;
591         hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
592
593         /* Always put the HPTE in the rmap chain for the page base address */
594         rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
595         lock_rmap(rmap);
596
597         /* Check if we might have been invalidated; let the guest retry if so */
598         ret = RESUME_GUEST;
599         if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
600                 unlock_rmap(rmap);
601                 goto out_unlock;
602         }
603
604         /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
605         rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
606         r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
607
608         if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
609                 /* HPTE was previously valid, so we need to invalidate it */
610                 unlock_rmap(rmap);
611                 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
612                 kvmppc_invalidate_hpte(kvm, hptep, index);
613                 /* don't lose previous R and C bits */
614                 r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
615         } else {
616                 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
617         }
618
619         hptep[1] = cpu_to_be64(r);
620         eieio();
621         __unlock_hpte(hptep, hpte[0]);
622         asm volatile("ptesync" : : : "memory");
623         preempt_enable();
624         if (page && hpte_is_writable(r))
625                 SetPageDirty(page);
626
627  out_put:
628         trace_kvm_page_fault_exit(vcpu, hpte, ret);
629
630         if (page) {
631                 /*
632                  * We drop pages[0] here, not page, because page might
633                  * have been set to the head page of a compound, but
634                  * we have to drop the reference on the correct tail
635                  * page to match the get inside gup()
636                  */
637                 put_page(pages[0]);
638         }
639         return ret;
640
641  out_unlock:
642         __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
643         preempt_enable();
644         goto out_put;
645 }
646
647 static void kvmppc_rmap_reset(struct kvm *kvm)
648 {
649         struct kvm_memslots *slots;
650         struct kvm_memory_slot *memslot;
651         int srcu_idx;
652
653         srcu_idx = srcu_read_lock(&kvm->srcu);
654         slots = kvm_memslots(kvm);
655         kvm_for_each_memslot(memslot, slots) {
656                 /*
657                  * This assumes it is acceptable to lose reference and
658                  * change bits across a reset.
659                  */
660                 memset(memslot->arch.rmap, 0,
661                        memslot->npages * sizeof(*memslot->arch.rmap));
662         }
663         srcu_read_unlock(&kvm->srcu, srcu_idx);
664 }
665
666 static int kvm_handle_hva_range(struct kvm *kvm,
667                                 unsigned long start,
668                                 unsigned long end,
669                                 int (*handler)(struct kvm *kvm,
670                                                unsigned long *rmapp,
671                                                unsigned long gfn))
672 {
673         int ret;
674         int retval = 0;
675         struct kvm_memslots *slots;
676         struct kvm_memory_slot *memslot;
677
678         slots = kvm_memslots(kvm);
679         kvm_for_each_memslot(memslot, slots) {
680                 unsigned long hva_start, hva_end;
681                 gfn_t gfn, gfn_end;
682
683                 hva_start = max(start, memslot->userspace_addr);
684                 hva_end = min(end, memslot->userspace_addr +
685                                         (memslot->npages << PAGE_SHIFT));
686                 if (hva_start >= hva_end)
687                         continue;
688                 /*
689                  * {gfn(page) | page intersects with [hva_start, hva_end)} =
690                  * {gfn, gfn+1, ..., gfn_end-1}.
691                  */
692                 gfn = hva_to_gfn_memslot(hva_start, memslot);
693                 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
694
695                 for (; gfn < gfn_end; ++gfn) {
696                         gfn_t gfn_offset = gfn - memslot->base_gfn;
697
698                         ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
699                         retval |= ret;
700                 }
701         }
702
703         return retval;
704 }
705
706 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
707                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
708                                          unsigned long gfn))
709 {
710         return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
711 }
712
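/*
 * Each rmap word combines flag bits (present, referenced, changed, pending
 * R/C bits) with the index of one HPTE mapping the page; the revmap_entry
 * forw/back fields link all HPTEs for that page into a circular
 * doubly-linked list, which the handlers below walk and prune.
 */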
713 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
714                            unsigned long gfn)
715 {
716         struct revmap_entry *rev = kvm->arch.revmap;
717         unsigned long h, i, j;
718         __be64 *hptep;
719         unsigned long ptel, psize, rcbits;
720
721         for (;;) {
722                 lock_rmap(rmapp);
723                 if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
724                         unlock_rmap(rmapp);
725                         break;
726                 }
727
728                 /*
729                  * To avoid an ABBA deadlock with the HPTE lock bit,
730                  * we can't spin on the HPTE lock while holding the
731                  * rmap chain lock.
732                  */
733                 i = *rmapp & KVMPPC_RMAP_INDEX;
734                 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
735                 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
736                         /* unlock rmap before spinning on the HPTE lock */
737                         unlock_rmap(rmapp);
738                         while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
739                                 cpu_relax();
740                         continue;
741                 }
742                 j = rev[i].forw;
743                 if (j == i) {
744                         /* chain is now empty */
745                         *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
746                 } else {
747                         /* remove i from chain */
748                         h = rev[i].back;
749                         rev[h].forw = j;
750                         rev[j].back = h;
751                         rev[i].forw = rev[i].back = i;
752                         *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
753                 }
754
755                 /* Now check and modify the HPTE */
756                 ptel = rev[i].guest_rpte;
757                 psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
758                 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
759                     hpte_rpn(ptel, psize) == gfn) {
760                         hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
761                         kvmppc_invalidate_hpte(kvm, hptep, i);
762                         /* Harvest R and C */
763                         rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
764                         *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
765                         if (rcbits & HPTE_R_C)
766                                 kvmppc_update_rmap_change(rmapp, psize);
767                         if (rcbits & ~rev[i].guest_rpte) {
768                                 rev[i].guest_rpte = ptel | rcbits;
769                                 note_hpte_modification(kvm, &rev[i]);
770                         }
771                 }
772                 unlock_rmap(rmapp);
773                 __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
774         }
775         return 0;
776 }
777
778 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
779 {
780         kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
781         return 0;
782 }
783
784 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
785 {
786         kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
787         return 0;
788 }
789
790 void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
791                                   struct kvm_memory_slot *memslot)
792 {
793         unsigned long *rmapp;
794         unsigned long gfn;
795         unsigned long n;
796
797         rmapp = memslot->arch.rmap;
798         gfn = memslot->base_gfn;
799         for (n = memslot->npages; n; --n) {
800                 /*
801                  * Testing the present bit without locking is OK because
802                  * the memslot has been marked invalid already, and hence
803                  * no new HPTEs referencing this page can be created,
804                  * thus the present bit can't go from 0 to 1.
805                  */
806                 if (*rmapp & KVMPPC_RMAP_PRESENT)
807                         kvm_unmap_rmapp(kvm, rmapp, gfn);
808                 ++rmapp;
809                 ++gfn;
810         }
811 }
812
813 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
814                          unsigned long gfn)
815 {
816         struct revmap_entry *rev = kvm->arch.revmap;
817         unsigned long head, i, j;
818         __be64 *hptep;
819         int ret = 0;
820
821  retry:
822         lock_rmap(rmapp);
823         if (*rmapp & KVMPPC_RMAP_REFERENCED) {
824                 *rmapp &= ~KVMPPC_RMAP_REFERENCED;
825                 ret = 1;
826         }
827         if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
828                 unlock_rmap(rmapp);
829                 return ret;
830         }
831
832         i = head = *rmapp & KVMPPC_RMAP_INDEX;
833         do {
834                 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
835                 j = rev[i].forw;
836
837                 /* If this HPTE isn't referenced, ignore it */
838                 if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
839                         continue;
840
841                 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
842                         /* unlock rmap before spinning on the HPTE lock */
843                         unlock_rmap(rmapp);
844                         while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
845                                 cpu_relax();
846                         goto retry;
847                 }
848
849                 /* Now check and modify the HPTE */
850                 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
851                     (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
852                         kvmppc_clear_ref_hpte(kvm, hptep, i);
853                         if (!(rev[i].guest_rpte & HPTE_R_R)) {
854                                 rev[i].guest_rpte |= HPTE_R_R;
855                                 note_hpte_modification(kvm, &rev[i]);
856                         }
857                         ret = 1;
858                 }
859                 __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
860         } while ((i = j) != head);
861
862         unlock_rmap(rmapp);
863         return ret;
864 }
865
866 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
867 {
868         return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
869 }
870
871 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
872                               unsigned long gfn)
873 {
874         struct revmap_entry *rev = kvm->arch.revmap;
875         unsigned long head, i, j;
876         __be64 *hp;
877         int ret = 1;
878
879         if (*rmapp & KVMPPC_RMAP_REFERENCED)
880                 return 1;
881
882         lock_rmap(rmapp);
883         if (*rmapp & KVMPPC_RMAP_REFERENCED)
884                 goto out;
885
886         if (*rmapp & KVMPPC_RMAP_PRESENT) {
887                 i = head = *rmapp & KVMPPC_RMAP_INDEX;
888                 do {
889                         hp = (__be64 *)(kvm->arch.hpt_virt + (i << 4));
890                         j = rev[i].forw;
891                         if (be64_to_cpu(hp[1]) & HPTE_R_R)
892                                 goto out;
893                 } while ((i = j) != head);
894         }
895         ret = 0;
896
897  out:
898         unlock_rmap(rmapp);
899         return ret;
900 }
901
902 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
903 {
904         return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
905 }
906
907 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
908 {
909         kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
910 }
911
912 static int vcpus_running(struct kvm *kvm)
913 {
914         return atomic_read(&kvm->arch.vcpus_running) != 0;
915 }
916
917 /*
918  * Returns the number of system pages that are dirty.
919  * This can be more than 1 if we find a huge-page HPTE.
920  */
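/*
 * For example, assuming 64 kB system pages, one dirty 16 MB HPTE accounts
 * for 256 (16 MB / 64 kB) dirty system pages.
 */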
921 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
922 {
923         struct revmap_entry *rev = kvm->arch.revmap;
924         unsigned long head, i, j;
925         unsigned long n;
926         unsigned long v, r;
927         __be64 *hptep;
928         int npages_dirty = 0;
929
930  retry:
931         lock_rmap(rmapp);
932         if (*rmapp & KVMPPC_RMAP_CHANGED) {
933                 long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
934                         >> KVMPPC_RMAP_CHG_SHIFT;
935                 *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
936                 npages_dirty = 1;
937                 if (change_order > PAGE_SHIFT)
938                         npages_dirty = 1ul << (change_order - PAGE_SHIFT);
939         }
940         if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
941                 unlock_rmap(rmapp);
942                 return npages_dirty;
943         }
944
945         i = head = *rmapp & KVMPPC_RMAP_INDEX;
946         do {
947                 unsigned long hptep1;
948                 hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
949                 j = rev[i].forw;
950
951                 /*
952                  * Checking the C (changed) bit here is racy since there
953                  * is no guarantee about when the hardware writes it back.
954                  * If the HPTE is not writable then it is stable since the
955                  * page can't be written to, and we would have done a tlbie
956                  * (which forces the hardware to complete any writeback)
957                  * when making the HPTE read-only.
958                  * If vcpus are running then this call is racy anyway
959                  * since the page could get dirtied subsequently, so we
960                  * expect there to be a further call which would pick up
961                  * any delayed C bit writeback.
962                  * Otherwise we need to do the tlbie even if C==0 in
963                  * order to pick up any delayed writeback of C.
964                  */
965                 hptep1 = be64_to_cpu(hptep[1]);
966                 if (!(hptep1 & HPTE_R_C) &&
967                     (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
968                         continue;
969
970                 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
971                         /* unlock rmap before spinning on the HPTE lock */
972                         unlock_rmap(rmapp);
973                         while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
974                                 cpu_relax();
975                         goto retry;
976                 }
977
978                 /* Now check and modify the HPTE */
979                 if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
980                         __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
981                         continue;
982                 }
983
984                 /* need to make it temporarily absent so C is stable */
985                 hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
986                 kvmppc_invalidate_hpte(kvm, hptep, i);
987                 v = be64_to_cpu(hptep[0]);
988                 r = be64_to_cpu(hptep[1]);
989                 if (r & HPTE_R_C) {
990                         hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
991                         if (!(rev[i].guest_rpte & HPTE_R_C)) {
992                                 rev[i].guest_rpte |= HPTE_R_C;
993                                 note_hpte_modification(kvm, &rev[i]);
994                         }
995                         n = hpte_page_size(v, r);
996                         n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
997                         if (n > npages_dirty)
998                                 npages_dirty = n;
999                         eieio();
1000                 }
1001                 v &= ~HPTE_V_ABSENT;
1002                 v |= HPTE_V_VALID;
1003                 __unlock_hpte(hptep, v);
1004         } while ((i = j) != head);
1005
1006         unlock_rmap(rmapp);
1007         return npages_dirty;
1008 }
1009
1010 static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
1011                               struct kvm_memory_slot *memslot,
1012                               unsigned long *map)
1013 {
1014         unsigned long gfn;
1015
1016         if (!vpa->dirty || !vpa->pinned_addr)
1017                 return;
1018         gfn = vpa->gpa >> PAGE_SHIFT;
1019         if (gfn < memslot->base_gfn ||
1020             gfn >= memslot->base_gfn + memslot->npages)
1021                 return;
1022
1023         vpa->dirty = false;
1024         if (map)
1025                 __set_bit_le(gfn - memslot->base_gfn, map);
1026 }
1027
1028 long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
1029                              unsigned long *map)
1030 {
1031         unsigned long i, j;
1032         unsigned long *rmapp;
1033         struct kvm_vcpu *vcpu;
1034
1035         preempt_disable();
1036         rmapp = memslot->arch.rmap;
1037         for (i = 0; i < memslot->npages; ++i) {
1038                 int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
1039                 /*
1040                  * Note that if npages > 0 then i must be a multiple of npages,
1041                  * since we always put huge-page HPTEs in the rmap chain
1042                  * corresponding to their page base address.
1043                  */
1044                 if (npages && map)
1045                         for (j = i; npages; ++j, --npages)
1046                                 __set_bit_le(j, map);
1047                 ++rmapp;
1048         }
1049
1050         /* Harvest dirty bits from VPA and DTL updates */
1051         /* Note: we never modify the SLB shadow buffer areas */
1052         kvm_for_each_vcpu(i, vcpu, kvm) {
1053                 spin_lock(&vcpu->arch.vpa_update_lock);
1054                 harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
1055                 harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
1056                 spin_unlock(&vcpu->arch.vpa_update_lock);
1057         }
1058         preempt_enable();
1059         return 0;
1060 }
1061
1062 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
1063                             unsigned long *nb_ret)
1064 {
1065         struct kvm_memory_slot *memslot;
1066         unsigned long gfn = gpa >> PAGE_SHIFT;
1067         struct page *page, *pages[1];
1068         int npages;
1069         unsigned long hva, offset;
1070         int srcu_idx;
1071
1072         srcu_idx = srcu_read_lock(&kvm->srcu);
1073         memslot = gfn_to_memslot(kvm, gfn);
1074         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
1075                 goto err;
1076         hva = gfn_to_hva_memslot(memslot, gfn);
1077         npages = get_user_pages_fast(hva, 1, 1, pages);
1078         if (npages < 1)
1079                 goto err;
1080         page = pages[0];
1081         srcu_read_unlock(&kvm->srcu, srcu_idx);
1082
1083         offset = gpa & (PAGE_SIZE - 1);
1084         if (nb_ret)
1085                 *nb_ret = PAGE_SIZE - offset;
1086         return page_address(page) + offset;
1087
1088  err:
1089         srcu_read_unlock(&kvm->srcu, srcu_idx);
1090         return NULL;
1091 }
1092
1093 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
1094                              bool dirty)
1095 {
1096         struct page *page = virt_to_page(va);
1097         struct kvm_memory_slot *memslot;
1098         unsigned long gfn;
1099         unsigned long *rmap;
1100         int srcu_idx;
1101
1102         put_page(page);
1103
1104         if (!dirty)
1105                 return;
1106
1107         /* We need to mark this page dirty in the rmap chain */
1108         gfn = gpa >> PAGE_SHIFT;
1109         srcu_idx = srcu_read_lock(&kvm->srcu);
1110         memslot = gfn_to_memslot(kvm, gfn);
1111         if (memslot) {
1112                 rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
1113                 lock_rmap(rmap);
1114                 *rmap |= KVMPPC_RMAP_CHANGED;
1115                 unlock_rmap(rmap);
1116         }
1117         srcu_read_unlock(&kvm->srcu, srcu_idx);
1118 }
1119
1120 /*
1121  * Functions for reading and writing the hash table via reads and
1122  * writes on a file descriptor.
1123  *
1124  * Reads return the guest view of the hash table, which has to be
1125  * pieced together from the real hash table and the guest_rpte
1126  * values in the revmap array.
1127  *
1128  * On writes, each HPTE written is considered in turn, and if it
1129  * is valid, it is written to the HPT as if an H_ENTER with the
1130  * exact flag set was done.  When the invalid count is non-zero
1131  * in the header written to the stream, the kernel will make
1132  * sure that that many HPTEs are invalid, and invalidate them
1133  * if not.
1134  */
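/*
 * Sketch of the stream layout, as implemented by the read/write code
 * below: each chunk is a struct kvm_get_htab_header giving a starting
 * index, an n_valid count and an n_invalid count, followed by n_valid
 * pairs of 64-bit HPTE doublewords; the n_invalid entries after those
 * indices carry no data in the stream.
 */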
1135
1136 struct kvm_htab_ctx {
1137         unsigned long   index;
1138         unsigned long   flags;
1139         struct kvm      *kvm;
1140         int             first_pass;
1141 };
1142
1143 #define HPTE_SIZE       (2 * sizeof(unsigned long))
1144
1145 /*
1146  * Returns 1 if this HPT entry has been modified or has pending
1147  * R/C bit changes.
1148  */
1149 static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
1150 {
1151         unsigned long rcbits_unset;
1152
1153         if (revp->guest_rpte & HPTE_GR_MODIFIED)
1154                 return 1;
1155
1156         /* Also need to consider changes in reference and changed bits */
1157         rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1158         if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
1159             (be64_to_cpu(hptp[1]) & rcbits_unset))
1160                 return 1;
1161
1162         return 0;
1163 }
1164
1165 static long record_hpte(unsigned long flags, __be64 *hptp,
1166                         unsigned long *hpte, struct revmap_entry *revp,
1167                         int want_valid, int first_pass)
1168 {
1169         unsigned long v, r;
1170         unsigned long rcbits_unset;
1171         int ok = 1;
1172         int valid, dirty;
1173
1174         /* Unmodified entries are uninteresting except on the first pass */
1175         dirty = hpte_dirty(revp, hptp);
1176         if (!first_pass && !dirty)
1177                 return 0;
1178
1179         valid = 0;
1180         if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1181                 valid = 1;
1182                 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
1183                     !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
1184                         valid = 0;
1185         }
1186         if (valid != want_valid)
1187                 return 0;
1188
1189         v = r = 0;
1190         if (valid || dirty) {
1191                 /* lock the HPTE so it's stable and read it */
1192                 preempt_disable();
1193                 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1194                         cpu_relax();
1195                 v = be64_to_cpu(hptp[0]);
1196
1197                 /* re-evaluate valid and dirty from synchronized HPTE value */
1198                 valid = !!(v & HPTE_V_VALID);
1199                 dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
1200
1201                 /* Harvest R and C into guest view if necessary */
1202                 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1203                 if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
1204                         revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
1205                                 (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
1206                         dirty = 1;
1207                 }
1208
1209                 if (v & HPTE_V_ABSENT) {
1210                         v &= ~HPTE_V_ABSENT;
1211                         v |= HPTE_V_VALID;
1212                         valid = 1;
1213                 }
1214                 if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
1215                         valid = 0;
1216
1217                 r = revp->guest_rpte;
1218                 /* only clear modified if this is the right sort of entry */
1219                 if (valid == want_valid && dirty) {
1220                         r &= ~HPTE_GR_MODIFIED;
1221                         revp->guest_rpte = r;
1222                 }
1223                 unlock_hpte(hptp, be64_to_cpu(hptp[0]));
1224                 preempt_enable();
1225                 if (!(valid == want_valid && (first_pass || dirty)))
1226                         ok = 0;
1227         }
1228         hpte[0] = cpu_to_be64(v);
1229         hpte[1] = cpu_to_be64(r);
1230         return ok;
1231 }
1232
1233 static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1234                              size_t count, loff_t *ppos)
1235 {
1236         struct kvm_htab_ctx *ctx = file->private_data;
1237         struct kvm *kvm = ctx->kvm;
1238         struct kvm_get_htab_header hdr;
1239         __be64 *hptp;
1240         struct revmap_entry *revp;
1241         unsigned long i, nb, nw;
1242         unsigned long __user *lbuf;
1243         struct kvm_get_htab_header __user *hptr;
1244         unsigned long flags;
1245         int first_pass;
1246         unsigned long hpte[2];
1247
1248         if (!access_ok(VERIFY_WRITE, buf, count))
1249                 return -EFAULT;
1250
1251         first_pass = ctx->first_pass;
1252         flags = ctx->flags;
1253
1254         i = ctx->index;
1255         hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1256         revp = kvm->arch.revmap + i;
1257         lbuf = (unsigned long __user *)buf;
1258
1259         nb = 0;
1260         while (nb + sizeof(hdr) + HPTE_SIZE < count) {
1261                 /* Initialize header */
1262                 hptr = (struct kvm_get_htab_header __user *)buf;
1263                 hdr.n_valid = 0;
1264                 hdr.n_invalid = 0;
1265                 nw = nb;
1266                 nb += sizeof(hdr);
1267                 lbuf = (unsigned long __user *)(buf + sizeof(hdr));
1268
1269                 /* Skip uninteresting entries, i.e. those clean on a non-first pass */
1270                 if (!first_pass) {
1271                         while (i < kvm->arch.hpt_npte &&
1272                                !hpte_dirty(revp, hptp)) {
1273                                 ++i;
1274                                 hptp += 2;
1275                                 ++revp;
1276                         }
1277                 }
1278                 hdr.index = i;
1279
1280                 /* Grab a series of valid entries */
1281                 while (i < kvm->arch.hpt_npte &&
1282                        hdr.n_valid < 0xffff &&
1283                        nb + HPTE_SIZE < count &&
1284                        record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
1285                         /* valid entry, write it out */
1286                         ++hdr.n_valid;
1287                         if (__put_user(hpte[0], lbuf) ||
1288                             __put_user(hpte[1], lbuf + 1))
1289                                 return -EFAULT;
1290                         nb += HPTE_SIZE;
1291                         lbuf += 2;
1292                         ++i;
1293                         hptp += 2;
1294                         ++revp;
1295                 }
1296                 /* Now skip invalid entries while we can */
1297                 while (i < kvm->arch.hpt_npte &&
1298                        hdr.n_invalid < 0xffff &&
1299                        record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
1300                         /* found an invalid entry */
1301                         ++hdr.n_invalid;
1302                         ++i;
1303                         hptp += 2;
1304                         ++revp;
1305                 }
1306
1307                 if (hdr.n_valid || hdr.n_invalid) {
1308                         /* write back the header */
1309                         if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
1310                                 return -EFAULT;
1311                         nw = nb;
1312                         buf = (char __user *)lbuf;
1313                 } else {
1314                         nb = nw;
1315                 }
1316
1317                 /* Check if we've wrapped around the hash table */
1318                 if (i >= kvm->arch.hpt_npte) {
1319                         i = 0;
1320                         ctx->first_pass = 0;
1321                         break;
1322                 }
1323         }
1324
1325         ctx->index = i;
1326
1327         return nb;
1328 }
1329
1330 static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1331                               size_t count, loff_t *ppos)
1332 {
1333         struct kvm_htab_ctx *ctx = file->private_data;
1334         struct kvm *kvm = ctx->kvm;
1335         struct kvm_get_htab_header hdr;
1336         unsigned long i, j;
1337         unsigned long v, r;
1338         unsigned long __user *lbuf;
1339         __be64 *hptp;
1340         unsigned long tmp[2];
1341         ssize_t nb;
1342         long int err, ret;
1343         int hpte_setup;
1344
1345         if (!access_ok(VERIFY_READ, buf, count))
1346                 return -EFAULT;
1347
1348         /* lock out vcpus from running while we're doing this */
1349         mutex_lock(&kvm->lock);
1350         hpte_setup = kvm->arch.hpte_setup_done;
1351         if (hpte_setup) {
1352                 kvm->arch.hpte_setup_done = 0;  /* temporarily */
1353                 /* order hpte_setup_done vs. vcpus_running */
1354                 smp_mb();
1355                 if (atomic_read(&kvm->arch.vcpus_running)) {
1356                         kvm->arch.hpte_setup_done = 1;
1357                         mutex_unlock(&kvm->lock);
1358                         return -EBUSY;
1359                 }
1360         }
1361
1362         err = 0;
1363         for (nb = 0; nb + sizeof(hdr) <= count; ) {
1364                 err = -EFAULT;
1365                 if (__copy_from_user(&hdr, buf, sizeof(hdr)))
1366                         break;
1367
1368                 err = 0;
1369                 if (nb + hdr.n_valid * HPTE_SIZE > count)
1370                         break;
1371
1372                 nb += sizeof(hdr);
1373                 buf += sizeof(hdr);
1374
1375                 err = -EINVAL;
1376                 i = hdr.index;
1377                 if (i >= kvm->arch.hpt_npte ||
1378                     i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
1379                         break;
1380
1381                 hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1382                 lbuf = (unsigned long __user *)buf;
1383                 for (j = 0; j < hdr.n_valid; ++j) {
1384                         __be64 hpte_v;
1385                         __be64 hpte_r;
1386
1387                         err = -EFAULT;
1388                         if (__get_user(hpte_v, lbuf) ||
1389                             __get_user(hpte_r, lbuf + 1))
1390                                 goto out;
1391                         v = be64_to_cpu(hpte_v);
1392                         r = be64_to_cpu(hpte_r);
1393                         err = -EINVAL;
1394                         if (!(v & HPTE_V_VALID))
1395                                 goto out;
1396                         lbuf += 2;
1397                         nb += HPTE_SIZE;
1398
1399                         if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1400                                 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1401                         err = -EIO;
1402                         ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
1403                                                          tmp);
1404                         if (ret != H_SUCCESS) {
1405                                 pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
1406                                        "r=%lx\n", ret, i, v, r);
1407                                 goto out;
1408                         }
1409                         if (!hpte_setup && is_vrma_hpte(v)) {
1410                                 unsigned long psize = hpte_base_page_size(v, r);
1411                                 unsigned long senc = slb_pgsize_encoding(psize);
1412                                 unsigned long lpcr;
1413
1414                                 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1415                                         (VRMA_VSID << SLB_VSID_SHIFT_1T);
1416                                 lpcr = senc << (LPCR_VRMASD_SH - 4);
1417                                 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
1418                                 hpte_setup = 1;
1419                         }
1420                         ++i;
1421                         hptp += 2;
1422                 }
1423
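                     /* The n_invalid entries carry no payload; just clear them. */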
1424                 for (j = 0; j < hdr.n_invalid; ++j) {
1425                         if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1426                                 kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1427                         ++i;
1428                         hptp += 2;
1429                 }
1430                 err = 0;
1431         }
1432
1433  out:
1434         /* Order HPTE updates vs. hpte_setup_done */
1435         smp_wmb();
1436         kvm->arch.hpte_setup_done = hpte_setup;
1437         mutex_unlock(&kvm->lock);
1438
1439         if (err)
1440                 return err;
1441         return nb;
1442 }
1443
1444 static int kvm_htab_release(struct inode *inode, struct file *filp)
1445 {
1446         struct kvm_htab_ctx *ctx = filp->private_data;
1447
1448         filp->private_data = NULL;
1449         if (!(ctx->flags & KVM_GET_HTAB_WRITE))
1450                 atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
1451         kvm_put_kvm(ctx->kvm);
1452         kfree(ctx);
1453         return 0;
1454 }
1455
1456 static const struct file_operations kvm_htab_fops = {
1457         .read           = kvm_htab_read,
1458         .write          = kvm_htab_write,
1459         .llseek         = default_llseek,
1460         .release        = kvm_htab_release,
1461 };
1462
1463 int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
1464 {
1465         int ret;
1466         struct kvm_htab_ctx *ctx;
1467         int rwflag;
1468
1469         /* reject flags we don't recognize */
1470         if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
1471                 return -EINVAL;
1472         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1473         if (!ctx)
1474                 return -ENOMEM;
1475         kvm_get_kvm(kvm);
1476         ctx->kvm = kvm;
1477         ctx->index = ghf->start_index;
1478         ctx->flags = ghf->flags;
1479         ctx->first_pass = 1;
1480
1481         rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
1482         ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
1483         if (ret < 0) {
1484                 kvm_put_kvm(kvm);
1485                 return ret;
1486         }
1487
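             /*
              * Only readers (i.e. migration) need the real-mode hcalls to
              * track modified HPTEs, so bump hpte_mod_interest for
              * read-only fds only.
              */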
1488         if (rwflag == O_RDONLY) {
1489                 mutex_lock(&kvm->slots_lock);
1490                 atomic_inc(&kvm->arch.hpte_mod_interest);
1491                 /* make sure kvmppc_do_h_enter etc. see the increment */
1492                 synchronize_srcu_expedited(&kvm->srcu);
1493                 mutex_unlock(&kvm->slots_lock);
1494         }
1495
1496         return ret;
1497 }
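
     /*
      * Illustrative userspace sketch (not part of this file): one way a VMM
      * might use the KVM_PPC_GET_HTAB_FD ioctl that lands here.  "vmfd" is
      * assumed to be an already-created KVM VM fd; error handling and the
      * restore (KVM_GET_HTAB_WRITE) direction are omitted.  Each chunk read
      * back is a sequence of struct kvm_get_htab_header records, each
      * followed by n_valid 16-byte HPTEs, as produced by kvm_htab_read().
      *
      *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
      *	char buf[65536];
      *	ssize_t nb;
      *	int htab_fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
      *
      *	if (htab_fd >= 0) {
      *		while ((nb = read(htab_fd, buf, sizeof(buf))) > 0) {
      *			... parse the kvm_get_htab_header records in buf[0..nb) ...
      *		}
      *		close(htab_fd);
      *	}
      */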
1498
1499 struct debugfs_htab_state {
1500         struct kvm      *kvm;
1501         struct mutex    mutex;
1502         unsigned long   hpt_index;
1503         int             chars_left;
1504         int             buf_index;
1505         char            buf[64];
1506 };
1507
1508 static int debugfs_htab_open(struct inode *inode, struct file *file)
1509 {
1510         struct kvm *kvm = inode->i_private;
1511         struct debugfs_htab_state *p;
1512
1513         p = kzalloc(sizeof(*p), GFP_KERNEL);
1514         if (!p)
1515                 return -ENOMEM;
1516
1517         kvm_get_kvm(kvm);
1518         p->kvm = kvm;
1519         mutex_init(&p->mutex);
1520         file->private_data = p;
1521
1522         return nonseekable_open(inode, file);
1523 }
1524
1525 static int debugfs_htab_release(struct inode *inode, struct file *file)
1526 {
1527         struct debugfs_htab_state *p = file->private_data;
1528
1529         kvm_put_kvm(p->kvm);
1530         kfree(p);
1531         return 0;
1532 }
1533
1534 static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
1535                                  size_t len, loff_t *ppos)
1536 {
1537         struct debugfs_htab_state *p = file->private_data;
1538         ssize_t ret, r;
1539         unsigned long i, n;
1540         unsigned long v, hr, gr;
1541         struct kvm *kvm;
1542         __be64 *hptp;
1543
1544         ret = mutex_lock_interruptible(&p->mutex);
1545         if (ret)
1546                 return ret;
1547
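             /* First flush anything left over from a previous partial line. */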
1548         if (p->chars_left) {
1549                 n = p->chars_left;
1550                 if (n > len)
1551                         n = len;
1552                 r = copy_to_user(buf, p->buf + p->buf_index, n);
1553                 n -= r;
1554                 p->chars_left -= n;
1555                 p->buf_index += n;
1556                 buf += n;
1557                 len -= n;
1558                 ret = n;
1559                 if (r) {
1560                         if (!n)
1561                                 ret = -EFAULT;
1562                         goto out;
1563                 }
1564         }
1565
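             /*
              * Walk the HPT from where we left off, emitting one line per
              * valid entry: index, the HPTE's first and second doublewords,
              * and the guest view of the second doubleword from the revmap
              * array.
              */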
1566         kvm = p->kvm;
1567         i = p->hpt_index;
1568         hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
1569         for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
1570                 if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
1571                         continue;
1572
1573                 /* lock the HPTE so it's stable and read it */
1574                 preempt_disable();
1575                 while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1576                         cpu_relax();
1577                 v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
1578                 hr = be64_to_cpu(hptp[1]);
1579                 gr = kvm->arch.revmap[i].guest_rpte;
1580                 unlock_hpte(hptp, v);
1581                 preempt_enable();
1582
1583                 if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
1584                         continue;
1585
1586                 n = scnprintf(p->buf, sizeof(p->buf),
1587                               "%6lx %.16lx %.16lx %.16lx\n",
1588                               i, v, hr, gr);
1589                 p->chars_left = n;
1590                 if (n > len)
1591                         n = len;
1592                 r = copy_to_user(buf, p->buf, n);
1593                 n -= r;
1594                 p->chars_left -= n;
1595                 p->buf_index = n;
1596                 buf += n;
1597                 len -= n;
1598                 ret += n;
1599                 if (r) {
1600                         if (!ret)
1601                                 ret = -EFAULT;
1602                         goto out;
1603                 }
1604         }
1605         p->hpt_index = i;
1606
1607  out:
1608         mutex_unlock(&p->mutex);
1609         return ret;
1610 }
1611
1612 static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
1613                                   size_t len, loff_t *ppos)
1614 {
1615         return -EACCES;
1616 }
1617
1618 static const struct file_operations debugfs_htab_fops = {
1619         .owner   = THIS_MODULE,
1620         .open    = debugfs_htab_open,
1621         .release = debugfs_htab_release,
1622         .read    = debugfs_htab_read,
1623         .write   = debugfs_htab_write,
1624         .llseek  = generic_file_llseek,
1625 };
1626
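     /*
      * Create a read-only "htab" file in the VM's debugfs directory so the
      * host administrator can dump the guest hashed page table.
      */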
1627 void kvmppc_mmu_debugfs_init(struct kvm *kvm)
1628 {
1629         kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
1630                                                     kvm->arch.debugfs_dir, kvm,
1631                                                     &debugfs_htab_fops);
1632 }
1633
1634 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
1635 {
1636         struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
1637
1638         vcpu->arch.slb_nr = 32;         /* POWER7/POWER8 */
1639
1640         mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
1641         mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
1642
1643         vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
1644 }