/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
#define kvm_pud_huge(_x)	pud_huge(_x)

#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)

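/*
 * memslot_is_logging - true if dirty page logging is enabled for the slot,
 * i.e. it has a dirty bitmap allocated and is not a read-only slot.
 */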
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm: pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm: pointer to kvm structure.
 * @addr: IPA
 * @pmd: pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
 * pages in the range dirty.
 */
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
{
	if (!kvm_pmd_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pmd));
}

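/*
 * Fill the given memory cache with pre-allocated page-table pages so that
 * later stage-2 table allocations can be satisfied without sleeping while
 * the mmu_lock is held.
 */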
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

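/*
 * The clear_*_entry helpers unlink an empty lower-level table from its parent
 * entry, flush the TLB for the covered IPA, free the table page and drop the
 * reference that mapping it took on the parent table page.
 */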
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
	pgd_clear(pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(kvm_pmd_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

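/*
 * The unmap_ptes/unmap_pmds/unmap_puds walkers below each handle one level of
 * the page tables: they clear entries in the given address range and free any
 * table that becomes empty as a result.
 */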
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}

static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}

static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				pud_clear(pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pud));
			} else {
				unmap_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (kvm_pud_table_empty(kvm, start_pud))
		clear_pgd_entry(kvm, pgd, start_addr);
}

static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

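/*
 * The stage2_flush_* walkers below clean the data cache to the point of
 * coherency for every page currently mapped at stage 2, using the
 * corresponding userspace (hva) alias of each page.
 */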
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
			kvm_flush_dcache_to_poc((void *)hva, PAGE_SIZE);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PUD_SIZE);
			} else {
				stage2_flush_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	free_page((unsigned long)init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

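/*
 * The create_hyp_*_mappings helpers below populate one level of the HYP page
 * tables each, allocating missing intermediate tables on the way and cleaning
 * every updated descriptor to the point of coherency.
 */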
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);

		if (pgd_none(*pgd)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				err = -ENOMEM;
				goto out;
			}
			pgd_populate(NULL, pgd, pud);
			get_page(virt_to_page(pgd));
			kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, virt_addr,
					    virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from: The kernel start VA of the range
 * @to: The kernel end VA of the range (exclusive)
 * @phys_addr: The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm: The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	int ret;
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	if (KVM_PREALLOC_LEVEL > 0) {
		/*
		 * Allocate fake pgd for the page table manipulation macros to
		 * work.  This is not used by the hardware and we have no
		 * alignment requirement for this allocation.
		 */
		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
				       GFP_KERNEL | __GFP_ZERO);
	} else {
		/*
		 * Allocate actual first-level Stage-2 page table used by the
		 * hardware for Stage-2 page table walks.
		 */
		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	}

	if (!pgd)
		return -ENOMEM;

	ret = kvm_prealloc_hwpgd(kvm, pgd);
	if (ret)
		goto out_err;

	kvm->arch.pgd = pgd;
	return 0;
out_err:
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
	else
		free_pages((unsigned long)pgd, S2_PGD_ORDER);
	return ret;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm: The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size: The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm: The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(kvm->arch.pgd);
	else
		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

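/*
 * stage2_get_pud/stage2_get_pmd walk the stage-2 tables for @addr, allocating
 * missing intermediate tables from @cache (pre-filled via
 * mmu_topup_memory_cache) when a cache is provided.
 */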
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

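/*
 * Install @new_pmd as a block (huge) stage-2 mapping for @addr, invalidating
 * the TLB if it replaces an existing mapping.
 */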
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault.  If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should be split
	 * first, unmapped, merged, and mapped back in on-demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte,
			  unsigned long flags)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;
	bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
	bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;

	VM_BUG_ON(logging_active && !cache);

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging - dissolve huge PMD, then continue on to
	 * allocate page.
	 */
	if (logging_active)
		stage2_dissolve_pmd(kvm, addr, pmd);

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm: The KVM pointer
 * @guest_ipa: The IPA at which to insert the mapping
 * @pa: The physical address of the device
 * @size: The size of the mapping
 * @writable: Whether the mapping should be writable
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
					     KVM_NR_MEM_OBJS);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte,
				     KVM_S2PTE_FLAG_IS_IOMAP);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}

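/*
 * Decode whether the trapped access was a write: instruction aborts are never
 * writes; for data aborts, consult the fault syndrome.
 */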
static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

/**
 * stage2_wp_ptes - write protect PMD range
 * @pmd: pointer to pmd entry
 * @addr: range start address
 * @end: range end address
 */
static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

/**
 * stage2_wp_pmds - write protect PUD range
 * @pud: pointer to pud entry
 * @addr: range start address
 * @end: range end address
 */
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);

	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

/**
 * stage2_wp_puds - write protect PGD range
 * @pgd: pointer to pgd entry
 * @addr: range start address
 * @end: range end address
 *
 * Process PUD entries; huge PUDs are not supported and trigger a BUG().
 */
static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			/* TODO: PUD not supported, revisit later if supported */
			BUG_ON(kvm_pud_huge(*pud));
			stage2_wp_pmds(pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm: The KVM pointer
 * @addr: Start address of range
 * @end: End address of range
 */
static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	pgd_t *pgd;
	phys_addr_t next;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs.
		 */
		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
			cond_resched_lock(&kvm->mmu_lock);

		next = kvm_pgd_addr_end(addr, end);
		if (pgd_present(*pgd))
			stage2_wp_puds(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm: The KVM pointer
 * @slot: The memory slot to write protect
 *
 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
 * operation is requested for a memory region. After this function returns,
 * all present PMDs and PTEs in the memory region are write protected and the
 * dirty page log can be read.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm: The KVM pointer
 * @slot: The memory slot associated with mask
 * @gfn_offset: The gfn offset in memory slot
 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
 *        slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must hold kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

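/*
 * user_mem_abort - handle a stage-2 fault on memory backed by a memslot.
 *
 * Resolves the faulting IPA to a host pfn via gfn_to_pfn_prot() and installs
 * a stage-2 PTE or huge PMD for it, taking hugetlbfs/THP backing, device
 * memory and dirty page logging into account.
 */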
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool fault_ipa_uncached;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long flags = 0;

	write_fault = kvm_is_write_fault(vcpu);
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma) && !logging_active) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
	} else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
		force_pte = true;
		flags |= KVM_S2_FLAG_LOGGING_ACTIVE;

		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		if (!write_fault)
			writable = false;
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, mem_type);

		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
			mark_page_dirty(kvm, gfn);
		}
		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu: the VCPU pointer
 * @run: the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or permission fault */
	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

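/*
 * handle_hva_to_gpa - iterate over every memslot that intersects the hva
 * range and invoke @handler on each guest physical page in the intersection.
 * Used to implement the MMU notifier callbacks below.
 */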
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
			      (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	/*
	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
	 * flag clear because MMU notifiers will have unmapped a huge PMD before
	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
	 * therefore stage2_set_pte() never needs to clear out a huge PMD
	 * through this calling path.
	 */
	stage2_set_pte(kvm, NULL, gpa, pte, 0);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

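/*
 * The accessors below export the physical addresses of the runtime and boot
 * HYP page tables and of the idmap vector for the low-level HYP init code.
 */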
phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

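/*
 * kvm_mmu_init - set up the HYP page tables: identity-map the HYP init code
 * (via a bounce page if it crosses a page boundary) and map the trampoline
 * page into both the boot and the runtime HYP tables.
 */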
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = kvm_virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);

	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_wp_memory_region(kvm, mem->slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
		return -EFAULT;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
					 vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
				return -EINVAL;

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		return ret;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	/*
	 * Readonly memslots are not incoherent with the caches by definition,
	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
	 * that the guest may consider devices and hence map as uncached.
	 * To prevent incoherency issues in these cases, tag all readonly
	 * regions as incoherent.
	 */
	if (slot->flags & KVM_MEM_READONLY)
		slot->flags |= KVM_MEMSLOT_INCOHERENT;
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}