/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}
static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        if (current->active_mm == mm) {
                clear_user_asce();
                set_user_asce(mm);
        }
        __tlb_flush_local();
}
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;
        int flush;

        BUG_ON(limit > (1UL << 53));
        flush = 0;
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
                flush = 1;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        if (flush)
                on_each_cpu(__crst_table_upgrade, mm, 0);
        return 0;
}
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        if (current->active_mm == mm)
                set_user_asce(mm);
}
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;
        unsigned long etype, atype;

        if (limit < (1UL << 31)) {
                limit = (1UL << 31) - 1;
                atype = _ASCE_TYPE_SEGMENT;
                etype = _SEGMENT_ENTRY_EMPTY;
        } else if (limit < (1UL << 42)) {
                limit = (1UL << 42) - 1;
                atype = _ASCE_TYPE_REGION3;
                etype = _REGION3_ENTRY_EMPTY;
        } else if (limit < (1UL << 53)) {
                limit = (1UL << 53) - 1;
                atype = _ASCE_TYPE_REGION2;
                etype = _REGION2_ENTRY_EMPTY;
        } else {
                limit = -1UL;
                atype = _ASCE_TYPE_REGION1;
                etype = _REGION1_ENTRY_EMPTY;
        }
        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
        INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        page->index = 0;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, etype);
        gmap->table = table;
        gmap->asce = atype | _ASCE_TABLE_LENGTH |
                _ASCE_USER_BITS | __pa(table);
        gmap->asce_end = limit;
        down_write(&mm->mmap_sem);
        list_add(&gmap->list, &mm->context.gmap_list);
        up_write(&mm->mmap_sem);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
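/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * source): a KVM-like caller creates one gmap per virtual machine and backs
 * guest memory with ranges of its own address space. "vm_start" and the
 * (1UL << 44) limit below are made-up example values.
 *
 *      struct gmap *g = gmap_alloc(current->mm, (1UL << 44) - 1);
 *
 *      if (!g)
 *              return -ENOMEM;
 *      // back 1 MB of guest memory at guest address 0 with the host
 *      // mapping at vm_start (both must be PMD_SIZE aligned)
 *      if (gmap_map_segment(g, vm_start, 0, 1UL << 20)) {
 *              gmap_free(g);
 *              return -ENOMEM;
 *      }
 *      gmap_enable(g);         // register as the current guest address space
 *      ...
 *      gmap_disable(g);
 *      gmap_free(g);
 */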
static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_asce(gmap->mm, gmap->asce);
        else
                __tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        radix_tree_delete(root, index);
                }
        } while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_asce(gmap->mm, gmap->asce);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
                __free_pages(page, ALLOC_ORDER);
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);
        down_write(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        up_write(&gmap->mm->mmap_sem);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
                            unsigned long init, unsigned long gaddr)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        spin_lock(&gmap->mm->page_table_lock);
        if (*table & _REGION_ENTRY_INVALID) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
                page->index = gaddr;
                page = NULL;
        }
        spin_unlock(&gmap->mm->page_table_lock);
        if (page)
                __free_pages(page, ALLOC_ORDER);
        return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
        struct page *page;
        unsigned long offset, mask;

        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
        page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
}
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
        unsigned long *entry;
        int flush = 0;

        spin_lock(&gmap->guest_table_lock);
        entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
        if (entry) {
                flush = (*entry != _SEGMENT_ENTRY_INVALID);
                *entry = _SEGMENT_ENTRY_INVALID;
        }
        spin_unlock(&gmap->guest_table_lock);
        return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE)
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len < from || to + len < to ||
            from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Remove old translation */
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
                /* Store new translation */
                if (radix_tree_insert(&gmap->guest_to_host,
                                      (to + off) >> PMD_SHIFT,
                                      (void *) from + off))
                        break;
        }
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        if (off >= len)
                return 0;
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
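/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * mapping and later unmapping a guest segment. Both addresses and the length
 * must be PMD_SIZE (1 MB) aligned; "guest_gaddr" and "host_vmaddr" are
 * made-up example variables.
 *
 *      // back 16 MB of guest memory at guest_gaddr with host_vmaddr
 *      rc = gmap_map_segment(gmap, host_vmaddr, guest_gaddr, 16UL << 20);
 *      if (rc)
 *              return rc;      // -EINVAL or -ENOMEM
 *      ...
 *      // tear the range down again
 *      rc = gmap_unmap_segment(gmap, guest_gaddr, 16UL << 20);
 */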
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns the user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long)
                radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
        return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns the user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_translate(gmap, gaddr);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                        unsigned long vmaddr)
{
        struct gmap *gmap;
        int flush;

        list_for_each_entry(gmap, &mm->context.gmap_list, list) {
                flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
                if (flush)
                        gmap_flush_tlb(gmap);
        }
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
        struct mm_struct *mm;
        unsigned long *table;
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int rc;

        /* Create higher level tables in the gmap page table */
        table = gmap->table;
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
                table += (gaddr >> 53) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
                                     gaddr & 0xffe0000000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
                table += (gaddr >> 42) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
                                     gaddr & 0xfffffc0000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
                table += (gaddr >> 31) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
                                     gaddr & 0xffffffff80000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        table += (gaddr >> 20) & 0x7ff;
        /* Walk the parent mm page table */
        mm = gmap->mm;
        pgd = pgd_offset(mm, vmaddr);
        VM_BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* large pmds cannot yet be handled */
        if (pmd_large(*pmd))
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
                return rc;
        ptl = pmd_lock(mm, pmd);
        spin_lock(&gmap->guest_table_lock);
        if (*table == _SEGMENT_ENTRY_INVALID) {
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc)
                        *table = pmd_val(*pmd);
        } else
                rc = 0;
        spin_unlock(&gmap->guest_table_lock);
        spin_unlock(ptl);
        radix_tree_preload_end();
        return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        unsigned long vmaddr;
        int rc;

        down_read(&gmap->mm->mmap_sem);
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr)) {
                rc = vmaddr;
                goto out_up;
        }
        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
                rc = -EFAULT;
                goto out_up;
        }
        rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
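/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * a guest page fault handler would let gmap_fault() fault in and link the
 * backing host page. "guest_fault_addr" and "inject_addressing_exception()"
 * are made-up example names.
 *
 *      rc = gmap_fault(gmap, guest_fault_addr, FAULT_FLAG_WRITE);
 *      if (rc == -EFAULT)
 *              inject_addressing_exception();  // no gmap segment backs gaddr
 *      else if (rc)
 *              return rc;                      // e.g. -ENOMEM
 */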
static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
                struct page *page = migration_entry_to_page(entry);

                if (PageAnon(page))
                        dec_mm_counter(mm, MM_ANONPAGES);
                else
                        dec_mm_counter(mm, MM_FILEPAGES);
        }
        free_swap_and_cache(entry);
}
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr, ptev, pgstev;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        pgste_t pgste;

        /* Find the vm address for the guest address */
        vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (!vmaddr)
                return;
        vmaddr |= gaddr & ~PMD_MASK;
        /* Get pointer to the page table entry */
        ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
        if (unlikely(!ptep))
                return;
        pte = *ptep;
        if (!pte_swap(pte))
                goto out_pte;
        /* Zap unused and logically-zero pages */
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        ptev = pte_val(pte);
        if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
            ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
                gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
                pte_clear(gmap->mm, vmaddr, ptep);
        }
        pgste_set_unlock(ptep, pgste);
out_pte:
        pte_unmap_unlock(ptep, ptl);
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
        unsigned long gaddr, vmaddr, size;
        struct vm_area_struct *vma;

        down_read(&gmap->mm->mmap_sem);
        for (gaddr = from; gaddr < to;
             gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
                /* Find the vm address for the guest address */
                vmaddr = (unsigned long)
                        radix_tree_lookup(&gmap->guest_to_host,
                                          gaddr >> PMD_SHIFT);
                if (!vmaddr)
                        continue;
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size, NULL);
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_add(&nb->list, &gmap_notifier_list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_del_init(&nb->list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
        unsigned long addr;
        spinlock_t *ptl;
        pte_t *ptep, entry;
        pgste_t pgste;
        int rc = 0;

        if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
                return -EINVAL;
        down_read(&gmap->mm->mmap_sem);
        while (len) {
                /* Convert gmap address and connect the page tables */
                addr = __gmap_translate(gmap, gaddr);
                if (IS_ERR_VALUE(addr)) {
                        rc = addr;
                        break;
                }
                /* Get the page mapped */
                if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
                        rc = -EFAULT;
                        break;
                }
                rc = __gmap_link(gmap, gaddr, addr);
                if (rc)
                        break;
                /* Walk the process page table, lock and get pte pointer */
                ptep = get_locked_pte(gmap->mm, addr, &ptl);
                VM_BUG_ON(!ptep);
                /* Set notification bit in the pgste of the pte */
                entry = *ptep;
                if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
                        pgste = pgste_get_lock(ptep);
                        pgste_val(pgste) |= PGSTE_IN_BIT;
                        pgste_set_unlock(ptep, pgste);
                        gaddr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                }
                pte_unmap_unlock(ptep, ptl);
        }
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
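/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * a consumer registers a notifier and arms a guest range so it learns when
 * the host invalidates those ptes. The callback name and the two-page range
 * are made-up example values.
 *
 *      static void my_invalidation_cb(struct gmap *gmap, unsigned long gaddr)
 *      {
 *              // called under gmap_notifier_lock for each armed pte
 *      }
 *
 *      static struct gmap_notifier my_nb = {
 *              .notifier_call = my_invalidation_cb,
 *      };
 *
 *      gmap_register_ipte_notifier(&my_nb);
 *      rc = gmap_ipte_notify(gmap, guest_gaddr, 2 * PAGE_SIZE);
 */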
/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
        unsigned long offset, gaddr;
        unsigned long *table;
        struct gmap_notifier *nb;
        struct gmap *gmap;

        offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
        offset = offset * (4096 / sizeof(pte_t));
        spin_lock(&gmap_notifier_lock);
        list_for_each_entry(gmap, &mm->context.gmap_list, list) {
                table = radix_tree_lookup(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (!table)
                        continue;
                gaddr = __gmap_segment_gaddr(table) + offset;
                list_for_each_entry(nb, &gmap_notifier_list, list)
                        nb->notifier_call(gmap, gaddr);
        }
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
static inline int page_table_with_pgste(struct page *page)
{
        return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
        struct page *page;
        unsigned long *table;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        atomic_set(&page->_mapcount, 0);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        __free_page(page);
}
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq)
{
        spinlock_t *ptl;
        pgste_t old, new;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
retry:
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        if (!(pte_val(*ptep) & _PAGE_INVALID) &&
             (pte_val(*ptep) & _PAGE_PROTECT)) {
                pte_unmap_unlock(ptep, ptl);
                if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                goto retry;
        }

        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
                            PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                unsigned long address, bits, skey;

                address = pte_val(*ptep) & PAGE_MASK;
                skey = (unsigned long) page_get_storage_key(address);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
                page_set_storage_key(address, skey, !nq);
                /* Merge host changed & referenced into pgste */
                pgste_val(new) |= bits << 52;
        }
        /* changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) &
            (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;
        uint64_t physaddr;
        unsigned long key = 0;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        pgste = pgste_get_lock(ptep);

        if (pte_val(*ptep) & _PAGE_INVALID) {
                key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
                key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
                key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
                key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
        } else {
                physaddr = pte_val(*ptep) & PAGE_MASK;
                key = page_get_storage_key(physaddr);

                /* Reflect guest's logical view, not physical */
                if (pgste_val(pgste) & PGSTE_GR_BIT)
                        key |= _PAGE_REFERENCED;
                if (pgste_val(pgste) & PGSTE_GC_BIT)
                        key |= _PAGE_CHANGED;
        }

        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
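/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * code emulating the SSKE/ISKE storage key instructions for a guest would
 * use these helpers roughly as follows; "hva" and "guest_key" are made-up
 * example variables.
 *
 *      // write the guest view of the storage key; nq=false: quiescing update
 *      rc = set_guest_storage_key(current->mm, hva, guest_key, false);
 *      if (rc)
 *              return rc;
 *      // read it back, merged with the host's referenced/changed state
 *      guest_key = get_guest_storage_key(current->mm, hva);
 */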
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
        {
                .procname       = "allocate_pgste",
                .data           = &page_table_allocate_pgste,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO | S_IWUSR,
                .proc_handler   = proc_dointvec,
                .extra1         = &page_table_allocate_pgste_min,
                .extra2         = &page_table_allocate_pgste_max,
        },
        { }
};

static struct ctl_table page_table_sysctl_dir[] = {
        {
                .procname       = "vm",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = page_table_sysctl,
        },
        { }
};

static int __init page_table_register_sysctl(void)
{
        return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
        return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                        unsigned long vmaddr)
{
}

#endif /* CONFIG_PGSTE */
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        unsigned long *uninitialized_var(table);
        struct page *uninitialized_var(page);
        unsigned int mask, bit;

        if (mm_alloc_pgste(mm))
                return page_table_alloc_pgste(mm);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (page_table_with_pgste(page))
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}
static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                         unsigned long vmaddr)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (page_table_with_pgste(page)) {
                gmap_unlink(mm, table, vmaddr);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}
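/*
 * Worked example (added for clarity, not part of the original source): for
 * non-pgste mms a 4K page holds two 2K page table fragments, tracked in the
 * low byte of page->_mapcount. Bits 0/1 mean "lower/upper 2K half in use",
 * bits 4/5 mean "half queued for RCU freeing". Freeing the lower half via
 * page_table_free_rcu() with both halves in use goes through these states:
 *
 *      _mapcount = 0x03                        // both 2K fragments allocated
 *      atomic_xor_bits(&page->_mapcount, 0x01 | 0x10)
 *      _mapcount = 0x12                        // lower half pending, upper in use
 *      // after the grace period __tlb_remove_table() xors out bit 4:
 *      _mapcount = 0x02                        // only the upper half remains
 */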
static void __tlb_remove_table(void *_table)
{
        const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}
static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_flush_mmu(tlb);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
                follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
                thp_split_vma(vma);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
        }
        mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Switch on pgstes for the current userspace process (needed by KVM).
 */
int s390_enable_sie(void)
{
        struct mm_struct *mm = current->mm;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(mm))
                return 0;
        /* Fail if the page tables are 2K */
        if (!mm_alloc_pgste(mm))
                return -EINVAL;
        down_write(&mm->mmap_sem);
        mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        up_write(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
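/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * a hypervisor-style caller enables SIE for the current process early during
 * VM creation, after the vm.allocate_pgste sysctl (see page_table_sysctl
 * above) made page_table_alloc() hand out full 4K page tables with pgstes:
 *
 *      rc = s390_enable_sie();
 *      if (rc)
 *              return rc;      // -EINVAL: mm was created with 2K page tables
 */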
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        unsigned long ptev;
        pgste_t pgste;

        pgste = pgste_get_lock(pte);
        /*
         * Remove all zero page mappings; with a policy that forbids
         * zero page mappings, subsequent faults for such a page will
         * get fresh anonymous pages.
         */
        if (is_zero_pfn(pte_pfn(*pte))) {
                ptep_flush_direct(walk->mm, addr, pte);
                pte_val(*pte) = _PAGE_INVALID;
        }
        /* Clear storage key */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                              PGSTE_GR_BIT | PGSTE_GC_BIT);
        ptev = pte_val(*pte);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
        pgste_set_unlock(pte, pgste);
        return 0;
}

int s390_enable_skey(void)
{
        struct mm_walk walk = { .pte_entry = __s390_enable_skey };
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc = 0;

        down_write(&mm->mmap_sem);
        if (mm_use_skey(mm))
                goto out_up;

        mm->context.use_skey = 1;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                MADV_UNMERGEABLE, &vma->vm_flags)) {
                        mm->context.use_skey = 0;
                        rc = -ENOMEM;
                        goto out_up;
                }
        }
        mm->def_flags &= ~VM_MERGEABLE;

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        pgste_t pgste;

        pgste = pgste_get_lock(pte);
        pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
        pgste_set_unlock(pte, pgste);
        return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
        struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

        down_write(&mm->mmap_sem);
        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
        up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
        pte_t *pte;
        spinlock_t *ptl;
        bool dirty = false;

        pte = get_locked_pte(gmap->mm, address, &ptl);
        if (unlikely(!pte))
                return false;

        if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
                dirty = true;

        spin_unlock(ptl);
        return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
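/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * dirty logging for live migration would walk the host addresses backing a
 * guest memory slot and collect the per-page dirty state; "hva", "slot_start",
 * "slot_end" and "mark_page_dirty()" are made-up example names.
 *
 *      for (hva = slot_start; hva < slot_end; hva += PAGE_SIZE)
 *              if (gmap_test_and_clear_dirty(hva, gmap))
 *                      mark_page_dirty(hva);
 */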
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        /*
         * No need to flush the TLB; on s390 the reference bits live in the
         * storage key, never in the TLB.
         */
        return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_same(*pmdp, entry))
                return 0;
        pmdp_invalidate(vma, address, pmdp);
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
                              (unsigned long *) pmdp)) {
                /* need to serialize against gup-fast (IRQ disabled) */
                smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
        }
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
        pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */