arch/powerpc/mm/pgtable-book3s64.c

/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

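/*
 * Registration hook for the process table; assigned during early MMU
 * setup (e.g. by the radix init code) to the native or hypervisor
 * specific implementation.
 */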
int (*register_process_table)(unsigned long base, unsigned long page_size,
                              unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}

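/*
 * Test and clear the accessed (young) bit of a huge pmd, returning its
 * previous state; this simply forwards to the common helper.
 */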
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd entry. This should not be called to update an
 * existing pmd entry; updates must go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!pmd_trans_huge(pmd));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
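        /*
         * Clear _PAGE_PRESENT so that concurrent page table walkers no
         * longer see this as a valid huge pmd.
         */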
        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        kick_all_cpus_sync();
}

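/* OR the protection bits from @pgprot into @pmd. */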
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

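/*
 * Build a huge-page pmd for @pfn: shift the pfn into the RPN field
 * (masked with PTE_RPN_MASK) and apply the protection bits.
 */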
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

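/* Build a huge-page pmd mapping @page with protection @pgprot. */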
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

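/*
 * Change the protection of @pmd: keep only the bits in _HPAGE_CHG_MASK
 * (the pfn plus state such as dirty/accessed that must survive a
 * protection change) and apply @newprot on top.
 */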
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
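        /* Currently a no-op on book3s64; there is nothing to preload here. */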
        return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
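/*
 * Section mappings for memory hotplug are only implemented for the
 * hash MMU here; the radix case returns -ENODEV.
 */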
int create_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return -ENODEV;

        return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return -ENODEV;

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */