powerpc/THP: Add code to handle HPTE faults for hugepages
arch/powerpc/mm/hugepage-hash64.c
/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

/*
 * PPC64 THP Support for hash based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>

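/*
 * __hash_page_thp - handle a hash fault against a transparent hugepage
 *
 * Called from the hash fault path when @ea falls inside a trans-huge
 * PMD mapping.  With MPSS (multiple page sizes per segment) each base
 * page inside the 16M hugepage gets its own HPTE, hashed with the base
 * page size @psize; the slot of every inserted HPTE is remembered in
 * the PMD's hpte_slot_array so it can be found again on update or
 * invalidate.
 *
 * Returns 0 if the faulting access should simply be retried (the PMD
 * was busy, or the HPTE is now in place), 1 if a Linux page fault must
 * be taken, and -1 on hypervisor failure.
 */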
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
		    unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * atomically mark the linux large page PMD busy and dirty
	 */
	do {
		old_pmd = pmd_val(*pmdp);
		/* If PMD busy, retry the access */
		if (unlikely(old_pmd & _PAGE_BUSY))
			return 0;
		/* If PMD permissions don't match, take page fault */
		if (unlikely(access & ~old_pmd))
			return 1;
		/*
		 * Try to lock the PMD, add ACCESSED and DIRTY if it was
		 * a write access
		 */
		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pmd |= _PAGE_DIRTY;
	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
					  old_pmd, new_pmd));
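	/*
	 * From here on _PAGE_BUSY acts as a lock on the PMD: concurrent
	 * hash faults back off above, so hpte_slot_array can be updated
	 * without further atomics.  Every exit path below drops the bit
	 * again, either by restoring old_pmd or via the final store.
	 */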
	/*
	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	rflags = new_pmd & _PAGE_USER;
	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
					(new_pmd & _PAGE_DIRTY)))
		rflags |= 0x1;
	/*
	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
	 */
	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
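	/*
	 * The resulting PP value is 0x2 (user read/write) or 0x3 (user
	 * read-only).  A writable but not yet dirty page is mapped
	 * read-only here, so the first store faults again and only then
	 * picks up _PAGE_DIRTY and a read/write HPTE.
	 */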

#if 0
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
	}
#endif
	/*
	 * Find the slot index details for this ea, using base page size.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
	BUG_ON(index >= 4096);
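	/*
	 * A 16M hugepage contains at most 16M / 4K = 4096 base pages,
	 * so index always fits the hpte_slot_array bound checked above.
	 */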

	vpn = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);
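	/*
	 * hpte_slot_array holds one byte per base-page index: a valid
	 * bit plus the hash cookie (secondary bit and slot within the
	 * group) needed to find the HPTE again later.
	 */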

	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hidx = hpte_hash_index(hpte_slot_array, index);
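		/*
		 * hidx records which bucket the HPTE went into; the
		 * secondary bucket is hashed at the bitwise complement
		 * of the primary hash, matching the insert path below.
		 */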
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
					   psize, lpsize, ssize, local);
		/*
		 * We failed to update, try to insert a new entry.
		 */
		if (ret == -1) {
			/*
			 * The hugepage PMD is marked busy, so we can be
			 * sure nobody else is looking at hpte_slot_array;
			 * hence we can safely update it here.
			 */
			valid = 0;
			new_pmd &= ~_PAGE_HPTEFLAGS;
			hpte_slot_array[index] = 0;
		} else
			/* clear the busy bits and set the hash pte bits */
			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
	}

	if (!valid) {
		unsigned long hpte_group;

		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
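		/*
		 * pa is the start of the 16M hugepage: the HPTE's real
		 * address covers the whole hugepage, so the same pa is
		 * used for every base-page index.
		 */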
repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* clear the busy bits and set the hash pte bits */
		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  psize, lpsize, ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
						  rflags, HPTE_V_SECONDARY,
						  psize, lpsize, ssize);
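			/*
			 * Both buckets full: evict a pseudo-randomly
			 * chosen entry (the timebase low bit picks the
			 * primary or secondary group) and retry.
			 */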
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore the old pmd and return -1,
		 * similar to __hash_page_*().
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * The hugepage PMD is marked busy, so we can be sure
		 * nobody else is looking at hpte_slot_array; hence we
		 * can safely update it here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * No need to use ldarx/stdcx here: we still hold the _PAGE_BUSY
	 * lock, so nobody else can modify the PMD under us; this plain
	 * store publishes the new flags and clears _PAGE_BUSY.
	 */
	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
	return 0;
}
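
/*
 * Illustrative only, not part of this file: the generic hash fault
 * path is expected to dispatch here when it finds a trans-huge PMD,
 * roughly as below (caller names assumed, see hash_page()):
 *
 *	if (pmd_trans_huge(*(pmd_t *)ptep))
 *		rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
 *				     trap, local, ssize, psize);
 */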