/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "icswx.h"

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
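
/*
 * Allocate a context id in the range [min_id, max_id] from the shared
 * IDA. ida_pre_get() preallocates memory outside the lock; an id that
 * overshoots max_id is removed again and -ENOMEM returned.
 */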
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
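
/*
 * Reserve a specific context id rather than the next free one. The id
 * is expected to still be free: if ida_get_new_above() hands back
 * anything else, the WARN() below fires.
 */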
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}
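
/*
 * On hash the context id forms part of the VSID, so the usable range
 * depends on how many virtual address bits the MMU implements: the
 * full range with 68-bit VA, a reduced one on older 65-bit VA parts.
 */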
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
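
/*
 * Hash flavour of init_new_context(): allocate a context id and set up
 * slice (segment page size) and subpage protection state for a new mm.
 */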
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * We do switch_slb() early in fork, even before we setup the
	 * mm->context.addr_limit. Default to max task size so that we copy the
	 * default values to paca which will help us handle SLB misses early.
	 */
	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}
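
/*
 * Radix flavour: the context id doubles as the hardware PID, so it is
 * allocated from 1 to PRTB_ENTRIES - 1 (entry 0 is never handed out)
 * and the matching process table entry is pointed at this mm's pgd.
 */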
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index;

	index = alloc_context_id(1, PRTB_ENTRIES - 1);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}
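
/*
 * Common entry point, called at fork/exec time: pick the hash or radix
 * initialisation path, then set up the MMU-independent parts of the
 * context.
 */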
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	return 0;
}
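
/* Give a context id back to the allocator. */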
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);
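
/*
 * With 64K pages a PTE page is carved into PTE_FRAG_NR fragments and
 * mm->context.pte_frag is the cursor to the next unused one. Its
 * offset within the page says how many fragments were handed out, so
 * the references held for the never-used remainder can be dropped in
 * one go before the final put frees the page.
 */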
#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif
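
/*
 * Tear down a context when the mm goes away: drop coprocessor state,
 * invalidate the radix process table entry (or free the hash subpage
 * protection table), release the PTE fragment page and finally return
 * the context id.
 */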
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
	mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}
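
/*
 * Switching a radix context just means programming the PID register.
 * POWER9 DD1 wants isync barriers around the mtspr plus an explicit
 * ERAT flush; later revisions only need an isync after the update.
 */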
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		asm volatile("isync" : : :"memory");
	}
}
#endif