/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

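/*
 * Walk the existing page tables and return the pmd covering @addr,
 * or NULL if any intermediate level is missing or bad.
 */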
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

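/*
 * Allocate any missing pud/pmd levels covering @addr (the destination
 * of the move); returns the pmd, or NULL on allocation failure.
 */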
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft-dirty bit so userspace can notice that
	 * the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
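
/*
 * For context, a minimal userspace sketch (not part of this file) of how
 * the soft-dirty bit becomes visible; it assumes the interface described
 * in Documentation/vm/soft-dirty.txt, where bit 55 of a 64-bit
 * /proc/pid/pagemap entry is the soft-dirty bit and writing "4" to
 * /proc/pid/clear_refs clears it (includes and error handling omitted):
 *
 *	uint64_t entry;
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	off_t off = (addr / sysconf(_SC_PAGESIZE)) * sizeof(entry);
 *	pread(fd, &entry, sizeof(entry), off);
 *	int soft_dirty = (entry >> 55) & 1;	/* set again once ptes move */
 */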

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}

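/*
 * Cap how much one move_ptes() call moves, so the page table locks are
 * not held for too long between the cond_resched() calls in
 * move_page_tables() below.
 */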
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				moved = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
				if (moved) {
					need_flush = true;
					continue;
				}
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end - len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

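/*
 * Move a vma (or the part of it covering [old_addr, old_addr + old_len))
 * to new_addr: copy the vma, move its page tables, then unmap the old
 * range, keeping memory accounting and locked_vm consistent.
 */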
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap, which
	 * may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from the new area to the old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so the old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do the total_vm
	 * increment since do_munmap() will decrement it by old_len ==
	 * new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore the high watermark afterwards: if
	 * stats are taken meanwhile, total_vm and hiwater_vm appear too
	 * high.  If this were a serious issue, we'd add a flag to
	 * do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma are left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

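/*
 * Look up and sanity-check the vma at @addr for a resize from @old_len
 * to @new_len; on success, the number of newly charged pages (for
 * VM_ACCOUNT vmas) is returned in *p.
 */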
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

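/* Handle MREMAP_FIXED: move and/or resize to a caller-chosen new_addr. */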
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (offset_in_page(ret))
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!offset_in_page(ret))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

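/*
 * Can @vma grow in place by @delta bytes without overflowing, running
 * into the next vma, or being refused by get_unmapped_area()?
 */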
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for the DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages. do_munmap does all the needed
	 * commit accounting.
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len extends exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (offset_in_page(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}
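
/*
 * For reference, a minimal userspace sketch of calling the syscall
 * implemented above (illustrative only, not part of this file's build;
 * glibc needs _GNU_SOURCE for the mremap() prototype). It grows an
 * anonymous mapping, letting the kernel move it if it cannot be
 * expanded in place:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	static void *grow_mapping(void *old, size_t old_len, size_t new_len)
 *	{
 *		void *p = mremap(old, old_len, new_len, MREMAP_MAYMOVE);
 *		return (p == MAP_FAILED) ? NULL : p;	/* errno has the cause */
 *	}
 */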