[PATCH] mm: unlink vma before pagetables
author     Hugh Dickins <hugh@veritas.com>
           Sun, 30 Oct 2005 01:16:29 +0000 (18:16 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Sun, 30 Oct 2005 04:40:40 +0000 (21:40 -0700)
In most places the descent from pgd to pud to pmd to pte holds mmap_sem
(exclusively or not), which ensures that free_pgtables cannot be freeing page
tables from any level at the same time.  But truncation and reverse mapping
descend without mmap_sem.

No problem: just make sure that a vma is unlinked from its prio_tree (or
nonlinear list) and from its anon_vma list, after zapping the vma, but before
freeing its page tables.  Then neither vmtruncate nor rmap can reach that vma
whose page tables are now volatile (nor do they need to reach it, since all
its page entries have been zapped by this stage).
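
[Not part of the original commit message: a minimal userspace sketch of the ordering described above, using pthreads and hypothetical toy_* names; the mutex stands in for i_mmap_lock/anon_vma->lock and the long array for the vma's page tables.  The point is only the order in teardown(): zap the entries, unlink the vma from the shared lookup structure under its lock, and only then free the tables, so a concurrent rmap-like walker taking that lock can never reach tables that are being freed.]

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct toy_vma {
	struct toy_vma *next;	/* linked into a shared lookup list */
	long *table;		/* stands in for the vma's page tables */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_vma *vma_list;

static void zap_entries(struct toy_vma *vma)
{
	/* like unmap_vmas(): clear the entries, keep the tables allocated */
	memset(vma->table, 0, 512 * sizeof(long));
}

static void unlink_vma(struct toy_vma *vma)
{
	struct toy_vma **pp;

	/* a concurrent walker would take list_lock before following ->table */
	pthread_mutex_lock(&list_lock);
	for (pp = &vma_list; *pp; pp = &(*pp)->next)
		if (*pp == vma) {
			*pp = vma->next;
			break;
		}
	pthread_mutex_unlock(&list_lock);
}

static void teardown(struct toy_vma *vma)
{
	zap_entries(vma);	/* 1. zap: entries gone, tables still there   */
	unlink_vma(vma);	/* 2. hide the vma before freeing its tables  */
	free(vma->table);	/* 3. safe: nobody can look this vma up now   */
	free(vma);
}

int main(void)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));

	vma->table = calloc(512, sizeof(long));
	vma->next = vma_list;
	vma_list = vma;
	teardown(vma);
	return 0;
}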

The i_mmap_lock and anon_vma->lock already serialize this correctly; but the
locking hierarchy is such that we cannot take them while holding
page_table_lock.  Well, we're trying to push that down anyway.  So in this
patch, move anon_vma_unlink and unlink_file_vma into free_pgtables, at the
same time as moving page_table_lock around calls to unmap_vmas.

tlb_gather_mmu and tlb_finish_mmu then fall outside the page_table_lock, but
we made them preempt_disable and preempt_enable earlier; and a long source
audit of all the architectures has shown no problem with removing
page_table_lock from them.  free_pgtables doesn't need page_table_lock for
itself, nor for what it calls; tlb->mm->nr_ptes is usually protected by
page_table_lock, but partly by non-exclusive mmap_sem - here it's decremented
with exclusive mmap_sem, or mm_users 0.  update_hiwater_rss and
vm_unacct_memory don't need page_table_lock either.
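
[Also not part of the original commit message: a toy model, again with hypothetical names, of the narrowed critical section that results.  Only the step corresponding to unmap_vmas runs under the lock; starting and finishing the gather (the steps that disable/enable preemption and flush in the kernel) sit outside it, as in the zap_page_range and unmap_region hunks below.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

struct toy_gather { int batched; };	/* stands in for struct mmu_gather */

static void toy_gather_start(struct toy_gather *tlb)  { tlb->batched = 0; }
static void toy_unmap(struct toy_gather *tlb)         { tlb->batched += 42; }
static void toy_gather_finish(struct toy_gather *tlb)
{
	printf("flushed %d batched entries\n", tlb->batched);
}

static void toy_zap_range(void)
{
	struct toy_gather tlb;

	toy_gather_start(&tlb);			/* before the patch: inside the lock */
	pthread_mutex_lock(&page_table_lock);
	toy_unmap(&tlb);			/* the only step that needs the lock */
	pthread_mutex_unlock(&page_table_lock);
	toy_gather_finish(&tlb);		/* before the patch: inside the lock */
}

int main(void)
{
	toy_zap_range();
	return 0;
}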

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/memory.c
mm/mmap.c

index 24ba688876d636275811ab8a139e9eb6298059a5..4ea89a2e3a833209561c793e20e00e708769a83a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -260,6 +260,12 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
                struct vm_area_struct *next = vma->vm_next;
                unsigned long addr = vma->vm_start;
 
+               /*
+                * Hide vma from rmap and vmtruncate before freeing pgtables
+                */
+               anon_vma_unlink(vma);
+               unlink_file_vma(vma);
+
                if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
@@ -272,6 +278,8 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
                                                        HPAGE_SIZE)) {
                                vma = next;
                                next = vma->vm_next;
+                               anon_vma_unlink(vma);
+                               unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
@@ -798,12 +806,12 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        }
 
        lru_add_drain();
-       spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);
        update_hiwater_rss(mm);
+       spin_lock(&mm->page_table_lock);
        end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
-       tlb_finish_mmu(tlb, address, end);
        spin_unlock(&mm->page_table_lock);
+       tlb_finish_mmu(tlb, address, end);
        return end;
 }
 
index d931d7e49ac9bd09dd222279113b4829faf764e8..fa35323a3c5b94a9e5290b93fd43f1717b3cb173 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -203,14 +203,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
        struct vm_area_struct *next = vma->vm_next;
 
-       /*
-        * Hide vma from rmap and vmtruncate before freeing page tables:
-        * to be moved into free_pgtables once page_table_lock is lifted
-        * from it, but until then lock ordering forbids that move.
-        */
-       anon_vma_unlink(vma);
-       unlink_file_vma(vma);
-
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
@@ -1679,15 +1671,15 @@ static void unmap_region(struct mm_struct *mm,
        unsigned long nr_accounted = 0;
 
        lru_add_drain();
-       spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);
        update_hiwater_rss(mm);
+       spin_lock(&mm->page_table_lock);
        unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+       spin_unlock(&mm->page_table_lock);
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
-       spin_unlock(&mm->page_table_lock);
 }
 
 /*
@@ -1962,23 +1954,20 @@ void exit_mmap(struct mm_struct *mm)
        unsigned long end;
 
        lru_add_drain();
-
-       spin_lock(&mm->page_table_lock);
-
        flush_cache_mm(mm);
        tlb = tlb_gather_mmu(mm, 1);
        /* Don't update_hiwater_rss(mm) here, do_exit already did */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
+       spin_lock(&mm->page_table_lock);
        end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+       spin_unlock(&mm->page_table_lock);
        vm_unacct_memory(nr_accounted);
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
 
-       spin_unlock(&mm->page_table_lock);
-
        /*
-        * Walk the list again, actually closing and freeing it
-        * without holding any MM locks.
+        * Walk the list again, actually closing and freeing it,
+        * with preemption enabled, without holding any MM locks.
         */
        while (vma)
                vma = remove_vma(vma);