[PATCH] mm: zap_pte_range dont dirty anon
diff --git a/mm/memory.c b/mm/memory.c
index ae8161f1f4595bd4d58afa0b20ecf4419ff23e71..fd5d4c6dc762c83f0a3ce7b808824f38b40db6a2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -410,7 +410,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 {
        pte_t *src_pte, *dst_pte;
        unsigned long vm_flags = vma->vm_flags;
-       int progress;
+       int progress = 0;
 
 again:
        dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
@@ -418,17 +418,19 @@ again:
                return -ENOMEM;
        src_pte = pte_offset_map_nested(src_pmd, addr);
 
-       progress = 0;
        spin_lock(&src_mm->page_table_lock);
        do {
                /*
                 * We are holding two locks at this point - either of them
                 * could generate latencies in another task on another CPU.
                 */
-               if (progress >= 32 && (need_resched() ||
-                   need_lockbreak(&src_mm->page_table_lock) ||
-                   need_lockbreak(&dst_mm->page_table_lock)))
-                       break;
+               if (progress >= 32) {
+                       progress = 0;
+                       if (need_resched() ||
+                           need_lockbreak(&src_mm->page_table_lock) ||
+                           need_lockbreak(&dst_mm->page_table_lock))
+                               break;
+               }
                if (pte_none(*src_pte)) {
                        progress++;
                        continue;
@@ -572,12 +574,14 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                                addr) != page->index)
                                set_pte_at(tlb->mm, addr, pte,
                                           pgoff_to_pte(page->index));
-                       if (pte_dirty(ptent))
-                               set_page_dirty(page);
                        if (PageAnon(page))
                                dec_mm_counter(tlb->mm, anon_rss);
-                       else if (pte_young(ptent))
-                               mark_page_accessed(page);
+                       else {
+                               if (pte_dirty(ptent))
+                                       set_page_dirty(page);
+                               if (pte_young(ptent))
+                                       mark_page_accessed(page);
+                       }
                        tlb->freed++;
                        page_remove_rmap(page);
                        tlb_remove_page(tlb, page);
@@ -2045,8 +2049,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
        inc_page_state(pgfault);
 
-       if (is_vm_hugetlb_page(vma))
-               return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+       if (unlikely(is_vm_hugetlb_page(vma)))
+               return hugetlb_fault(mm, vma, address, write_access);
 
        /*
         * We need the page table lock to synchronize with kswapd
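The copy_pte_range() hunk above reworks a lock-break pattern: PTEs are copied in batches of 32, and only after a full batch does the code check whether it should drop both page_table_locks and reschedule before carrying on (resetting progress first, so the check is not re-run on every subsequent PTE). Below is a minimal userspace sketch of the same idea, using a pthread mutex in place of the kernel spinlocks; all names here are illustrative and are not taken from mm/memory.c.

/*
 * Userspace analogue of the lock-break pattern in copy_pte_range():
 * process entries in batches of 32 and, when a batch completes, drop
 * the lock briefly so that other threads waiting on it are not held
 * off for the whole copy.  Illustrative sketch only, not kernel code.
 */
#include <pthread.h>
#include <stddef.h>

#define BATCH 32

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Copy src[0..n) to dst[0..n) without holding table_lock for too long. */
void copy_entries(long *dst, const long *src, size_t n)
{
	size_t i = 0;
	int progress = 0;

	pthread_mutex_lock(&table_lock);
	while (i < n) {
		if (progress >= BATCH) {
			progress = 0;
			/* Lock break: let any waiters run, then continue. */
			pthread_mutex_unlock(&table_lock);
			pthread_mutex_lock(&table_lock);
		}
		dst[i] = src[i];
		i++;
		progress++;
	}
	pthread_mutex_unlock(&table_lock);
}

Unlike the kernel code, which breaks out of the loop and restarts the walk via the again: label after unmapping the PTE pages, this sketch simply releases and reacquires the mutex in place; the batching and the "check only every 32 entries" structure are the part being illustrated.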