git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/migrate.c
mm/memory-hotplug: switch locking to a percpu rwsem
diff --git a/mm/migrate.c b/mm/migrate.c
index 89a0a1707f4c67deb77dc466cb6f5c2ecfe19051..62767155187356d54d1fa7333ad402e76183ca0b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -227,25 +227,26 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                if (is_write_migration_entry(entry))
                        pte = maybe_mkwrite(pte, vma);
 
+               flush_dcache_page(new);
 #ifdef CONFIG_HUGETLB_PAGE
                if (PageHuge(new)) {
                        pte = pte_mkhuge(pte);
                        pte = arch_make_huge_pte(pte, vma, new, 0);
-               }
-#endif
-               flush_dcache_page(new);
-               set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
-
-               if (PageHuge(new)) {
+                       set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                        if (PageAnon(new))
                                hugepage_add_anon_rmap(new, vma, pvmw.address);
                        else
                                page_dup_rmap(new, true);
-               } else if (PageAnon(new))
-                       page_add_anon_rmap(new, vma, pvmw.address, false);
-               else
-                       page_add_file_rmap(new, false);
+               } else
+#endif
+               {
+                       set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 
+                       if (PageAnon(new))
+                               page_add_anon_rmap(new, vma, pvmw.address, false);
+                       else
+                               page_add_file_rmap(new, false);
+               }
                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                        mlock_vma_page(new);
 
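With this hunk applied, remove_migration_pte() flushes the dcache once up front and then installs the mapping on exactly one of two paths: the hugetlb path uses set_huge_pte_at() and stays entirely under CONFIG_HUGETLB_PAGE, while the regular path keeps set_pte_at(). Reconstructed from the + and context lines above, the resulting block reads:

	flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
		set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, pvmw.address);
		else
			page_dup_rmap(new, true);
	} else
#endif
	{
		set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

		if (PageAnon(new))
			page_add_anon_rmap(new, vma, pvmw.address, false);
		else
			page_add_file_rmap(new, false);
	}

The removed lines show the old flow installing the PTE with set_pte_at() even for hugetlb pages and only afterwards branching on PageHuge(new) for the rmap accounting; the rewrite keeps each page type on its own install path.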
@@ -1251,6 +1252,8 @@ put_anon:
 out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);
+       if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
+               num_poisoned_pages_inc();
 
        /*
         * If migration was not successful and there's a freeing callback, use
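The added lines account the source hugepage as hardware-poisoned when the migration was triggered by memory failure, bumping the poisoned-page counter only on the clear-to-set transition of the flag. A minimal sketch of that set-once pattern, assuming test_set_page_hwpoison() is an atomic test-and-set of PG_hwpoison that returns the old flag value (the helper name comes from the hunk; the wrapper below is illustrative, not this tree's code):

	/* Illustrative only: account a page as poisoned exactly once. */
	static void account_hwpoison_once(struct page *hpage)
	{
		if (!test_set_page_hwpoison(hpage))	/* flag was previously clear */
			num_poisoned_pages_inc();	/* count the first poisoning only */
	}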
@@ -1913,7 +1916,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        int page_lru = page_is_file_cache(page);
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
-       pmd_t orig_entry;
 
        /*
         * Rate-limit the amount of data that is being migrated to a node.
@@ -1956,8 +1958,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        /* Recheck the target PMD */
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        ptl = pmd_lock(mm, pmd);
-       if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
-fail_putback:
+       if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
                spin_unlock(ptl);
                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
@@ -1979,7 +1980,6 @@ fail_putback:
                goto out_unlock;
        }
 
-       orig_entry = *pmd;
        entry = mk_huge_pmd(new_page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
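The functional change here is replacing the racy page_count(page) != 2 test with page_ref_freeze(page, 2): rather than merely observing the refcount, it atomically drops it from 2 to 0, which both verifies that nobody else holds a reference and prevents anyone from taking one (get_page_unless_zero() fails on a frozen count) while the new PMD is installed. A sketch of the helper, paraphrased from include/linux/page_ref.h of this era (simplified; the real header also carries tracepoint hooks):

	static inline int page_ref_freeze(struct page *page, int count)
	{
		/* 2 -> 0 only if exactly @count references exist. */
		return likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
	}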
@@ -1996,15 +1996,7 @@ fail_putback:
        set_pmd_at(mm, mmun_start, pmd, entry);
        update_mmu_cache_pmd(vma, address, &entry);
 
-       if (page_count(page) != 2) {
-               set_pmd_at(mm, mmun_start, pmd, orig_entry);
-               flush_pmd_tlb_range(vma, mmun_start, mmun_end);
-               mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
-               update_mmu_cache_pmd(vma, address, &entry);
-               page_remove_rmap(new_page, true);
-               goto fail_putback;
-       }
-
+       page_ref_unfreeze(page, 2);
        mlock_migrate_page(new_page, page);
        page_remove_rmap(page, true);
        set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
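Because the refcount was frozen before set_pmd_at(), the removed recheck-and-rollback (restore orig_entry, flush the TLB range, undo the rmap, goto fail_putback) is no longer needed: no reference can have appeared in the meantime. page_ref_unfreeze(page, 2) simply publishes the count again once the new PMD is in place. Its counterpart, paraphrased from the same header (simplified, tracepoint hook omitted):

	static inline void page_ref_unfreeze(struct page *page, int count)
	{
		VM_BUG_ON_PAGE(page_count(page) != 0, page);
		VM_BUG_ON(count == 0);
		/* Count becomes nonzero again; new references may be taken. */
		atomic_set(&page->_refcount, count);
	}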