mm,numa: fix update_mmu_cache_pmd call
diff --git a/mm/migrate.c b/mm/migrate.c
index 6e46485f014c8a206d8ca5d425ed00cfb0060d52..3b676b0c5c3ecca91d5e91d8ef2adc2982353f57 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,6 +35,7 @@
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
+#include <linux/balloon_compaction.h>
 
 #include <asm/tlbflush.h>
 
@@ -82,7 +83,30 @@ void putback_lru_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
                putback_lru_page(page);
+       }
+}
+
+/*
+ * Put previously isolated pages back onto the appropriate lists
+ * from which they were once taken for compaction/migration.
+ *
+ * This function must be used instead of putback_lru_pages() whenever the
+ * isolated pageset has been built by isolate_migratepages_range().
+ */
+void putback_movable_pages(struct list_head *l)
+{
+       struct page *page;
+       struct page *page2;
+
+       list_for_each_entry_safe(page, page2, l, lru) {
+               list_del(&page->lru);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
+               if (unlikely(balloon_page_movable(page)))
+                       balloon_page_putback(page);
+               else
+                       putback_lru_page(page);
        }
 }
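The comment above spells out the contract: a pageset built by isolate_migratepages_range() may contain balloon pages, which must never land on an LRU list. A minimal sketch of the choice a caller faces (illustrative only, not part of this patch; the helper and flag names are hypothetical):

static void drain_isolated_pages(struct list_head *pagelist,
                                 bool built_by_isolate_migratepages_range)
{
        if (built_by_isolate_migratepages_range)
                putback_movable_pages(pagelist);        /* balloon-aware */
        else
                putback_lru_pages(pagelist);            /* LRU pages only */
}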
 
@@ -94,8 +118,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
@@ -106,19 +128,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                        goto out;
                ptl = &mm->page_table_lock;
        } else {
-               pgd = pgd_offset(mm, addr);
-               if (!pgd_present(*pgd))
-                       goto out;
-
-               pud = pud_offset(pgd, addr);
-               if (!pud_present(*pud))
+               pmd = mm_find_pmd(mm, addr);
+               if (!pmd)
                        goto out;
-
-               pmd = pmd_offset(pud, addr);
                if (pmd_trans_huge(*pmd))
                        goto out;
-               if (!pmd_present(*pmd))
-                       goto out;
 
                ptep = pte_offset_map(pmd, addr);
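mm_find_pmd() from mm/rmap.c now encapsulates the walk that the removed lines open-coded. For reference, roughly what that helper does at this point in the tree (reconstructed from memory, so treat it as a sketch rather than the verbatim source):

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                pmd = NULL;     /* fold the not-present case into NULL */
out:
        return pmd;
}

A single !pmd test in the caller therefore covers both the missing-table and pmd-not-present cases that were previously checked one by one.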
 
@@ -289,7 +303,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
-               return 0;
+               return MIGRATEPAGE_SUCCESS;
        }
 
        spin_lock_irq(&mapping->tree_lock);
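Here and in the hunks below, the bare 0 success value becomes a named constant. MIGRATEPAGE_SUCCESS and its balloon-specific sibling live in include/linux/migrate.h; reconstructed from this era of the tree (a sketch, not verbatim):

/*
 * Return values from address_space_operations.migratepage():
 * negative errno on failure, MIGRATEPAGE_SUCCESS (0) on success.
 * MIGRATEPAGE_BALLOON_SUCCESS flags a successfully migrated balloon
 * page, whose release path differs from that of ordinary pages.
 */
#define MIGRATEPAGE_SUCCESS             0
#define MIGRATEPAGE_BALLOON_SUCCESS     1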
@@ -359,7 +373,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        }
        spin_unlock_irq(&mapping->tree_lock);
 
-       return 0;
+       return MIGRATEPAGE_SUCCESS;
 }
 
 /*
@@ -375,7 +389,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
        if (!mapping) {
                if (page_count(page) != 1)
                        return -EAGAIN;
-               return 0;
+               return MIGRATEPAGE_SUCCESS;
        }
 
        spin_lock_irq(&mapping->tree_lock);
@@ -402,7 +416,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
        page_unfreeze_refs(page, expected_count - 1);
 
        spin_unlock_irq(&mapping->tree_lock);
-       return 0;
+       return MIGRATEPAGE_SUCCESS;
 }
 
 /*
@@ -489,11 +503,11 @@ int migrate_page(struct address_space *mapping,
 
        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 
-       if (rc)
+       if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
        migrate_page_copy(newpage, page);
-       return 0;
+       return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(migrate_page);
 
@@ -516,7 +530,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 
-       if (rc)
+       if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
        /*
@@ -552,7 +566,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
        } while (bh != head);
 
-       return 0;
+       return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif
@@ -631,7 +645,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *
  * Return value:
  *   < 0 - error code
- *  == 0 - success
+ *  MIGRATEPAGE_SUCCESS - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
                                int remap_swapcache, enum migrate_mode mode)
@@ -668,7 +682,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        else
                rc = fallback_migrate_page(mapping, newpage, page, mode);
 
-       if (rc) {
+       if (rc != MIGRATEPAGE_SUCCESS) {
                newpage->mapping = NULL;
        } else {
                if (remap_swapcache)
@@ -781,6 +795,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                }
        }
 
+       if (unlikely(balloon_page_movable(page))) {
+               /*
+                * A ballooned page does not need any special attention from
+                * the physical-to-virtual reverse mapping procedures.
+                * Skip any attempt to unmap PTEs or to remap swap cache,
+                * in order to avoid burning cycles at rmap level, and perform
+                * the page migration right away (protected by the page lock).
+                */
+               rc = balloon_page_migrate(newpage, page, mode);
+               goto uncharge;
+       }
+
        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -817,7 +843,9 @@ skip_unmap:
                put_anon_vma(anon_vma);
 
 uncharge:
-       mem_cgroup_end_migration(mem, page, newpage, rc == 0);
+       mem_cgroup_end_migration(mem, page, newpage,
+                                (rc == MIGRATEPAGE_SUCCESS ||
+                                 rc == MIGRATEPAGE_BALLOON_SUCCESS));
 unlock:
        unlock_page(page);
 out:
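The fast path above deliberately reports MIGRATEPAGE_BALLOON_SUCCESS rather than plain MIGRATEPAGE_SUCCESS, so the caller can release the old page through the balloon helpers instead of an LRU putback (see the next hunk). A condensed, illustrative restatement of that contract (the function is hypothetical; both pages are assumed locked, as in __unmap_and_move()):

static int balloon_fast_path(struct page *newpage, struct page *page,
                             enum migrate_mode mode)
{
        if (!balloon_page_movable(page))
                return -EAGAIN;         /* hypothetical: take the rmap path */

        /* no PTEs map a balloon page, so migrate under the page lock
         * and let the caller see the distinct balloon success code */
        return balloon_page_migrate(newpage, page, mode);
}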
@@ -849,6 +877,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        goto out;
 
        rc = __unmap_and_move(page, newpage, force, offlining, mode);
+
+       if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+               /*
+                * A ballooned page has already been migrated.
+                * Now it's time to wrap up the counters,
+                * hand the page back to the buddy allocator and return.
+                */
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                   page_is_file_cache(page));
+               balloon_page_free(page);
+               return MIGRATEPAGE_SUCCESS;
+       }
 out:
        if (rc != -EAGAIN) {
                /*
@@ -991,7 +1031,7 @@ int migrate_pages(struct list_head *from,
                        case -EAGAIN:
                                retry++;
                                break;
-                       case 0:
+                       case MIGRATEPAGE_SUCCESS:
                                nr_succeeded++;
                                break;
                        default:
@@ -1001,7 +1041,7 @@ int migrate_pages(struct list_head *from,
                        }
                }
        }
-       rc = 0;
+       rc = nr_failed + retry;
 out:
        if (nr_succeeded)
                count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
@@ -1012,10 +1052,7 @@ out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
-       if (rc)
-               return rc;
-
-       return nr_failed + retry;
+       return rc;
 }
 
 int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
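Taken together with the switch-statement change above, migrate_pages() now reports how much work was left undone instead of collapsing the result to 0. A sketch of how a caller reads the new convention (the helper is hypothetical; only the return-value semantics are from this patch):

/*
 * nr_remaining == 0: every page was migrated.
 * nr_remaining  > 0: that many pages failed or were still retrying,
 *                    and they remain on the list.
 * nr_remaining  < 0: hard error, e.g. -ENOMEM allocating a target page,
 *                    in which case the list is also left intact.
 */
static void handle_migrate_result(struct list_head *pagelist, int nr_remaining)
{
        if (nr_remaining != 0)
                putback_movable_pages(pagelist);        /* give leftovers back */
}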
@@ -1035,7 +1072,7 @@ int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
                        /* try again */
                        cond_resched();
                        break;
-               case 0:
+               case MIGRATEPAGE_SUCCESS:
                        goto out;
                default:
                        rc = -EIO;
@@ -1213,7 +1250,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                        if (node < 0 || node >= MAX_NUMNODES)
                                goto out_pm;
 
-                       if (!node_state(node, N_HIGH_MEMORY))
+                       if (!node_state(node, N_MEMORY))
                                goto out_pm;
 
                        err = -EACCES;
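N_HIGH_MEMORY ("has regular or high memory") gives way to the then-new N_MEMORY state, which also covers nodes whose memory is entirely ZONE_MOVABLE. For reference, the node_states enum of this period, reconstructed from include/linux/nodemask.h (a sketch, not verbatim):

enum node_states {
        N_POSSIBLE,             /* the node could become online at some point */
        N_ONLINE,               /* the node is online */
        N_NORMAL_MEMORY,        /* the node has regular memory */
#ifdef CONFIG_HIGHMEM
        N_HIGH_MEMORY,          /* the node has regular or high memory */
#else
        N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
#ifdef CONFIG_MOVABLE_NODE
        N_MEMORY,               /* the node has regular, high or movable memory */
#else
        N_MEMORY = N_HIGH_MEMORY,
#endif
        N_CPU,                  /* the node has one or more cpus */
        NR_NODE_STATES
};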
@@ -1697,7 +1734,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        page_add_new_anon_rmap(new_page, vma, haddr);
 
        set_pmd_at(mm, haddr, pmd, entry);
-       update_mmu_cache_pmd(vma, address, entry);
+       update_mmu_cache_pmd(vma, address, &entry);
        page_remove_rmap(page);
        /*
         * Finish the charge transaction under the page table lock to
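This last hunk is the fix that gives the commit its subject: update_mmu_cache_pmd() takes a pointer to the PMD entry, not the entry by value. On architectures without a PMD MMU-cache hook the macro expands to nothing, which is why the wrong call compiled silently; where the hook is implemented, its shape is along these lines (a sketch, not a verbatim prototype):

/* generic fallback (hypothetical rendering):
 *   #define update_mmu_cache_pmd(vma, addr, pmd) do { } while (0)
 * real implementations take the entry by pointer:
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmd);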