diff --git a/mm/swapfile.c b/mm/swapfile.c
index b1cd120607230b0770c35e0823d6389782ae734a..d999f090dfdabb6e5282aaaf3429029c5e3f839d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -667,10 +667,10 @@ int try_to_free_swap(struct page *page)
         * original page might be freed under memory pressure, then
         * later read back in from swap, now with the wrong data.
         *
-        * Hibernation clears bits from gfp_allowed_mask to prevent
-        * memory reclaim from writing to disk, so check that here.
+        * Hibernation suspends storage while it is writing the image
+        * to disk, so check that here.
         */
-       if (!(gfp_allowed_mask & __GFP_IO))
+       if (pm_suspended_storage())
                return 0;
 
        delete_from_swap_cache(page);
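
Note: pm_suspended_storage() expresses the same condition the old code open-coded
against gfp_allowed_mask. A minimal sketch of the helper, assuming the
CONFIG_PM_SLEEP definition that lives in mm/page_alloc.c around this kernel
version (illustrative; the exact mask tested is the detail being abstracted away
from try_to_free_swap()):

bool pm_suspended_storage(void)
{
	/*
	 * Hibernation restricts gfp_allowed_mask while the image is being
	 * written, so treat storage as suspended unless both IO and FS
	 * allocations are currently allowed.
	 */
	if ((gfp_allowed_mask & (__GFP_FS | __GFP_IO)) == (__GFP_FS | __GFP_IO))
		return false;
	return true;
}
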
@@ -847,12 +847,13 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
 {
-       struct mem_cgroup *ptr;
+       struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;
 
-       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
+       if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
+                                        GFP_KERNEL, &memcg)) {
                ret = -ENOMEM;
                goto out_nolock;
        }
@@ -860,7 +861,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                if (ret > 0)
-                       mem_cgroup_cancel_charge_swapin(ptr);
+                       mem_cgroup_cancel_charge_swapin(memcg);
                ret = 0;
                goto out;
        }
@@ -871,7 +872,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        page_add_anon_rmap(page, vma, addr);
-       mem_cgroup_commit_charge_swapin(page, ptr);
+       mem_cgroup_commit_charge_swapin(page, memcg);
        swap_free(entry);
        /*
         * Move the page to the active list so it is not
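
Note: the unuse_pte() hunks above only rename ptr to memcg and re-wrap the
mem_cgroup_try_charge_swapin() call; the charge protocol itself is unchanged.
A condensed sketch of that try/cancel/commit sequence, with get_page(),
inc_mm_counter() and the activate_page() tail omitted for brevity (the name
unuse_pte_sketch is only for illustration, not a real kernel function):

static int unuse_pte_sketch(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, swp_entry_t entry,
			    struct page *page)
{
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pte_t *pte;

	/* 1. Reserve the memcg charge before taking the PTE lock. */
	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &memcg))
		return -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
		/* 2. Someone else already handled this entry: back out. */
		mem_cgroup_cancel_charge_swapin(memcg);
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	/* 3. Map the page, then commit the charge and free the swap slot. */
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	mem_cgroup_commit_charge_swapin(page, memcg);
	swap_free(entry);
	pte_unmap_unlock(pte, ptl);
	return 1;
}
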