git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/huge_memory.c
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
[karo-tx-linux.git] / mm / huge_memory.c
index 95d1acb0f3d237f55f5f1d6405de5774f0f9fc6a..25fab7150fa0e10ab7d019174f7535b7afbcb541 100644 (file)
@@ -712,7 +712,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        pgtable_t pgtable;
        spinlock_t *ptl;
 
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable))
                return VM_FAULT_OOM;
@@ -893,7 +893,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out;
        }
        src_page = pmd_page(pmd);
-       VM_BUG_ON(!PageHead(src_page));
+       VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1067,7 +1067,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
 
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
@@ -1133,7 +1133,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
-       VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+       VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
@@ -1211,7 +1211,7 @@ alloc:
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        page_remove_rmap(page);
                        put_page(page);
                }
@@ -1249,7 +1249,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
@@ -1274,7 +1274,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                }
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        if (flags & FOLL_GET)
                get_page_foll(page);
 
@@ -1432,9 +1432,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
-                       VM_BUG_ON(page_mapcount(page) < 0);
+                       VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        atomic_long_dec(&tlb->mm->nr_ptes);
                        spin_unlock(ptl);
                        tlb_remove_page(tlb, page);
@@ -2176,9 +2176,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (unlikely(!page))
                        goto out;
 
-               VM_BUG_ON(PageCompound(page));
-               BUG_ON(!PageAnon(page));
-               VM_BUG_ON(!PageSwapBacked(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
+               VM_BUG_ON_PAGE(!PageAnon(page), page);
+               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1)
@@ -2201,8 +2201,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                }
                /* 0 stands for page_is_file_cache(page) == false */
                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-               VM_BUG_ON(!PageLocked(page));
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
+               VM_BUG_ON_PAGE(PageLRU(page), page);
 
                /* If there is no mapped pte young don't collapse the page */
                if (pte_young(pteval) || PageReferenced(page) ||
@@ -2232,7 +2232,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
-                       VM_BUG_ON(page_mapcount(src_page) != 1);
+                       VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
@@ -2311,7 +2311,7 @@ static struct page
                       struct vm_area_struct *vma, unsigned long address,
                       int node)
 {
-       VM_BUG_ON(*hpage);
+       VM_BUG_ON_PAGE(*hpage, *hpage);
        /*
         * Allocate the page while the vma is still valid and under
         * the mmap_sem read mode so there is no memory allocation
@@ -2580,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 */
                node = page_to_nid(page);
                khugepaged_node_load[node]++;
-               VM_BUG_ON(PageCompound(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
                /* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2876,7 +2876,7 @@ again:
                return;
        }
        page = pmd_page(*pmd);
-       VM_BUG_ON(!page_count(page));
+       VM_BUG_ON_PAGE(!page_count(page), page);
        get_page(page);
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);