mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
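The point of the conversion in the diff below is that VM_BUG_ON_PAGE(cond, page) dumps the state of the offending struct page (flags, counts, mapping) before calling BUG(), so the resulting oops carries enough context to debug the assertion. As a minimal sketch only -- the real macro lives in include/linux/mmdebug.h, is guarded by CONFIG_DEBUG_VM, and its exact form (including dump_page()'s signature) varies between kernel versions -- it behaves roughly like:

/*
 * Minimal sketch of the idea, assuming a CONFIG_DEBUG_VM kernel --
 * not the verbatim definition from include/linux/mmdebug.h, and
 * dump_page()'s exact signature differs across kernel versions.
 */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page); /* print page flags, counts, mapping */ \
			BUG();						\
		}							\
	} while (0)
#else
/* With debugging off, fall back to a compile-time-only check. */
#define VM_BUG_ON_PAGE(cond, page)	BUILD_BUG_ON_INVALID(cond)
#endif

When CONFIG_DEBUG_VM is disabled, both the old and new forms reduce to a build-time check, so the extra page argument costs nothing in production builds.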
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dee6cf4e6d34135e1880c5c01c7627aa1a33c69a..c01cb9fedb18f8495c815d4caa380580734e1c8e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
-       VM_BUG_ON(hugetlb_cgroup_from_page(page));
+       VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
@@ -690,15 +690,11 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  */
 int PageHuge(struct page *page)
 {
-       compound_page_dtor *dtor;
-
        if (!PageCompound(page))
                return 0;
 
        page = compound_head(page);
-       dtor = get_compound_page_dtor(page);
-
-       return dtor == free_huge_page;
+       return get_compound_page_dtor(page) == free_huge_page;
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
@@ -708,16 +704,11 @@ EXPORT_SYMBOL_GPL(PageHuge);
  */
 int PageHeadHuge(struct page *page_head)
 {
-       compound_page_dtor *dtor;
-
        if (!PageHead(page_head))
                return 0;
 
-       dtor = get_compound_page_dtor(page_head);
-
-       return dtor == free_huge_page;
+       return get_compound_page_dtor(page_head) == free_huge_page;
 }
-EXPORT_SYMBOL_GPL(PageHeadHuge);
 
 pgoff_t __basepage_index(struct page *page)
 {
@@ -1098,7 +1089,7 @@ retry:
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
-               VM_BUG_ON(page_count(page));
+               VM_BUG_ON_PAGE(page_count(page), page);
                enqueue_huge_page(h, page);
        }
 free:
@@ -1280,9 +1271,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
                void *addr;
 
-               addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
-                               huge_page_size(h), huge_page_size(h), 0);
-
+               addr = memblock_virt_alloc_try_nid_nopanic(
+                               huge_page_size(h), huge_page_size(h),
+                               0, BOOTMEM_ALLOC_ACCESSIBLE, node);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
@@ -1322,8 +1313,8 @@ static void __init gather_bootmem_prealloc(void)
 
 #ifdef CONFIG_HIGHMEM
                page = pfn_to_page(m->phys >> PAGE_SHIFT);
-               free_bootmem_late((unsigned long)m,
-                                 sizeof(struct huge_bootmem_page));
+               memblock_free_late(__pa(m),
+                                  sizeof(struct huge_bootmem_page));
 #else
                page = virt_to_page(m);
 #endif
@@ -2355,17 +2346,27 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        int cow;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+       int ret = 0;
 
        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+       mmun_start = vma->vm_start;
+       mmun_end = vma->vm_end;
+       if (cow)
+               mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                spinlock_t *src_ptl, *dst_ptl;
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr, sz);
-               if (!dst_pte)
-                       goto nomem;
+               if (!dst_pte) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
                /* If the pagetables are shared don't copy or take references */
                if (dst_pte == src_pte)
@@ -2386,10 +2387,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
        }
-       return 0;
 
-nomem:
-       return -ENOMEM;
+       if (cow)
+               mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+       return ret;
 }
 
 static int is_hugetlb_entry_migration(pte_t pte)
@@ -3079,7 +3081,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
-                       get_page(pages[i]);
+                       get_page_foll(pages[i]);
                }
 
                if (vmas)
@@ -3501,7 +3503,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (!get_page_unless_zero(page))
                return false;
        spin_lock(&hugetlb_lock);
@@ -3512,7 +3514,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
@@ -3521,7 +3523,7 @@ void putback_active_hugepage(struct page *page)
 
 bool is_hugepage_active(struct page *page)
 {
-       VM_BUG_ON(!PageHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page), page);
        /*
         * This function can be called for a tail page because the caller,
         * scan_movable_pages, scans through a given pfn-range which typically