diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bbac913f96bc1662ac3d7ef37c82080d9c17562f..62fe06bb7d04bacccc8c2213a6a65134fa274010 100644 (file)
@@ -116,7 +116,7 @@ static void set_recommended_min_free_kbytes(void)
        for_each_populated_zone(zone)
                nr_zones++;
 
-       /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+       /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
        recommended_min = pageblock_nr_pages * nr_zones * 2;
 
        /*
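For scale (an illustrative calculation, assuming x86-64 defaults not stated in the hunk): pageblock_nr_pages is 512, i.e. one 2 MiB pageblock of 4 KiB pages, so with, say, three populated zones the line above yields 512 * 3 * 2 = 3072 pages, roughly 12 MiB, as the recommended minimum of free memory to keep for fragmentation avoidance.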
@@ -151,7 +151,7 @@ static int start_stop_khugepaged(void)
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
-               if (unlikely(IS_ERR(khugepaged_thread))) {
+               if (IS_ERR(khugepaged_thread)) {
                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
@@ -786,7 +786,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
 }
 
 /* Caller must hold page table lock. */
@@ -1307,7 +1307,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          pmd, _pmd,  1))
                        update_mmu_cache_pmd(vma, addr, pmd);
        }
-       if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        if (page->mapping)
@@ -1755,8 +1755,7 @@ static void __split_huge_page_refcount(struct page *page,
                                      (1L << PG_unevictable)));
                page_tail->flags |= (1L << PG_dirty);
 
-               /* clear PageTail before overwriting first_page */
-               smp_wmb();
+               clear_compound_head(page_tail);
 
                if (page_is_young(page))
                        set_page_young(page_tail);
@@ -1880,7 +1879,7 @@ static int __split_huge_page_map(struct page *page,
                 * here). But it is generally safer to never allow
                 * small and huge TLB entries for the same virtual
                 * address to be loaded simultaneously. So instead of
-                * doing "pmd_populate(); flush_tlb_range();" we first
+                * doing "pmd_populate(); flush_pmd_tlb_range();" we first
                 * mark the current pmd notpresent (atomically because
                 * here the pmd_trans_huge and pmd_trans_splitting
                 * must remain set at all times on the pmd until the
@@ -2010,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+               if (*vm_flags & VM_NO_THP)
                        return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
@@ -2026,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+               if (*vm_flags & VM_NO_THP)
                        return -EINVAL;
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
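The two hunks above are in hugepage_madvise(), the kernel-side handler behind madvise(MADV_HUGEPAGE) and madvise(MADV_NOHUGEPAGE). A minimal userspace sketch of the call path these checks guard follows; the mapping size and flags are illustrative and not taken from the patch:

#include <sys/mman.h>

/*
 * Sketch: ask the kernel to prefer THP backing for an anonymous mapping.
 * The madvise() call ends up in hugepage_madvise(), which sets VM_HUGEPAGE
 * on the VMA (or returns -EINVAL for VM_NO_THP areas, as checked above).
 */
int main(void)
{
	size_t len = 4UL << 20;		/* 4 MiB: a multiple of the 2 MiB THP size */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (madvise(buf, len, MADV_HUGEPAGE))	/* MADV_NOHUGEPAGE clears it again */
		return 1;

	munmap(buf, len);
	return 0;
}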
@@ -2413,8 +2412,7 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 
 static struct page *
 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-                      struct vm_area_struct *vma, unsigned long address,
-                      int node)
+                      unsigned long address, int node)
 {
        VM_BUG_ON_PAGE(*hpage, *hpage);
 
@@ -2481,8 +2479,7 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 
 static struct page *
 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-                      struct vm_area_struct *vma, unsigned long address,
-                      int node)
+                      unsigned long address, int node)
 {
        up_read(&mm->mmap_sem);
        VM_BUG_ON(!*hpage);
@@ -2530,7 +2527,7 @@ static void collapse_huge_page(struct mm_struct *mm,
                __GFP_THISNODE;
 
        /* release the mmap_sem read lock. */
-       new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
+       new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
        if (!new_page)
                return;