mm: FOLL flags for GUP flags
Author:     Hugh Dickins <hugh.dickins@tiscali.co.uk>
AuthorDate: Tue, 22 Sep 2009 00:03:31 +0000 (17:03 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 22 Sep 2009 14:17:40 +0000 (07:17 -0700)
__get_user_pages() has been taking its own GUP flags, then processing
them into FOLL flags for follow_page().  Though oddly named, the FOLL
flags are more widely used, so pass them to __get_user_pages() now.
Sorry, VM flags, VM_FAULT flags and FAULT_FLAGs are still distinct.

(The patch to __get_user_pages() looks peculiar, with both gup_flags
and foll_flags: the gup_flags remain constant; but as before there's
an exceptional case, out of scope of the patch, in which foll_flags
per page have FOLL_WRITE masked off.)
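
(Illustrative sketch, not part of the patch: how the old-style (write,
force) ints now translate into FOLL flags, mirroring the converted
get_user_pages() in mm/memory.c below.  The helper name is hypothetical.)

	/* Hypothetical helper: build FOLL flags the way the converted
	 * get_user_pages() does before calling __get_user_pages(). */
	static unsigned int build_foll_flags(int write, int force, int want_pages)
	{
		unsigned int flags = FOLL_TOUCH;	/* mark pages accessed */

		if (want_pages)
			flags |= FOLL_GET;		/* take a ref on each page */
		if (write)
			flags |= FOLL_WRITE;		/* need write access */
		if (force)
			flags |= FOLL_FORCE;		/* VM_MAY* permissions suffice */
		return flags;
	}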

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/internal.h
mm/memory.c
mm/mlock.c
mm/nommu.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 45ee5b5a343dbcb20f7017c9fa6b3afa9afaa064..5409eced7aae0c0d0a29364656ee875975e11bfc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1232,6 +1232,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_TOUCH     0x02    /* mark page accessed */
 #define FOLL_GET       0x04    /* do get_page on page */
 #define FOLL_DUMP      0x08    /* give error on hole if it would be zero */
+#define FOLL_FORCE     0x10    /* get_user_pages read/write w/o permission */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
diff --git a/mm/internal.h b/mm/internal.h
index d41475078b2005fffe995d08478f771f14967ced..75596574911e4c8f2ac83fa49144fba7cfc0aaaf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -250,12 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-#define GUP_FLAGS_WRITE                0x01
-#define GUP_FLAGS_FORCE                0x02
-#define GUP_FLAGS_DUMP         0x04
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int len, int flags,
+                    unsigned long start, int len, unsigned int foll_flags,
                     struct page **pages, struct vm_area_struct **vmas);
 
 #define ZONE_RECLAIM_NOSCAN    -2
diff --git a/mm/memory.c b/mm/memory.c
index c8b5b9435a92c09067e09daaed21fa3c7dbbe78e..5c694f2b9c12d02d6a9ca129afb702d68237a9c9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1209,27 +1209,29 @@ no_page_table:
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int nr_pages, int flags,
+                    unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags = 0;
-       int write = !!(flags & GUP_FLAGS_WRITE);
-       int force = !!(flags & GUP_FLAGS_FORCE);
+       unsigned long vm_flags;
 
        if (nr_pages <= 0)
                return 0;
+
+       VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
        /* 
         * Require read or write permissions.
-        * If 'force' is set, we only require the "MAY" flags.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
-       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = (gup_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (gup_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0;
 
        do {
                struct vm_area_struct *vma;
-               unsigned int foll_flags;
 
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(tsk, start)) {
@@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pte_t *pte;
 
                        /* user gate pages are read-only */
-                       if (write)
+                       if (gup_flags & FOLL_WRITE)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                    !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 
-               foll_flags = FOLL_TOUCH;
-               if (pages)
-                       foll_flags |= FOLL_GET;
-               if (flags & GUP_FLAGS_DUMP)
-                       foll_flags |= FOLL_DUMP;
-               if (write)
-                       foll_flags |= FOLL_WRITE;
-
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                       &start, &nr_pages, i, foll_flags);
+                                       &start, &nr_pages, i, gup_flags);
                        continue;
                }
 
                do {
                        struct page *page;
+                       unsigned int foll_flags = gup_flags;
 
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
@@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
 
-                       if (write)
-                               foll_flags |= FOLL_WRITE;
-
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
@@ -1415,12 +1407,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int nr_pages, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
 {
-       int flags = 0;
+       int flags = FOLL_TOUCH;
 
+       if (pages)
+               flags |= FOLL_GET;
        if (write)
-               flags |= GUP_FLAGS_WRITE;
+               flags |= FOLL_WRITE;
        if (force)
-               flags |= GUP_FLAGS_FORCE;
+               flags |= FOLL_FORCE;
 
        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
@@ -1447,7 +1441,7 @@ struct page *get_dump_page(unsigned long addr)
        struct page *page;
 
        if (__get_user_pages(current, current->mm, addr, 1,
-                       GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
+                       FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
                return NULL;
        if (page == ZERO_PAGE(0)) {
                page_cache_release(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index e13918d4fc4f89ce6f48dd2782e9c503f2c4b7e4..22041aa9f5c1d7793b9cc8094153c2c09975e211 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = 0;
+       gup_flags = FOLL_TOUCH | FOLL_GET;
        if (vma->vm_flags & VM_WRITE)
-               gup_flags = GUP_FLAGS_WRITE;
+               gup_flags |= FOLL_WRITE;
 
        while (nr_pages > 0) {
                int i;
diff --git a/mm/nommu.c b/mm/nommu.c
index 386443e9d2c6cbcc929ee36991259d8f9dd66077..2d02ca17ce188182043b5ef8f41f8b87e68d2ce2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -168,20 +168,20 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int nr_pages, int flags,
+                    unsigned long start, int nr_pages, unsigned int foll_flags,
                     struct page **pages, struct vm_area_struct **vmas)
 {
        struct vm_area_struct *vma;
        unsigned long vm_flags;
        int i;
-       int write = !!(flags & GUP_FLAGS_WRITE);
-       int force = !!(flags & GUP_FLAGS_FORCE);
 
        /* calculate required read or write permissions.
-        * - if 'force' is set, we only require the "MAY" flags.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
-       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = (foll_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (foll_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
@@ -223,9 +223,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        int flags = 0;
 
        if (write)
-               flags |= GUP_FLAGS_WRITE;
+               flags |= FOLL_WRITE;
        if (force)
-               flags |= GUP_FLAGS_FORCE;
+               flags |= FOLL_FORCE;
 
        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
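
(Usage note, illustrative only: callers of the public get_user_pages()
are unaffected by this patch.  A sketch of pinning one user page for
write, assuming the caller already holds mm->mmap_sem for read and that
`addr` is a valid user address.)

	struct page *page;
	int ret;

	/* write=1, force=0 becomes FOLL_TOUCH|FOLL_GET|FOLL_WRITE inside */
	ret = get_user_pages(current, current->mm, addr, 1, 1, 0, &page, NULL);
	if (ret == 1) {
		/* ... access the pinned page ... */
		page_cache_release(page);	/* drop the FOLL_GET reference */
	}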