author		Christoph Lameter <cl@linux.com>
		Wed, 24 Aug 2011 23:46:54 +0000 (09:46 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Mon, 5 Sep 2011 07:02:19 +0000 (17:02 +1000)

Some kernel components pin user space memory (infiniband and perf) (by
increasing the page count) and account that memory as "mlocked".

The difference between mlocking and pinning is:

A. mlocked pages are marked with PG_mlocked and are exempt from
   swapping. Page migration may move them around though.
   They are kept on a special LRU list.

B. Pinned pages cannot be moved because something needs to
   directly access physical memory. They may not be on any
   LRU list.
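
To make the distinction concrete: mlock() is what feeds locked_vm,
while pinning has no dedicated syscall and happens when a driver
calls get_user_pages() on the region. The following minimal userspace
sketch (illustration only, not part of the patch) mlocks a buffer and
watches the accounting in /proc/self/status; the VmPin line it greps
for only exists on kernels with this patch applied:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void show_vm_counters(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "VmLck", 5) || !strncmp(line, "VmPin", 5))
			fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	size_t len = 16 * 4096;		/* 64 kB */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	show_vm_counters();	/* VmLck:        0 kB */
	if (mlock(buf, len))	/* needs RLIMIT_MEMLOCK headroom or CAP_IPC_LOCK */
		perror("mlock");
	show_vm_counters();	/* VmLck now shows the 64 kB; VmPin is unchanged */

	munlock(buf, len);
	munmap(buf, len);
	return 0;
}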

I recently saw an mlockall()ed process where mm->locked_vm became
bigger than the virtual size of the process (!) because some memory
was accounted for twice: once when the pages were mlocked and once
when the Infiniband layer increased the refcount because it needed
to pin the RDMA memory.
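
(A worked illustration with invented numbers: a process with a 100 MB
virtual size that mlockall()s everything accounts 100 MB to locked_vm;
registering the same buffers with Infiniband adds another 100 MB, so
locked_vm reports 200 MB, twice the address space.)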

This patch introduces a separate counter for pinned pages and
accounts them separately.
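
With the separate counter, /proc/<pid>/status reports the two
quantities independently. A hypothetical excerpt for a process that
has mlocked 4 MB and has 8 MB pinned for RDMA (values invented for
illustration; field order as in the fs/proc/task_mmu.c hunk below):

	VmLck:	    4096 kB
	VmPin:	    8192 kB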

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Mike Marciniszyn <infinipath@qlogic.com>
Cc: Roland Dreier <roland@kernel.org>
Cc: Sean Hefty <sean.hefty@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/infiniband/core/umem.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/qib/qib_user_pages.c
fs/proc/task_mmu.c
include/linux/mm_types.h
kernel/events/core.c

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index cc92137b3e02452e0af79b9a7cf313e376fe188c..71f0c0f7df949ac0a104f50cc3b1900d3751d308 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -137,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        down_write(&current->mm->mmap_sem);
 
-       locked     = npages + current->mm->locked_vm;
+       locked     = npages + current->mm->pinned_vm;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -207,7 +207,7 @@ out:
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        } else
-               current->mm->locked_vm = locked;
+               current->mm->pinned_vm = locked;
 
        up_write(&current->mm->mmap_sem);
        if (vma_list)
@@ -223,7 +223,7 @@ static void ib_umem_account(struct work_struct *work)
        struct ib_umem *umem = container_of(work, struct ib_umem, work);
 
        down_write(&umem->mm->mmap_sem);
-       umem->mm->locked_vm -= umem->diff;
+       umem->mm->pinned_vm -= umem->diff;
        up_write(&umem->mm->mmap_sem);
        mmput(umem->mm);
        kfree(umem);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index cfed5399f0746acc71e48f1c4d723c81774cc5e4..dc66c450691602f572b53e53a8dbacfca16e9c4b 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
                        goto bail_release;
        }
 
-       current->mm->locked_vm += num_pages;
+       current->mm->pinned_vm += num_pages;
 
        ret = 0;
        goto bail;
@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct page **p, size_t num_pages)
 
        __ipath_release_user_pages(p, num_pages, 1);
 
-       current->mm->locked_vm -= num_pages;
+       current->mm->pinned_vm -= num_pages;
 
        up_write(&current->mm->mmap_sem);
 }
@@ -195,7 +195,7 @@ static void user_pages_account(struct work_struct *_work)
                container_of(_work, struct ipath_user_pages_work, work);
 
        down_write(&work->mm->mmap_sem);
-       work->mm->locked_vm -= work->num_pages;
+       work->mm->pinned_vm -= work->num_pages;
        up_write(&work->mm->mmap_sem);
        mmput(work->mm);
        kfree(work);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 7689e49c13c9c80fe6857659bb24dff02c9b16f1..2bc1d2b96298dd5d59029cb141d83a715ab51f54 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
                        goto bail_release;
        }
 
-       current->mm->locked_vm += num_pages;
+       current->mm->pinned_vm += num_pages;
 
        ret = 0;
        goto bail;
@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page **p, size_t num_pages)
        __qib_release_user_pages(p, num_pages, 1);
 
        if (current->mm) {
-               current->mm->locked_vm -= num_pages;
+               current->mm->pinned_vm -= num_pages;
                up_write(&current->mm->mmap_sem);
        }
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 25b6a887adb916a93d20aa092481f67df451ff8c..d1931b9ab21b3cfca433e078b00f747bf6c735d2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -44,6 +44,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
+               "VmPin:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
@@ -55,6 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
+               mm->pinned_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7870e473033cacb50bb931450616b285c1b027fd..1b699460aea3bf5acb3594e748e119630d59f168 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -293,7 +293,7 @@ struct mm_struct {
        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */
 
-       unsigned long total_vm, locked_vm, shared_vm, exec_vm;
+       unsigned long total_vm, locked_vm, pinned_vm, shared_vm, exec_vm;
        unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5a0eeda4ad9e699d9091508b891476238e0490b8..115d701a2c362dc80a4a0b2796a4c51597cf717c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3501,7 +3501,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                struct ring_buffer *rb = event->rb;
 
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->locked_vm -= event->mmap_locked;
+               vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
                mutex_unlock(&event->mmap_mutex);
 
@@ -3582,7 +3582,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
-       locked = vma->vm_mm->locked_vm + extra;
+       locked = vma->vm_mm->pinned_vm + extra;
 
        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
@@ -3608,7 +3608,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
-       vma->vm_mm->locked_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += event->mmap_locked;
 
 unlock:
        if (!ret)
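
A practical note on the limit checks above: both ib_umem_get() and
perf_mmap() compare the updated pinned_vm total against
RLIMIT_MEMLOCK, so a process without CAP_IPC_LOCK needs enough
memlock headroom before pinning (ib_umem_get() fails with -ENOMEM
otherwise). A minimal userspace sketch, illustration only, for
inspecting and raising the limit:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl)) {
		perror("getrlimit");
		return 1;
	}
	printf("RLIMIT_MEMLOCK: soft=%llu hard=%llu bytes\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);

	/*
	 * Raise the soft limit to the hard limit; pinning requests
	 * beyond the limit are refused unless the task has
	 * CAP_IPC_LOCK.
	 */
	rl.rlim_cur = rl.rlim_max;
	if (setrlimit(RLIMIT_MEMLOCK, &rl)) {
		perror("setrlimit");
		return 1;
	}
	return 0;
}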