diff --git a/mm/shmem.c b/mm/shmem.c
index feead1943d927f6c2660791bab30a370c3f8027c..78307d5c5bd9b6c166f8db5f949cf8fb6fe96066 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -127,7 +127,7 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
        return (flags & VM_NORESERVE) ?
-               0 : security_vm_enough_memory_kern(VM_ACCT(size));
+               0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
@@ -145,7 +145,7 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 static inline int shmem_acct_block(unsigned long flags)
 {
        return (flags & VM_NORESERVE) ?
-               security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
+               security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
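
The two hunks above move shmem's accounting from
security_vm_enough_memory_kern() to security_vm_enough_memory_mm(),
which names the mm being charged explicitly rather than implying it
inside the hook. A minimal sketch of the call pattern, assuming the
v3.3-era hook signature and shmem's own VM_ACCT() rounding macro:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/security.h>

	#define VM_ACCT(size)	(PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

	/* Sketch: charge "size" bytes against current->mm's overcommit
	 * limits; returns 0 on success, -ENOMEM if over the limit. */
	static int charge_tmpfs_size(loff_t size)
	{
		return security_vm_enough_memory_mm(current->mm,
						    VM_ACCT(size));
	}
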
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
        int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
-       pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
+       pgoff_t index = 0;
+
+       pagevec_init(&pvec, 0);
+       /*
+        * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+        */
+       while (!mapping_unevictable(mapping)) {
+               /*
+                * Avoid pagevec_lookup(): find_get_pages() returns 0, as if it
+                * had finished, when it hits a run of PAGEVEC_SIZE swap entries.
+                */
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                       PAGEVEC_SIZE, pvec.pages, indices);
+               if (!pvec.nr)
+                       break;
+               index = indices[pvec.nr - 1] + 1;
+               shmem_deswap_pagevec(&pvec);
+               check_move_unevictable_pages(pvec.pages, pvec.nr);
+               pagevec_release(&pvec);
+               cond_resched();
+       }
 }
 
 /*
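
shmem_deswap_pagevec() is now a pure filter: it compacts pvec->pages so
that only real struct page pointers remain, dropping swap entries (on
which no page reference was ever taken), and leaves pagevec_release()
to the caller. That split is what lets the new shmem_unlock_mapping()
pass the filtered pagevec to check_move_unevictable_pages() before
releasing it. A hedged sketch of the intended caller side, assuming the
SysV SHM_UNLOCK path pairs it with shmem_lock() (identifiers below are
illustrative, not verbatim ipc/shm.c):

	/* Sketch: SHM_UNLOCK side of a hypothetical ipc/shm.c caller. */
	struct file *shm_file = shp->shm_file;		/* hypothetical */

	shmem_lock(shm_file, 0, user);			/* clear flags  */
	shmem_unlock_mapping(shm_file->f_mapping);	/* rescue pages */
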
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        continue;
                }
                if (index == start && indices[0] > end) {
-                       shmem_pagevec_release(&pvec);
+                       shmem_deswap_pagevec(&pvec);
+                       pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }
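
Each shmem_pagevec_release() call in shmem_truncate_range() becomes the
explicit pair shmem_deswap_pagevec() then pagevec_release(). The order
matters: the pagevec filled by shmem_find_get_pages_and_swap() can mix
page pointers with swap entries encoded as exceptional radix-tree
entries, and pagevec_release() must only ever see real pages. A short
sketch of how the two kinds of slot are distinguished in this era (the
helper is illustrative, not part of the patch):

	#include <linux/radix-tree.h>

	/* Illustrative: a shmem radix-tree slot holds either a struct
	 * page pointer or a swp_entry_t stored as an exceptional entry. */
	static bool slot_is_swap_entry(void *entry)
	{
		return radix_tree_exceptional_entry(entry);
	}
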
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
-               /*
-                * Ensure that a racing putback_lru_page() can see
-                * the pages of this mapping are evictable when we
-                * skip them due to !PageLRU during the scan.
-                */
-               smp_mb__after_clear_bit();
-               scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;
 
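
The SHM_UNLOCK side of shmem_lock() loses both the
smp_mb__after_clear_bit() barrier and the inline
scan_mapping_unevictable_pages() call: the page-by-page rescue moves to
shmem_unlock_mapping() above, which runs with no spinlock held and may
cond_resched() between batches, and its per-iteration recheck of
mapping_unevictable() covers the race against a concurrent SHM_LOCK
that the barrier used to guard. A sketch of the loop shape that makes
the recheck sufficient (condensed from the function above):

	/* Sketch: stop early if another task re-SHM_LOCKs the segment,
	 * instead of relying on a memory barrier at clear time. */
	while (!mapping_unevictable(mapping)) {
		/* rescue one PAGEVEC_SIZE batch ... */
		cond_resched();
	}
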
@@ -1631,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                }
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                memcpy(kaddr, symname, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
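
The kmap_atomic()/kunmap_atomic() pair here drops its KM_USER0 slot
argument: atomic kmap slots are now managed as a per-CPU stack by the
highmem core, so callers no longer pick a slot and only need to unmap
in LIFO order. A minimal sketch of the updated idiom (the helper is
illustrative):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative: copy a short buffer into a (possibly highmem)
	 * page using a stack-based atomic kmap. */
	static void copy_to_page(struct page *page, const void *buf,
				 size_t len)
	{
		char *kaddr = kmap_atomic(page);   /* disables preemption */

		memcpy(kaddr, buf, len);
		kunmap_atomic(kaddr);              /* re-enables it */
	}
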
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
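
With CONFIG_SHMEM disabled, the shmem_lock() stub above always returns
0 and never marks a mapping unevictable, so the new
shmem_unlock_mapping() stub has nothing to do; it exists so the SysV
IPC caller links in both configurations. A sketch of the matching
declaration, assuming it sits with the other helpers in
include/linux/shmem_fs.h:

	/* Assumed declaration site: include/linux/shmem_fs.h */
	extern void shmem_unlock_mapping(struct address_space *mapping);
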