* BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
* might be reconsidered if it ever diverges from PAGE_SIZE.
*
- * __GFP_MOVABLE is masked out as swap vectors cannot move
+ * Mobility flags are masked out as swap vectors cannot move
*/
- return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+ return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
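
For reference, an illustrative aside (not part of the patch): GFP_MOVABLE_MASK is assumed here to cover both mobility flags, roughly as defined in include/linux/gfp.h:

	/*
	 * Sketch of the mask cleared above; the authoritative definition
	 * lives in include/linux/gfp.h.
	 */
	#define GFP_MOVABLE_MASK	(__GFP_RECLAIMABLE|__GFP_MOVABLE)

Clearing it keeps these swap-vector pages out of the movable and reclaimable pageblocks since, as the comment says, they cannot be migrated.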
struct inode *inode;
BUG_ON(!PageLocked(page));
+ /*
+ * shmem_backing_dev_info's capabilities prevent regular writeback or
+ * sync from ever calling shmem_writepage; but a stacking filesystem
+ * may use the ->writepage of its underlying filesystem, in which case
+ * we want to do nothing when that underlying filesystem is tmpfs
+ * (writing out to swap is useful as a response to memory pressure, but
+	 * of no use to stabilize the data): in that case just redirty the
+	 * page, unlock it and claim success.  AOP_WRITEPAGE_ACTIVATE, and the
+	 * page_mapped check below, must be avoided unless we're in reclaim.
+ */
+ if (!wbc->for_reclaim) {
+ set_page_dirty(page);
+ unlock_page(page);
+ return 0;
+ }
BUG_ON(page_mapped(page));
mapping = page->mapping;
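
For context, an illustrative sketch (not part of the patch) of the one caller expected to set for_reclaim: the reclaim path's pageout() in mm/vmscan.c builds its writeback_control along these lines:

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= SWAP_CLUSTER_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
		.nonblocking	= 1,
		.for_reclaim	= 1,	/* only set on the reclaim path */
	};

Writeback started anywhere else leaves for_reclaim clear, so it takes the redirty-and-return-0 path added above.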
*nodelist++ = '\0';
if (nodelist_parse(nodelist, *policy_nodes))
goto out;
- if (!nodes_subset(*policy_nodes, node_online_map))
+ if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
goto out;
}
if (!strcmp(value, "default")) {
err = 0;
} else if (!strcmp(value, "interleave")) {
*policy = MPOL_INTERLEAVE;
- /* Default to nodes online if no nodelist */
+ /*
+ * Default to online nodes with memory if no nodelist
+ */
if (!nodelist)
- *policy_nodes = node_online_map;
+ *policy_nodes = node_states[N_HIGH_MEMORY];
err = 0;
}
out:
return page;
}
-struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
- unsigned long idx)
+static struct page *shmem_swapin(struct shmem_inode_info *info,
+ swp_entry_t entry, unsigned long idx)
{
struct shared_policy *p = &info->policy;
int i, num;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
pvma.vm_pgoff = idx;
pvma.vm_end = PAGE_SIZE;
- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+ page = alloc_page_vma(gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
#else
-static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+static inline int shmem_parse_mpol(char *value, int *policy,
+ nodemask_t *policy_nodes)
{
return 1;
}
static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
- return alloc_page(gfp | __GFP_ZERO);
+ return alloc_page(gfp);
}
#endif
* Normally, filepage is NULL on entry, and either found
* uptodate immediately, or allocated and zeroed, or read
* in under swappage, which is then assigned to filepage.
- * But shmem_readpage and shmem_prepare_write pass in a locked
+ * But shmem_readpage and shmem_write_begin pass in a locked
* filepage, which may be found not uptodate by other callers
* too, and may need to be copied from the swappage read in.
*/
info->alloced++;
spin_unlock(&info->lock);
+ clear_highpage(filepage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
}
}
#ifdef CONFIG_NUMA
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
struct inode *i = vma->vm_file->f_path.dentry->d_inode;
return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}
-struct mempolicy *
-shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
+static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
{
struct inode *i = vma->vm_file->f_path.dentry->d_inode;
unsigned long idx;
static const struct inode_operations shmem_symlink_inline_operations;
/*
- * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
+ * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
* but providing them allows a tmpfs file to be used for splice, sendfile, and
* below the loop driver, in the generic fashion that many filesystems support.
*/
}
static int
-shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+shmem_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
{
- struct inode *inode = page->mapping->host;
- return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
+ struct inode *inode = mapping->host;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ *pagep = NULL;
+ return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
+}
+
+static int
+shmem_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+
+ set_page_dirty(page);
+ page_cache_release(page);
+
+ if (pos+copied > inode->i_size)
+ i_size_write(inode, pos+copied);
+
+ return copied;
}
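
To show how the new pair is driven, here is a simplified sketch of the generic write path (assuming generic_perform_write() in mm/filemap.c; file, mapping, pos, flags and the iov_iter i come from that caller, and error handling is trimmed):

	const struct address_space_operations *a_ops = mapping->a_ops;

	do {
		struct page *page;
		void *fsdata;
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned long bytes = min_t(unsigned long,
					PAGE_CACHE_SIZE - offset,
					iov_iter_count(i));
		size_t copied;
		int status;

		status = a_ops->write_begin(file, mapping, pos, bytes,
						flags, &page, &fsdata);
		if (status)
			break;

		copied = iov_iter_copy_from_user_atomic(page, i,
						offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes,
						copied, page, fsdata);
		if (status < 0)
			break;
		iov_iter_advance(i, status);
		pos += status;
	} while (iov_iter_count(i));

So shmem_write_begin() only has to hand back a page ready for the atomic usercopy, and shmem_write_end() is left to dirty it, drop the reference and push out i_size, as above.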
static ssize_t
return ino->i_ino == inum && fh[0] == ino->i_generation;
}
-static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
+static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
{
- struct dentry *de = NULL;
struct inode *inode;
- __u32 *fh = vfh;
- __u64 inum = fh[2];
- inum = (inum << 32) | fh[1];
+	struct dentry *dentry = NULL;
+	u64 inum;
+
+	if (fh_len < 3)
+		return NULL;
+
+	inum = fid->raw[2];
+	inum = (inum << 32) | fid->raw[1];
+
- inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
if (inode) {
- de = d_find_alias(inode);
+ dentry = d_find_alias(inode);
iput(inode);
}
- return de? de: ERR_PTR(-ESTALE);
-}
-
-static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
- int len, int type,
- int (*acceptable)(void *context, struct dentry *de),
- void *context)
-{
- if (len < 3)
- return ERR_PTR(-ESTALE);
-
- return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
- context);
+ return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
return 1;
}
-static struct export_operations shmem_export_ops = {
+static const struct export_operations shmem_export_ops = {
.get_parent = shmem_get_parent,
- .get_dentry = shmem_get_dentry,
.encode_fh = shmem_encode_fh,
- .decode_fh = shmem_decode_fh,
+ .fh_to_dentry = shmem_fh_to_dentry,
};
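
For reference, the file-handle layout these operations rely on, as produced by shmem_encode_fh() (summarized here as an assumption; the hunk above only shows the decode side):

	/*
	 * fh_len == 3, with:
	 *   raw[0] = inode->i_generation
	 *   raw[1] = low 32 bits of inode->i_ino
	 *   raw[2] = high 32 bits of inode->i_ino
	 *
	 * ilookup5() hashes on i_ino + i_generation, and shmem_match()
	 * then verifies both the inode number and the generation.
	 */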
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
unsigned long blocks = 0;
unsigned long inodes = 0;
int policy = MPOL_DEFAULT;
- nodemask_t policy_nodes = node_online_map;
+ nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
#ifdef CONFIG_TMPFS
/*
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
-static void init_once(void *foo, struct kmem_cache *cachep,
- unsigned long flags)
+static void init_once(struct kmem_cache *cachep, void *foo)
{
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
- 0, 0, init_once);
- if (shmem_inode_cachep == NULL)
- return -ENOMEM;
+ 0, SLAB_PANIC, init_once);
return 0;
}
.set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
.readpage = shmem_readpage,
- .prepare_write = shmem_prepare_write,
- .commit_write = simple_commit_write,
+ .write_begin = shmem_write_begin,
+ .write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
};
{
int error;
+ error = bdi_init(&shmem_backing_dev_info);
+ if (error)
+ goto out4;
+
error = init_inodecache();
if (error)
goto out3;
out2:
destroy_inodecache();
out3:
+ bdi_destroy(&shmem_backing_dev_info);
+out4:
shm_mnt = ERR_PTR(error);
return error;
}
d_instantiate(dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
- file->f_path.mnt = mntget(shm_mnt);
- file->f_path.dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = &shmem_file_operations;
- file->f_mode = FMODE_WRITE | FMODE_READ;
+ init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
+ &shmem_file_operations);
return file;
close_file:
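
For reference, the helper adopted above (its prototype as assumed from fs/file_table.c; see the tree for the authoritative definition):

	int init_file(struct file *file, struct vfsmount *mnt,
			struct dentry *dentry, mode_t mode,
			const struct file_operations *fop);

It makes the same five assignments that were open-coded before, including the mntget() on shm_mnt, which is why the explicit f_path/f_mapping/f_op/f_mode setup can go.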