/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include "internal.h"
/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 */
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;
static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}
/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
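
/*
 * Example (illustrative sketch, not part of this file): inode_init_always()
 * is exported for filesystems that recycle inode memory themselves and so
 * must reinitialise the VFS portion of a reused inode by hand.  The "myfs"
 * helpers below are hypothetical:
 *
 *	struct inode *inode = myfs_grab_recycled_inode(sb);
 *
 *	if (inode && inode_init_always(sb, inode)) {
 *		myfs_release_inode(inode);	// init failed: treat as -ENOMEM
 *		inode = NULL;
 *	}
 */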
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
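
/*
 * Example (illustrative sketch, not from this file): a filesystem that
 * embeds struct inode in a larger private inode typically pairs its
 * ->alloc_inode with a ->destroy_inode that defers the final free via RCU,
 * mirroring i_callback() above.  The "myfs" names are hypothetical:
 *
 *	static void myfs_i_callback(struct rcu_head *head)
 *	{
 *		struct inode *inode = container_of(head, struct inode, i_rcu);
 *		kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
 *	}
 *
 *	static void myfs_destroy_inode(struct inode *inode)
 *	{
 *		call_rcu(&inode->i_rcu, myfs_i_callback);
 *	}
 */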
/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
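
/*
 * Example (illustrative): when reading an inode from disk, a filesystem
 * should publish the on-disk link count through set_nlink() rather than
 * writing i_nlink directly, so s_remove_count stays balanced.  The
 * raw_inode field name below is hypothetical (ext2-style):
 *
 *	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 *
 * and it should use inc_nlink()/drop_nlink() from its ->link()/->unlink()
 * methods for the same reason.
 */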
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
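
/*
 * Example (illustrative): __iget() is for callers that already hold
 * inode->i_lock, such as the hash and list walkers in this file, where a
 * reference may be taken even while i_count is zero; ihold() is for code
 * that provably owns a reference already and needs another:
 *
 *	spin_lock(&inode->i_lock);
 *	__iget(inode);			// under i_lock; may go 0 -> 1
 *	spin_unlock(&inode->i_lock);
 *
 *	ihold(inode);			// lock-free; needs an existing ref
 */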
static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
		inode->i_sb->s_nr_inodes_unused++;
		this_cpu_inc(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inode->i_sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}
/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}
/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}
static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}
/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
{
	LIST_HEAD(freeable);
	long nr_scanned;
	long freed = 0;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;

		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			sb->s_nr_inodes_unused--;
			this_cpu_dec(nr_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &sb->s_inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&sb->s_inode_lru_lock);

			if (inode != list_entry(sb->s_inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
		freed++;
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&sb->s_inode_lru_lock);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += reap;

	dispose_list(&freeable);
	return freed;
}
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
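
/*
 * Example (illustrative): pseudo filesystems with no stable on-disk inode
 * numbers typically stamp freshly allocated inodes like this:
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */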
/**
 *	new_inode_pseudo	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 *	The inode won't be chained on the superblock's s_inodes list.
 *	This means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
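
/*
 * Example (illustrative sketch of a typical ->create path; the "myfs"
 * operation tables are hypothetical): allocate, initialise ownership and
 * operations, then expose the inode via d_instantiate():
 *
 *	struct inode *inode = new_inode(dir->i_sb);
 *
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = get_next_ino();
 *	inode_init_owner(inode, dir, mode);
 *	inode->i_op = &myfs_file_inode_operations;
 *	inode->i_fop = &myfs_file_operations;
 *	d_instantiate(dentry, inode);
 */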
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
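
/*
 * Example (illustrative): a filesystem whose inodes are keyed by more than
 * the inode number supplies @test and @set callbacks.  The "myfs" names
 * are hypothetical; note neither callback may sleep, since both run under
 * inode_hash_lock:
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->key == *(u64 *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->key = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(key, 32), myfs_test, myfs_set, &key);
 */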
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
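
/*
 * Example (illustrative): the usual read-inode pattern built on
 * iget_locked(); only a new (I_NEW) inode needs filling in, and the
 * myfs_read_inode_from_disk() helper below is hypothetical:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;			// cached and fully set up
 *	myfs_read_inode_from_disk(inode);
 *	unlock_new_inode(inode);
 *	return inode;
 *
 * On failure the filesystem should call iget_failed() rather than
 * unlock_new_inode(), so waiters see the inode unhashed.
 */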
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);
/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}

/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);
/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.  This can return an error for
 *	file systems that need to allocate space in order to update an inode.
 */

int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
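
/*
 * Example (illustrative): the common preamble of a filesystem write path,
 * stripping setuid/setgid privileges and bumping timestamps before any
 * data is copied:
 *
 *	err = file_remove_suid(file);
 *	if (err)
 *		return err;
 *	err = file_update_time(file);
 *	if (err)
 *		return err;
 */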
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
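
/*
 * Example (illustrative): a minimal ->mknod implementation delegating the
 * device-node setup to init_special_inode(); the myfs_new_inode() helper
 * below is hypothetical:
 *
 *	static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *			      umode_t mode, dev_t dev)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		init_special_inode(inode, inode->i_mode, dev);
 *		mark_inode_dirty(inode);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */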
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;
	if (inode_capable(inode, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_dio_done - signal completion of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);
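
/*
 * Example (illustrative sketch of the i_dio_count protocol): submission
 * paths take a reference before issuing direct I/O and drop it on
 * completion, while truncate-like paths quiesce all DIO first.  In this
 * era the DIO code bumps the counter directly:
 *
 *	atomic_inc(&inode->i_dio_count);	// before submitting bios
 *	...submit and complete the I/O...
 *	inode_dio_done(inode);			// on completion
 *
 *	mutex_lock(&inode->i_mutex);		// truncate side
 *	inode_dio_wait(inode);			// no new refs can appear
 */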