4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
45 * dentry->d_inode->i_lock protects:
46 * - i_dentry, d_alias, d_inode of aliases
47 * dcache_hash_bucket lock protects:
48 * - the dcache hash table
49 * s_anon bl list spinlock protects:
50 * - the s_anon list (see __d_drop)
51 * dentry->d_sb->s_dentry_lru_lock protects:
52 * - the dcache lru lists and counters
59 * - d_parent and d_subdirs
60 * children's d_child and d_parent
64 * dentry->d_inode->i_lock
66 * dentry->d_sb->s_dentry_lru_lock
67 * dcache_hash_bucket lock
70 * If there is an ancestor relationship:
71 * dentry->d_parent->...->d_parent->d_lock
73 * dentry->d_parent->d_lock
76 * If no ancestor relationship:
77 * if (dentry1 < dentry2)
81 int sysctl_vfs_cache_pressure __read_mostly = 100;
82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
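/*
 * Editorial sketch (not part of the original file): the "no ancestor
 * relationship" ordering rule in the header comment above, taking two
 * d_locks in address order. A hypothetical helper might look like this:
 */
static inline void lock_two_unrelated_dentries(struct dentry *dentry1,
					       struct dentry *dentry2)
{
	/* lower address first, nested class for the second lock */
	if (dentry1 < dentry2) {
		spin_lock(&dentry1->d_lock);
		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry2->d_lock);
		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
	}
}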
84 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86 EXPORT_SYMBOL(rename_lock);
88 static struct kmem_cache *dentry_cache __read_mostly;
91 * This is the single most critical data structure when it comes
92 * to the dcache: the hashtable for lookups. Somebody should try
93 * to make this good - I've just made it work.
95 * This hash-function tries to avoid losing too many bits of hash
96 * information, yet avoid using a prime hash-size or similar.
98 #define D_HASHBITS d_hash_shift
99 #define D_HASHMASK d_hash_mask
101 static unsigned int d_hash_mask __read_mostly;
102 static unsigned int d_hash_shift __read_mostly;
104 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
109 hash += (unsigned long) parent / L1_CACHE_BYTES;
110 hash = hash + (hash >> D_HASHBITS);
111 return dentry_hashtable + (hash & D_HASHMASK);
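/*
 * Worked example (editorial note, assuming a 2^17-bucket table): then
 * d_hash_shift is 17 and d_hash_mask is 0x1ffff. Folding "hash >> 17"
 * back into the sum lets the high bits of the parent pointer and the
 * name hash still influence the final bucket index before masking.
 */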
114 /* Statistics gathering. */
115 struct dentry_stat_t dentry_stat = {
119 static DEFINE_PER_CPU(long, nr_dentry);
120 static DEFINE_PER_CPU(long, nr_dentry_unused);
122 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
125 * Here we resort to our own counters instead of using generic per-cpu counters
126 * for consistency with what the vfs inode code does. We expect to get
127 * better code and performance by having our own specialized counters.
129 * Please note that the loop is done over all possible CPUs, not over all online
130 * CPUs. The reason for this is that we don't want to play games with CPUs going
131 * on and off. If one of them goes offline, we just keep its counters.
133 * glommer: See cffbc8a for details, and if you ever intend to change this,
134 * please update all vfs counters to match.
136 static long get_nr_dentry(void)
140 for_each_possible_cpu(i)
141 sum += per_cpu(nr_dentry, i);
142 return sum < 0 ? 0 : sum;
145 static long get_nr_dentry_unused(void)
149 for_each_possible_cpu(i)
150 sum += per_cpu(nr_dentry_unused, i);
151 return sum < 0 ? 0 : sum;
154 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
155 size_t *lenp, loff_t *ppos)
157 dentry_stat.nr_dentry = get_nr_dentry();
158 dentry_stat.nr_unused = get_nr_dentry_unused();
159 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
164 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
165 * The strings are both count bytes long, and count is non-zero.
167 #ifdef CONFIG_DCACHE_WORD_ACCESS
169 #include <asm/word-at-a-time.h>
171 * NOTE! 'cs' comes from a dentry, so it has an
172 * aligned allocation for this particular component. We don't
173 * strictly need the load_unaligned_zeropad() safety, but it
174 * doesn't hurt either.
176 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
177 * need the careful unaligned handling.
179 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
181 unsigned long a, b, mask;
184 a = *(unsigned long *)cs;
185 b = load_unaligned_zeropad(ct);
186 if (tcount < sizeof(unsigned long))
188 if (unlikely(a != b))
190 cs += sizeof(unsigned long);
191 ct += sizeof(unsigned long);
192 tcount -= sizeof(unsigned long);
196 mask = ~(~0ul << tcount*8);
197 return unlikely(!!((a ^ b) & mask));
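/*
 * Worked example (editorial note, assuming 64-bit longs): with a
 * 3-byte tail, tcount*8 == 24, so ~0ul << 24 == 0xffffffffff000000
 * and mask == 0x0000000000ffffff. Only the low three bytes of
 * (a ^ b) survive the mask, so whatever load_unaligned_zeropad()
 * pulled in past the end of the string cannot affect the result.
 */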
202 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
216 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
218 const unsigned char *cs;
220 * Be careful about RCU walk racing with rename:
221 * use ACCESS_ONCE to fetch the name pointer.
223 * NOTE! Even if a rename will mean that the length
224 * was not loaded atomically, we don't care. The
225 * RCU walk will check the sequence count eventually,
226 * and catch it. And we won't overrun the buffer,
227 * because we're reading the name pointer atomically,
228 * and a dentry name is guaranteed to be properly
229 * terminated with a NUL byte.
231 * End result: even if 'len' is wrong, we'll exit
232 * early because the data cannot match (there can
233 * be no NUL in the ct/tcount data)
235 cs = ACCESS_ONCE(dentry->d_name.name);
236 smp_read_barrier_depends();
237 return dentry_string_cmp(cs, ct, tcount);
240 static void __d_free(struct rcu_head *head)
242 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
244 WARN_ON(!hlist_unhashed(&dentry->d_alias));
245 if (dname_external(dentry))
246 kfree(dentry->d_name.name);
247 kmem_cache_free(dentry_cache, dentry);
253 static void d_free(struct dentry *dentry)
255 BUG_ON(dentry->d_count);
256 this_cpu_dec(nr_dentry);
257 if (dentry->d_op && dentry->d_op->d_release)
258 dentry->d_op->d_release(dentry);
260 /* if dentry was never visible to RCU, immediate free is OK */
261 if (!(dentry->d_flags & DCACHE_RCUACCESS))
262 __d_free(&dentry->d_u.d_rcu);
264 call_rcu(&dentry->d_u.d_rcu, __d_free);
268 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
269 * @dentry: the target dentry
270 * After this call, in-progress rcu-walk path lookup will fail. This
271 * should be called after unhashing, and after changing d_inode (if
272 * the dentry has not already been unhashed).
274 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
276 assert_spin_locked(&dentry->d_lock);
277 /* Go through a barrier */
278 write_seqcount_barrier(&dentry->d_seq);
282 * Release the dentry's inode, using the filesystem
283 * d_iput() operation if defined. Dentry has no refcount and is unhashed.
286 static void dentry_iput(struct dentry * dentry)
287 __releases(dentry->d_lock)
288 __releases(dentry->d_inode->i_lock)
290 struct inode *inode = dentry->d_inode;
292 dentry->d_inode = NULL;
293 hlist_del_init(&dentry->d_alias);
294 spin_unlock(&dentry->d_lock);
295 spin_unlock(&inode->i_lock);
297 fsnotify_inoderemove(inode);
298 if (dentry->d_op && dentry->d_op->d_iput)
299 dentry->d_op->d_iput(dentry, inode);
303 spin_unlock(&dentry->d_lock);
308 * Release the dentry's inode, using the filesystem
309 * d_iput() operation if defined. dentry remains in-use.
311 static void dentry_unlink_inode(struct dentry * dentry)
312 __releases(dentry->d_lock)
313 __releases(dentry->d_inode->i_lock)
315 struct inode *inode = dentry->d_inode;
316 dentry->d_inode = NULL;
317 hlist_del_init(&dentry->d_alias);
318 dentry_rcuwalk_barrier(dentry);
319 spin_unlock(&dentry->d_lock);
320 spin_unlock(&inode->i_lock);
322 fsnotify_inoderemove(inode);
323 if (dentry->d_op && dentry->d_op->d_iput)
324 dentry->d_op->d_iput(dentry, inode);
330 * dentry_lru_(add|del|move_list) must be called with d_lock held.
332 static void dentry_lru_add(struct dentry *dentry)
334 if (list_empty(&dentry->d_lru)) {
335 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
336 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
337 dentry->d_sb->s_nr_dentry_unused++;
338 this_cpu_inc(nr_dentry_unused);
339 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
343 static void __dentry_lru_del(struct dentry *dentry)
345 list_del_init(&dentry->d_lru);
346 dentry->d_sb->s_nr_dentry_unused--;
347 this_cpu_dec(nr_dentry_unused);
351 * Remove a dentry with references from the LRU.
353 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
354 * lose our last reference through the parent walk. In this case, we need to
355 * remove ourselves from the shrink list, not the LRU.
357 static void dentry_lru_del(struct dentry *dentry)
359 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
360 list_del_init(&dentry->d_lru);
361 dentry->d_flags &= ~DCACHE_SHRINK_LIST;
365 if (!list_empty(&dentry->d_lru)) {
366 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
367 __dentry_lru_del(dentry);
368 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
372 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
374 BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
376 spin_lock(&dentry->d_sb->s_dentry_lru_lock);
377 if (list_empty(&dentry->d_lru)) {
378 list_add_tail(&dentry->d_lru, list);
380 list_move_tail(&dentry->d_lru, list);
381 dentry->d_sb->s_nr_dentry_unused--;
382 this_cpu_dec(nr_dentry_unused);
384 spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
388 * d_kill - kill dentry and return parent
389 * @dentry: dentry to kill
390 * @parent: parent dentry
392 * The dentry must already be unhashed and removed from the LRU.
394 * If this is the root of the dentry tree, return NULL.
396 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
399 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
400 __releases(dentry->d_lock)
401 __releases(parent->d_lock)
402 __releases(dentry->d_inode->i_lock)
404 list_del(&dentry->d_u.d_child);
406 * Inform try_to_ascend() that we are no longer attached to the dentry tree.
409 dentry->d_flags |= DCACHE_DENTRY_KILLED;
411 spin_unlock(&parent->d_lock);
414 * dentry_iput drops the locks, at which point nobody (except
415 * transient RCU lookups) can reach this dentry.
422 * Unhash a dentry without inserting an RCU walk barrier or checking that
423 * dentry->d_lock is locked. The caller must take care of that, if appropriate.
426 static void __d_shrink(struct dentry *dentry)
428 if (!d_unhashed(dentry)) {
429 struct hlist_bl_head *b;
430 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
431 b = &dentry->d_sb->s_anon;
433 b = d_hash(dentry->d_parent, dentry->d_name.hash);
436 __hlist_bl_del(&dentry->d_hash);
437 dentry->d_hash.pprev = NULL;
443 * d_drop - drop a dentry
444 * @dentry: dentry to drop
446 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
447 * be found through a VFS lookup any more. Note that this is different from
448 * deleting the dentry - d_delete will try to mark the dentry negative if
449 * possible, giving a successful _negative_ lookup, while d_drop will
450 * just make the cache lookup fail.
452 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
453 * reason (NFS timeouts or autofs deletes).
455 * __d_drop requires dentry->d_lock.
457 void __d_drop(struct dentry *dentry)
459 if (!d_unhashed(dentry)) {
461 dentry_rcuwalk_barrier(dentry);
464 EXPORT_SYMBOL(__d_drop);
466 void d_drop(struct dentry *dentry)
468 spin_lock(&dentry->d_lock);
470 spin_unlock(&dentry->d_lock);
472 EXPORT_SYMBOL(d_drop);
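/*
 * Editorial usage sketch (not part of the original file): a network
 * filesystem's ->d_revalidate might drop a stale dentry like this.
 * myfs_dentry_is_stale() is a hypothetical helper.
 */
static int myfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;			/* may need to sleep: retry in ref-walk */
	if (myfs_dentry_is_stale(dentry)) {	/* hypothetical staleness check */
		d_drop(dentry);			/* future cache lookups will now miss */
		return 0;
	}
	return 1;
}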
475 * Finish off a dentry we've decided to kill.
476 * dentry->d_lock must be held, returns with it unlocked.
477 * If ref is non-zero, then decrement the refcount too.
478 * Returns dentry requiring refcount drop, or NULL if we're done.
480 static inline struct dentry *
481 dentry_kill(struct dentry *dentry, int ref, int unlock_on_failure)
482 __releases(dentry->d_lock)
485 struct dentry *parent;
487 inode = dentry->d_inode;
488 if (inode && !spin_trylock(&inode->i_lock)) {
490 if (unlock_on_failure) {
491 spin_unlock(&dentry->d_lock);
494 return dentry; /* try again with same dentry */
499 parent = dentry->d_parent;
500 if (parent && !spin_trylock(&parent->d_lock)) {
502 spin_unlock(&inode->i_lock);
509 * inform the fs via d_prune that this dentry is about to be
510 * unhashed and destroyed.
512 if (dentry->d_flags & DCACHE_OP_PRUNE)
513 dentry->d_op->d_prune(dentry);
515 dentry_lru_del(dentry);
516 /* if it was on the hash then remove it */
518 return d_kill(dentry, parent);
524 * This is complicated by the fact that we do not want to put
525 * dentries that are no longer on any hash chain on the unused
526 * list: we'd much rather just get rid of them immediately.
528 * However, that implies that we have to traverse the dentry
529 * tree upwards to the parents which might _also_ now be
530 * scheduled for deletion (it may have been only waiting for
531 * its last child to go away).
533 * This tail recursion is done by hand as we don't want to depend
534 * on the compiler to always get this right (gcc generally doesn't).
535 * Real recursion would eat up our stack space.
539 * dput - release a dentry
540 * @dentry: dentry to release
542 * Release a dentry. This will drop the usage count and if appropriate
543 * call the dentry unlink method as well as removing it from the queues and
544 * releasing its resources. If the parent dentries were scheduled for release
545 * they too may now get deleted.
547 void dput(struct dentry *dentry)
553 if (dentry->d_count == 1)
555 spin_lock(&dentry->d_lock);
556 BUG_ON(!dentry->d_count);
557 if (dentry->d_count > 1) {
559 spin_unlock(&dentry->d_lock);
563 if (dentry->d_flags & DCACHE_OP_DELETE) {
564 if (dentry->d_op->d_delete(dentry))
568 /* Unreachable? Get rid of it */
569 if (d_unhashed(dentry))
572 dentry->d_flags |= DCACHE_REFERENCED;
573 dentry_lru_add(dentry);
576 spin_unlock(&dentry->d_lock);
580 dentry = dentry_kill(dentry, 1, 1);
587 * d_invalidate - invalidate a dentry
588 * @dentry: dentry to invalidate
590 * Try to invalidate the dentry if it turns out to be
591 * possible. If there are other dentries that can be
592 * reached through this one we can't delete it and we
593 * return -EBUSY. On success we return 0.
598 int d_invalidate(struct dentry * dentry)
601 * If it's already been dropped, return OK.
603 spin_lock(&dentry->d_lock);
604 if (d_unhashed(dentry)) {
605 spin_unlock(&dentry->d_lock);
609 * Check whether to do a partial shrink_dcache
610 * to get rid of unused child entries.
612 if (!list_empty(&dentry->d_subdirs)) {
613 spin_unlock(&dentry->d_lock);
614 shrink_dcache_parent(dentry);
615 spin_lock(&dentry->d_lock);
619 * Somebody else still using it?
621 * If it's a directory, we can't drop it
622 * for fear of somebody re-populating it
623 * with children (even though dropping it
624 * would make it unreachable from the root,
625 * we might still populate it if it was a
626 * working directory or similar).
627 * We also need to leave mountpoints alone,
630 if (dentry->d_count > 1 && dentry->d_inode) {
631 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
632 spin_unlock(&dentry->d_lock);
638 spin_unlock(&dentry->d_lock);
641 EXPORT_SYMBOL(d_invalidate);
643 /* This must be called with d_lock held */
644 static inline void __dget_dlock(struct dentry *dentry)
649 static inline void __dget(struct dentry *dentry)
651 spin_lock(&dentry->d_lock);
652 __dget_dlock(dentry);
653 spin_unlock(&dentry->d_lock);
656 struct dentry *dget_parent(struct dentry *dentry)
662 * Don't need rcu_dereference because we re-check it was correct under
666 ret = dentry->d_parent;
667 spin_lock(&ret->d_lock);
668 if (unlikely(ret != dentry->d_parent)) {
669 spin_unlock(&ret->d_lock);
674 BUG_ON(!ret->d_count);
676 spin_unlock(&ret->d_lock);
679 EXPORT_SYMBOL(dget_parent);
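/*
 * Editorial usage sketch: dget_parent() is the safe way to pin the
 * parent, since dentry->d_parent can change under a concurrent rename.
 * Always pair it with dput().
 */
static void example_print_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	/* parent is pinned: it cannot be freed while we hold the reference */
	pr_debug("parent of %s is %s\n",
		 dentry->d_name.name, parent->d_name.name);
	dput(parent);
}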
682 * d_find_alias - grab a hashed alias of inode
683 * @inode: inode in question
684 * @want_discon: flag, used by d_splice_alias, to request
685 * that only a DISCONNECTED alias be returned.
687 * If inode has a hashed alias, or is a directory and has any alias,
688 * acquire the reference to alias and return it. Otherwise return NULL.
689 * Notice that if inode is a directory there can be only one alias and
690 * it can be unhashed only if it has no children, or if it is the root of a filesystem.
693 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
694 * any other hashed alias over that one unless @want_discon is set,
695 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
697 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
699 struct dentry *alias, *discon_alias;
703 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
704 spin_lock(&alias->d_lock);
705 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
706 if (IS_ROOT(alias) &&
707 (alias->d_flags & DCACHE_DISCONNECTED)) {
708 discon_alias = alias;
709 } else if (!want_discon) {
711 spin_unlock(&alias->d_lock);
715 spin_unlock(&alias->d_lock);
718 alias = discon_alias;
719 spin_lock(&alias->d_lock);
720 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
721 if (IS_ROOT(alias) &&
722 (alias->d_flags & DCACHE_DISCONNECTED)) {
724 spin_unlock(&alias->d_lock);
728 spin_unlock(&alias->d_lock);
734 struct dentry *d_find_alias(struct inode *inode)
736 struct dentry *de = NULL;
738 if (!hlist_empty(&inode->i_dentry)) {
739 spin_lock(&inode->i_lock);
740 de = __d_find_alias(inode, 0);
741 spin_unlock(&inode->i_lock);
745 EXPORT_SYMBOL(d_find_alias);
748 * Try to kill dentries associated with this inode.
749 * WARNING: you must own a reference to inode.
751 void d_prune_aliases(struct inode *inode)
753 struct dentry *dentry;
755 spin_lock(&inode->i_lock);
756 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
757 spin_lock(&dentry->d_lock);
758 if (!dentry->d_count) {
759 __dget_dlock(dentry);
761 spin_unlock(&dentry->d_lock);
762 spin_unlock(&inode->i_lock);
766 spin_unlock(&dentry->d_lock);
768 spin_unlock(&inode->i_lock);
770 EXPORT_SYMBOL(d_prune_aliases);
773 * Try to throw away a dentry - free the inode, dput the parent.
774 * Requires dentry->d_lock is held, and dentry->d_count == 0.
775 * Releases dentry->d_lock.
777 * This may fail if locks cannot be acquired; no problem, just try again.
779 static struct dentry * try_prune_one_dentry(struct dentry *dentry)
780 __releases(dentry->d_lock)
782 struct dentry *parent;
784 parent = dentry_kill(dentry, 0, 0);
786 * If dentry_kill returns NULL, we have nothing more to do.
787 * If it returns the same dentry, trylocks failed. In either
788 * case, just loop again.
790 * Otherwise, we need to prune ancestors too. This is necessary
791 * to prevent quadratic behavior of shrink_dcache_parent(), but
792 * is also expected to be beneficial in reducing dentry cache
797 if (parent == dentry)
800 /* Prune ancestors. */
803 spin_lock(&dentry->d_lock);
804 if (dentry->d_count > 1) {
806 spin_unlock(&dentry->d_lock);
809 dentry = dentry_kill(dentry, 1, 1);
814 static void shrink_dentry_list(struct list_head *list)
816 struct dentry *dentry;
820 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
821 if (&dentry->d_lru == list)
823 spin_lock(&dentry->d_lock);
824 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
825 spin_unlock(&dentry->d_lock);
830 * The dispose list is isolated and dentries are not accounted
831 * to the LRU here, so we can simply remove the dentry from the
832 * list regardless of whether it is referenced or not.
834 list_del_init(&dentry->d_lru);
835 dentry->d_flags &= ~DCACHE_SHRINK_LIST;
838 * We found an inuse dentry which was not removed from
839 * the LRU because of laziness during lookup. Do not free it.
841 if (dentry->d_count) {
842 spin_unlock(&dentry->d_lock);
847 dentry = try_prune_one_dentry(dentry);
851 dentry->d_flags |= DCACHE_SHRINK_LIST;
852 list_add(&dentry->d_lru, list);
853 spin_unlock(&dentry->d_lock);
860 * prune_dcache_sb - shrink the dcache
862 * @count: number of entries to try to free
864 * Attempt to shrink the superblock dcache LRU by @count entries. This is
865 * done when we need more memory and is called from the superblock shrinker
868 * This function may fail to free any resources if all the dentries are in use.
871 void prune_dcache_sb(struct super_block *sb, int count)
873 struct dentry *dentry;
874 LIST_HEAD(referenced);
878 spin_lock(&sb->s_dentry_lru_lock);
879 while (!list_empty(&sb->s_dentry_lru)) {
880 dentry = list_entry(sb->s_dentry_lru.prev,
881 struct dentry, d_lru);
882 BUG_ON(dentry->d_sb != sb);
884 if (!spin_trylock(&dentry->d_lock)) {
885 spin_unlock(&sb->s_dentry_lru_lock);
890 if (dentry->d_flags & DCACHE_REFERENCED) {
891 dentry->d_flags &= ~DCACHE_REFERENCED;
892 list_move(&dentry->d_lru, &referenced);
893 spin_unlock(&dentry->d_lock);
895 list_move(&dentry->d_lru, &tmp);
896 dentry->d_flags |= DCACHE_SHRINK_LIST;
897 this_cpu_dec(nr_dentry_unused);
898 sb->s_nr_dentry_unused--;
899 spin_unlock(&dentry->d_lock);
903 cond_resched_lock(&sb->s_dentry_lru_lock);
905 if (!list_empty(&referenced))
906 list_splice(&referenced, &sb->s_dentry_lru);
907 spin_unlock(&sb->s_dentry_lru_lock);
909 shrink_dentry_list(&tmp);
913 * Mark all the dentries as being on the dispose list so we don't think they are
914 * still on the LRU if we try to kill them from ascending the parent chain in
915 * try_prune_one_dentry() rather than directly from the dispose list.
919 struct list_head *dispose)
921 struct dentry *dentry;
924 list_for_each_entry_rcu(dentry, dispose, d_lru) {
925 spin_lock(&dentry->d_lock);
926 dentry->d_flags |= DCACHE_SHRINK_LIST;
927 spin_unlock(&dentry->d_lock);
930 shrink_dentry_list(dispose);
934 * shrink_dcache_sb - shrink dcache for a superblock
937 * Shrink the dcache for the specified super block. This is used to free
938 * the dcache before unmounting a file system.
940 void shrink_dcache_sb(struct super_block *sb)
944 spin_lock(&sb->s_dentry_lru_lock);
945 while (!list_empty(&sb->s_dentry_lru)) {
947 * account for removal here so we don't need to handle it later
948 * even though the dentry is no longer on the lru list.
950 list_splice_init(&sb->s_dentry_lru, &tmp);
951 this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
952 sb->s_nr_dentry_unused = 0;
953 spin_unlock(&sb->s_dentry_lru_lock);
955 shrink_dcache_list(&tmp);
957 spin_lock(&sb->s_dentry_lru_lock);
959 spin_unlock(&sb->s_dentry_lru_lock);
961 EXPORT_SYMBOL(shrink_dcache_sb);
964 * destroy a single subtree of dentries for unmount
965 * - see the comments on shrink_dcache_for_umount() for a description of the locking
968 static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
970 struct dentry *parent;
972 BUG_ON(!IS_ROOT(dentry));
975 /* descend to the first leaf in the current subtree */
976 while (!list_empty(&dentry->d_subdirs))
977 dentry = list_entry(dentry->d_subdirs.next,
978 struct dentry, d_u.d_child);
980 /* consume the dentries from this leaf up through its parents
981 * until we find one with children or run out altogether */
986 * inform the fs that this dentry is about to be
987 * unhashed and destroyed.
989 if (dentry->d_flags & DCACHE_OP_PRUNE)
990 dentry->d_op->d_prune(dentry);
992 dentry_lru_del(dentry);
995 if (dentry->d_count != 0) {
997 "BUG: Dentry %p{i=%lx,n=%s}"
999 " [unmount of %s %s]\n",
1002 dentry->d_inode->i_ino : 0UL,
1003 dentry->d_name.name,
1005 dentry->d_sb->s_type->name,
1006 dentry->d_sb->s_id);
1010 if (IS_ROOT(dentry)) {
1012 list_del(&dentry->d_u.d_child);
1014 parent = dentry->d_parent;
1016 list_del(&dentry->d_u.d_child);
1019 inode = dentry->d_inode;
1021 dentry->d_inode = NULL;
1022 hlist_del_init(&dentry->d_alias);
1023 if (dentry->d_op && dentry->d_op->d_iput)
1024 dentry->d_op->d_iput(dentry, inode);
1031 /* finished when we fall off the top of the tree,
1032 * otherwise we ascend to the parent and move to the
1033 * next sibling if there is one */
1037 } while (list_empty(&dentry->d_subdirs));
1039 dentry = list_entry(dentry->d_subdirs.next,
1040 struct dentry, d_u.d_child);
1045 * destroy the dentries attached to a superblock on unmounting
1046 * - we don't need to use dentry->d_lock because:
1047 * - the superblock is detached from all mountings and open files, so the
1048 * dentry trees will not be rearranged by the VFS
1049 * - s_umount is write-locked, so the memory pressure shrinker will ignore
1050 * any dentries belonging to this superblock that it comes across
1051 * - the filesystem itself is no longer permitted to rearrange the dentries
1052 * in this superblock
1054 void shrink_dcache_for_umount(struct super_block *sb)
1056 struct dentry *dentry;
1058 if (down_read_trylock(&sb->s_umount))
1061 dentry = sb->s_root;
1064 shrink_dcache_for_umount_subtree(dentry);
1066 while (!hlist_bl_empty(&sb->s_anon)) {
1067 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1068 shrink_dcache_for_umount_subtree(dentry);
1073 * This tries to ascend one level of parenthood, but
1074 * we can race with renaming, so we need to re-check
1075 * the parenthood after dropping the lock and check
1076 * that the sequence number still matches.
1078 static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
1080 struct dentry *new = old->d_parent;
1083 spin_unlock(&old->d_lock);
1084 spin_lock(&new->d_lock);
1087 * might go back up the wrong parent if we have had a rename or deletion
1090 if (new != old->d_parent ||
1091 (old->d_flags & DCACHE_DENTRY_KILLED) ||
1092 (!locked && read_seqretry(&rename_lock, seq))) {
1093 spin_unlock(&new->d_lock);
1102 * Search for at least 1 mount point in the dentry's subdirs.
1103 * We descend to the next level whenever the d_subdirs
1104 * list is non-empty and continue searching.
1108 * have_submounts - check for mounts over a dentry
1109 * @parent: dentry to check.
1111 * Return true if the parent or its subdirectories contain a mount point.
1114 int have_submounts(struct dentry *parent)
1116 struct dentry *this_parent;
1117 struct list_head *next;
1121 seq = read_seqbegin(&rename_lock);
1123 this_parent = parent;
1125 if (d_mountpoint(parent))
1127 spin_lock(&this_parent->d_lock);
1129 next = this_parent->d_subdirs.next;
1131 while (next != &this_parent->d_subdirs) {
1132 struct list_head *tmp = next;
1133 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1136 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1137 /* Have we found a mount point ? */
1138 if (d_mountpoint(dentry)) {
1139 spin_unlock(&dentry->d_lock);
1140 spin_unlock(&this_parent->d_lock);
1143 if (!list_empty(&dentry->d_subdirs)) {
1144 spin_unlock(&this_parent->d_lock);
1145 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1146 this_parent = dentry;
1147 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1150 spin_unlock(&dentry->d_lock);
1153 * All done at this level ... ascend and resume the search.
1155 if (this_parent != parent) {
1156 struct dentry *child = this_parent;
1157 this_parent = try_to_ascend(this_parent, locked, seq);
1160 next = child->d_u.d_child.next;
1163 spin_unlock(&this_parent->d_lock);
1164 if (!locked && read_seqretry(&rename_lock, seq))
1167 write_sequnlock(&rename_lock);
1168 return 0; /* No mount points found in tree */
1170 if (!locked && read_seqretry(&rename_lock, seq))
1173 write_sequnlock(&rename_lock);
1180 write_seqlock(&rename_lock);
1183 EXPORT_SYMBOL(have_submounts);
1186 * Search the dentry child list of the specified parent,
1187 * and move any unused dentries to the end of the unused
1188 * list for prune_dcache(). We descend to the next level
1189 * whenever the d_subdirs list is non-empty and continue searching.
1192 * It returns zero iff there are no unused children,
1193 * otherwise it returns the number of children moved to
1194 * the end of the unused list. This may not be the total
1195 * number of unused children, because select_parent can
1196 * drop the lock and return early due to latency constraints.
1199 static int select_parent(struct dentry *parent, struct list_head *dispose)
1201 struct dentry *this_parent;
1202 struct list_head *next;
1207 seq = read_seqbegin(&rename_lock);
1209 this_parent = parent;
1210 spin_lock(&this_parent->d_lock);
1212 next = this_parent->d_subdirs.next;
1214 while (next != &this_parent->d_subdirs) {
1215 struct list_head *tmp = next;
1216 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1219 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1222 * move only zero ref count dentries to the dispose list.
1224 * Those which are presently on the shrink list, being processed
1225 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
1226 * loop in shrink_dcache_parent() might not make any progress and loop forever.
1229 if (dentry->d_count) {
1230 dentry_lru_del(dentry);
1231 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1232 dentry_lru_move_list(dentry, dispose);
1233 dentry->d_flags |= DCACHE_SHRINK_LIST;
1237 * We can return to the caller if we have found some (this
1238 * ensures forward progress). We'll be coming back to find
1241 if (found && need_resched()) {
1242 spin_unlock(&dentry->d_lock);
1247 * Descend a level if the d_subdirs list is non-empty.
1249 if (!list_empty(&dentry->d_subdirs)) {
1250 spin_unlock(&this_parent->d_lock);
1251 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1252 this_parent = dentry;
1253 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1257 spin_unlock(&dentry->d_lock);
1260 * All done at this level ... ascend and resume the search.
1262 if (this_parent != parent) {
1263 struct dentry *child = this_parent;
1264 this_parent = try_to_ascend(this_parent, locked, seq);
1267 next = child->d_u.d_child.next;
1271 spin_unlock(&this_parent->d_lock);
1272 if (!locked && read_seqretry(&rename_lock, seq))
1275 write_sequnlock(&rename_lock);
1284 write_seqlock(&rename_lock);
1289 * shrink_dcache_parent - prune dcache
1290 * @parent: parent of entries to prune
1292 * Prune the dcache to remove unused children of the parent dentry.
1294 void shrink_dcache_parent(struct dentry * parent)
1299 while ((found = select_parent(parent, &dispose)) != 0) {
1300 shrink_dentry_list(&dispose);
1304 EXPORT_SYMBOL(shrink_dcache_parent);
1307 * __d_alloc - allocate a dcache entry
1308 * @sb: filesystem it will belong to
1309 * @name: qstr of the name
1311 * Allocates a dentry. It returns %NULL if there is insufficient memory
1312 * available. On a success the dentry is returned. The name passed in is
1313 * copied and the copy passed in may be reused after this call.
1316 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1318 struct dentry *dentry;
1321 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1326 * We guarantee that the inline name is always NUL-terminated.
1327 * This way the memcpy() done by the name switching in rename
1328 * will still always have a NUL at the end, even if we might
1329 * be overwriting an internal NUL character
1331 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1332 if (name->len > DNAME_INLINE_LEN-1) {
1333 dname = kmalloc(name->len + 1, GFP_KERNEL);
1335 kmem_cache_free(dentry_cache, dentry);
1339 dname = dentry->d_iname;
1342 dentry->d_name.len = name->len;
1343 dentry->d_name.hash = name->hash;
1344 memcpy(dname, name->name, name->len);
1345 dname[name->len] = 0;
1347 /* Make sure we always see the terminating NUL character */
1349 dentry->d_name.name = dname;
1351 dentry->d_count = 1;
1352 dentry->d_flags = 0;
1353 spin_lock_init(&dentry->d_lock);
1354 seqcount_init(&dentry->d_seq);
1355 dentry->d_inode = NULL;
1356 dentry->d_parent = dentry;
1358 dentry->d_op = NULL;
1359 dentry->d_fsdata = NULL;
1360 INIT_HLIST_BL_NODE(&dentry->d_hash);
1361 INIT_LIST_HEAD(&dentry->d_lru);
1362 INIT_LIST_HEAD(&dentry->d_subdirs);
1363 INIT_HLIST_NODE(&dentry->d_alias);
1364 INIT_LIST_HEAD(&dentry->d_u.d_child);
1365 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1367 this_cpu_inc(nr_dentry);
1373 * d_alloc - allocate a dcache entry
1374 * @parent: parent of entry to allocate
1375 * @name: qstr of the name
1377 * Allocates a dentry. It returns %NULL if there is insufficient memory
1378 * available. On a success the dentry is returned. The name passed in is
1379 * copied and the copy passed in may be reused after this call.
1381 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1383 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1387 spin_lock(&parent->d_lock);
1389 * don't need child lock because it is not subject
1390 * to concurrency here
1392 __dget_dlock(parent);
1393 dentry->d_parent = parent;
1394 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1395 spin_unlock(&parent->d_lock);
1399 EXPORT_SYMBOL(d_alloc);
1401 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1403 struct dentry *dentry = __d_alloc(sb, name);
1405 dentry->d_flags |= DCACHE_DISCONNECTED;
1408 EXPORT_SYMBOL(d_alloc_pseudo);
1410 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1415 q.len = strlen(name);
1416 q.hash = full_name_hash(q.name, q.len);
1417 return d_alloc(parent, &q);
1419 EXPORT_SYMBOL(d_alloc_name);
1421 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1423 WARN_ON_ONCE(dentry->d_op);
1424 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1426 DCACHE_OP_REVALIDATE |
1427 DCACHE_OP_WEAK_REVALIDATE |
1428 DCACHE_OP_DELETE ));
1433 dentry->d_flags |= DCACHE_OP_HASH;
1435 dentry->d_flags |= DCACHE_OP_COMPARE;
1436 if (op->d_revalidate)
1437 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1438 if (op->d_weak_revalidate)
1439 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1441 dentry->d_flags |= DCACHE_OP_DELETE;
1443 dentry->d_flags |= DCACHE_OP_PRUNE;
1446 EXPORT_SYMBOL(d_set_d_op);
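/*
 * Editorial usage sketch: most filesystems do not call d_set_d_op()
 * directly; they set sb->s_d_op at mount time and let __d_alloc()
 * apply it to every new dentry. Both function names in the ops table
 * below are hypothetical.
 */
static const struct dentry_operations example_dentry_ops = {
	.d_revalidate	= example_d_revalidate,	/* hypothetical */
	.d_delete	= example_d_delete,	/* hypothetical */
};
/* in ->fill_super(): sb->s_d_op = &example_dentry_ops; */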
1448 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1450 spin_lock(&dentry->d_lock);
1452 if (unlikely(IS_AUTOMOUNT(inode)))
1453 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1454 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1456 dentry->d_inode = inode;
1457 dentry_rcuwalk_barrier(dentry);
1458 spin_unlock(&dentry->d_lock);
1459 fsnotify_d_instantiate(dentry, inode);
1463 * d_instantiate - fill in inode information for a dentry
1464 * @entry: dentry to complete
1465 * @inode: inode to attach to this dentry
1467 * Fill in inode information in the entry.
1469 * This turns negative dentries into productive full members of society.
1472 * NOTE! This assumes that the inode count has been incremented
1473 * (or otherwise set) by the caller to indicate that it is now
1474 * in use by the dcache.
1477 void d_instantiate(struct dentry *entry, struct inode * inode)
1479 BUG_ON(!hlist_unhashed(&entry->d_alias));
1481 spin_lock(&inode->i_lock);
1482 __d_instantiate(entry, inode);
1484 spin_unlock(&inode->i_lock);
1485 security_d_instantiate(entry, inode);
1487 EXPORT_SYMBOL(d_instantiate);
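/*
 * Editorial usage sketch: the classic ->create pattern of a simple
 * in-memory filesystem (compare ramfs). example_new_inode() is
 * hypothetical; the inode reference it returns is handed over to the
 * dcache by d_instantiate().
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, bool excl)
{
	struct inode *inode = example_new_inode(dir->i_sb, mode); /* hypothetical */

	if (!inode)
		return -ENOSPC;
	d_instantiate(dentry, inode);	/* inode count was set for the dcache */
	dget(dentry);			/* extra pin, as ramfs does */
	return 0;
}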
1490 * d_instantiate_unique - instantiate a non-aliased dentry
1491 * @entry: dentry to instantiate
1492 * @inode: inode to attach to this dentry
1494 * Fill in inode information in the entry. On success, it returns NULL.
1495 * If an unhashed alias of "entry" already exists, then we return the
1496 * aliased dentry instead and drop one reference to inode.
1498 * Note that in order to avoid conflicts with rename() etc, the caller
1499 * had better be holding the parent directory semaphore.
1501 * This also assumes that the inode count has been incremented
1502 * (or otherwise set) by the caller to indicate that it is now
1503 * in use by the dcache.
1505 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1506 struct inode *inode)
1508 struct dentry *alias;
1509 int len = entry->d_name.len;
1510 const char *name = entry->d_name.name;
1511 unsigned int hash = entry->d_name.hash;
1514 __d_instantiate(entry, NULL);
1518 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1520 * Don't need alias->d_lock here, because aliases with
1521 * d_parent == entry->d_parent are not subject to name or
1522 * parent changes, because the parent inode i_mutex is held.
1524 if (alias->d_name.hash != hash)
1526 if (alias->d_parent != entry->d_parent)
1528 if (alias->d_name.len != len)
1530 if (dentry_cmp(alias, name, len))
1536 __d_instantiate(entry, inode);
1540 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1542 struct dentry *result;
1544 BUG_ON(!hlist_unhashed(&entry->d_alias));
1547 spin_lock(&inode->i_lock);
1548 result = __d_instantiate_unique(entry, inode);
1550 spin_unlock(&inode->i_lock);
1553 security_d_instantiate(entry, inode);
1557 BUG_ON(!d_unhashed(result));
1562 EXPORT_SYMBOL(d_instantiate_unique);
1564 struct dentry *d_make_root(struct inode *root_inode)
1566 struct dentry *res = NULL;
1569 static const struct qstr name = QSTR_INIT("/", 1);
1571 res = __d_alloc(root_inode->i_sb, &name);
1573 d_instantiate(res, root_inode);
1579 EXPORT_SYMBOL(d_make_root);
1581 static struct dentry * __d_find_any_alias(struct inode *inode)
1583 struct dentry *alias;
1585 if (hlist_empty(&inode->i_dentry))
1587 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1593 * d_find_any_alias - find any alias for a given inode
1594 * @inode: inode to find an alias for
1596 * If any aliases exist for the given inode, take and return a
1597 * reference for one of them. If no aliases exist, return %NULL.
1599 struct dentry *d_find_any_alias(struct inode *inode)
1603 spin_lock(&inode->i_lock);
1604 de = __d_find_any_alias(inode);
1605 spin_unlock(&inode->i_lock);
1608 EXPORT_SYMBOL(d_find_any_alias);
1611 * d_obtain_alias - find or allocate a dentry for a given inode
1612 * @inode: inode to allocate the dentry for
1614 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1615 * similar open by handle operations. The returned dentry may be anonymous,
1616 * or may have a full name (if the inode was already in the cache).
1618 * When called on a directory inode, we must ensure that the inode only ever
1619 * has one dentry. If a dentry is found, that is returned instead of
1620 * allocating a new one.
1622 * On successful return, the reference to the inode has been transferred
1623 * to the dentry. In case of an error the reference on the inode is released.
1624 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1625 * be passed in and the error will be propagated to the return value,
1626 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1628 struct dentry *d_obtain_alias(struct inode *inode)
1630 static const struct qstr anonstring = QSTR_INIT("/", 1);
1635 return ERR_PTR(-ESTALE);
1637 return ERR_CAST(inode);
1639 res = d_find_any_alias(inode);
1643 tmp = __d_alloc(inode->i_sb, &anonstring);
1645 res = ERR_PTR(-ENOMEM);
1649 spin_lock(&inode->i_lock);
1650 res = __d_find_any_alias(inode);
1652 spin_unlock(&inode->i_lock);
1657 /* attach a disconnected dentry */
1658 spin_lock(&tmp->d_lock);
1659 tmp->d_inode = inode;
1660 tmp->d_flags |= DCACHE_DISCONNECTED;
1661 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1662 hlist_bl_lock(&tmp->d_sb->s_anon);
1663 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1664 hlist_bl_unlock(&tmp->d_sb->s_anon);
1665 spin_unlock(&tmp->d_lock);
1666 spin_unlock(&inode->i_lock);
1667 security_d_instantiate(tmp, inode);
1672 if (res && !IS_ERR(res))
1673 security_d_instantiate(res, inode);
1677 EXPORT_SYMBOL(d_obtain_alias);
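/*
 * Editorial usage sketch: the typical export_operations ->fh_to_dentry
 * shape (compare generic_fh_to_dentry()). example_iget() is
 * hypothetical; NULL and IS_ERR inodes can be passed straight through,
 * as documented above.
 */
static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid,
					   int fh_len, int fh_type)
{
	struct inode *inode = example_iget(sb, fid->i32.ino, fid->i32.gen);

	return d_obtain_alias(inode);
}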
1680 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1681 * @inode: the inode which may have a disconnected dentry
1682 * @dentry: a negative dentry which we want to point to the inode.
1684 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1685 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1686 * and return it, else simply d_add the inode to the dentry and return NULL.
1688 * This is needed in the lookup routine of any filesystem that is exportable
1689 * (via knfsd) so that we can build dcache paths to directories effectively.
1691 * If a dentry was found and moved, then it is returned. Otherwise NULL
1692 * is returned. This matches the expected return value of ->lookup.
1694 * Cluster filesystems may call this function with a negative, hashed dentry.
1695 * In that case, we know that the inode will be a regular file, and also this
1696 * will only occur during atomic_open. So we need to check for the dentry
1697 * being already hashed only in the final case.
1699 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1701 struct dentry *new = NULL;
1704 return ERR_CAST(inode);
1706 if (inode && S_ISDIR(inode->i_mode)) {
1707 spin_lock(&inode->i_lock);
1708 new = __d_find_alias(inode, 1);
1710 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1711 spin_unlock(&inode->i_lock);
1712 security_d_instantiate(new, inode);
1713 d_move(new, dentry);
1716 /* already taking inode->i_lock, so d_add() by hand */
1717 __d_instantiate(dentry, inode);
1718 spin_unlock(&inode->i_lock);
1719 security_d_instantiate(dentry, inode);
1723 d_instantiate(dentry, inode);
1724 if (d_unhashed(dentry))
1729 EXPORT_SYMBOL(d_splice_alias);
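/*
 * Editorial usage sketch: the standard lookup tail of an exportable
 * filesystem. example_inode_by_name() is hypothetical; if it yields
 * NULL, d_splice_alias() simply leaves a negative dentry behind.
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_inode_by_name(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);
}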
1732 * d_add_ci - lookup or allocate new dentry with case-exact name
1733 * @inode: the inode case-insensitive lookup has found
1734 * @dentry: the negative dentry that was passed to the parent's lookup func
1735 * @name: the case-exact name to be associated with the returned dentry
1737 * This is to avoid filling the dcache with case-insensitive names to the
1738 * same inode, only the actual correct case is stored in the dcache for
1739 * case-insensitive filesystems.
1741 * If a case-insensitive lookup matches and the case-exact dentry
1742 * already exists in the dcache, use it and return it.
1744 * If no entry exists with the exact case name, allocate new dentry with
1745 * the exact case, and return the spliced entry.
1747 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1750 struct dentry *found;
1754 * First check if a dentry matching the name already exists;
1755 * if not, go ahead and create it now.
1757 found = d_hash_and_lookup(dentry->d_parent, name);
1758 if (unlikely(IS_ERR(found)))
1761 new = d_alloc(dentry->d_parent, name);
1763 found = ERR_PTR(-ENOMEM);
1767 found = d_splice_alias(inode, new);
1776 * If a matching dentry exists, and it's not negative, use it.
1778 * Decrement the reference count to balance the iget() done earlier on.
1781 if (found->d_inode) {
1782 if (unlikely(found->d_inode != inode)) {
1783 /* This can't happen because bad inodes are unhashed. */
1784 BUG_ON(!is_bad_inode(inode));
1785 BUG_ON(!is_bad_inode(found->d_inode));
1792 * Negative dentry: instantiate it unless the inode is a directory and
1793 * already has a dentry.
1795 new = d_splice_alias(inode, found);
1806 EXPORT_SYMBOL(d_add_ci);
1809 * Do the slow-case of the dentry name compare.
1811 * Unlike the dentry_cmp() function, we need to atomically
1812 * load the name and length information, so that the
1813 * filesystem can rely on them, and can use the 'name' and
1814 * 'len' information without worrying about walking off the
1815 * end of memory etc.
1817 * Thus the read_seqcount_retry() and the "duplicate" info
1818 * in arguments (the low-level filesystem should not look
1819 * at the dentry inode or name contents directly, since
1820 * rename can change them while we're in RCU mode).
1822 enum slow_d_compare {
1828 static noinline enum slow_d_compare slow_dentry_cmp(
1829 const struct dentry *parent,
1830 struct dentry *dentry,
1832 const struct qstr *name)
1834 int tlen = dentry->d_name.len;
1835 const char *tname = dentry->d_name.name;
1837 if (read_seqcount_retry(&dentry->d_seq, seq)) {
1839 return D_COMP_SEQRETRY;
1841 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
1842 return D_COMP_NOMATCH;
1847 * __d_lookup_rcu - search for a dentry (racy, store-free)
1848 * @parent: parent dentry
1849 * @name: qstr of name we wish to find
1850 * @seqp: returns d_seq value at the point where the dentry was found
1851 * Returns: dentry, or NULL
1853 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
1854 * resolution (store-free path walking) design described in
1855 * Documentation/filesystems/path-lookup.txt.
1857 * This is not to be used outside core vfs.
1859 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
1860 * held, and rcu_read_lock held. The returned dentry must not be stored
1861 * without taking d_lock and checking the d_seq sequence count against @seq returned here.
1864 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount function.
1867 * Alternatively, __d_lookup_rcu may be called again to look up the child of
1868 * the returned dentry, so long as its parent's seqlock is checked after the
1869 * child is looked up. Thus, an interlocking stepping of sequence lock checks
1870 * is formed, giving integrity down the path walk.
1872 * NOTE! The caller *has* to check the resulting dentry against the sequence
1873 * number we've returned before using any of the resulting dentry state!
1875 struct dentry *__d_lookup_rcu(const struct dentry *parent,
1876 const struct qstr *name,
1879 u64 hashlen = name->hash_len;
1880 const unsigned char *str = name->name;
1881 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
1882 struct hlist_bl_node *node;
1883 struct dentry *dentry;
1886 * Note: There is significant duplication with __d_lookup which is
1887 * required to prevent single threaded performance regressions
1888 * especially on architectures where smp_rmb (in seqcounts) is costly.
1889 * Keep the two functions in sync.
1893 * The hash list is protected using RCU.
1895 * Carefully use d_seq when comparing a candidate dentry, to avoid
1896 * races with d_move().
1898 * It is possible that concurrent renames can mess up our list
1899 * walk here and result in missing our dentry, resulting in the
1900 * false-negative result. d_lookup() protects against concurrent
1901 * renames using rename_lock seqlock.
1903 * See Documentation/filesystems/path-lookup.txt for more details.
1905 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1910 * The dentry sequence count protects us from concurrent
1911 * renames, and thus protects parent and name fields.
1913 * The caller must perform a seqcount check in order
1914 * to do anything useful with the returned dentry.
1916 * NOTE! We do a "raw" seqcount_begin here. That means that
1917 * we don't wait for the sequence count to stabilize if it
1918 * is in the middle of a sequence change. If we do the slow
1919 * dentry compare, we will do seqretries until it is stable,
1920 * and if we end up with a successful lookup, we actually
1921 * want to exit RCU lookup anyway.
1923 seq = raw_seqcount_begin(&dentry->d_seq);
1924 if (dentry->d_parent != parent)
1926 if (d_unhashed(dentry))
1929 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
1930 if (dentry->d_name.hash != hashlen_hash(hashlen))
1933 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
1936 case D_COMP_NOMATCH:
1943 if (dentry->d_name.hash_len != hashlen)
1946 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
1953 * d_lookup - search for a dentry
1954 * @parent: parent dentry
1955 * @name: qstr of name we wish to find
1956 * Returns: dentry, or NULL
1958 * d_lookup searches the children of the parent dentry for the name in
1959 * question. If the dentry is found its reference count is incremented and the
1960 * dentry is returned. The caller must use dput to free the entry when it has
1961 * finished using it. %NULL is returned if the dentry does not exist.
1963 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
1965 struct dentry *dentry;
1969 seq = read_seqbegin(&rename_lock);
1970 dentry = __d_lookup(parent, name);
1973 } while (read_seqretry(&rename_lock, seq));
1976 EXPORT_SYMBOL(d_lookup);
1979 * __d_lookup - search for a dentry (racy)
1980 * @parent: parent dentry
1981 * @name: qstr of name we wish to find
1982 * Returns: dentry, or NULL
1984 * __d_lookup is like d_lookup, however it may (rarely) return a
1985 * false-negative result due to unrelated rename activity.
1987 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
1988 * however it must be used carefully, eg. with a following d_lookup in
1989 * the case of failure.
1991 * __d_lookup callers must be commented.
1993 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
1995 unsigned int len = name->len;
1996 unsigned int hash = name->hash;
1997 const unsigned char *str = name->name;
1998 struct hlist_bl_head *b = d_hash(parent, hash);
1999 struct hlist_bl_node *node;
2000 struct dentry *found = NULL;
2001 struct dentry *dentry;
2004 * Note: There is significant duplication with __d_lookup_rcu which is
2005 * required to prevent single threaded performance regressions
2006 * especially on architectures where smp_rmb (in seqcounts) is costly.
2007 * Keep the two functions in sync.
2011 * The hash list is protected using RCU.
2013 * Take d_lock when comparing a candidate dentry, to avoid races with d_move().
2016 * It is possible that concurrent renames can mess up our list
2017 * walk here and result in missing our dentry, resulting in the
2018 * false-negative result. d_lookup() protects against concurrent
2019 * renames using rename_lock seqlock.
2021 * See Documentation/filesystems/path-lookup.txt for more details.
2025 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2027 if (dentry->d_name.hash != hash)
2030 spin_lock(&dentry->d_lock);
2031 if (dentry->d_parent != parent)
2033 if (d_unhashed(dentry))
2037 * It is safe to compare names since d_move() cannot
2038 * change the qstr (protected by d_lock).
2040 if (parent->d_flags & DCACHE_OP_COMPARE) {
2041 int tlen = dentry->d_name.len;
2042 const char *tname = dentry->d_name.name;
2043 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2046 if (dentry->d_name.len != len)
2048 if (dentry_cmp(dentry, str, len))
2054 spin_unlock(&dentry->d_lock);
2057 spin_unlock(&dentry->d_lock);
2065 * d_hash_and_lookup - hash the qstr then search for a dentry
2066 * @dir: Directory to search in
2067 * @name: qstr of name we wish to find
2069 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2071 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2074 * Check for a fs-specific hash function. Note that we must
2075 * calculate the standard hash first, as the d_op->d_hash()
2076 * routine may choose to leave the hash value unchanged.
2078 name->hash = full_name_hash(name->name, name->len);
2079 if (dir->d_flags & DCACHE_OP_HASH) {
2080 int err = dir->d_op->d_hash(dir, name);
2081 if (unlikely(err < 0))
2082 return ERR_PTR(err);
2084 return d_lookup(dir, name);
2086 EXPORT_SYMBOL(d_hash_and_lookup);
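/*
 * Editorial usage sketch (hypothetical caller): resolving a single
 * name component against a directory dentry. d_hash_and_lookup()
 * computes the standard hash and honours a fs-specific ->d_hash().
 */
static struct dentry *example_lookup_child(struct dentry *dir, const char *name)
{
	struct qstr q = QSTR_INIT(name, strlen(name));

	return d_hash_and_lookup(dir, &q);	/* NULL or ERR_PTR on failure */
}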
2089 * d_validate - verify dentry provided from insecure source (deprecated)
2090 * @dentry: The dentry alleged to be valid child of @dparent
2091 * @dparent: The parent dentry (known to be valid)
2093 * An insecure source has sent us a dentry, here we verify it and dget() it.
2094 * This is used by ncpfs in its readdir implementation.
2095 * Zero is returned if the dentry is invalid.
2097 * This function is slow for big directories, and deprecated, do not use it.
2099 int d_validate(struct dentry *dentry, struct dentry *dparent)
2101 struct dentry *child;
2103 spin_lock(&dparent->d_lock);
2104 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2105 if (dentry == child) {
2106 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2107 __dget_dlock(dentry);
2108 spin_unlock(&dentry->d_lock);
2109 spin_unlock(&dparent->d_lock);
2113 spin_unlock(&dparent->d_lock);
2117 EXPORT_SYMBOL(d_validate);
2120 * When a file is deleted, we have two options:
2121 * - turn this dentry into a negative dentry
2122 * - unhash this dentry and free it.
2124 * Usually, we want to just turn this into
2125 * a negative dentry, but if anybody else is
2126 * currently using the dentry or the inode
2127 * we can't do that and we fall back on removing
2128 * it from the hash queues and waiting for
2129 * it to be deleted later when it has no users
2133 * d_delete - delete a dentry
2134 * @dentry: The dentry to delete
2136 * Turn the dentry into a negative dentry if possible, otherwise
2137 * remove it from the hash queues so it can be deleted later
2140 void d_delete(struct dentry * dentry)
2142 struct inode *inode;
2145 * Are we the only user?
2148 spin_lock(&dentry->d_lock);
2149 inode = dentry->d_inode;
2150 isdir = S_ISDIR(inode->i_mode);
2151 if (dentry->d_count == 1) {
2152 if (!spin_trylock(&inode->i_lock)) {
2153 spin_unlock(&dentry->d_lock);
2157 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2158 dentry_unlink_inode(dentry);
2159 fsnotify_nameremove(dentry, isdir);
2163 if (!d_unhashed(dentry))
2166 spin_unlock(&dentry->d_lock);
2168 fsnotify_nameremove(dentry, isdir);
2170 EXPORT_SYMBOL(d_delete);
2172 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2174 BUG_ON(!d_unhashed(entry));
2176 entry->d_flags |= DCACHE_RCUACCESS;
2177 hlist_bl_add_head_rcu(&entry->d_hash, b);
2181 static void _d_rehash(struct dentry * entry)
2183 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2187 * d_rehash - add an entry back to the hash
2188 * @entry: dentry to add to the hash
2190 * Adds a dentry to the hash according to its name.
2193 void d_rehash(struct dentry * entry)
2195 spin_lock(&entry->d_lock);
2197 spin_unlock(&entry->d_lock);
2199 EXPORT_SYMBOL(d_rehash);
2202 * dentry_update_name_case - update case insensitive dentry with a new name
2203 * @dentry: dentry to be updated
2206 * Update a case insensitive dentry with new case of name.
2208 * dentry must have been returned by d_lookup with name @name. Old and new
2209 * name lengths must match (ie. no d_compare which allows mismatched name lengths).
2212 * Parent inode i_mutex must be held over d_lookup and into this call (to
2213 * keep renames and concurrent inserts, and readdir(2) away).
2215 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2217 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2218 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2220 spin_lock(&dentry->d_lock);
2221 write_seqcount_begin(&dentry->d_seq);
2222 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2223 write_seqcount_end(&dentry->d_seq);
2224 spin_unlock(&dentry->d_lock);
2226 EXPORT_SYMBOL(dentry_update_name_case);
2228 static void switch_names(struct dentry *dentry, struct dentry *target)
2230 if (dname_external(target)) {
2231 if (dname_external(dentry)) {
2233 * Both external: swap the pointers
2235 swap(target->d_name.name, dentry->d_name.name);
2238 * dentry:internal, target:external. Steal target's
2239 * storage and make target internal.
2241 memcpy(target->d_iname, dentry->d_name.name,
2242 dentry->d_name.len + 1);
2243 dentry->d_name.name = target->d_name.name;
2244 target->d_name.name = target->d_iname;
2247 if (dname_external(dentry)) {
2249 * dentry:external, target:internal. Give dentry's
2250 * storage to target and make dentry internal
2252 memcpy(dentry->d_iname, target->d_name.name,
2253 target->d_name.len + 1);
2254 target->d_name.name = dentry->d_name.name;
2255 dentry->d_name.name = dentry->d_iname;
2258 * Both are internal. Just copy target to dentry
2260 memcpy(dentry->d_iname, target->d_name.name,
2261 target->d_name.len + 1);
2262 dentry->d_name.len = target->d_name.len;
2266 swap(dentry->d_name.len, target->d_name.len);
2269 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2272 * XXXX: do we really need to take target->d_lock?
2274 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2275 spin_lock(&target->d_parent->d_lock);
2277 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2278 spin_lock(&dentry->d_parent->d_lock);
2279 spin_lock_nested(&target->d_parent->d_lock,
2280 DENTRY_D_LOCK_NESTED);
2282 spin_lock(&target->d_parent->d_lock);
2283 spin_lock_nested(&dentry->d_parent->d_lock,
2284 DENTRY_D_LOCK_NESTED);
2287 if (target < dentry) {
2288 spin_lock_nested(&target->d_lock, 2);
2289 spin_lock_nested(&dentry->d_lock, 3);
2291 spin_lock_nested(&dentry->d_lock, 2);
2292 spin_lock_nested(&target->d_lock, 3);
static void dentry_unlock_parents_for_move(struct dentry *dentry,
					struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on.
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&target->d_seq);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
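
/*
 * Illustrative sketch, not part of the original file: normally the VFS
 * itself calls d_move() after a successful ->rename(), but a filesystem
 * whose fs_flags include FS_RENAME_DOES_D_MOVE performs the dcache move
 * from its own ->rename() method, roughly like this.  The function names
 * below are hypothetical.
 */
#if 0
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int err = example_rename_on_disk(old_dir, old_dentry,
					 new_dir, new_dentry);	/* hypothetical */
	if (err)
		return err;
	/* the VFS holds both directories' i_mutex; see lock_rename() */
	d_move(old_dentry, new_dentry);
	return 0;
}
#endif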
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
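
/*
 * Illustrative sketch, not part of the original file: the classic use is a
 * loop check before splicing or renaming a directory, refusing to make a
 * directory a descendant of itself:
 */
#if 0
	if (d_ancestor(old_dentry, new_dentry))
		return -EINVAL;	/* new_dentry lies inside old_dentry's subtree */
#endif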
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret = ERR_PTR(-EBUSY);

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	if (likely(!d_mountpoint(alias))) {
		__d_move(alias, dentry);
		ret = alias;
	}
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent;

	dentry_lock_for_move(anon, dentry);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&anon->d_seq);

	dparent = dentry->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dentry->d_parent = dentry;
	list_del_init(&dentry->d_u.d_child);
	anon->d_parent = dparent;
	list_move(&anon->d_u.d_child, &dparent->d_subdirs);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
				spin_unlock(&inode->i_lock);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_materialise_dentry(dentry, alias);
				write_sequnlock(&rename_lock);
				__d_drop(alias);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing. This drops inode->i_lock */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual)) {
				if (PTR_ERR(actual) == -ELOOP)
					pr_warn_ratelimited(
						"VFS: Lookup of '%s' in %s %s"
						" would have caused loop\n",
						dentry->d_name.name,
						inode->i_sb->s_type->name,
						inode->i_sb->s_id);
				dput(alias);
			}
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else
		BUG_ON(!d_unhashed(actual));

	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	dput(dentry);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
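
/*
 * Illustrative sketch, not part of the original file: prepend() builds the
 * string right-to-left, so callers start the cursor one past the end of the
 * buffer and read the result from the returned cursor, not from buf[0]:
 */
#if 0
	char buf[8];
	char *p = buf + sizeof(buf);	/* cursor starts past the end */
	int len = sizeof(buf);

	prepend(&p, &len, "\0", 1);	/* buf: [.......0]  p -> NUL  len = 7 */
	prepend(&p, &len, "foo", 3);	/* buf: [....foo0]  p -> 'f'  len = 4 */
	prepend(&p, &len, "/", 1);	/* buf: [.../foo0]  p -> '/'  len = 3 */
	/* p now points at "/foo"; a further prepend of more than 3 bytes
	 * would fail with -ENAMETOOLONG. */
#endif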
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * Caller holds the rename_lock.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	struct mount *mnt = real_mount(vfsmnt);
	bool slash = false;
	int error = 0;

	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (!mnt_has_parent(mnt))
				goto global_root;
			dentry = mnt->mnt_mountpoint;
			mnt = mnt->mnt_parent;
			vfsmnt = &mnt->mnt;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		spin_lock(&dentry->d_lock);
		error = prepend_name(buffer, buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (!error)
			error = prepend(buffer, buflen, "/", 1);
		if (error)
			break;

		slash = true;
		dentry = parent;
	}

	if (!error && !slash)
		error = prepend(buffer, buflen, "/", 1);

	return error;

global_root:
	/*
	 * Filesystems needing to implement special "root names"
	 * should do so with ->d_dname()
	 */
	if (IS_ROOT(dentry) &&
	    (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
		WARN(1, "Root dentry has weird name <%.*s>\n",
		     (int) dentry->d_name.len, dentry->d_name.name);
	}
	if (!slash)
		error = prepend(buffer, buflen, "/", 1);
	if (!error)
		error = is_mounted(vfsmnt) ? 1 : 2;
	return error;
}
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	br_read_lock(&vfsmount_lock);
	write_seqlock(&rename_lock);
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
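
/*
 * Illustrative sketch, not part of the original file: callers must
 * distinguish the three outcomes of __d_path().  Variable names are
 * hypothetical.
 */
#if 0
	char *p = __d_path(&file->f_path, &visible_root, buf, buflen);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENAMETOOLONG: buffer too small */
	if (!p)
		return -ENOENT;		/* not reachable from visible_root */
	/* p points inside buf at the NUL-terminated path */
#endif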
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	br_read_lock(&vfsmount_lock);
	write_seqlock(&rename_lock);
	error = prepend_path(path, &root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	br_read_lock(&vfsmount_lock);
	write_seqlock(&rename_lock);
	error = path_with_deleted(path, &root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);
	if (error < 0)
		res = ERR_PTR(error);
	path_put(&root);
	return res;
}
EXPORT_SYMBOL(d_path);
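
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * convention for d_path() - allocate a page, print from the returned
 * pointer, never from the start of the buffer.  The function name is
 * hypothetical.
 */
#if 0
static void example_log_path(struct file *file)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!buf)
		return;
	name = d_path(&file->f_path, buf, PAGE_SIZE);
	if (!IS_ERR(name))
		printk(KERN_DEBUG "file: %s\n", name);
	free_page((unsigned long)buf);
}
#endif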
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
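
/*
 * Illustrative sketch, not part of the original file: a synthetic
 * filesystem implements ->d_dname() with dynamic_dname(), the way pipefs
 * names its unhashed dentries "pipe:[<ino>]".  The identifiers below are
 * hypothetical.
 */
#if 0
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations example_dentry_operations = {
	.d_dname	= example_dname,
};
#endif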
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	prepend(&end, &buflen, "\0", 1);
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;
		int error;

		prefetch(parent);
		spin_lock(&dentry->d_lock);
		error = prepend_name(&end, &buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
			goto Elong;

		retval = end;
		dentry = parent;
	}
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	char *retval;

	write_seqlock(&rename_lock);
	retval = __dentry_path(dentry, buf, buflen);
	write_sequnlock(&rename_lock);

	return retval;
}
EXPORT_SYMBOL(dentry_path_raw);
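
/*
 * Illustrative sketch, not part of the original file: unlike d_path(),
 * dentry_path_raw() ignores vfsmounts and the caller's root, so it reports
 * the path relative to the dentry's own filesystem root - useful when no
 * struct path is available, e.g. for a diagnostic message:
 */
#if 0
	char *p = dentry_path_raw(dentry, buf, PAGE_SIZE);

	if (!IS_ERR(p))
		printk(KERN_DEBUG "fs-relative path: %s\n", p);
#endif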
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	write_seqlock(&rename_lock);
	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	write_sequnlock(&rename_lock);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	write_sequnlock(&rename_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	get_fs_root_and_pwd(current->fs, &root, &pwd);

	error = -ENOENT;
	br_read_lock(&vfsmount_lock);
	write_seqlock(&rename_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PAGE_SIZE;
		int buflen = PAGE_SIZE;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		write_sequnlock(&rename_lock);
		br_read_unlock(&vfsmount_lock);

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		write_sequnlock(&rename_lock);
		br_read_unlock(&vfsmount_lock);
	}

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against d_parent being
		 * changed under us by d_move()
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
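
/*
 * Illustrative sketch, not part of the original file: a caller that must
 * confine an operation to a pinned subtree.  Variable names are
 * hypothetical.
 */
#if 0
	/* both dentries are held, so neither can be freed under us */
	if (!is_subdir(dentry, subtree_root))
		return -EXDEV;
#endif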
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_unhashed(dentry) || !dentry->d_inode) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_count--;
		}
		spin_unlock(&dentry->d_lock);
	}
	if (this_parent != root) {
		struct dentry *child = this_parent;
		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
			this_parent->d_flags |= DCACHE_GENOCIDE;
			this_parent->d_count--;
		}
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return;

rename_retry:
	if (locked)
		goto again;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
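
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->tmpfile() method allocates an inode with i_nlink == 1 and hands it to
 * d_tmpfile(), which drops the link count to zero and gives the dentry a
 * synthetic "#<ino>" name.  The inode allocator below is hypothetical.
 */
#if 0
static int example_tmpfile(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	struct inode *inode = example_new_inode(dir, mode);	/* nlink == 1 */

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_tmpfile(dentry, inode);
	return 0;
}
#endif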
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
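
/*
 * Illustrative note, not part of the original file: the __setup() hook above
 * lets the dentry hash table size be forced from the boot command line, e.g.
 *
 *	dhash_entries=1048576
 *
 * When it is left unset, alloc_large_system_hash() below sizes the table
 * from available memory.
 */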
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}