]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
dentry: move to per-sb LRU locks
authorDave Chinner <dchinner@redhat.com>
Wed, 3 Jul 2013 00:19:50 +0000 (10:19 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Wed, 17 Jul 2013 02:34:51 +0000 (12:34 +1000)
With the dentry LRUs being per-sb structures, there is no real need for
a global dentry_lru_lock. The locking can be made more fine-grained by
moving to a per-sb LRU lock, isolating the LRU operations of different
filesystems completely from each other. The need for this is independent
of any performance consideration that may arise: in the interest of
abstracting the lru operations away, it is mandatory that each lru works
around its own lock instead of a global lock for all of them.

[glommer@openvz.org: updated changelog]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/dcache.c
fs/super.c
include/linux/fs.h

index 941a35af8e2e0240f34f39ec1b6c605fc0666961..1f2107793c364c4a3b435f760bfedde6fe499998 100644 (file)
@@ -48,7 +48,7 @@
  *   - the dcache hash table
  * s_anon bl list spinlock protects:
  *   - the s_anon list (see __d_drop)
- * dcache_lru_lock protects:
+ * dentry->d_sb->s_dentry_lru_lock protects:
  *   - the dcache lru lists and counters
  * d_lock protects:
  *   - d_flags
@@ -63,7 +63,7 @@
  * Ordering:
  * dentry->d_inode->i_lock
  *   dentry->d_lock
- *     dcache_lru_lock
+ *     dentry->d_sb->s_dentry_lru_lock
  *     dcache_hash_bucket lock
  *     s_anon lock
  *
@@ -81,7 +81,6 @@
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(rename_lock);
@@ -333,11 +332,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
 static void dentry_lru_add(struct dentry *dentry)
 {
        if (list_empty(&dentry->d_lru)) {
-               spin_lock(&dcache_lru_lock);
+               spin_lock(&dentry->d_sb->s_dentry_lru_lock);
                list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                dentry->d_sb->s_nr_dentry_unused++;
                this_cpu_inc(nr_dentry_unused);
-               spin_unlock(&dcache_lru_lock);
+               spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
        }
 }
 
@@ -355,15 +354,15 @@ static void __dentry_lru_del(struct dentry *dentry)
 static void dentry_lru_del(struct dentry *dentry)
 {
        if (!list_empty(&dentry->d_lru)) {
-               spin_lock(&dcache_lru_lock);
+               spin_lock(&dentry->d_sb->s_dentry_lru_lock);
                __dentry_lru_del(dentry);
-               spin_unlock(&dcache_lru_lock);
+               spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
        }
 }
 
 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 {
-       spin_lock(&dcache_lru_lock);
+       spin_lock(&dentry->d_sb->s_dentry_lru_lock);
        if (list_empty(&dentry->d_lru)) {
                list_add_tail(&dentry->d_lru, list);
                dentry->d_sb->s_nr_dentry_unused++;
@@ -371,7 +370,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
        } else {
                list_move_tail(&dentry->d_lru, list);
        }
-       spin_unlock(&dcache_lru_lock);
+       spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 }
 
 /**
@@ -851,14 +850,14 @@ void prune_dcache_sb(struct super_block *sb, int count)
        LIST_HEAD(tmp);
 
 relock:
-       spin_lock(&dcache_lru_lock);
+       spin_lock(&sb->s_dentry_lru_lock);
        while (!list_empty(&sb->s_dentry_lru)) {
                dentry = list_entry(sb->s_dentry_lru.prev,
                                struct dentry, d_lru);
                BUG_ON(dentry->d_sb != sb);
 
                if (!spin_trylock(&dentry->d_lock)) {
-                       spin_unlock(&dcache_lru_lock);
+                       spin_unlock(&sb->s_dentry_lru_lock);
                        cpu_relax();
                        goto relock;
                }
@@ -874,11 +873,11 @@ relock:
                        if (!--count)
                                break;
                }
-               cond_resched_lock(&dcache_lru_lock);
+               cond_resched_lock(&sb->s_dentry_lru_lock);
        }
        if (!list_empty(&referenced))
                list_splice(&referenced, &sb->s_dentry_lru);
-       spin_unlock(&dcache_lru_lock);
+       spin_unlock(&sb->s_dentry_lru_lock);
 
        shrink_dentry_list(&tmp);
 }
@@ -894,14 +893,14 @@ void shrink_dcache_sb(struct super_block *sb)
 {
        LIST_HEAD(tmp);
 
-       spin_lock(&dcache_lru_lock);
+       spin_lock(&sb->s_dentry_lru_lock);
        while (!list_empty(&sb->s_dentry_lru)) {
                list_splice_init(&sb->s_dentry_lru, &tmp);
-               spin_unlock(&dcache_lru_lock);
+               spin_unlock(&sb->s_dentry_lru_lock);
                shrink_dentry_list(&tmp);
-               spin_lock(&dcache_lru_lock);
+               spin_lock(&sb->s_dentry_lru_lock);
        }
-       spin_unlock(&dcache_lru_lock);
+       spin_unlock(&sb->s_dentry_lru_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
index 2a37fd618ac2753cf3af1e4e4a700d428606f4dd..0be75fb29b4c9d4ea9be05accdb8adca2e53d2b8 100644 (file)
@@ -182,6 +182,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
                INIT_LIST_HEAD(&s->s_dentry_lru);
+               spin_lock_init(&s->s_dentry_lru_lock);
                INIT_LIST_HEAD(&s->s_inode_lru);
                spin_lock_init(&s->s_inode_lru_lock);
                INIT_LIST_HEAD(&s->s_mounts);
index b707fa5d3d40babbb859bc6fa115340a11bbc0df..07093ec12da1501a81a7b58e91a6710490a3fa08 100644 (file)
@@ -1269,7 +1269,9 @@ struct super_block {
        struct list_head        s_files;
 #endif
        struct list_head        s_mounts;       /* list of mounts; _not_ for fs use */
-       /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
+
+       /* s_dentry_lru_lock protects s_dentry_lru and s_nr_dentry_unused */
+       spinlock_t              s_dentry_lru_lock ____cacheline_aligned_in_smp;
        struct list_head        s_dentry_lru;   /* unused dentry lru */
        long                    s_nr_dentry_unused;     /* # of dentry on lru */