git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
list_lru: dynamically adjust node arrays
author: Glauber Costa <glommer@openvz.org>
Wed, 3 Jul 2013 00:20:01 +0000 (10:20 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 17 Jul 2013 02:34:58 +0000 (12:34 +1000)
We currently use a compile-time constant to size the node array for the
list_lru structure.  Due to this, we don't need to allocate any memory at
initialization time.  But as a consequence, the structures that contain
embedded list_lru lists can become way too big (the superblock for
instance contains two of them).

This patch aims at ameliorating this situation by dynamically allocating
the node arrays with the firmware-provided nr_node_ids.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/super.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_qm.c
include/linux/list_lru.h
mm/list_lru.c

index ca41919df1de3df355a09bbf3a156fe0b7c6573d..b79e732555cbb77b4f915ef631aa37ea728359a6 100644 (file)
@@ -201,8 +201,12 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
                INIT_HLIST_NODE(&s->s_instances);
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
-               list_lru_init(&s->s_dentry_lru);
-               list_lru_init(&s->s_inode_lru);
+
+               if (list_lru_init(&s->s_dentry_lru))
+                       goto err_out;
+               if (list_lru_init(&s->s_inode_lru))
+                       goto err_out_dentry_lru;
+
                INIT_LIST_HEAD(&s->s_mounts);
                init_rwsem(&s->s_umount);
                lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -242,6 +246,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
        }
 out:
        return s;
+
+err_out_dentry_lru:
+       list_lru_destroy(&s->s_dentry_lru);
 err_out:
        security_sb_free(s);
 #ifdef CONFIG_SMP
index 99f977627ee32e56858bd597c764abbb1ca2ceec..6c77431808857dc31f522346da41e2808994cbe0 100644 (file)
@@ -1591,6 +1591,7 @@ xfs_free_buftarg(
        struct xfs_mount        *mp,
        struct xfs_buftarg      *btp)
 {
+       list_lru_destroy(&btp->bt_lru);
        unregister_shrinker(&btp->bt_shrinker);
 
        if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1665,9 +1666,12 @@ xfs_alloc_buftarg(
        if (!btp->bt_bdi)
                goto error;
 
-       list_lru_init(&btp->bt_lru);
        if (xfs_setsize_buftarg_early(btp, bdev))
                goto error;
+
+       if (list_lru_init(&btp->bt_lru))
+               goto error;
+
        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
        btp->bt_shrinker.seeks = DEFAULT_SEEKS;
index 6c4a587aac71004d93e93532c22d4e639ee970b9..cc6e78da0d31232f5576cf86156f92cd537e0711 100644 (file)
@@ -829,11 +829,18 @@ xfs_qm_init_quotainfo(
 
        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
+       if ((error = list_lru_init(&qinf->qi_lru))) {
+               kmem_free(qinf);
+               mp->m_quotainfo = NULL;
+               return error;
+       }
+
        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
+               list_lru_destroy(&qinf->qi_lru);
                kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
@@ -844,8 +851,6 @@ xfs_qm_init_quotainfo(
        INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);
 
-       list_lru_init(&qinf->qi_lru);
-
        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);
 
@@ -933,6 +938,7 @@ xfs_qm_destroy_quotainfo(
        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);
 
+       list_lru_destroy(&qi->qi_lru);
        unregister_shrinker(&qi->qi_shrinker);
 
        if (qi->qi_uquotaip) {
index 4d02ad3badab51b8621b2dbffa61db76a02d6784..3ce541753c88bf02a4feead675ecaaa7b01f579b 100644 (file)
@@ -27,20 +27,11 @@ struct list_lru_node {
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
-       /*
-        * Because we use a fixed-size array, this struct can be very big if
-        * MAX_NUMNODES is big. If this becomes a problem this is fixable by
-        * turning this into a pointer and dynamically allocating this to
-        * nr_node_ids. This quantity is firwmare-provided, and still would
-        * provide room for all nodes at the cost of a pointer lookup and an
-        * extra allocation. Because that allocation will most likely come from
-        * a different slab cache than the main structure holding this
-        * structure, we may very well fail.
-        */
-       struct list_lru_node    node[MAX_NUMNODES];
+       struct list_lru_node    *node;
        nodemask_t              active_nodes;
 };
 
+void list_lru_destroy(struct list_lru *lru);
 int list_lru_init(struct list_lru *lru);
 
 /**
index ad7601d844ef45e9967294af51656e4db517cf57..dc716593d568137613bfcfafd84fceb9aa4b4a5d 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/list_lru.h>
+#include <linux/slab.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -120,9 +121,14 @@ EXPORT_SYMBOL_GPL(list_lru_walk_node);
 int list_lru_init(struct list_lru *lru)
 {
        int i;
+       size_t size = sizeof(*lru->node) * nr_node_ids;
+
+       lru->node = kzalloc(size, GFP_KERNEL);
+       if (!lru->node)
+               return -ENOMEM;
 
        nodes_clear(lru->active_nodes);
-       for (i = 0; i < MAX_NUMNODES; i++) {
+       for (i = 0; i < nr_node_ids; i++) {
                spin_lock_init(&lru->node[i].lock);
                INIT_LIST_HEAD(&lru->node[i].list);
                lru->node[i].nr_items = 0;
@@ -130,3 +136,9 @@ int list_lru_init(struct list_lru *lru)
        return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);
+
+void list_lru_destroy(struct list_lru *lru)
+{
+       kfree(lru->node);
+}
+EXPORT_SYMBOL_GPL(list_lru_destroy);