git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
blkcg: replace blkcg_policy->pd_size with ->pd_alloc/free_fn() methods
author Tejun Heo <tj@kernel.org>
Tue, 18 Aug 2015 21:55:11 +0000 (14:55 -0700)
committer Jens Axboe <axboe@fb.com>
Tue, 18 Aug 2015 22:49:16 +0000 (15:49 -0700)
A blkg (blkcg_gq) represents the relationship between a cgroup and
request_queue.  Each active policy has a pd (blkg_policy_data) on each
blkg.  The pd's were allocated by blkcg core and each policy could
request to allocate extra space at the end by setting
blkcg_policy->pd_size larger than the size of pd.

This is a bit unusual but was done this way mostly to simplify error
handling and all the existing use cases could be handled this way;
however, this is becoming too restrictive now that percpu memory can
be allocated without blocking.

This introduces two new mandatory blkcg_policy methods - pd_alloc_fn()
and pd_free_fn() - which are used to allocate and release pd for a
given policy.  As pd allocation is now done from policy side, it can
simply allocate a larger area which embeds pd at the beginning.  This
change makes ->pd_size pointless.  Removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-cgroup.c
block/blk-throttle.c
block/cfq-iosched.c
include/linux/blk-cgroup.h

index 4defbbabc0ff20d2fd1caca44e266f20578db65d..d1bc6099bd1e82e09747fb45a9c241bb7277291e 100644 (file)
@@ -68,7 +68,8 @@ static void blkg_free(struct blkcg_gq *blkg)
                return;
 
        for (i = 0; i < BLKCG_MAX_POLS; i++)
-               kfree(blkg->pd[i]);
+               if (blkg->pd[i])
+                       blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->blkcg != &blkcg_root)
                blk_exit_rl(&blkg->rl);
@@ -114,7 +115,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                        continue;
 
                /* alloc per-policy data and attach it to blkg */
-               pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+               pd = pol->pd_alloc_fn(gfp_mask, q->node);
                if (!pd)
                        goto err_free;
 
@@ -1057,7 +1058,7 @@ int blkcg_activate_policy(struct request_queue *q,
        blk_queue_bypass_start(q);
 pd_prealloc:
        if (!pd_prealloc) {
-               pd_prealloc = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+               pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
                if (!pd_prealloc) {
                        ret = -ENOMEM;
                        goto out_bypass_end;
@@ -1072,7 +1073,7 @@ pd_prealloc:
                if (blkg->pd[pol->plid])
                        continue;
 
-               pd = kzalloc_node(pol->pd_size, GFP_NOWAIT, q->node);
+               pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
                if (!pd)
                        swap(pd, pd_prealloc);
                if (!pd) {
@@ -1093,7 +1094,8 @@ pd_prealloc:
        spin_unlock_irq(q->queue_lock);
 out_bypass_end:
        blk_queue_bypass_end(q);
-       kfree(pd_prealloc);
+       if (pd_prealloc)
+               pol->pd_free_fn(pd_prealloc);
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1128,8 +1130,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);
 
-               kfree(blkg->pd[pol->plid]);
-               blkg->pd[pol->plid] = NULL;
+               if (blkg->pd[pol->plid]) {
+                       pol->pd_free_fn(blkg->pd[pol->plid]);
+                       blkg->pd[pol->plid] = NULL;
+               }
 
                spin_unlock(&blkg->blkcg->lock);
        }
@@ -1151,9 +1155,6 @@ int blkcg_policy_register(struct blkcg_policy *pol)
        struct blkcg *blkcg;
        int i, ret;
 
-       if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
-               return -EINVAL;
-
        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);
 
index b23193518ac7a964a9f0d75b26c6fa844f6eea40..f1dd691c53593ca91dce47fbd42474b13887fabe 100644 (file)
@@ -403,6 +403,11 @@ static void throtl_service_queue_exit(struct throtl_service_queue *sq)
        del_timer_sync(&sq->pending_timer);
 }
 
+static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
+{
+       return kzalloc_node(sizeof(struct throtl_grp), gfp, node);
+}
+
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
        struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -493,6 +498,11 @@ static void throtl_pd_exit(struct blkcg_gq *blkg)
        throtl_service_queue_exit(&tg->service_queue);
 }
 
+static void throtl_pd_free(struct blkg_policy_data *pd)
+{
+       kfree(pd);
+}
+
 static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
        struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -1468,12 +1478,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
 }
 
 static struct blkcg_policy blkcg_policy_throtl = {
-       .pd_size                = sizeof(struct throtl_grp),
        .cftypes                = throtl_files,
 
+       .pd_alloc_fn            = throtl_pd_alloc,
        .pd_init_fn             = throtl_pd_init,
        .pd_online_fn           = throtl_pd_online,
        .pd_exit_fn             = throtl_pd_exit,
+       .pd_free_fn             = throtl_pd_free,
        .pd_reset_stats_fn      = throtl_pd_reset_stats,
 };
 
index 9c9ec7cc9f999dbd722eea8a46e21a181106275a..69ce2883099e321c4fa92e79b378fc383fdfad5c 100644 (file)
@@ -1582,6 +1582,11 @@ static void cfq_cpd_init(const struct blkcg *blkcg)
        }
 }
 
+static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
+{
+       return kzalloc_node(sizeof(struct cfq_group), gfp, node);
+}
+
 static void cfq_pd_init(struct blkcg_gq *blkg)
 {
        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1618,6 +1623,11 @@ static void cfq_pd_offline(struct blkcg_gq *blkg)
        cfqg_stats_xfer_dead(cfqg);
 }
 
+static void cfq_pd_free(struct blkg_policy_data *pd)
+{
+       kfree(pd);
+}
+
 /* offset delta from cfqg->stats to cfqg->dead_stats */
 static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
                                        offsetof(struct cfq_group, stats);
@@ -4633,13 +4643,14 @@ static struct elevator_type iosched_cfq = {
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static struct blkcg_policy blkcg_policy_cfq = {
-       .pd_size                = sizeof(struct cfq_group),
        .cpd_size               = sizeof(struct cfq_group_data),
        .cftypes                = cfq_blkcg_files,
 
        .cpd_init_fn            = cfq_cpd_init,
+       .pd_alloc_fn            = cfq_pd_alloc,
        .pd_init_fn             = cfq_pd_init,
        .pd_offline_fn          = cfq_pd_offline,
+       .pd_free_fn             = cfq_pd_free,
        .pd_reset_stats_fn      = cfq_pd_reset_stats,
 };
 #endif
index db822880242a88cc873e221a980a9b3ca271eebe..bd173ea360ce92cc04baa26173d0a17ef89a2e50 100644 (file)
@@ -68,13 +68,11 @@ struct blkg_rwstat {
  * request_queue (q).  This is used by blkcg policies which need to track
  * information per blkcg - q pair.
  *
- * There can be multiple active blkcg policies and each has its private
- * data on each blkg, the size of which is determined by
- * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
- * together with blkg and invokes pd_init/exit_fn() methods.
- *
- * Such private data must embed struct blkg_policy_data (pd) at the
- * beginning and pd_size can't be smaller than pd.
+ * There can be multiple active blkcg policies and each blkg:policy pair is
+ * represented by a blkg_policy_data which is allocated and freed by each
+ * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
+ * area by allocating larger data structure which embeds blkg_policy_data
+ * at the beginning.
  */
 struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
@@ -126,16 +124,16 @@ struct blkcg_gq {
 };
 
 typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
+typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
 typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
 
 struct blkcg_policy {
        int                             plid;
-       /* policy specific private data size */
-       size_t                          pd_size;
        /* policy specific per-blkcg data size */
        size_t                          cpd_size;
        /* cgroup files for the policy */
@@ -143,10 +141,12 @@ struct blkcg_policy {
 
        /* operations */
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
+       blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_exit_pd_fn            *pd_exit_fn;
+       blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
 };