Merge branch 'for-4.4/core' into for-next
author    Jens Axboe <axboe@fb.com>
          Tue, 3 Nov 2015 15:42:28 +0000 (08:42 -0700)
committer Jens Axboe <axboe@fb.com>
          Tue, 3 Nov 2015 15:42:28 +0000 (08:42 -0700)
block/blk-mq.c

diff --combined block/blk-mq.c
index 9e6922ded60a9327f19109cd724124009e2f595c,22db728dbe246265c3921b84bb8cf20fb7bce4fe..70819b7b021161a4a806ac1dd1c191544061d340
@@@ -78,13 -78,47 +78,13 @@@ static void blk_mq_hctx_clear_pending(s
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
  }
  
 -static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 -{
 -      while (true) {
 -              int ret;
 -
 -              if (percpu_ref_tryget_live(&q->mq_usage_counter))
 -                      return 0;
 -
 -              if (!(gfp & __GFP_WAIT))
 -                      return -EBUSY;
 -
 -              ret = wait_event_interruptible(q->mq_freeze_wq,
 -                              !atomic_read(&q->mq_freeze_depth) ||
 -                              blk_queue_dying(q));
 -              if (blk_queue_dying(q))
 -                      return -ENODEV;
 -              if (ret)
 -                      return ret;
 -      }
 -}
 -
 -static void blk_mq_queue_exit(struct request_queue *q)
 -{
 -      percpu_ref_put(&q->mq_usage_counter);
 -}
 -
 -static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 -{
 -      struct request_queue *q =
 -              container_of(ref, struct request_queue, mq_usage_counter);
 -
 -      wake_up_all(&q->mq_freeze_wq);
 -}
 -
  void blk_mq_freeze_queue_start(struct request_queue *q)
  {
        int freeze_depth;
  
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
 -              percpu_ref_kill(&q->mq_usage_counter);
 +              percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
  }
@@@ -92,34 -126,18 +92,34 @@@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_s
  
  static void blk_mq_freeze_queue_wait(struct request_queue *q)
  {
 -      wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 +      wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  }
  
  /*
   * Guarantee no request is in use, so we can change any data structure of
   * the queue afterward.
   */
 -void blk_mq_freeze_queue(struct request_queue *q)
 +void blk_freeze_queue(struct request_queue *q)
  {
 +      /*
 +       * In the !blk_mq case we are only calling this to kill the
 +       * q_usage_counter, otherwise this increases the freeze depth
 +       * and waits for it to return to zero.  For this reason there is
 +       * no blk_unfreeze_queue(), and blk_freeze_queue() is not
 +       * exported to drivers as the only user for unfreeze is blk_mq.
 +       */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
  }
 +
 +void blk_mq_freeze_queue(struct request_queue *q)
 +{
 +      /*
 +       * ...just an alias to keep freeze and unfreeze actions balanced
 +       * in the blk_mq_* namespace
 +       */
 +      blk_freeze_queue(q);
 +}
  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
  
  void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
 -              percpu_ref_reinit(&q->mq_usage_counter);
 +              percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
  }
@@@ -238,7 -256,7 +238,7 @@@ struct request *blk_mq_alloc_request(st
        struct blk_mq_alloc_data alloc_data;
        int ret;
  
 -      ret = blk_mq_queue_enter(q, gfp);
 +      ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
  
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
 -              blk_mq_queue_exit(q);
 +              blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@@ -280,7 -298,7 +280,7 @@@ static void __blk_mq_free_request(struc
  
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
 -      blk_mq_queue_exit(q);
 +      blk_queue_exit(q);
  }
  
  void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@@ -1167,7 -1185,11 +1167,7 @@@ static struct request *blk_mq_map_reque
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
  
 -      if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 -              bio_io_error(bio);
 -              return NULL;
 -      }
 -
 +      blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
  
@@@ -1673,7 -1695,7 +1673,7 @@@ static int blk_mq_init_hctx(struct requ
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
-       hctx->flags = set->flags;
+       hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
  
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
@@@ -1860,27 -1882,26 +1860,26 @@@ static void blk_mq_map_swqueue(struct r
        }
  }
  
- static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
  {
        struct blk_mq_hw_ctx *hctx;
-       struct request_queue *q;
-       bool shared;
        int i;
  
-       if (set->tag_list.next == set->tag_list.prev)
-               shared = false;
-       else
-               shared = true;
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (shared)
+                       hctx->flags |= BLK_MQ_F_TAG_SHARED;
+               else
+                       hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+       }
+ }
+ 
+ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+ {
+       struct request_queue *q;
  
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_freeze_queue(q);
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       if (shared)
-                               hctx->flags |= BLK_MQ_F_TAG_SHARED;
-                       else
-                               hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-               }
+               queue_set_hctx_shared(q, shared);
                blk_mq_unfreeze_queue(q);
        }
  }
@@@ -1891,7 -1912,12 +1890,12 @@@ static void blk_mq_del_queue_tag_set(st
  
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
-       blk_mq_update_tag_set_depth(set);
+       if (list_is_singular(&set->tag_list)) {
+               /* just transitioned to unshared */
+               set->flags &= ~BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, false);
+       }
        mutex_unlock(&set->tag_list_lock);
  }
  
@@@ -1901,8 -1927,17 +1905,17 @@@ static void blk_mq_add_queue_tag_set(st
        q->tag_set = set;
  
        mutex_lock(&set->tag_list_lock);
+       /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+       if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+               set->flags |= BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, true);
+       }
+       if (set->flags & BLK_MQ_F_TAG_SHARED)
+               queue_set_hctx_shared(q, true);
        list_add_tail(&q->tag_set_list, &set->tag_list);
-       blk_mq_update_tag_set_depth(set);
        mutex_unlock(&set->tag_list_lock);
  }
  
@@@ -1989,6 -2024,14 +2002,6 @@@ struct request_queue *blk_mq_init_alloc
                hctxs[i]->queue_num = i;
        }
  
 -      /*
 -       * Init percpu_ref in atomic mode so that it's faster to shutdown.
 -       * See blk_register_queue() for details.
 -       */
 -      if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 -                          PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 -              goto err_hctxs;
 -
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
  
@@@ -2069,6 -2112,8 +2082,6 @@@ void blk_mq_free_queue(struct request_q
  
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
 -
 -      percpu_ref_exit(&q->mq_usage_counter);
  }
  
  /* Basically redo blk_mq_init_queue with queue frozen */
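
For reference, the pattern the merged changes rely on when touching per-hctx state is the freeze/unfreeze pair visible in the diff: blk_mq_freeze_queue() drains q_usage_counter so no request is in flight, the caller mutates the queue, and blk_mq_unfreeze_queue() reinits the percpu ref and wakes waiters. Below is a minimal sketch of that pattern, essentially what blk_mq_update_tag_set_depth() does via queue_set_hctx_shared(); the function name my_driver_set_shared_tags() is hypothetical and not part of this commit.

/*
 * Sketch only: freeze -> mutate -> unfreeze, as used by
 * blk_mq_update_tag_set_depth() in this diff.  The helper name is
 * illustrative; only blk-mq core touches BLK_MQ_F_TAG_SHARED in practice.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void my_driver_set_shared_tags(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* Wait until q_usage_counter drops to zero; no request is in use. */
	blk_mq_freeze_queue(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}

	/* Reinit the percpu ref and wake anyone blocked in mq_freeze_wq. */
	blk_mq_unfreeze_queue(q);
}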