git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - block/blk-mq-sched.h
blk-throttle: ignore idle cgroup limit
[karo-tx-linux.git] / block / blk-mq-sched.h
index 7b5f3b95c78e93c41d7e680090fa67b638ea5be2..a75b16b123f7aadac672651a7eef5c79f5553e16 100644 (file)
@@ -122,17 +122,27 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
        return false;
 }
 
-static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
+/*
+ * Mark a hardware queue as needing a restart.
+ */
+static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-               if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
-                       struct request_queue *q = hctx->queue;
+}
+
+/*
+ * Mark a hardware queue and the request queue it belongs to as needing a
+ * restart.
+ */
+static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
+{
+       struct request_queue *q = hctx->queue;
 
-                       if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-                               set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-               }
-       }
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+       if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
+               set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
 }
 
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)