diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda0c29cc673b63c7e7341ae45e5859..8a2bc124a6840f2542355bdd01b543404a333796 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 
 #include "blk.h"
+#include "blk-wbt.h"
 
 unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
@@ -95,6 +96,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
+       lim->max_write_zeroes_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
@@ -107,6 +109,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
+       lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -130,6 +133,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
+       lim->max_write_zeroes_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -297,6 +301,19 @@ void blk_queue_max_write_same_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
 
+/**
+ * blk_queue_max_write_zeroes_sectors - set max sectors for a single
+ *                                      write zeroes
+ * @q:  the request queue for the device
+ * @max_write_zeroes_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+               unsigned int max_write_zeroes_sectors)
+{
+       q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+
 /**
  * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
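The new helper simply records the limit; the default of 0 set in blk_set_default_limits() above means write zeroes stays disabled until a driver opts in. A minimal, hypothetical sketch of a driver advertising the capability at probe time (the device field name is illustrative, not part of this patch):

	/* Hypothetical probe code: report how many 512-byte sectors a single
	 * write-zeroes command may cover; 0 (the default) means unsupported. */
	blk_queue_max_write_zeroes_sectors(q, dev->max_write_zeroes_sectors);
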
@@ -525,6 +542,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
+       t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
+                                       b->max_write_zeroes_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -630,6 +649,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        t->discard_granularity;
        }
 
+       if (b->chunk_sectors)
+               t->chunk_sectors = min_not_zero(t->chunk_sectors,
+                                               b->chunk_sectors);
+
        return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
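Because blk_set_stacking_limits() seeds max_write_zeroes_sectors with UINT_MAX and the stacking above combines it with a plain min() rather than min_not_zero(), a single component that reports 0 disables write zeroes for the whole stack; chunk_sectors, by contrast, uses min_not_zero(), so only devices that actually set it constrain the result. A rough sketch of how a stacking driver would fold component limits together (bdev is an illustrative struct block_device pointer, not from this patch):

	struct queue_limits lim;

	blk_set_stacking_limits(&lim);	/* every limit starts wide open */

	/* For each underlying device: stacking can only tighten limits. */
	blk_stack_limits(&lim, &bdev_get_queue(bdev)->limits,
			 get_start_sect(bdev));
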
@@ -820,31 +843,54 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+       spin_lock_irq(q->queue_lock);
+       if (queueable)
+               clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+       else
+               set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 /**
- * blk_queue_flush - configure queue's cache flush capability
+ * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q:         the request queue for the device
- * @flush:     0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ * @depth:             queue depth
  *
- * Tell block layer cache flush capability of @q.  If it supports
- * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
  */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
-       WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-       if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-               flush &= ~REQ_FUA;
-
-       q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+       q->queue_depth = depth;
+       wbt_set_queue_depth(q->rq_wb, depth);
 }
-EXPORT_SYMBOL_GPL(blk_queue_flush);
+EXPORT_SYMBOL(blk_set_queue_depth);
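blk_set_queue_depth() replaces open-coded assignments to q->queue_depth and also feeds the depth to the writeback-throttling state (q->rq_wb), so drivers that renegotiate queue depth at runtime should go through it. A hedged sketch (queue pointer and depth variable are illustrative):

	/* Hypothetical driver path: propagate a newly negotiated depth so the
	 * request queue and wbt throttling both see the same value. */
	blk_set_queue_depth(q, new_depth);
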
 
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
-       q->flush_not_queueable = !queueable;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:         the request queue for the device
+ * @wc:                write back cache on or off
+ * @fua:       device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+       spin_lock_irq(q->queue_lock);
+       if (wc)
+               queue_flag_set(QUEUE_FLAG_WC, q);
+       else
+               queue_flag_clear(QUEUE_FLAG_WC, q);
+       if (fua)
+               queue_flag_set(QUEUE_FLAG_FUA, q);
+       else
+               queue_flag_clear(QUEUE_FLAG_FUA, q);
+       spin_unlock_irq(q->queue_lock);
+
+       wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
 
 static int __init blk_settings_init(void)
 {
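blk_queue_write_cache() is the replacement for the removed blk_queue_flush(): instead of passing REQ_FLUSH/REQ_FUA flags, a driver states whether the device has a volatile write cache and whether it honors FUA writes, and the queue flags plus wbt state are updated accordingly. A minimal, hypothetical probe-time sketch:

	/* Device has a volatile write cache (flushes needed) but no FUA;
	 * roughly equivalent to the old blk_queue_flush(q, REQ_FLUSH). */
	blk_queue_write_cache(q, true, false);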