#include <linux/gfp.h>
#include "blk.h"
+#include "blk-wbt.h"
unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
+ lim->max_write_zeroes_sectors = 0;
lim->max_discard_sectors = 0;
lim->max_hw_discard_sectors = 0;
lim->discard_granularity = 0;
lim->io_opt = 0;
lim->misaligned = 0;
lim->cluster = 1;
+ lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);
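blk_set_default_limits() now also zeroes max_write_zeroes_sectors and marks the queue BLK_ZONED_NONE, so a driver that never touches these fields advertises no Write Zeroes support and a conventional (non-zoned) device. A minimal sketch of the usual pattern; the mydrv name and the limit values are hypothetical:

#include <linux/blkdev.h>

/* By the time a driver sees the queue, blk_set_default_limits() has
 * typically already been applied (via blk_queue_make_request() and
 * friends), so only the limits this hardware actually constrains need
 * to be set explicitly. */
static void mydrv_configure_queue(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB per command */
	blk_queue_logical_block_size(q, 4096);	/* 4k-native device */
	/* max_write_zeroes_sectors stays 0 (no Write Zeroes) and
	 * zoned stays BLK_ZONED_NONE unless overridden. */
}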
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
+ lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
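With max_write_zeroes_sectors starting at UINT_MAX here, stacking drivers begin wide open and each component device can only narrow the limit. A sketch of the MD/DM-style pattern this supports; the stackdrv name is hypothetical and error handling is elided:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Start from the permissive stacking defaults, then AND in the limits
 * of every component. blk_stack_limits() returns -1 when a component
 * is misaligned, which a real driver would want to report. */
static void stackdrv_rebuild_limits(struct request_queue *top,
				    struct block_device **parts, int n)
{
	struct queue_limits lim;
	int i;

	blk_set_stacking_limits(&lim);
	for (i = 0; i < n; i++)
		blk_stack_limits(&lim, &bdev_get_queue(parts[i])->limits,
				 get_start_sect(parts[i]));
	top->limits = lim;
}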
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
+/**
+ * blk_queue_max_write_zeroes_sectors - set max sectors for a single
+ * write zeroes
+ * @q: the request queue for the device
+ * @max_write_zeroes_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+ unsigned int max_write_zeroes_sectors)
+{
+ q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+
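A driver opts in by setting a non-zero value; the 0 default from blk_set_default_limits() keeps REQ_OP_WRITE_ZEROES off the queue. For example (hypothetical NVMe-style device, 512-byte sectors):

blk_queue_max_write_zeroes_sectors(q, 512);	/* up to 256 KiB zeroed per command */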
/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
+ t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
+ b->max_write_zeroes_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
t->discard_granularity;
}
+ if (b->chunk_sectors)
+ t->chunk_sectors = min_not_zero(t->chunk_sectors,
+ b->chunk_sectors);
+
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
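Note the two different merge rules used above. For max_write_same_sectors and max_write_zeroes_sectors a value of 0 means "unsupported" and must propagate, so plain min() is used; for chunk_sectors a value of 0 means "no boundary" and must neither clobber nor inherit one, hence min_not_zero() behind the b->chunk_sectors check. A worked example with hypothetical values:

#include <linux/blkdev.h>

static void stacking_rules_example(struct queue_limits *t)
{
	blk_set_stacking_limits(t);	/* write_zeroes = UINT_MAX, chunk_sectors = 0 */

	/* Write Zeroes: one component without support disables it for
	 * the whole stack. */
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, 512U);	/* dev A */
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, 0U);	/* dev B -> 0 */

	/* chunk_sectors: dev A reports 0, so its merge is skipped
	 * entirely; dev B's 128 MiB zone size (262144 sectors) is
	 * adopted as the stack's boundary. */
	t->chunk_sectors = min_not_zero(t->chunk_sectors, 262144U);
}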
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+ spin_lock_irq(q->queue_lock);
+ if (queueable)
+ clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ else
+ set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
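Functionally this is the same switch as blk_queue_write_cache() below: the old q->flush_not_queueable bool (removed further down) becomes the QUEUE_FLAG_FLUSH_NQ queue flag, updated under the queue lock like the other flags, and the flush machinery consults it to decide whether a cache flush may be dispatched alongside other requests. Callers are unchanged; a driver whose device cannot queue a flush next to other commands would do, for instance:

blk_queue_flush_queueable(q, false);	/* flushes cannot be queued with other I/O */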
/**
- * blk_queue_flush - configure queue's cache flush capability
+ * blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ * @depth: queue depth
*
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
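+ * Tell the block layer about the actual queue depth of the device so
+ * that writeback throttling (wbt) can scale its limits to it.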
*/
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
- WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
- if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
- flush &= ~REQ_FUA;
-
- q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+ q->queue_depth = depth;
+ wbt_set_queue_depth(q->rq_wb, depth);
}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
+EXPORT_SYMBOL(blk_set_queue_depth);
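blk_set_queue_depth() is new and unrelated to the removed flush setup: it records the device's real queue depth and forwards it to the writeback throttling code, which uses it to judge how many buffered-writeback requests the device can reasonably absorb. A hypothetical caller (ncq_enabled is an assumed driver variable):

blk_set_queue_depth(q, ncq_enabled ? 31 : 1);	/* NCQ device vs. depth-1 legacy device */

The SCSI midlayer, for example, is expected to call this whenever a LLD renegotiates a device's queue depth.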
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
- q->flush_not_queueable = !queueable;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+ spin_lock_irq(q->queue_lock);
+ if (wc)
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ if (fua)
+ queue_flag_set(QUEUE_FLAG_FUA, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FUA, q);
+ spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
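blk_queue_write_cache() replaces the removed blk_queue_flush(): instead of stashing a REQ_FLUSH/REQ_FUA mask in q->flush_flags, drivers state the two capabilities as booleans and the block layer tracks them in the QUEUE_FLAG_WC and QUEUE_FLAG_FUA queue flags (mirroring the write-cache bit into wbt as well). Converting existing callers is mechanical:

/*
 *   blk_queue_flush(q, 0);                   -> blk_queue_write_cache(q, false, false);
 *   blk_queue_flush(q, REQ_FLUSH);           -> blk_queue_write_cache(q, true, false);
 *   blk_queue_flush(q, REQ_FLUSH | REQ_FUA); -> blk_queue_write_cache(q, true, true);
 */
blk_queue_write_cache(q, true, true);	/* e.g. volatile write cache + FUA support */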
static int __init blk_settings_init(void)
{