struct list_head queuelist;
union {
struct call_single_data csd;
- unsigned long fifo_time;
+ u64 fifo_time;
};
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
- u64 cmd_flags;
+ int cpu;
unsigned cmd_type;
+ u64 cmd_flags;
unsigned long atomic_flags;
- int cpu;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
sector_t __sector; /* sector cursor */
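
A minimal sketch of what the "NEVER access directly" rule means in practice, using the existing blk_rq_pos(), blk_rq_bytes() and blk_rq_sectors() accessors; example_show_request() itself is purely illustrative:

/* Illustrative: read position/length via accessors, not __sector/__data_len. */
static void example_show_request(struct request *rq)
{
	sector_t start = blk_rq_pos(rq);            /* reads __sector */
	unsigned int bytes = blk_rq_bytes(rq);      /* reads __data_len */
	unsigned int sectors = blk_rq_sectors(rq);  /* __data_len in 512-byte sectors */

	pr_info("request at sector %llu: %u bytes (%u sectors)\n",
		(unsigned long long)start, bytes, sectors);
}
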
#define QUEUE_FLAG_WC 23 /* Write back caching */
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
+#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
+#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
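
A hedged sketch of how the new flag and macro fit together, with illustrative function names (only queue_flag_set_unlocked(), bdev_get_queue() and the blk_queue_dax() macro above are existing interfaces):

/* Driver side (illustrative): advertise DAX capability on the queue. */
static void example_advertise_dax(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
}

/* Filesystem side (illustrative): refuse -o dax on devices without the flag. */
static bool example_dax_supported(struct block_device *bdev)
{
	return blk_queue_dax(bdev_get_queue(bdev));
}
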
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
(offset & (q->limits.chunk_sectors - 1));
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+ sector_t offset)
{
struct request_queue *q = rq->q;
if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
return blk_queue_get_max_sectors(q, req_op(rq));
- return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+ return min(blk_max_size_offset(q, offset),
blk_queue_get_max_sectors(q, req_op(rq)));
}
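
With the new offset argument, callers pass the sector at which the bio would land instead of the function always deriving it from blk_rq_pos(rq). A sketch of the back-merge case, modelled on the size check in blk-merge.c; example_back_merge_fits() is an illustrative wrapper:

/* Illustrative: does appending @bio keep the request within its size limit? */
static bool example_back_merge_fits(struct request *req, struct bio *bio)
{
	return blk_rq_sectors(req) + bio_sectors(bio) <=
	       blk_rq_get_max_sectors(req, blk_rq_pos(req));
}
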