/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}
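/*
 * Worked example for the merge above (illustrative numbers, not from the
 * code): with a mean of 100 over nr_samples = 3 and a batched sum of 500
 * over nr_batch = 2, the new mean is (100 * 3 + 500) / (3 + 2) = 160,
 * i.e. the batch is folded in as if each sample had been added directly.
 */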
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}

	dst->nr_samples += src->nr_samples;
}
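/*
 * Since src was just flushed, src->mean covers all of src's samples, so
 * the combined mean above is the sample-count weighted average of the
 * two means; min and max merge by plain comparison.
 */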
static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	uint64_t latest = 0;
	int i, j, nr;

	blk_stat_init(&dst[READ]);
	blk_stat_init(&dst[WRITE]);

	nr = 0;
	do {
		uint64_t newest = 0;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_flush_batch(&ctx->stat[READ]);
				blk_stat_flush_batch(&ctx->stat[WRITE]);

				if (!ctx->stat[READ].nr_samples &&
				    !ctx->stat[WRITE].nr_samples)
					continue;
				if (ctx->stat[READ].time > newest)
					newest = ctx->stat[READ].time;
				if (ctx->stat[WRITE].time > newest)
					newest = ctx->stat[WRITE].time;
			}
		}

		/* no samples in any ctx, nothing to sum */
		if (!newest)
			break;

		if (newest > latest)
			latest = newest;
		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (ctx->stat[READ].time == newest) {
					blk_stat_sum(&dst[READ],
						     &ctx->stat[READ]);
					nr++;
				}
				if (ctx->stat[WRITE].time == newest) {
					blk_stat_sum(&dst[WRITE],
						     &ctx->stat[WRITE]);
					nr++;
				}
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare.
		 */
	} while (!nr);

	dst[READ].time = dst[WRITE].time = latest;
}
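/*
 * The two passes above exist because per-ctx windows may differ: the
 * first pass finds the newest time window across all software queues,
 * the second sums only stats stamped with that window, so samples from
 * stale windows never get mixed in. If a window rolls over between the
 * passes, no entry matches, nr stays 0, and the outer loop retries.
 */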
void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	if (q->mq_ops)
		blk_mq_stat_get(q, dst);
	else {
		blk_stat_flush_batch(&q->rq_stats[READ]);
		blk_stat_flush_batch(&q->rq_stats[WRITE]);
		memcpy(&dst[READ], &q->rq_stats[READ],
				sizeof(struct blk_rq_stat));
		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
				sizeof(struct blk_rq_stat));
	}
}
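/*
 * Usage sketch (hypothetical, not part of the original file): a caller
 * could snapshot the current window for both directions like this. The
 * function name and the pr_info() reporting are illustrative only.
 */
static void __maybe_unused blk_stat_example_report(struct request_queue *q)
{
	struct blk_rq_stat stat[2];

	blk_queue_stat_get(q, stat);
	if (stat[READ].nr_samples)
		pr_info("read: mean %lld nsec over %d samples\n",
			stat[READ].mean, stat[READ].nr_samples);
	if (stat[WRITE].nr_samples)
		pr_info("write: mean %lld nsec over %d samples\n",
			stat[WRITE].mean, stat[WRITE].nr_samples);
}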
void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
{
	struct blk_mq_ctx *ctx;
	unsigned int i, nr;

	nr = 0;
	do {
		uint64_t newest = 0;

		hctx_for_each_ctx(hctx, ctx, i) {
			blk_stat_flush_batch(&ctx->stat[READ]);
			blk_stat_flush_batch(&ctx->stat[WRITE]);

			if (!ctx->stat[READ].nr_samples &&
			    !ctx->stat[WRITE].nr_samples)
				continue;

			if (ctx->stat[READ].time > newest)
				newest = ctx->stat[READ].time;
			if (ctx->stat[WRITE].time > newest)
				newest = ctx->stat[WRITE].time;
		}

		if (!newest)
			break;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (ctx->stat[READ].time == newest) {
				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
				nr++;
			}
			if (ctx->stat[WRITE].time == newest) {
				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
				nr++;
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare, as the window is only updated
		 * occasionally
		 */
	} while (!nr);
}
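/*
 * Same windowing scheme as blk_mq_stat_get() above, but scoped to one
 * hardware queue. Note that dst is not reinitialized here, so callers
 * are expected to blk_stat_init() it first.
 */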
static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
	stat->time = time_now & BLK_STAT_NSEC_MASK;
}
void blk_stat_init(struct blk_rq_stat *stat)
{
	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
}
static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
{
	return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
}
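/*
 * Window illustration (assuming the ~134ms granularity implied by
 * BLK_STAT_NSEC_MASK): two timestamps taken within the same window
 * compare equal once masked, so samples landing in one window aggregate
 * together and the first sample of a new window resets the stats.
 */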
bool blk_stat_is_current(struct blk_rq_stat *stat)
{
	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
}
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	if (!__blk_stat_is_current(stat, now))
		__blk_stat_init(stat, now);

	value = now - blk_stat_time(&rq->issue_stat);
	if (value > stat->max)
		stat->max = value;
	if (value < stat->min)
		stat->min = value;

	/* flush before the 64-bit batch sum overflows or the batch fills up */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
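/*
 * Pairing sketch (hypothetical call sites, for illustration): the issue
 * path stamps the request and the completion path accounts the latency:
 *
 *	blk_stat_set_issue_time(&rq->issue_stat);	at dispatch
 *	...
 *	blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);	at completion
 */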
void blk_stat_clear(struct request_queue *q)
{
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		struct blk_mq_ctx *ctx;
		int i, j;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_init(&ctx->stat[READ]);
				blk_stat_init(&ctx->stat[WRITE]);
			}
		}
	} else {
		blk_stat_init(&q->rq_stats[READ]);
		blk_stat_init(&q->rq_stats[WRITE]);
	}
}
void blk_stat_set_issue_time(struct blk_issue_stat *stat)
{
	stat->time = (stat->time & BLK_STAT_MASK) |
			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
}
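/*
 * Note on the masking above: the low BLK_STAT_TIME_MASK bits of ->time
 * carry the issue timestamp, while the bits kept by BLK_STAT_MASK are
 * reserved for other users of the field, so stamping the time leaves
 * them intact.
 */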
/*
 * Enable stat tracking, return whether it was already enabled
 */
bool blk_stat_enable(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
		return false;
	}

	return true;
}
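/*
 * A false return tells the caller that tracking was only just switched
 * on, so the first sampling window may still be empty and any decision
 * based on the stats should wait for a later window.
 */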