/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
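
/*
 * Fold the batched samples into the running statistics. New samples
 * accumulate in ->batch/->nr_batch so that the 64-bit division for the
 * mean is amortized over many samples. The updated mean is the weighted
 * average of the old mean and the batch: e.g. a mean of 100 over 3
 * samples combined with a batch sum of 500 over 5 samples gives
 * (100 * 3 + 500) / (3 + 5) = 100.
 */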
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples)
		stat->mean = div64_s64(stat->batch, nr_batch);
	else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}
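
/*
 * Merge the samples in @src into @dst, producing a sample-count
 * weighted mean:
 *
 *	(src->mean * src->nr_samples + dst->mean * dst->nr_samples) /
 *	(src->nr_samples + dst->nr_samples)
 *
 * Batched samples in @src are flushed first so that its ->mean and
 * ->nr_samples are current.
 */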
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	blk_stat_flush_batch(src);

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples)
		dst->mean = src->mean;
	else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}
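
/*
 * Collapse the per-software-queue stats for @q into one read and one
 * write stat. Only contexts whose stats fall in the newest time window
 * are summed, so a context that last saw I/O several windows ago does
 * not skew the current numbers. The scan is lockless against
 * concurrent completions: if every matching context moves to a newer
 * window between the two passes, nothing is summed and we rescan.
 */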
static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	uint64_t latest = 0;
	int i, j, nr;

	blk_stat_init(&dst[BLK_STAT_READ]);
	blk_stat_init(&dst[BLK_STAT_WRITE]);

	nr = 0;
	do {
		uint64_t newest = 0;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
					continue;
				if (ctx->stat[BLK_STAT_READ].time > newest)
					newest = ctx->stat[BLK_STAT_READ].time;
				if (ctx->stat[BLK_STAT_WRITE].time > newest)
					newest = ctx->stat[BLK_STAT_WRITE].time;
			}
		}

		/*
		 * No samples
		 */
		if (!newest)
			break;

		if (newest > latest)
			latest = newest;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (ctx->stat[BLK_STAT_READ].time == newest) {
					blk_stat_sum(&dst[BLK_STAT_READ],
						&ctx->stat[BLK_STAT_READ]);
					nr++;
				}
				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
					blk_stat_sum(&dst[BLK_STAT_WRITE],
						&ctx->stat[BLK_STAT_WRITE]);
					nr++;
				}
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare.
		 */
	} while (!nr);

	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
}
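
/*
 * Fill @dst[BLK_STAT_READ] and @dst[BLK_STAT_WRITE] for @q. blk-mq
 * queues aggregate across all software queues; legacy queues keep a
 * single pair of stats on the queue itself, which is copied directly.
 */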
void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	if (q->mq_ops)
		blk_mq_stat_get(q, dst);
	else {
		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
				sizeof(struct blk_rq_stat));
		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
				sizeof(struct blk_rq_stat));
	}
}
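
/*
 * Like blk_mq_stat_get(), but restricted to the software queues that
 * map to @hctx. Unlike blk_mq_stat_get(), @dst is not initialized
 * here; the caller is expected to pass freshly initialized stats.
 */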
void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
{
	struct blk_mq_ctx *ctx;
	unsigned int i, nr;

	nr = 0;
	do {
		uint64_t newest = 0;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
				continue;

			if (ctx->stat[BLK_STAT_READ].time > newest)
				newest = ctx->stat[BLK_STAT_READ].time;
			if (ctx->stat[BLK_STAT_WRITE].time > newest)
				newest = ctx->stat[BLK_STAT_WRITE].time;
		}

		if (!newest)
			break;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (ctx->stat[BLK_STAT_READ].time == newest) {
				blk_stat_sum(&dst[BLK_STAT_READ],
						&ctx->stat[BLK_STAT_READ]);
				nr++;
			}
			if (ctx->stat[BLK_STAT_WRITE].time == newest) {
				blk_stat_sum(&dst[BLK_STAT_WRITE],
						&ctx->stat[BLK_STAT_WRITE]);
				nr++;
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare, as the window is only updated
		 * occasionally.
		 */
	} while (!nr);
}
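
/*
 * Reset @stat and start a new time window. ->min is primed with -1ULL
 * (the largest u64 value) so the first recorded sample always replaces
 * it; ->time is rounded down to the containing window so that stats
 * gathered in the same interval compare equal.
 */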
static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
	stat->time = time_now & BLK_STAT_NSEC_MASK;
}

void blk_stat_init(struct blk_rq_stat *stat)
{
	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
}
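
/*
 * Two timestamps fall in the same window iff BLK_STAT_NSEC_MASK clears
 * the same low bits from both, i.e. they round down to the same window
 * boundary.
 */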
static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
{
	return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
}

bool blk_stat_is_current(struct blk_rq_stat *stat)
{
	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
}
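
/*
 * Account the completion latency of @rq in @stat. Samples accumulate
 * in the ->batch sum and are folded in later; the batch is flushed
 * early if adding this value would overflow the s64 accumulator, or
 * if the batch is about to reach BLK_RQ_STAT_BATCH samples. If @stat
 * belongs to an older window, it is reset before the sample is
 * recorded.
 */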
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	if (!__blk_stat_is_current(stat, now))
		__blk_stat_init(stat, now);

	value = now - blk_stat_time(&rq->issue_stat);
	if (value > stat->max)
		stat->max = value;
	if (value < stat->min)
		stat->min = value;

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}
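
/*
 * Throw away all collected samples for @q by re-initializing every
 * stat, starting a fresh window at the current time.
 */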
void blk_stat_clear(struct request_queue *q)
{
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		struct blk_mq_ctx *ctx;
		int i, j;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
			}
		}
	} else {
		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
	}
}
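
/*
 * Stamp the current time into ->time when a request is issued. The
 * bits selected by BLK_STAT_TIME_MASK carry the truncated nanosecond
 * timestamp; those under BLK_STAT_MASK are preserved for other users
 * of the field.
 */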
void blk_stat_set_issue_time(struct blk_issue_stat *stat)
{
	stat->time = (stat->time & BLK_STAT_MASK) |
			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
}

/*
 * Enable stat tracking, return whether it was already enabled.
 */
bool blk_stat_enable(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
		return false;
	}

	return true;
}
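
/*
 * A minimal usage sketch for a consumer of these stats. The caller is
 * hypothetical; only blk_stat_enable() and blk_queue_stat_get() come
 * from this file. If blk_stat_enable() returns false, tracking was
 * just switched on and no samples exist yet:
 *
 *	struct blk_rq_stat stat[2];
 *
 *	if (!blk_stat_enable(q))
 *		return;
 *	blk_queue_stat_get(q, stat);
 *	if (stat[BLK_STAT_READ].nr_samples)
 *		pr_debug("read mean %lld nsec\n",
 *			 stat[BLK_STAT_READ].mean);
 */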