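/*
 * blk-mq sysfs support: exposes per-hardware-queue (hctx) and
 * per-software-queue (ctx) state and statistics under
 * /sys/block/<disk>/mq/.
 */
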
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

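/*
 * The mq, hctx and ctx kobjects are embedded in structures whose lifetime
 * is tied to the request queue, so there is nothing to free here; the
 * empty release method just satisfies the kobject core.
 */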
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

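/*
 * Generic show/store entry points.  Recover the attribute entry and the
 * owning ctx/hctx via container_of(), then invoke the handler under
 * q->sysfs_lock; a dying queue yields -ENOENT so attribute access cannot
 * race with queue teardown.
 */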
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

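/*
 * Per-software-context (per-CPU) statistics.  The two-column attributes
 * print counter[1] followed by counter[0]; the counters are indexed by
 * rw_is_sync(), so this reads as "<sync> <async>".
 */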
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

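/*
 * Format the requests on @list as one "\t<pointer>\n" line each, capped
 * at PAGE_SIZE - 1 bytes.  When the next entry would overflow the page,
 * a "\t...\n" marker is printed instead (replacing the previous entry if
 * even the marker does not fit) and the walk stops.
 */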
static ssize_t sysfs_list_show(char *page, struct list_head *list,
                               const char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                /* "\t" + pointer printed in hex + "\n" */
                const int rq_len = 2 * sizeof(rq) + 2;

                /* the next entry would overflow the page */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* drop the previous entry if even "\t...\n" won't fit */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
                       hctx->poll_considered, hctx->poll_invoked,
                       hctx->poll_success);
}

/* Writing anything to "io_poll" resets the polling statistics. */
static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t size)
{
        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

        return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

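/*
 * Dump the dispatch histogram: dispatched[0] counts queue runs that moved
 * no requests; each following bucket i is labeled with 2^(i-1), the low
 * end of the batch-size range it accumulates.
 */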
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }

        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

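/* Print the CPUs served by this hardware queue as a comma-separated list. */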
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(page + ret, "%u", i);
                else
                        ret += sprintf(page + ret, ", %u", i);

                first = 0;
        }

        ret += sprintf(page + ret, "\n");
        return ret;
}

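/*
 * Attribute tables.  Everything is read-only except "io_poll", which can
 * also be written to reset the polling counters.
 */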
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
        .store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

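/*
 * Register/unregister one hardware queue and the software queues mapped
 * to it.  Hardware queues with no mapped software queues are skipped so
 * that empty "mq/<n>" directories are not created.
 */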
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

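/*
 * Tear down the whole "mq" directory: delete the per-hctx and per-ctx
 * kobjects, drop the references taken at registration time (including
 * the one on the disk's device kobject) and mark sysfs uninitialized.
 */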
static void __blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_disk(struct gendisk *disk)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_disk(disk);
        blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

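/* Initialize the queue-wide "mq" kobject and one kobject per software
 * context (one per possible CPU). */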
static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

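/*
 * Create /sys/block/<disk>/mq and register every hardware context under
 * it.  CPU hotplug is disabled while the hierarchy is built; on any
 * failure everything registered so far is torn down again.
 */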
int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_disk(disk);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

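/*
 * blk_mq_sysfs_unregister() / blk_mq_sysfs_register() drop and re-create
 * the per-hctx directories while the queue itself stays registered, e.g.
 * when the CPU-to-queue mapping is rebuilt.  Both are no-ops until the
 * initial registration has completed.
 */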
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}