#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

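/*
 * Sysfs support for blk-mq: each request queue gets an "mq" kobject under
 * its device, one numbered directory per hardware queue (hctx), and a
 * "cpuN" child directory per software queue (ctx) mapped to that hardware
 * queue.
 */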
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        /* The hctx is freed only when its kobject's last reference drops. */
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}

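/*
 * Typed attribute entries: each pairs a sysfs attribute with show/store
 * handlers that take the software-queue (ctx) or hardware-queue (hctx)
 * object directly, so individual attributes need no container_of() glue.
 */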
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

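/*
 * Generic show/store dispatchers. Each resolves the entry and object from
 * the raw kobject/attribute pair, then invokes the typed handler under
 * q->sysfs_lock, refusing the access if the queue is already dying.
 */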
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

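/* Attribute handlers exposed per hardware queue: tag counts and CPU list. */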
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        /* Emit the mapped CPUs as a comma-separated list. */
        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

static struct attribute *default_ctx_attrs[] = {
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_hw_sysfs_release,
};

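/*
 * Register/unregister one hardware queue and its software-queue children.
 * A hctx with no mapped ctxs is skipped entirely.
 */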
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        blk_mq_debugfs_unregister_hctxs(q);

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        /* Drop the device reference taken in blk_mq_register_dev(). */
        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}

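/*
 * kobject init/teardown for the queue-wide "mq" kobject and the per-CPU
 * ctx kobjects. blk_mq_sysfs_deinit() drops the references taken in
 * blk_mq_sysfs_init(); the final kobject_put() lands in the release
 * handlers above.
 */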
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(&q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

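/*
 * Attach the whole mq hierarchy to the device: add the "mq" kobject, emit
 * KOBJ_ADD, then register every hardware queue. On partial failure,
 * everything registered so far is torn down again.
 */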
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        blk_mq_debugfs_register(q, kobject_name(&dev->kobj));

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

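/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() drop and re-create the
 * per-hctx directories without touching the "mq" kobject itself, for
 * callers that remap hardware queues (e.g. during CPU hotplug).
 */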
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        blk_mq_debugfs_unregister_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        blk_mq_debugfs_register_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}