#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

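/*
 * Release handler shared by q->mq_kobj and the per-cpu ctx kobjects.
 * Both are embedded in memory owned by the request_queue and freed with
 * it, so there is nothing to do when the last reference goes away.
 */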
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

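/*
 * A hardware context is freed only when the last reference to its
 * kobject is dropped, which keeps the lifetimes of the hctx and its
 * kobject consistent.
 */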
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);
        kfree(hctx->ctxs);
        kfree(hctx);
}

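/*
 * Typed attribute wrappers: sysfs hands us a bare struct attribute, and
 * the show/store ops below use container_of() to recover these entries
 * and the object they belong to.
 */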
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

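/*
 * show/store for per-cpu software queue (ctx) attributes. The handler
 * runs under q->sysfs_lock and returns -ENOENT once the queue is dying.
 */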
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

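/* Same pattern as above, for hardware queue (hctx) attributes. */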
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

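/* Read-only attributes describing this hardware queue's tag space. */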
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

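/* Emit the CPUs served by this hardware queue as a comma-separated list. */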
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

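/*
 * No per-ctx attributes are exported yet; the empty array simply gives
 * blk_mq_ctx_ktype a valid default_attrs.
 */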
static struct attribute *default_ctx_attrs[] = {
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

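/*
 * The ktypes bind ops, default attributes and release handlers for the
 * mq directory itself, the software queues and the hardware queues.
 */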
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_hw_sysfs_release,
};

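/*
 * Delete a hardware queue's directory and its ctx subdirectories from
 * sysfs. The kobjects themselves stay around until their references
 * are dropped.
 */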
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

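/*
 * Create mq/<queue_num> under the queue's mq directory, with one cpu<n>
 * subdirectory per software queue mapped to this hardware queue.
 */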
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

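/*
 * Tear down the whole mq sysfs hierarchy plus the per-hctx debugfs
 * entries, and drop the device reference taken at registration time.
 */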
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        blk_mq_debugfs_unregister_hctxs(q);

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

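/*
 * Drop the references taken in blk_mq_sysfs_init(). The matching
 * release handler is a no-op, since these kobjects live in queue-owned
 * memory.
 */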
void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(&q->mq_kobj);
}

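/*
 * Initialize the mq kobject and a ctx kobject for every possible CPU,
 * so they can be added to and deleted from sysfs later on.
 */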
void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

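/*
 * Register the full mq hierarchy for a device. CPU hotplug is disabled
 * while the ctx<->hctx mappings are walked; a partial failure unwinds
 * everything registered so far.
 */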
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        blk_mq_debugfs_register(q, kobject_name(&dev->kobj));

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

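/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() re-create the
 * hctx directories after the ctx<->hctx mappings have changed (e.g. on
 * CPU hotplug); both are no-ops until initial registration has finished.
 */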
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        blk_mq_debugfs_unregister_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        blk_mq_debugfs_register_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}