/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (nvdimm->flags & NDD_ALIASING)
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

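/*
 * Fail fast when there is no driver data or the dimm cannot service
 * config-data commands.
 */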
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}

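/**
 * nvdimm_init_config_data - cache a copy of the dimm's label config area
 * @ndd: dimm driver-data with a validated ->nsarea
 *
 * Allocate ->data and fill it by reading the entire config area in
 * ND_CMD_GET_CONFIG_DATA transfers no larger than
 * min(PAGE_SIZE, ->nsarea.max_xfer).
 */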
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                ndd->data = vmalloc(ndd->nsarea.config_size);

        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}

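/**
 * nvdimm_set_config_data - write back a range of the label config area
 * @ndd: dimm driver-data; requires a cached ->data copy
 * @offset: starting byte offset into the config area
 * @buf: data to write
 * @len: number of bytes to write
 *
 * The write is split into ND_CMD_SET_CONFIG_DATA transfers no larger
 * than min(PAGE_SIZE, ->nsarea.max_xfer); firmware status is returned
 * in the trailing 4 bytes of each command buffer.
 */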
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        nvdimm->flags |= NDD_ALIASING;
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

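/*
 * Final put of a dimm's driver-data: release all dpa reservations, free
 * the cached label config area, and drop the device reference held by
 * the driver-data.
 */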
void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

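/**
 * nvdimm_create - allocate and register a dimm device on an nvdimm_bus
 * @nvdimm_bus: parent bus for the new device
 * @provider_data: opaque pointer later retrieved via nvdimm_provider_data()
 * @groups: optional attribute groups for the device
 * @flags: NDD_* flags, e.g. NDD_ALIASING
 * @cmd_mask: set of ND_CMD_* commands the dimm supports
 * @num_flush: number of entries in @flush_wpq
 * @flush_wpq: write-pending-queue flush hint resources
 *
 * Returns the new nvdimm on success, or NULL on allocation failure.
 */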
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

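/*
 * Callback for device_for_each_child(): for each PMEM region that
 * includes this dimm, find the first BLK-capable dpa above all aliased
 * PMEM allocations and either clip the free-space range in info->res
 * (allocation case) or subtract the aliased capacity from
 * info->available (accounting case).
 */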
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new, busy;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping  = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping that is not already
         * covered by a blk allocation.
         */
        busy = 0;
        for_each_dpa_resource(ndd, res) {
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        if (strncmp(res->name, "pmem", 4) == 0) {
                                new = max(blk_start, min(map_end + 1,
                                                        res->end + 1));
                                if (new != blk_start) {
                                        blk_start = new;
                                        goto retry;
                                }
                        } else
                                busy += min(map_end, res->end)
                                        - max(nd_mapping->start, res->start) + 1;
                } else if (nd_mapping->start > res->start
                                && map_end < res->end) {
                        /* total eclipse of the PMEM region mapping */
                        busy += nd_mapping->size;
                        break;
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start + busy;

        return 0;
}

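/*
 * Callback for device_for_each_child(): adjust info->busy for the
 * overlap between a BLK allocation (info->res) and a PMEM
 * interleave-set mapping on this dimm.
 */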
static int blk_dpa_busy(struct device *dev, void *data)
{
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        resource_size_t map_end;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping  = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        map_end = nd_mapping->start + nd_mapping->size - 1;
        if (info->res->start >= nd_mapping->start
                        && info->res->start < map_end) {
                if (info->res->end <= map_end) {
                        info->busy = 0;
                        return 1;
                } else {
                        info->busy -= info->res->end - map_end;
                        return 0;
                }
        } else if (info->res->end >= nd_mapping->start
                        && info->res->end <= map_end) {
                info->busy -= nd_mapping->start - info->res->start;
                return 0;
        } else {
                info->busy -= nd_mapping->size;
                return 0;
        }
}

/**
 * nd_blk_available_dpa - account the unused dpa of a BLK region
 * @nd_region: region whose dimm mapping is scanned for free dpa
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;

                info.res = res;
                info.busy = resource_size(res);
                device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
                info.available -= info.busy;
        }

        return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

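/**
 * nvdimm_allocate_dpa - reserve a dpa range in the dimm's address space
 * @ndd: dimm driver-data hosting the dpa resource tree
 * @label_id: resource name of the form {pmem|blk}-<human readable uuid>
 * @start: first dpa of the reservation
 * @n: number of bytes to reserve
 *
 * Returns the new resource on success, or NULL if the range is
 * unavailable or the name allocation fails.
 */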
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

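/*
 * Verify that the expected number of dimm devices registered on the
 * bus; registration is asynchronous, so flush outstanding registrations
 * before counting.
 */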
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;
        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}