/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}

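/*
 * A hypothetical worked example of the page-sharing check above: if
 * hint 0 lives at offset 0x40 of some page and hint 1 at offset 0x80
 * of the same page, both resolve to the same pfn, so hint 1 reuses
 * hint 0's mapping (masked back to the page boundary) instead of
 * calling devm_nvdimm_ioremap() a second time.
 */
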
int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out entries that are duplicate flush hint addresses so
         * nvdimm_flush() does not write the same hint more than once.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}

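/*
 * Illustrative sizing for nd_region_activate() (numbers assumed): for
 * a two-dimm region where each dimm exposes two flush hints,
 * num_flush == 2, hints_shift == ilog2(2) == 1, and flush_data_size
 * reserves one "no-hint" null slot plus two hint slots per dimm on
 * top of the base sizeof(void *).
 */
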
static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        ida_simple_remove(&region_ida, nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - map a region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This value is also the region's 'nstype' attribute, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_ALIASING, &nvdimm->flags))
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

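/*
 * A minimal sketch of how the nstype feeds driver matching, assuming
 * the ND_DEVICE_MODALIAS_FMT ("nd:t%d") definition from the uapi
 * ndctl.h header:
 *
 *      sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
 *                      nd_region_to_nstype(nd_region));
 */
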
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_memory(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        nvdimm_flush(nd_region);

        return len;
}
static DEVICE_ATTR_RW(deep_flush);

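/*
 * Example userspace interaction (sysfs path assumed): writing "1" to
 * /sys/bus/nd/devices/regionN/deep_flush triggers nvdimm_flush(),
 * writing "0" fails with -EINVAL, and reading the attribute reports
 * the nvdimm_has_flush() result.
 */
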
static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_memory(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }

        return available;
}

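/*
 * A sketch of the retry above (sizes assumed): if dimm0 reports a BLK
 * overlap of 4M but dimm1 later reports 8M, accounting restarts with
 * blk_max_overlap = 8M so that every mapping subtracts the same
 * worst-case BLK aliasing before the PMEM capacities are totaled.
 */
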
static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size.  Of course, this value is potentially invalidated the
         * moment nvdimm_bus_lock() is dropped, but it is userspace's
         * problem to not race itself.
         */
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
                return 0;

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        struct nd_region *nd_region;

        if (!probe && is_nd_region(dev)) {
                int i;

                nd_region = to_nd_region(dev);
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        mutex_lock(&nd_mapping->lock);
                        nd_mapping_free_labels(nd_mapping);
                        mutex_unlock(&nd_mapping->lock);

                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                                atomic_dec(&nvdimm->busy);
                }
        }
        if (dev->parent && is_nd_region(dev->parent) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_dax(dev) && probe) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size);
}

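/*
 * Example mappingN output (device name and sizes assumed):
 * "nmem0,0,17179869184", i.e. "<dimm>,<dpa-start>,<size>" per the
 * sprintf() format above.
 */
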
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,          \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        return mappingN(dev, buf, idx);                         \
}                                                               \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_dbg(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

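/*
 * A minimal sketch of the expected calling convention, modeled on the
 * BTT/BLK I/O paths (body illustrative):
 *
 *      lane = nd_region_acquire_lane(nd_region);
 *      ... issue I/O through the per-lane resource ...
 *      nd_region_release_lane(nd_region, lane);
 *
 * Note the unconditional put_cpu() in nd_region_release_lane() pairs
 * with the get_cpu() taken in nd_region_acquire_lane(); the inner
 * get_cpu()/put_cpu() only guards the release path's own per-cpu
 * access.
 */
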
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
                nd_region = kzalloc(sizeof(struct nd_region)
                                + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_device_register(dev);

        return nd_region;

 err_percpu:
        ida_simple_remove(&region_ida, nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

        /*
         * The first wmb() is needed to 'sfence' all previous writes
         * such that they are architecturally visible for the platform
         * buffer flush.  Note that we've already arranged for pmem
         * writes to avoid the cache via memcpy_flushcache().  The final
         * wmb() ensures ordering for the NVDIMM flush write.
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

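/*
 * Typical write-path usage (a sketch, not a complete driver): copy
 * data with the cache-bypassing helper referenced in the comment
 * above, then flush the posted-write queues so the data is durable
 * on media:
 *
 *      memcpy_flushcache(pmem_addr, src, len);
 *      nvdimm_flush(nd_region);
 */
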
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints; assume a
         * platform persistence mechanism like ADR
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

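/*
 * A representative caller check (modeled on the pmem driver):
 *
 *      if (nvdimm_has_flush(nd_region) < 0)
 *              dev_warn(dev, "unable to guarantee persistence of writes\n");
 *
 * i.e. only the -ENXIO case is treated as a problem; a return of 0
 * still implies platform-level persistence (e.g. ADR).
 */
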
int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

void __exit nd_region_devs_exit(void)
{
        ida_destroy(&region_ida);
}