/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

static void gennvm_blocks_free(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        int i;

        gennvm_for_each_lun(gn, lun, i) {
                if (!lun->vlun.blocks)
                        break;
                vfree(lun->vlun.blocks);
        }
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;

        kfree(gn->luns);
}

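/*
 * Allocate the LUN array and derive each LUN's coordinates from its
 * linear index: lun_id = i % luns_per_chnl, chnl_id = i / luns_per_chnl.
 */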
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
        struct gen_lun *lun;
        int i;

        gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
        if (!gn->luns)
                return -ENOMEM;

        gennvm_for_each_lun(gn, lun, i) {
                spin_lock_init(&lun->vlun.lock);
                INIT_LIST_HEAD(&lun->free_list);
                INIT_LIST_HEAD(&lun->used_list);
                INIT_LIST_HEAD(&lun->bb_list);

                lun->reserved_blocks = 2; /* for GC only */
                lun->vlun.id = i;
                lun->vlun.lun_id = i % dev->luns_per_chnl;
                lun->vlun.chnl_id = i / dev->luns_per_chnl;
                lun->vlun.nr_free_blocks = dev->blks_per_lun;
        }
        return 0;
}

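/*
 * Callback for dev->ops->get_bb_tbl. For each bit set in the bad-block
 * bitmap, move the corresponding block onto the LUN's bad-block list.
 */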
static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
                                                                void *private)
{
        struct gen_nvm *gn = private;
        struct gen_lun *lun = &gn->luns[lun_id];
        struct nvm_block *blk;
        int i;

        if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
                return 0;

        i = -1;
        while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
                /* i is bounded by nr_blocks, so the lookup cannot overrun */
                blk = &lun->vlun.blocks[i];
                list_move_tail(&blk->list, &lun->bb_list);
        }

        return 0;
}

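/*
 * Callback for dev->ops->get_l2p_tbl. Walks nlb L2P entries starting at
 * slba and marks the block backing each mapped sector as in use, so that
 * blocks holding valid data are never handed out as free blocks.
 */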
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
        struct nvm_dev *dev = private;
        struct gen_nvm *gn = dev->mp;
        sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
        u64 elba = slba + nlb;
        struct gen_lun *lun;
        struct nvm_block *blk;
        u64 i;
        int lun_id;

        if (unlikely(elba > dev->total_pages)) {
                pr_err("gennvm: L2P data from device is out of bounds!\n");
                return -EINVAL;
        }

        for (i = 0; i < nlb; i++) {
                u64 pba = le64_to_cpu(entries[i]);

                if (unlikely(pba >= max_pages && pba != U64_MAX)) {
                        pr_err("gennvm: L2P data entry is out of bounds!\n");
                        return -EINVAL;
                }

                /* Address zero is a special one. The first page on a disk is
                 * protected. It often holds internal device boot
                 * information.
                 */
                if (!pba)
                        continue;

                /* resolve block from physical address */
                lun_id = div_u64(pba, dev->sec_per_lun);
                lun = &gn->luns[lun_id];

                /* Calculate block offset into lun */
                pba = pba - (dev->sec_per_lun * lun_id);
                blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

                if (!blk->type) {
                        /* at this point, we don't know anything about the
                         * block. It's up to the FTL on top to re-establish the
                         * block state.
                         */
                        list_move_tail(&blk->list, &lun->used_list);
                        blk->type = 1;
                        lun->vlun.nr_free_blocks--;
                }
        }

        return 0;
}

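/*
 * Build the per-LUN block arrays and seed the free lists, then fold in
 * the device's bad-block and L2P state where the device exposes them.
 */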
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
        struct gen_lun *lun;
        struct nvm_block *block;
        sector_t lun_iter, blk_iter, cur_block_id = 0;
        int ret;

        gennvm_for_each_lun(gn, lun, lun_iter) {
                lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
                                                        dev->blks_per_lun);
                if (!lun->vlun.blocks)
                        return -ENOMEM;

                for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
                        block = &lun->vlun.blocks[blk_iter];

                        INIT_LIST_HEAD(&block->list);

                        block->lun = &lun->vlun;
                        block->id = cur_block_id++;

                        /* First block is reserved for device */
                        if (unlikely(lun_iter == 0 && blk_iter == 0))
                                continue;

                        list_add_tail(&block->list, &lun->free_list);
                }

                if (dev->ops->get_bb_tbl) {
                        ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
                                        dev->blks_per_lun, gennvm_block_bb, gn);
                        if (ret)
                                pr_err("gennvm: could not read BB table\n");
                }
        }

        if (dev->ops->get_l2p_tbl) {
                ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
                                                        gennvm_block_map, dev);
                if (ret) {
                        pr_err("gennvm: could not read L2P table.\n");
                        pr_warn("gennvm: falling back to default block initialization\n");
                }
        }

        return 0;
}

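/*
 * Media manager entry point: allocate the gen_nvm instance and set up
 * LUN and block bookkeeping. Returns 1 when the manager has claimed the
 * device, or a negative errno on failure.
 */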
static int gennvm_register(struct nvm_dev *dev)
{
        struct gen_nvm *gn;
        int ret;

        gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
        if (!gn)
                return -ENOMEM;

        gn->nr_luns = dev->nr_luns;
        dev->mp = gn;

        ret = gennvm_luns_init(dev, gn);
        if (ret) {
                pr_err("gennvm: could not initialize luns\n");
                goto err;
        }

        ret = gennvm_blocks_init(dev, gn);
        if (ret) {
                pr_err("gennvm: could not initialize blocks\n");
                goto err;
        }

        return 1;
err:
        dev->mp = NULL;         /* don't leave a dangling pointer behind */
        kfree(gn);
        return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
        gennvm_blocks_free(dev);
        gennvm_luns_free(dev);
        kfree(dev->mp);
        dev->mp = NULL;
}

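/*
 * Hand out the first block on the LUN's free list. Non-GC allocations
 * are refused once the free count drops below the GC reserve, so that
 * garbage collection always has blocks left to work with.
 */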
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
                                struct nvm_lun *vlun, unsigned long flags)
{
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
        struct nvm_block *blk = NULL;
        int is_gc = flags & NVM_IOTYPE_GC;

        spin_lock(&vlun->lock);

        if (list_empty(&lun->free_list)) {
                pr_err_ratelimited("gennvm: lun %u has no free blocks available\n",
                                                                lun->vlun.id);
                spin_unlock(&vlun->lock);
                goto out;
        }

        if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
                spin_unlock(&vlun->lock);
                goto out;
        }

        blk = list_first_entry(&lun->free_list, struct nvm_block, list);
        list_move_tail(&blk->list, &lun->used_list);
        blk->type = 1;

        lun->vlun.nr_free_blocks--;

        spin_unlock(&vlun->lock);
out:
        return blk;
}

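/*
 * Return a block to its LUN: in-use blocks (type 1) go back on the free
 * list, bad blocks (type 2) stay on the bad-block list. Unknown types
 * are treated as bad to keep them out of circulation.
 */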
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
        struct nvm_lun *vlun = blk->lun;
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

        spin_lock(&vlun->lock);

        switch (blk->type) {
        case 1:         /* block was in use */
                list_move_tail(&blk->list, &lun->free_list);
                lun->vlun.nr_free_blocks++;
                blk->type = 0;
                break;
        case 2:         /* block is bad */
                list_move_tail(&blk->list, &lun->bb_list);
                break;
        default:
                WARN_ON_ONCE(1);
                pr_err("gennvm: erroneous block type (%lu -> %u)\n",
                                                        blk->id, blk->type);
                list_move_tail(&blk->list, &lun->bb_list);
        }

        spin_unlock(&vlun->lock);
}

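/*
 * The two helpers below translate every physical address in a request
 * between generic (media manager) and device-native address formats,
 * covering both single- and multi-page requests.
 */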
static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
                        rqd->ppa_list[i] = addr_to_generic_mode(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
        }
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_pages > 1) {
                for (i = 0; i < rqd->nr_pages; i++)
                        rqd->ppa_list[i] = generic_to_addr_mode(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
        }
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io)
                return 0;

        /* Convert address space */
        gennvm_generic_to_addr_mode(dev, rqd);

        rqd->dev = dev;
        return dev->ops->submit_io(dev->q, rqd);
}

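/*
 * Look up the block addressed by ppa and tag it with the given type
 * (e.g. 2 == bad). The move onto the bad-block list happens later, when
 * the target returns the block through gennvm_put_blk().
 */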
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
                                                                int type)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        struct nvm_block *blk;

        if (unlikely(ppa->g.ch >= dev->nr_chnls ||
                                        ppa->g.lun >= dev->luns_per_chnl ||
                                        ppa->g.blk >= dev->blks_per_lun)) {
                WARN_ON_ONCE(1);
                pr_err("gennvm: ppa broken (ch: %u >= %u lun: %u >= %u blk: %u >= %u)\n",
                                ppa->g.ch, dev->nr_chnls,
                                ppa->g.lun, dev->luns_per_chnl,
                                ppa->g.blk, dev->blks_per_lun);
                return;
        }

        lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
        blk = &lun->vlun.blocks[ppa->g.blk];

        /* will be moved to bb list on put_blk from target */
        blk->type = type;
}

/* Mark a block bad. The target is expected to recover from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (!dev->ops->set_bb)
                return;

        if (dev->ops->set_bb(dev->q, rqd, 1))
                return;

        gennvm_addr_to_generic_mode(dev, rqd);

        /* look up blocks and mark them as bad */
        if (rqd->nr_pages > 1)
                for (i = 0; i < rqd->nr_pages; i++)
                        gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
        else
                gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

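/*
 * Completion path: on a failed write the affected blocks are marked bad
 * before the error is forwarded to the target's own end_io handler.
 */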
static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
        struct nvm_tgt_instance *ins = rqd->ins;
        int ret = 0;

        switch (error) {
        case NVM_RSP_SUCCESS:
        case NVM_RSP_ERR_EMPTYPAGE:
                break;
        case NVM_RSP_ERR_FAILWRITE:
                gennvm_mark_blk_bad(rqd->dev, rqd);
                /* fall through */
        default:
                ret++;
        }

        ret += ins->tt->end_io(rqd, error);

        return ret;
}

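/*
 * Erase a block. In single-plane mode the erase addresses the block
 * directly; otherwise one ppa per plane is built up in a DMA-able list
 * so that all planes of the block are erased in a single request.
 */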
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
                                                        unsigned long flags)
{
        int plane_cnt = 0, pl_idx, ret;
        struct ppa_addr addr;
        struct nvm_rq rqd;

        if (!dev->ops->erase_block)
                return 0;

        addr = block_to_ppa(dev, blk);

        if (dev->plane_mode == NVM_PLANE_SINGLE) {
                rqd.nr_pages = 1;
                rqd.ppa_addr = addr;
        } else {
                plane_cnt = (1 << dev->plane_mode);
                rqd.nr_pages = plane_cnt;

                rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
                                                        &rqd.dma_ppa_list);
                if (!rqd.ppa_list) {
                        pr_err("gennvm: failed to allocate dma memory\n");
                        return -ENOMEM;
                }

                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        addr.g.pl = pl_idx;
                        rqd.ppa_list[pl_idx] = addr;
                }
        }

        gennvm_generic_to_addr_mode(dev, &rqd);

        ret = dev->ops->erase_block(dev->q, &rqd);

        if (plane_cnt)
                nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

        return ret;
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
        struct gen_nvm *gn = dev->mp;

        return &gn->luns[lunid].vlun;
}

static void gennvm_free_blocks_print(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        unsigned int i;

        gennvm_for_each_lun(gn, lun, i)
                pr_info("%s: lun%8u\t%u\n",
                                        dev->name, i, lun->vlun.nr_free_blocks);
}

static struct nvmm_type gennvm = {
        .name           = "gennvm",
        .version        = {0, 1, 0},

        .register_mgr   = gennvm_register,
        .unregister_mgr = gennvm_unregister,

        .get_blk        = gennvm_get_blk,
        .put_blk        = gennvm_put_blk,

        .submit_io      = gennvm_submit_io,
        .end_io         = gennvm_end_io,
        .erase_blk      = gennvm_erase_blk,

        .get_lun        = gennvm_get_lun,
        .free_blocks_print = gennvm_free_blocks_print,
};

static int __init gennvm_module_init(void)
{
        return nvm_register_mgr(&gennvm);
}

static void __exit gennvm_module_exit(void)
{
        nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");