/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

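/* These slab caches are global: a single set is shared by every pblk
 * instance on the system. pblk_lock serializes their creation and
 * destruction as well as target init/exit.
 */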
static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
					*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256KB due to NVMe's 64 bit completion
	 * bitmap constraint. Writes can be of arbitrary size.
	 */
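	/* Worked example (hypothetical geometry): with a 4KB sector size,
	 * 64 completion bits cover 64 * 4KB = 256KB, so larger reads are
	 * split before submission.
	 */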
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio, q->bio_split);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio, q->bio_split);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}
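	/* A discard that also carries REQ_PREFLUSH falls through to the
	 * write path below so that its flush semantics are honored there.
	 */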

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

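	/* Each entry takes 8 bytes, or 4 when a device address fits in 32
	 * bits. Rough, hypothetical example: 64M mapped sectors (256GB at
	 * 4KB) need 512MB of map at 8B per entry, 256MB at 4B.
	 */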
	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

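	/* The write buffer works on power-of-two sizes so index wrapping
	 * reduces to a mask. get_count_order() yields ceil(log2(n)), e.g.
	 * 1024 entries -> order 10, a 4096-byte sector -> order 12
	 * (example values only).
	 */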
	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

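	/* Pack the address fields contiguously, low to high:
	 * sec | pln | ch | lun | pg | blk. Worked example on a hypothetical
	 * geometry with field lengths sect=2, pln=1, ch=3, lun=2, pg=8 and
	 * blk=12 bits: the offsets become 0, 2, 3, 6, 8 and 16, and
	 * ppaf_bitsize = 28.
	 */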
	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache)
		goto fail;

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache)
		goto fail_destroy_ws_cache;

	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
				0, 0, NULL);
	if (!pblk_r_rq_cache)
		goto fail_destroy_rec_cache;

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache)
		goto fail_destroy_r_rq_cache;

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache)
		goto fail_destroy_w_rq_cache;
	up_write(&pblk_lock);

	return 0;

fail_destroy_w_rq_cache:
	kmem_cache_destroy(pblk_w_rq_cache);
fail_destroy_r_rq_cache:
	kmem_cache_destroy(pblk_r_rq_cache);
fail_destroy_rec_cache:
	kmem_cache_destroy(pblk_rec_cache);
fail_destroy_ws_cache:
	kmem_cache_destroy(pblk_blk_ws_cache);
fail:
	up_write(&pblk_lock);
	return -ENOMEM;
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int max_write_ppas;
	int mod;

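	/* The minimum write unit is one plane-page worth of sectors,
	 * expressed in 4KB host pages. Hypothetical example: with
	 * sec_size == PAGE_SIZE == 4KB, sec_per_pl = 8 and nr_luns = 32,
	 * min_write_pgs = 8 and up to 256 ppas go in one write, capped by
	 * the device's max_phys_sect.
	 */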
	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_r_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!pblk->kw_wq)
		goto free_line_meta_pool;

	if (pblk_set_ppaf(pblk))
		goto free_kw_wq;

	if (pblk_rwb_init(pblk))
		goto free_kw_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_kw_wq:
	destroy_workqueue(pblk->kw_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->kw_wq)
		destroy_workqueue(pblk->kw_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_r_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		kfree(line->blk_bitmap);
		kfree(line->erase_bitmap);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < lm->blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(i, line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
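		/* Hypothetical example: with nr_chnls = 4 and
		 * luns_per_chnl = 8, i = 5 maps to ch = 1, lun_raw = 1,
		 * lunid = 1 + 1 * 8 = 9, spreading consecutive LUN
		 * indexes across the channels.
		 */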
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk, struct pblk_line_meta *lm)
{
	return (sizeof(struct line_emeta) +
			((lm->sec_per_line - lm->emeta_sec) * sizeof(u64)) +
			(pblk->l_mg.nr_lines * sizeof(u32)) +
			lm->blk_bitmap_len);
}

static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

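	/* Reserve 20% of the good blocks as over-provisioning headroom,
	 * used internally (e.g. by GC). Worked example: 1000 free blocks
	 * expose 1000 * 80 / 100 = 800 blocks of user capacity.
	 */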
	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks.
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_meta_blks, nr_free_blks;
	int bb_distance;
	int i;
	int ret;

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	lm->smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;

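	/* Grow smeta one plane-page at a time until the structure fits.
	 * Hypothetical example: sec_per_pl = 8 at a 4KB sector size gives
	 * a 32KB step, so a few hundred bytes of smeta fit on the first
	 * iteration.
	 */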
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec = i * geo->sec_per_pl;
	lm->emeta_len = lm->emeta_sec * geo->sec_size;

	emeta_len = calc_emeta_len(pblk, lm);
	if (emeta_len > lm->emeta_len) {
		i++;
		goto add_emeta_page;
	}
	lm->emeta_bb = geo->nr_luns - i;

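	/* Round the metadata footprint to the nearest whole number of
	 * blocks; a line must provide at least one data block on top of
	 * that to be usable.
	 */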
	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
	lm->min_blk_line = nr_meta_blks + 1;

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	/* smeta is always small enough to fit in a kmalloc allocation;
	 * emeta depends on the number of LUNs allocated to the pblk instance.
	 */
	l_mg->smeta_alloc_type = PBLK_KMALLOC_META;
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i].meta = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i].meta) {
			/* Free every smeta buffer allocated so far */
			while (--i >= 0)
				kfree(l_mg->sline_meta[i].meta);
			ret = -ENOMEM;
			goto fail;
		}
	}

	if (lm->emeta_len > KMALLOC_MAX_CACHE_SIZE) {
		l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta = vmalloc(lm->emeta_len);
			if (!l_mg->eline_meta[i].meta) {
				while (--i >= 0)
					vfree(l_mg->eline_meta[i].meta);
				ret = -ENOMEM;
				goto fail_free_smeta;
			}
		}
	} else {
		l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta =
					kmalloc(lm->emeta_len, GFP_KERNEL);
			if (!l_mg->eline_meta[i].meta) {
				while (--i >= 0)
					kfree(l_mg->eline_meta[i].meta);
				ret = -ENOMEM;
				goto fail_free_smeta;
			}
		}
	}

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		spin_lock_init(&line->lock);

		nr_bad_blks = pblk_bb_line(pblk, line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	sema_init(&pblk->erase_sem, 1);

	/* Clean up per-LUN bad block lists - managed within lines at run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	kfree(pblk->lines);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_flush_writer(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_recov_pad(pblk);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

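/* The block layer counts capacity in 512-byte sectors, while pblk->capacity
 * is kept in exposed (4KB) sectors; NR_PHY_IN_LOG is assumed here to be the
 * 512B-per-4KB scaling factor (8).
 */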
static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->compl_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

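	/* Advertise discards at flash-block granularity (assuming pfpg_size
	 * is the per-flash-page byte size); invalidation itself happens per
	 * sector in the L2P table (see pblk_discard()).
	 */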
	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

static int __init pblk_module_init(void)
{
	return nvm_register_tgt_type(&tt_pblk);
}

static void pblk_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");