drivers/lightnvm/pblk-core.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
}

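/*
 * Erase completion: account the erased block on its line and, on error,
 * hand the grown bad block off to a work queue for marking. The
 * GFP_ATOMIC allocation is used because the completion path may run in
 * atomic (interrupt) context.
 */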
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (ppa) {
                        *ppa = rqd->ppa_addr;
                        pblk_mark_bb(pblk, line, ppa);
                }
                /* On allocation failure the bad block mark is lost, but the
                 * inflight_io accounting below must still run.
                 */
        }

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->g_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

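/*
 * Usage sketch (illustrative; not a call site in this file): callers pair
 * the two helpers below around an I/O:
 *
 *      struct nvm_rq *rqd = pblk_alloc_rqd(pblk, READ);
 *      ... set up and submit rqd ...
 *      pblk_free_rqd(pblk, rqd, READ);
 *
 * mempool_alloc() with GFP_KERNEL may sleep but does not fail, which is
 * why pblk_alloc_rqd() has no NULL check.
 */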
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->g_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->g_rq_pool;

        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* On failure at iteration i, pages 0..i-1 are held by the bio */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

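/*
 * Classify a line for GC by its valid sector count (vsc): zero valid
 * sectors maps to gc_full_list, fewer than high_thrs to gc_high_list,
 * fewer than mid_thrs to gc_mid_list, fewer than sec_in_line to
 * gc_low_list and exactly sec_in_line to gc_empty_list. Any other value
 * indicates a corrupt count.
 */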
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int vsc = le32_to_cpu(*line->vsc);

        lockdep_assert_held(&line->lock);

        if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif

        atomic_inc(&pblk->inflight_io);

        return nvm_submit_io(dev, rqd);
}

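/*
 * Map a metadata buffer into a bio. kmalloc'd buffers are physically
 * contiguous and can be mapped with bio_map_kern() directly; vmalloc'd
 * buffers must be translated and added page by page.
 */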
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}

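/*
 * Example (assuming min_write_pgs = 4 and sec_per_write = 8): with 11
 * sectors available, 8 are synced; with 6, 4 are synced (rounded down to
 * a multiple of the minimum); with 3, nothing is synced unless a flush
 * is pending, in which case the minimum of 4 is returned and the caller
 * is expected to pad the request.
 */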
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        /* Clear the last nr_secs mapped sectors: addr - 1 down to cur_sec */
        for (i = 1; i <= nr_secs; i++)
                WARN_ON(!test_and_clear_bit(addr - i, line->map_bitmap));
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* Logic error: ppa out of bounds. Prevent generating a bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

/*
 * Submit emeta to one LUN in the line at a time to avoid a deadlock when
 * taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     void *emeta_buf, u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = cmd_op;
        rqd.nr_ppas = rq_ppas;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.flags = pblk_set_progr_mode(pblk, WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++) {
                                meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                        }
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);
                        int read_type = PBLK_READ_RANDOM;

                        if (pblk_io_aligned(pblk, rq_ppas))
                                read_type = PBLK_READ_SEQUENTIAL;
                        rqd.flags = pblk_set_read_mode(pblk, read_type);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
                bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

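/*
 * smeta lives at the first sectors of the first good block in the line;
 * a line with no good blocks has no valid smeta placement.
 */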
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
        rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                if (dir == WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta_list[i].lba = lba_list[paddr] = addr_empty;
                }
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
                                                line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret = 0;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so as to minimize disturbance
         * to writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full lines. Thus, the smeta
 * "former" and "current" LUN bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));
        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

/* Derive the line's sector mapping from bad block information and carve
 * out the smeta and emeta regions. Returns 0 if the line turns out to be
 * bad, nonzero otherwise.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                return 1;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec[0];
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec[0];
        line->emeta_ssec = off;
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when the line is closed.
         * There is no need to zero it; it will be initialized using bad block
         * info from map_bitmap.
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                return -EAGAIN;
        }

        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                if (ret == -EAGAIN) {
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                } else {
                        pr_err("pblk: failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

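/*
 * A zero rb_space budget makes the rate limiter stop admitting new user
 * writes, so the pipeline can drain what is already buffered and fail
 * gracefully.
 */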
static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
        lockdep_assert_held(&pblk->l_mg.free_lock);

        pblk_set_space_limit(pblk);
        pblk->state = PBLK_STATE_STOPPING;
}

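/*
 * Teardown order: stop admitting user writes, flush the write buffer,
 * wait for in-flight metadata I/O, pad the open line, then persist all
 * outstanding emeta before declaring the instance stopped.
 */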
void pblk_pipeline_stop(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        if (pblk->state == PBLK_STATE_RECOVERING ||
                                        pblk->state == PBLK_STATE_STOPPED) {
                spin_unlock(&l_mg->free_lock);
                return;
        }
        pblk->state = PBLK_STATE_RECOVERING;
        spin_unlock(&l_mg->free_lock);

        pblk_flush_writer(pblk);
        pblk_wait_for_meta(pblk);

        ret = pblk_recov_pad(pblk);
        if (ret) {
                pr_err("pblk: could not close data on teardown(%d)\n", ret);
                return;
        }

        flush_workqueue(pblk->bb_wq);
        pblk_line_close_meta_sync(pblk);

        spin_lock(&l_mg->free_lock);
        pblk->state = PBLK_STATE_STOPPED;
        l_mg->data_line = NULL;
        l_mg->data_next = NULL;
        spin_unlock(&l_mg->free_lock);
}

void pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return;
        l_mg->data_line = new;

        spin_lock(&l_mg->free_lock);
        if (pblk->state != PBLK_STATE_RUNNING) {
                l_mg->data_line = NULL;
                l_mg->data_next = NULL;
                spin_unlock(&l_mg->free_lock);
                return;
        }

        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
        spin_unlock(&l_mg->free_lock);

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        /* Allocate next line for preparation */
        spin_lock(&l_mg->free_lock);
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_stop_writes(pblk, new);
                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

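/*
 * Called when the last reference to a GC'ed line is dropped: the line
 * returns to the free list and the rate limiter is credited.
 */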
void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}

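/*
 * Both the sync and async erase paths complete through
 * __pblk_end_io_erase(), which handles grown bad block marking.
 */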
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so as to minimize disturbance
         * to writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}

void pblk_line_close_meta_sync(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line, *tline;
        LIST_HEAD(list);

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return;
        }

        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
        spin_unlock(&l_mg->close_lock);

        list_for_each_entry_safe(line, tline, &list, list) {
                struct pblk_emeta *emeta = line->emeta;

                while (emeta->mem < lm->emeta_len[0]) {
                        int ret;

                        ret = pblk_submit_meta_io(pblk, line);
                        if (ret) {
                                pr_err("pblk: sync meta line %d failed (%d)\n",
                                                        line->id, ret);
                                return;
                        }
                }
        }

        pblk_wait_for_meta(pblk);
        flush_workqueue(pblk->close_wq);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
        if (pblk_rl_is_limit(&pblk->rl))
                pblk_line_close_meta_sync(pblk);
}

1561 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1562 {
1563         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1564         struct list_head *move_list;
1565
1566 #ifdef CONFIG_NVM_DEBUG
1567         struct pblk_line_meta *lm = &pblk->lm;
1568
1569         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1570                                 "pblk: corrupt closed line %d\n", line->id);
1571 #endif
1572
1573         spin_lock(&l_mg->free_lock);
1574         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1575         spin_unlock(&l_mg->free_lock);
1576
1577         spin_lock(&l_mg->gc_lock);
1578         spin_lock(&line->lock);
1579         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1580         line->state = PBLK_LINESTATE_CLOSED;
1581         move_list = pblk_line_gc_list(pblk, line);
1582
1583         list_add_tail(&line->list, move_list);
1584
1585         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1586         line->map_bitmap = NULL;
1587         line->smeta = NULL;
1588         line->emeta = NULL;
1589
1590         spin_unlock(&line->lock);
1591         spin_unlock(&l_mg->gc_lock);
1592
1593         pblk_gc_should_kick(pblk);
1594 }
1595
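/*
 * Line lifecycle summary (reconstructed from the transitions visible
 * in this file; not an authoritative diagram):
 *
 *   FREE   -> OPEN    line taken from l_mg->free_list as a data line
 *   OPEN   -> CLOSED  pblk_line_close() above: line fully mapped and
 *                     queued on a gc list by valid sector count
 *   CLOSED -> GC      line picked for reclaim off its gc list
 *   GC     -> FREE    the put path earlier in this file: state reset,
 *                     line back on l_mg->free_list, rate limiter
 *                     credited via pblk_rl_free_lines_inc()
 */
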
1596 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1597 {
1598         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1599         struct pblk_line_meta *lm = &pblk->lm;
1600         struct pblk_emeta *emeta = line->emeta;
1601         struct line_emeta *emeta_buf = emeta->buf;
1602
1603         /* No need for the exact vsc value; avoid the big line lock and use an approximation. */
1604         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1605         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1606
1607         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1608         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1609
1610         spin_lock(&l_mg->close_lock);
1611         spin_lock(&line->lock);
1612         list_add_tail(&line->list, &l_mg->emeta_list);
1613         spin_unlock(&line->lock);
1614         spin_unlock(&l_mg->close_lock);
1615
1616         pblk_line_should_sync_meta(pblk);
1617 }
1618
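/*
 * Sketch of the matching check on the read/recovery side (hypothetical
 * helper name): the CRC stored by pblk_line_close_meta() must match a
 * fresh computation over the emeta buffer before its contents are
 * trusted.
 */
static int example_emeta_crc_ok(struct pblk *pblk,
                                struct line_emeta *emeta_buf)
{
        return le32_to_cpu(emeta_buf->crc) ==
                        pblk_calc_emeta_crc(pblk, emeta_buf);
}
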
1619 void pblk_line_close_ws(struct work_struct *work)
1620 {
1621         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1622                                                                         ws);
1623         struct pblk *pblk = line_ws->pblk;
1624         struct pblk_line *line = line_ws->line;
1625
1626         pblk_line_close(pblk, line);
1627         mempool_free(line_ws, pblk->line_ws_pool);
1628 }
1629
1630 void pblk_line_mark_bb(struct work_struct *work)
1631 {
1632         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1633                                                                         ws);
1634         struct pblk *pblk = line_ws->pblk;
1635         struct nvm_tgt_dev *dev = pblk->dev;
1636         struct ppa_addr *ppa = line_ws->priv;
1637         int ret;
1638
1639         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1640         if (ret) {
1641                 struct pblk_line *line;
1642                 int pos;
1643
1644                 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1645                 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1646
1647                 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1648                                 line->id, pos);
1649         }
1650
1651         kfree(ppa);
1652         mempool_free(line_ws, pblk->line_ws_pool);
1653 }
1654
1655 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1656                       void (*work)(struct work_struct *),
1657                       struct workqueue_struct *wq)
1658 {
1659         struct pblk_line_ws *line_ws;
1660
1661         line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1662         if (!line_ws)
1663                 return;
1664
1665         line_ws->pblk = pblk;
1666         line_ws->line = line;
1667         line_ws->priv = priv;
1668
1669         INIT_WORK(&line_ws->ws, work);
1670         queue_work(wq, &line_ws->ws);
1671 }
1672
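/*
 * Usage sketch (not a new in-tree helper): defer a line close to the
 * close workqueue instead of running it in the I/O completion path.
 * Note that pblk_line_run_ws() silently drops the work item if the
 * mempool allocation fails.
 */
static void example_close_line_async(struct pblk *pblk,
                                     struct pblk_line *line)
{
        pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                                        pblk->close_wq);
}
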
1673 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1674                   unsigned long *lun_bitmap)
1675 {
1676         struct nvm_tgt_dev *dev = pblk->dev;
1677         struct nvm_geo *geo = &dev->geo;
1678         struct pblk_lun *rlun;
1679         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1680         int ret;
1681
1682         /*
1683          * Only send one inflight I/O per LUN. Since we map at a page
1684          * granularity, all ppas in the I/O will map to the same LUN
1685          */
1686 #ifdef CONFIG_NVM_DEBUG
1687         int i;
1688
1689         for (i = 1; i < nr_ppas; i++)
1690                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1691                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1692 #endif
1693         /* If the LUN has been locked for this same request, do not attempt to
1694          * lock it again
1695          */
1696         if (test_and_set_bit(pos, lun_bitmap))
1697                 return;
1698
1699         rlun = &pblk->luns[pos];
1700         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
1701         if (ret) {
1702                 switch (ret) {
1703                 case -ETIME:
1704                         pr_err("pblk: lun semaphore timed out\n");
1705                         break;
1706                 case -EINTR:
1707                         pr_err("pblk: lun semaphore timed out\n");
1708                         break;
1709                 }
1710         }
1711 }
1712
1713 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1714                 unsigned long *lun_bitmap)
1715 {
1716         struct nvm_tgt_dev *dev = pblk->dev;
1717         struct nvm_geo *geo = &dev->geo;
1718         struct pblk_lun *rlun;
1719         int nr_luns = geo->nr_luns;
1720         int bit = -1;
1721
1722         while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1723                 rlun = &pblk->luns[bit];
1724                 up(&rlun->wr_sem);
1725         }
1726
1727         kfree(lun_bitmap);
1728 }
1729
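/*
 * Pairing sketch, assuming a caller-owned bitmap (hypothetical helper;
 * the real write path allocates one per request and releases it from
 * the completion side): pblk_down_rq() takes each LUN semaphore at
 * most once per request via lun_bitmap, and pblk_up_rq() releases
 * every LUN whose bit was set and kfrees the bitmap, so the bitmap
 * must not be reused after the up call.
 */
static int example_lun_serialize(struct pblk *pblk,
                                 struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        unsigned long *lun_bitmap;

        lun_bitmap = kcalloc(BITS_TO_LONGS(dev->geo.nr_luns),
                             sizeof(unsigned long), GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;

        pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
        /* ... submit and complete the I/O covered by ppa_list ... */
        pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap); /* frees bitmap */

        return 0;
}
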
1730 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1731 {
1732         struct ppa_addr l2p_ppa;
1733
1734         /* logic error: lba out-of-bounds. Ignore update */
1735         if (!(lba < pblk->rl.nr_secs)) {
1736                 WARN(1, "pblk: corrupted L2P map request\n");
1737                 return;
1738         }
1739
1740         spin_lock(&pblk->trans_lock);
1741         l2p_ppa = pblk_trans_map_get(pblk, lba);
1742
1743         if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1744                 pblk_map_invalidate(pblk, l2p_ppa);
1745
1746         pblk_trans_map_set(pblk, lba, ppa);
1747         spin_unlock(&pblk->trans_lock);
1748 }
1749
1750 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1751 {
1752 #ifdef CONFIG_NVM_DEBUG
1753         /* Callers must ensure that the ppa points to a cache address */
1754         BUG_ON(!pblk_addr_in_cache(ppa));
1755         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1756 #endif
1757
1758         pblk_update_map(pblk, lba, ppa);
1759 }
1760
1761 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1762                        struct pblk_line *gc_line)
1763 {
1764         struct ppa_addr l2p_ppa;
1765         int ret = 1;
1766
1767 #ifdef CONFIG_NVM_DEBUG
1768         /* Callers must ensure that the ppa points to a cache address */
1769         BUG_ON(!pblk_addr_in_cache(ppa));
1770         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1771 #endif
1772
1773         /* logic error: lba out-of-bounds. Ignore update */
1774         if (!(lba < pblk->rl.nr_secs)) {
1775                 WARN(1, "pblk: corrupted L2P map request\n");
1776                 return 0;
1777         }
1778
1779         spin_lock(&pblk->trans_lock);
1780         l2p_ppa = pblk_trans_map_get(pblk, lba);
1781
1782         /* Prevent updated entries from being overwritten by GC */
1783         if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1784                                 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1785                 ret = 0;
1786                 goto out;
1787         }
1788
1789         pblk_trans_map_set(pblk, lba, ppa);
1790 out:
1791         spin_unlock(&pblk->trans_lock);
1792         return ret;
1793 }
1794
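/*
 * Caller-side sketch of the GC race check (hypothetical function): if
 * a host write remapped the lba after GC picked it up, the lba no
 * longer points into gc_line, pblk_update_map_gc() returns 0, and the
 * stale GC copy must be dropped instead of written back.
 */
static void example_gc_remap(struct pblk *pblk, sector_t lba,
                             struct ppa_addr cache_ppa,
                             struct pblk_line *gc_line)
{
        if (!pblk_update_map_gc(pblk, lba, cache_ppa, gc_line))
                pr_debug("pblk: lba %llu rewritten during GC, dropped\n",
                                        (unsigned long long)lba);
}
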
1795 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1796                          struct ppa_addr entry_line)
1797 {
1798         struct ppa_addr l2p_line;
1799
1800 #ifdef CONFIG_NVM_DEBUG
1801         /* Callers must ensure that the ppa points to a device address */
1802         BUG_ON(pblk_addr_in_cache(ppa));
1803 #endif
1804         /* Invalidate and discard padded entries */
1805         if (lba == ADDR_EMPTY) {
1806 #ifdef CONFIG_NVM_DEBUG
1807                 atomic_long_inc(&pblk->padded_wb);
1808 #endif
1809                 pblk_map_invalidate(pblk, ppa);
1810                 return;
1811         }
1812
1813         /* logic error: lba out-of-bounds. Ignore update */
1814         if (!(lba < pblk->rl.nr_secs)) {
1815                 WARN(1, "pblk: corrupted L2P map request\n");
1816                 return;
1817         }
1818
1819         spin_lock(&pblk->trans_lock);
1820         l2p_line = pblk_trans_map_get(pblk, lba);
1821
1822         /* Do not update L2P if the cacheline has been updated. In this case,
1823          * the mapped ppa must be invalidated
1824          */
1825         if (l2p_line.ppa != entry_line.ppa) {
1826                 if (!pblk_ppa_empty(ppa))
1827                         pblk_map_invalidate(pblk, ppa);
1828                 goto out;
1829         }
1830
1831 #ifdef CONFIG_NVM_DEBUG
1832         WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1833 #endif
1834
1835         pblk_trans_map_set(pblk, lba, ppa);
1836 out:
1837         spin_unlock(&pblk->trans_lock);
1838 }
1839
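/*
 * Completion-path sketch for pblk_update_map_dev() (hypothetical
 * helper; the real write path keeps this in its completion context):
 * capture the cache address at submission time so the completion can
 * detect an intervening host write to the same lba and invalidate the
 * now-stale device ppa instead of installing it.
 */
static void example_complete_user_write(struct pblk *pblk, sector_t lba,
                                        struct ppa_addr dev_ppa,
                                        struct ppa_addr cache_ppa_at_submit)
{
        pblk_update_map_dev(pblk, lba, dev_ppa, cache_ppa_at_submit);
}
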
1840 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1841                          sector_t blba, int nr_secs)
1842 {
1843         int i;
1844
1845         spin_lock(&pblk->trans_lock);
1846         for (i = 0; i < nr_secs; i++)
1847                 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1848         spin_unlock(&pblk->trans_lock);
1849 }
1850
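/*
 * Read-path usage sketch (simplified; dispatch bodies elided): resolve
 * a contiguous lba run in one locked pass, then route each sector by
 * where its current mapping lives.
 */
static void example_resolve_read(struct pblk *pblk, sector_t blba,
                                 struct ppa_addr *ppas, int nr_secs)
{
        int i;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_ppa_empty(ppas[i]))
                        continue;       /* never written: read as zeroes */
                if (pblk_addr_in_cache(ppas[i]))
                        continue;       /* serve from the write buffer */
                /* otherwise: issue a device read to ppas[i] */
        }
}
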
1851 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1852                           u64 *lba_list, int nr_secs)
1853 {
1854         sector_t lba;
1855         int i;
1856
1857         spin_lock(&pblk->trans_lock);
1858         for (i = 0; i < nr_secs; i++) {
1859                 lba = lba_list[i];
1860                 if (lba == ADDR_EMPTY) {
1861                         ppas[i].ppa = ADDR_EMPTY;
1862                 } else {
1863                         /* logic error: lba out-of-bounds. Skip lookup */
1864                         if (!(lba < pblk->rl.nr_secs)) {
1865                                 WARN(1, "pblk: corrupted L2P map request\n");
1866                                 continue;
1867                         }
1868                         ppas[i] = pblk_trans_map_get(pblk, lba);
1869                 }
1870         }
1871         spin_unlock(&pblk->trans_lock);
1872 }