/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"

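/*
 * Work queue handler for write recovery: rebuild a write bio from the
 * buffer entries flagged as failed in the request's ppa_status bitmap
 * and re-submit them to the device.
 */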
void pblk_submit_rec(struct work_struct *work)
{
        struct pblk_rec_ctx *recovery =
                        container_of(work, struct pblk_rec_ctx, ws_rec);
        struct pblk *pblk = recovery->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_rq *rqd = recovery->rqd;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        int max_secs = nvm_max_phys_sects(dev);
        struct bio *bio;
        unsigned int nr_rec_secs;
        unsigned int pgs_read;
        int ret;

        nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
                                                                max_secs);

        bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
        if (!bio) {
                pr_err("pblk: not able to create recovery bio\n");
                return;
        }

        bio->bi_iter.bi_sector = 0;
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;
        rqd->nr_ppas = nr_rec_secs;

        pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
                                                                nr_rec_secs);
        if (pgs_read != nr_rec_secs) {
                pr_err("pblk: could not read recovery entries\n");
                goto err;
        }

        if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
                pr_err("pblk: could not setup recovery request\n");
                goto err;
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
                goto err;
        }

        mempool_free(recovery, pblk->rec_pool);
        return;

err:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, WRITE);
}

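/*
 * Prepare a recovery request for a partially failed write: allocate a new
 * write request, copy over the completion bitmap shifted past the entries
 * that already completed, and split the original completion context between
 * the completed part and the part that must be re-written.
 */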
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
                        struct pblk_rec_ctx *recovery, u64 *comp_bits,
                        unsigned int comp)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        int max_secs = nvm_max_phys_sects(dev);
        struct nvm_rq *rec_rqd;
        struct pblk_c_ctx *rec_ctx;
        int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

        rec_rqd = pblk_alloc_rqd(pblk, WRITE);
        if (IS_ERR(rec_rqd)) {
                pr_err("pblk: could not create recovery req.\n");
                return -ENOMEM;
        }

        rec_ctx = nvm_rq_to_pdu(rec_rqd);

        /* Copy completion bitmap, but exclude the first X completed entries */
        bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
                                (unsigned long int *)comp_bits,
                                comp, max_secs);

        /* Save the context for the entries that need to be re-written and
         * update current context with the completed entries.
         */
        rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
        if (comp >= c_ctx->nr_valid) {
                rec_ctx->nr_valid = 0;
                rec_ctx->nr_padded = nr_entries - comp;

                c_ctx->nr_padded = comp - c_ctx->nr_valid;
        } else {
                rec_ctx->nr_valid = c_ctx->nr_valid - comp;
                rec_ctx->nr_padded = c_ctx->nr_padded;

                c_ctx->nr_valid = comp;
                c_ctx->nr_padded = 0;
        }

        recovery->rqd = rec_rqd;
        recovery->pblk = pblk;

        return 0;
}

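/*
 * Validate end-of-line metadata (CRC and magic identifier) and return the
 * lba list stored in it, or NULL if the emeta buffer cannot be trusted.
 */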
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
{
        u32 crc;

        crc = pblk_calc_emeta_crc(pblk, emeta_buf);
        if (le32_to_cpu(emeta_buf->crc) != crc)
                return NULL;

        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
                return NULL;

        return emeta_to_lbas(pblk, emeta_buf);
}

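/*
 * Recover the L2P mappings of a line from its end-of-line metadata. Returns
 * non-zero if the emeta is not usable, in which case the caller falls back
 * to scanning the out-of-band area.
 */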
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        __le64 *lba_list;
        int data_start;
        int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
        int i;

        lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
        if (!lba_list)
                return 1;

        data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
        nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
        nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

        for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_pblk_ppa(pblk, i, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                /* Do not update bad blocks */
                if (test_bit(pos, line->blk_bitmap))
                        continue;

                if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
                        spin_lock(&line->lock);
                        if (test_and_set_bit(i, line->invalid_bitmap))
                                WARN_ONCE(1, "pblk: rec. double invalidate:\n");
                        else
                                le32_add_cpu(line->vsc, -1);
                        spin_unlock(&line->lock);

                        continue;
                }

                pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
                nr_lbas++;
        }

        if (nr_valid_lbas != nr_lbas)
                pr_err("pblk: line %d - inconsistent lba list (%d/%d)\n",
                                line->id, nr_valid_lbas, nr_lbas);

        line->left_msecs = 0;

        return 0;
}

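/*
 * Number of sectors in the line available for user data: the line size minus
 * start/end metadata and the sectors lost to bad blocks.
 */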
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

        return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
                                nr_bb * geo->sec_per_blk;
}

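/* Pre-allocated resources shared by the OOB recovery helpers */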
struct pblk_recov_alloc {
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        void *data;
        dma_addr_t dma_ppa_list;
        dma_addr_t dma_meta_list;
};

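/*
 * Read the line from r_ptr up to the current write pointer and recover the
 * L2P mappings from the per-sector out-of-band metadata. Used after padding,
 * when the remaining sectors are expected to be readable.
 */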
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p, u64 r_ptr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        u64 r_ptr_int;
        int left_ppas;
        int rq_ppas, rq_len;
        int i, j;
        int ret = 0;
        DECLARE_COMPLETION_ONSTACK(wait);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        left_ppas = line->cur_sec - r_ptr;
        if (!left_ppas)
                return 0;

        r_ptr_int = r_ptr;

next_read_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->sec_size;

        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
                pos = pblk_dev_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        r_ptr_int += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
                        pos = pblk_dev_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, r_ptr_int, line->id);
        }

        /* If read fails, more padding is needed */
        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
                return ret;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: L2P recovery read timed out\n");
                return -EINTR;
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        /* At this point, the read should not fail. If it does, it is a problem
         * we cannot recover from here. Need FTL log.
         */
        if (rqd->error) {
                pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
                return -EINTR;
        }

        for (i = 0; i < rqd->nr_ppas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_read_rq;

        return 0;
}

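/* kref release callback: all outstanding padding I/Os have completed */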
static void pblk_recov_complete(struct kref *ref)
{
        struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

        complete(&pad_rq->wait);
}

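/*
 * End I/O callback for padding writes: drop the pad request reference and
 * release the per-request DMA metadata and the request itself.
 */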
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
        struct pblk_pad_rq *pad_rq = rqd->private;
        struct pblk *pblk = pad_rq->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;

        kref_put(&pad_rq->ref, pblk_recov_complete);
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
        pblk_free_rqd(pblk, rqd, WRITE);
}

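/*
 * Pad left_ppas sectors of the line with empty entries (ADDR_EMPTY) so that
 * partially written flash pages can be read back safely. The padding is
 * issued as one or more write requests and the function waits for all of
 * them to complete before returning.
 */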
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
                              int left_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct pblk_pad_rq *pad_rq;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        u64 w_ptr = line->cur_sec;
        int left_line_ppas, rq_ppas, rq_len;
        int i, j;
        int ret = 0;

        spin_lock(&line->lock);
        left_line_ppas = line->left_msecs;
        spin_unlock(&line->lock);

        pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
        if (!pad_rq)
                return -ENOMEM;

        data = vzalloc(pblk->max_write_pgs * geo->sec_size);
        if (!data) {
                ret = -ENOMEM;
                goto free_rq;
        }

        pad_rq->pblk = pblk;
        init_completion(&pad_rq->wait);
        kref_init(&pad_rq->ref);

next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (rq_ppas < pblk->min_write_pgs) {
                pr_err("pblk: corrupted pad line %d\n", line->id);
                ret = -EINVAL;
                goto free_rq;
        }

        rq_len = rq_ppas * geo->sec_size;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list) {
                ret = -ENOMEM;
                goto free_data;
        }

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

        rqd = pblk_alloc_rqd(pblk, WRITE);
        if (IS_ERR(rqd)) {
                ret = PTR_ERR(rqd);
                goto fail_free_meta;
        }
        memset(rqd, 0, pblk_w_rq_size);

        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
        rqd->flags = pblk_set_progr_mode(pblk, WRITE);
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        rqd->end_io = pblk_end_io_recov;
        rqd->private = pad_rq;

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
                pos = pblk_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

                        pblk_map_invalidate(pblk, dev_ppa);
                        lba_list[w_ptr] = meta_list[i].lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }

        kref_get(&pad_rq->ref);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
                goto free_data;
        }

        atomic_dec(&pblk->inflight_io);

        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
                goto next_pad_rq;

        kref_put(&pad_rq->ref, pblk_recov_complete);

        if (!wait_for_completion_io_timeout(&pad_rq->wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: pad write timed out\n");
                ret = -ETIME;
        }

free_rq:
        kfree(pad_rq);
free_data:
        vfree(data);
        return ret;

fail_free_rqd:
        pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
        kfree(pad_rq);
        return ret;
}

/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data, we
 * first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
                                   struct pblk_recov_alloc p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        u64 w_ptr = 0, r_ptr;
        int rq_ppas, rq_len;
        int i, j;
        int ret = 0;
        int rec_round;
        int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
        DECLARE_COMPLETION_ONSTACK(wait);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        /* we could recover up until the line write pointer */
        r_ptr = line->cur_sec;
        rec_round = 0;

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->sec_size;

        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                pos = pblk_dev_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        w_ptr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
                        pos = pblk_dev_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, w_ptr, line->id);
        }

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
                return ret;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: L2P recovery read timed out\n");
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        /* This should not happen since the read failed during normal recovery,
         * but the media works funny sometimes...
         */
        if (!rec_round++ && !rqd->error) {
                rec_round = 0;
                for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
                        u64 lba = le64_to_cpu(meta_list[i].lba);

                        if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                                continue;

                        pblk_update_map(pblk, lba, rqd->ppa_list[i]);
                }
        }

        /* Reached the end of the written line */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                int pad_secs, nr_error_bits, bit;
                int ret;

                bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
                nr_error_bits = rqd->nr_ppas - bit;

                /* Roll back failed sectors */
                line->cur_sec -= nr_error_bits;
                line->left_msecs += nr_error_bits;
                bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

                pad_secs = pblk_pad_distance(pblk);
                if (pad_secs > line->left_msecs)
                        pad_secs = line->left_msecs;

                ret = pblk_recov_pad_oob(pblk, line, pad_secs);
                if (ret)
                        pr_err("pblk: OOB padding failed (err:%d)\n", ret);

                ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
                if (ret)
                        pr_err("pblk: OOB read failed (err:%d)\n", ret);

                left_ppas = 0;
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

        return ret;
}

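/*
 * First recovery pass: read the written part of the line sequentially and
 * recover L2P mappings from the out-of-band metadata until a read error
 * marks the end of the written area. *done is cleared if the error was not
 * an empty-page error, meaning the line needs the padded second pass.
 */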
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
                               struct pblk_recov_alloc p, int *done)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct nvm_rq *rqd;
        struct bio *bio;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        u64 paddr;
        int rq_ppas, rq_len;
        int i, j;
        int ret = 0;
        int left_ppas = pblk_calc_sec_in_line(pblk, line);
        DECLARE_COMPLETION_ONSTACK(wait);

        ppa_list = p.ppa_list;
        meta_list = p.meta_list;
        rqd = p.rqd;
        data = p.data;
        dma_ppa_list = p.dma_ppa_list;
        dma_meta_list = p.dma_meta_list;

        *done = 1;

next_rq:
        memset(rqd, 0, pblk_g_rq_size);

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (!rq_ppas)
                rq_ppas = pblk->min_write_pgs;
        rq_len = rq_ppas * geo->sec_size;

        bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
        rqd->dma_ppa_list = dma_ppa_list;
        rqd->dma_meta_list = dma_meta_list;
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;

        if (pblk_io_aligned(pblk, rq_ppas))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;

                paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
                ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                pos = pblk_dev_ppa_to_pos(geo, ppa);

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += pblk->min_write_pgs;
                        ppa = addr_to_gen_ppa(pblk, paddr, line->id);
                        pos = pblk_dev_ppa_to_pos(geo, ppa);
                }

                for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
                        rqd->ppa_list[i] =
                                addr_to_gen_ppa(pblk, paddr, line->id);
        }

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
                bio_put(bio);
                return ret;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: L2P recovery read timed out\n");
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        /* Reached the end of the written line */
        if (rqd->error) {
                int nr_error_bits, bit;

                bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
                nr_error_bits = rqd->nr_ppas - bit;

                /* Roll back failed sectors */
                line->cur_sec -= nr_error_bits;
                line->left_msecs += nr_error_bits;
                bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

                left_ppas = 0;
                rqd->nr_ppas = bit;

                if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
                        *done = 0;
        }

        for (i = 0; i < rqd->nr_ppas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
                        continue;

                pblk_update_map(pblk, lba, rqd->ppa_list[i]);
        }

        left_ppas -= rq_ppas;
        if (left_ppas > 0)
                goto next_rq;

        return ret;
}

/* Scan the line for lbas stored in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_rq *rqd;
        struct ppa_addr *ppa_list;
        struct pblk_sec_meta *meta_list;
        struct pblk_recov_alloc p;
        void *data;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int done, ret = 0;

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd))
                return PTR_ERR(rqd);

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list) {
                ret = -ENOMEM;
                goto free_rqd;
        }

        ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

        data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto free_meta_list;
        }

        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
        p.rqd = rqd;
        p.data = data;
        p.dma_ppa_list = dma_ppa_list;
        p.dma_meta_list = dma_meta_list;

        ret = pblk_recov_scan_oob(pblk, line, p, &done);
        if (ret) {
                pr_err("pblk: could not recover L2P from OOB\n");
                goto out;
        }

        if (!done) {
                ret = pblk_recov_scan_all_oob(pblk, line, p);
                if (ret) {
                        pr_err("pblk: could not recover L2P from OOB\n");
                        goto out;
                }
        }

        if (pblk_line_is_full(line))
                pblk_line_recov_close(pblk, line);

out:
        kfree(data);
free_meta_list:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
        pblk_free_rqd(pblk, rqd, READ);

        return ret;
}

/* Insert the line into the list, ordered by sequence number (seq_nr) */
static void pblk_recov_line_add_ordered(struct list_head *head,
                                        struct pblk_line *line)
{
        struct pblk_line *t = NULL;

        list_for_each_entry(t, head, list)
                if (t->seq_nr > line->seq_nr)
                        break;

        __list_add(&line->list, t->list.prev, &t->list);
}

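/*
 * Scan-based L2P recovery: read and validate the start-of-line metadata of
 * every line, order the written lines by sequence number, and recover each
 * one from its emeta or, failing that, from the out-of-band area. Returns
 * the open data line to resume writing on, if any.
 */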
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line, *tline, *data_line = NULL;
        struct pblk_smeta *smeta;
        struct pblk_emeta *emeta;
        struct line_smeta *smeta_buf;
        int found_lines = 0, recovered_lines = 0, open_lines = 0;
        int is_next = 0;
        int meta_line;
        int i, valid_uuid = 0;
        LIST_HEAD(recov_list);

        /* TODO: Implement FTL snapshot */

        /* Scan recovery - takes place when FTL snapshot fails */
        spin_lock(&l_mg->free_lock);
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        smeta = l_mg->sline_meta[meta_line];
        emeta = l_mg->eline_meta[meta_line];
        smeta_buf = (struct line_smeta *)smeta;
        spin_unlock(&l_mg->free_lock);

        /* Order data lines using their sequence number */
        for (i = 0; i < l_mg->nr_lines; i++) {
                u32 crc;

                line = &pblk->lines[i];

                memset(smeta, 0, lm->smeta_len);
                line->smeta = smeta;
                line->lun_bitmap = ((void *)(smeta_buf)) +
                                                sizeof(struct line_smeta);

                /* Lines that cannot be read are assumed not to have been written */
                if (pblk_line_read_smeta(pblk, line))
                        continue;

                crc = pblk_calc_smeta_crc(pblk, smeta_buf);
                if (le32_to_cpu(smeta_buf->crc) != crc)
                        continue;

                if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
                        continue;

                if (le16_to_cpu(smeta_buf->header.version) != 1) {
                        pr_err("pblk: found incompatible line version %u\n",
                                        le16_to_cpu(smeta_buf->header.version));
                        return ERR_PTR(-EINVAL);
                }

                /* The first valid instance uuid is used for initialization */
                if (!valid_uuid) {
                        memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
                        valid_uuid = 1;
                }

                if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
                        pr_debug("pblk: ignore line %u due to uuid mismatch\n",
                                        i);
                        continue;
                }

                /* Update line metadata */
                spin_lock(&line->lock);
                line->id = le32_to_cpu(smeta_buf->header.id);
                line->type = le16_to_cpu(smeta_buf->header.type);
                line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
                spin_unlock(&line->lock);

                /* Update general metadata */
                spin_lock(&l_mg->free_lock);
                if (line->seq_nr >= l_mg->d_seq_nr)
                        l_mg->d_seq_nr = line->seq_nr + 1;
                l_mg->nr_free_lines--;
                spin_unlock(&l_mg->free_lock);

                if (pblk_line_recov_alloc(pblk, line))
                        goto out;

                pblk_recov_line_add_ordered(&recov_list, line);
                found_lines++;
                pr_debug("pblk: recovering data line %d, seq:%llu\n",
                                                line->id, smeta_buf->seq_nr);
        }

        if (!found_lines) {
                pblk_setup_uuid(pblk);

                spin_lock(&l_mg->free_lock);
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                spin_unlock(&l_mg->free_lock);

                goto out;
        }

        /* Verify closed blocks and recover this portion of the L2P table */
        list_for_each_entry_safe(line, tline, &recov_list, list) {
                int off, nr_bb;

                recovered_lines++;
                /* Calculate where emeta starts based on the line bb */
                off = lm->sec_per_line - lm->emeta_sec[0];
                nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
                off -= nr_bb * geo->sec_per_pl;

                line->emeta_ssec = off;
                line->emeta = emeta;
                memset(line->emeta->buf, 0, lm->emeta_len[0]);

                if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
                        pblk_recov_l2p_from_oob(pblk, line);
                        goto next;
                }

                if (pblk_recov_l2p_from_emeta(pblk, line))
                        pblk_recov_l2p_from_oob(pblk, line);

next:
                if (pblk_line_is_full(line)) {
                        struct list_head *move_list;

                        spin_lock(&line->lock);
                        line->state = PBLK_LINESTATE_CLOSED;
                        move_list = pblk_line_gc_list(pblk, line);
                        spin_unlock(&line->lock);

                        spin_lock(&l_mg->gc_lock);
                        list_move_tail(&line->list, move_list);
                        spin_unlock(&l_mg->gc_lock);

                        mempool_free(line->map_bitmap, pblk->line_meta_pool);
                        line->map_bitmap = NULL;
                        line->smeta = NULL;
                        line->emeta = NULL;
                } else {
                        if (open_lines > 1)
                                pr_err("pblk: failed to recover L2P\n");

                        open_lines++;
                        line->meta_line = meta_line;
                        data_line = line;
                }
        }

        spin_lock(&l_mg->free_lock);
        if (!open_lines) {
                WARN_ON_ONCE(!test_and_clear_bit(meta_line,
                                                        &l_mg->meta_bitmap));
                pblk_line_replace_data(pblk);
        } else {
                /* Allocate next line for preparation */
                l_mg->data_next = pblk_line_get(pblk);
                if (l_mg->data_next) {
                        l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                        l_mg->data_next->type = PBLK_LINETYPE_DATA;
                        is_next = 1;
                }
        }
        spin_unlock(&l_mg->free_lock);

        if (is_next) {
                pblk_line_erase(pblk, l_mg->data_next);
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
        }

out:
        if (found_lines != recovered_lines)
                pr_err("pblk: failed to recover all found lines %d/%d\n",
                                                found_lines, recovered_lines);

        return data_line;
}

/*
 * Pad the current data line to its end and close its metadata (tear-down path)
 */
int pblk_recov_pad(struct pblk *pblk)
{
        struct pblk_line *line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int left_msecs;
        int ret = 0;

        spin_lock(&l_mg->free_lock);
        line = l_mg->data_line;
        left_msecs = line->left_msecs;
        spin_unlock(&l_mg->free_lock);

        ret = pblk_recov_pad_oob(pblk, line, left_msecs);
        if (ret) {
                pr_err("pblk: Tear down padding failed (%d)\n", ret);
                return ret;
        }

        pblk_line_close_meta(pblk, line);
        return ret;
}