 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
#include <linux/crc32.h>
#include <linux/bitmap.h>
 * init_seen - allocate the seen bitmap used for debugging.
 * @ubi: UBI device description object
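 * Returns the bitmap on success, NULL if fastmap self-checks are disabled,
 * or an ERR_PTR on allocation failure.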
static inline unsigned long *init_seen(struct ubi_device *ubi)
	if (!ubi_dbg_chk_fastmap(ubi))

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		return ERR_PTR(-ENOMEM);
 * free_seen - free the seen bitmap.
 * @seen: bitmap of @ubi->peb_count bits
static inline void free_seen(unsigned long *seen)
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
size_t ubi_calc_fm_size(struct ubi_device *ubi)
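	/* Worst-case layout: super block, header, the normal and the WL
	 * pool, one EC entry per PEB, the EBA (one __be32 per PEB) and up
	 * to UBI_MAX_VOLUMES volume headers, rounded up to full LEBs. */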
	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;

	return roundup(size, ubi->leb_size);
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);
	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);

	aeb->copy_flag = aeb->sqnum = 0;
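	/* Update the attach-wide EC statistics (sum, min, max) from which
	 * the mean erase counter is later derived. */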
	ai->ec_sum += aeb->ec;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->vol_type = vol_type;
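	/* Only static volumes have a fixed number of used LEBs. */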
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
 * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the AEB to be assigned
 * @av: target scan volume
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
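	/* Walk the volume's RB-tree, which is ordered by LEB number, to
	 * find the insertion point. */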
		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)

	list_del(&aeb->u.list);

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;

		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
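		/* A negative cmp_res indicates an error; bit 0 is set if
		 * the copy on new_aeb->pnum is the newer one. */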
			/* new_aeb is newer */
			victim = kmem_cache_alloc(ai->aeb_slab_cache,

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
			/* new_aeb is older */
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
	/* This LEB is new, let's add it to the volume */
	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);
 * process_pool_aeb - handle a non-empty PEB found in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;
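	/* The fastmap superblock and data volumes are internal; their PEBs
	 * do not belong to any user volume, so drop them here. */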
	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

	/* Find the volume this AEB belongs to */
	av = ubi_find_av(ai, vol_id);
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: the PEB to be unmapped
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
431 dbg_bld("scanning fastmap pool: size = %i", pool_size);
434 * Now scan all PEBs in the pool to find changes which have been made
435 * after the creation of the fastmap
	for (i = 0; i < pool_size; i++) {
		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
		} else if (err == UBI_IO_BITFLIPS)

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
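		/* An empty (all 0xFF) VID area means the PEB was erased
		 * after the fastmap was written; treat it as free. */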
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)

			add_aeb(ai, free, pnum, ec, scrub);
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non-empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;

	ubi_free_vid_hdr(ubi, vh);
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
static int count_fastmap_pebs(struct ubi_attach_info *ai)
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;

	list_for_each_entry(aeb, &ai->erase, u.list)

	list_for_each_entry(aeb, &ai->free, u.list)

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;
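	/* Parse the fastmap image sequentially; fm_pos is bounds-checked
	 * against fm_size after every step. */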
576 INIT_LIST_HEAD(&used);
577 INIT_LIST_HEAD(&free);
578 ai->min_ec = UBI_MAX_ERASECOUNTER;
580 fmsb = (struct ubi_fm_sb *)(fm_raw);
581 ai->max_sqnum = fmsb->sqnum;
582 fm_pos += sizeof(struct ubi_fm_sb);
583 if (fm_pos >= fm_size)
586 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
587 fm_pos += sizeof(*fmhdr);
588 if (fm_pos >= fm_size)
591 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
592 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
593 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
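	/* PEBs on the scrub list are accounted as used, but flagged for
	 * scrubbing. */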
	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (PTR_ERR(av) == -EINVAL) {
			ubi_err(ubi, "volume (ID %i) already exists",

		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);
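			/* Every PEB referenced by the EBA must also have an
			 * EC entry in the used list. */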
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {

				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))

	ret = UBI_BAD_FASTMAP;
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 * find_fm_anchor - find the most recent Fastmap superblock (anchor).
 * @ai: UBI attach info to be filled
static int find_fm_anchor(struct ubi_attach_info *ai)
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;
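	/* The anchor with the highest sequence number is the most recent one. */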
823 list_for_each_entry(aeb, &ai->fastmap, u.list) {
824 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
825 max_sqnum = aeb->sqnum;
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *tmp_aeb, *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
		return UBI_NO_FASTMAP;

	/* Move all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
		list_move_tail(&aeb->u.list, &ai->fastmap);

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);
	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;
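	/* Bit-flips mean the PEB content is decaying: mark the block so it
	 * is tortured when it is handed back to the WL sub-system. */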
	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
		ret = UBI_BAD_FASTMAP;
	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	for (i = 0; i < used_blocks; i++) {
		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
			ret = UBI_BAD_FASTMAP;

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
			ret = UBI_BAD_FASTMAP;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;
		image_seq = be32_to_cpu(ech->image_seq);
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
		if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
			ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
				be32_to_cpu(vh->vol_id),
				UBI_FM_SB_VOLUME_ID);
			ret = UBI_BAD_FASTMAP;

		if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
			ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
				be32_to_cpu(vh->vol_id),
				UBI_FM_DATA_VOLUME_ID);
			ret = UBI_BAD_FASTMAP;

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);
		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
				"err: %i)", i, pnum, ret);
	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
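	/* The stored CRC was computed with data_crc zeroed, so zero the
	 * field before re-computing. */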
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
		ret = UBI_BAD_FASTMAP;
	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
		ret = UBI_BAD_FASTMAP;
	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;
	ubi_free_vid_hdr(ubi, vh);

	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");

	ubi_free_vid_hdr(ubi, vh);
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs = NULL;
	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
1126 spin_lock(&ubi->volumes_lock);
1127 spin_lock(&ubi->wl_lock);
	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);
	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);

	scrub_peb_count = 0;
	erase_peb_count = 0;
	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	fmh->free_peb_count = cpu_to_be32(free_peb_count);
	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	fmh->used_peb_count = cpu_to_be32(used_peb_count);
	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
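	/* PEBs queued for erasure only live on the work list; walk it to
	 * record them in the erase list. */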
	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);
		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);
1283 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1284 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1286 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
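	/* data_crc is still zero here (the buffer was memset above), so the
	 * CRC covers the image exactly as the reader will re-compute it. */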
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
static int erase_block(struct ubi_device *ubi, int pnum)
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	else if (ret && ret != UBI_IO_BITFLIPS) {

	ret = ubi_io_sync_erase(ubi, pnum, 0);

	ec = be64_to_cpu(ec_hdr->ec);

	if (ec > UBI_MAX_ERASECOUNTER) {

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
static int invalidate_fastmap(struct ubi_device *ubi)
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);

	e = ubi_wl_get_fm_peb(ubi, 1);

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
		ubi_wl_put_fm_peb(ubi, e, 0, 0);

	fm->used_blocks = 1;

	ubi_free_vid_hdr(ubi, vh);
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
	for (i = 0; i < fm->used_blocks; i++) {
		ubi_wl_put_fm_peb(ubi, fm->e[i], i,
				  fm->to_be_tortured[i]);
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
int ubi_update_fastmap(struct ubi_device *ubi)
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);

	ret = ubi_ensure_anchor_pebs(ubi);
		up_write(&ubi->fm_protect);

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
		up_write(&ubi->fm_protect);
	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);
1520 if (old_fm && old_fm->e[i]) {
1521 ret = erase_block(ubi, old_fm->e[i]->pnum);
1523 ubi_err(ubi, "could not erase old fastmap PEB");
1525 for (j = 1; j < i; j++) {
1526 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1528 new_fm->e[j] = NULL;
1532 new_fm->e[i] = old_fm->e[i];
1533 old_fm->e[i] = NULL;
1535 ubi_err(ubi, "could not get any free erase block");
1537 for (j = 1; j < i; j++) {
1538 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1539 new_fm->e[j] = NULL;
1546 new_fm->e[i] = tmp_e;
1548 if (old_fm && old_fm->e[i]) {
1549 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1550 old_fm->to_be_tortured[i]);
1551 old_fm->e[i] = NULL;
	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
1565 spin_lock(&ubi->wl_lock);
1566 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1567 spin_unlock(&ubi->wl_lock);
		/* no fresh anchor PEB was found, reuse the old one */
			ret = erase_block(ubi, old_fm->e[0]->pnum);
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
					new_fm->e[i] = NULL;

			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
1595 ubi_err(ubi, "could not find any anchor PEB");
1597 for (i = 1; i < new_fm->used_blocks; i++) {
1598 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1599 new_fm->e[i] = NULL;
1605 new_fm->e[0] = tmp_e;
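	/* Hold work_sem and fm_eba_sem so that no erase work and no EBA
	 * change can race with the fastmap write. */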
1608 down_write(&ubi->work_sem);
1609 down_write(&ubi->fm_eba_sem);
1610 ret = ubi_write_fastmap(ubi, new_fm);
1611 up_write(&ubi->fm_eba_sem);
1612 up_write(&ubi->work_sem);
1618 up_write(&ubi->fm_protect);
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
		ubi_err(ubi, "Unable to invalidate current fastmap!");

	return_fm_pebs(ubi, old_fm);
	return_fm_pebs(ubi, new_fm);