1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * SPDX-License-Identifier:     GPL-2.0+
5  *
6  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
7  */
8
9 /*
10  * UBI wear-leveling sub-system.
11  *
12  * This sub-system is responsible for wear-leveling. It works in terms of
13  * physical eraseblocks and erase counters and knows nothing about logical
14  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
15  * eraseblocks are of two types - used and free. Used physical eraseblocks are
16  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
17  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
18  *
19  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter
20  * header. The rest of the physical eraseblock contains only %0xFF bytes.
21  *
22  * When physical eraseblocks are returned to the WL sub-system by means of the
23  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
24  * done asynchronously in context of the per-UBI device background thread,
25  * which is also managed by the WL sub-system.
26  *
27  * The wear-leveling is ensured by means of moving the contents of used
28  * physical eraseblocks with low erase counter to free physical eraseblocks
29  * with high erase counter.
30  *
31  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
32  * bad.
33  *
34  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
35  * in a physical eraseblock, it has to be moved. Technically this is the same
36  * as moving it for wear-leveling reasons.
37  *
38  * As mentioned above, for the UBI sub-system all physical eraseblocks are either
39  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
40  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
41  * RB-trees, as well as (temporarily) in the @wl->pq queue.
42  *
43  * When the WL sub-system returns a physical eraseblock, the physical
44  * eraseblock is protected from being moved for some "time". For this reason,
45  * the physical eraseblock is not directly moved from the @wl->free tree to the
46  * @wl->used tree. There is a protection queue in between where this
47  * physical eraseblock is temporarily stored (@wl->pq).
48  *
49  * All this protection stuff is needed because:
50  *  o we don't want to move physical eraseblocks just after we have given them
51  *    to the user; instead, we first want to let users fill them up with data;
52  *
53  *  o there is a chance that the user will put the physical eraseblock very
54  *    soon, so it makes sense not to move it for some time, but to wait.
55  *
56  * Physical eraseblocks stay protected only for limited time. But the "time" is
57  * measured in erase cycles in this case. This is implemented with help of the
58  * protection queue. Eraseblocks are put to the tail of this queue when they
59  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
60  * head of the queue on each erase operation (for any eraseblock). So the
61  * length of the queue defines how many (global) erase cycles PEBs are protected.
62  *
63  * To put it differently, each physical eraseblock has 2 main states: free and
64  * used. The former state corresponds to the @wl->free tree. The latter state
65  * is split into several sub-states:
66  * o the WL movement is allowed (@wl->used tree);
67  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
68  *   erroneous - e.g., there was a read error;
69  * o the WL movement is temporarily prohibited (@wl->pq queue);
70  * o scrubbing is needed (@wl->scrub tree).
71  *
72  * Depending on the sub-state, wear-leveling entries of the used physical
73  * eraseblocks may be kept in one of those structures.
74  *
75  * Note, in this implementation, we keep a small in-RAM object for each physical
76  * eraseblock. This is surely not a scalable solution. But it appears to be good
77  * enough for moderately large flashes and it is simple. In future, one may
78  * re-work this sub-system and make it more scalable.
79  *
80  * At the moment this sub-system does not utilize the sequence number, which
81  * was introduced relatively recently. But it would be wise to do this because
82  * the sequence number of a logical eraseblock characterizes how old it is. For
83  * example, when we move a PEB with low erase counter, and we need to pick the
84  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
85  * pick a target PEB with an average EC if our PEB is not very "old". This
86  * leaves room for future re-work of the WL sub-system.
87  */
88
89 #define __UBOOT__
90 #ifndef __UBOOT__
91 #include <linux/slab.h>
92 #include <linux/crc32.h>
93 #include <linux/freezer.h>
94 #include <linux/kthread.h>
95 #else
96 #include <ubi_uboot.h>
97 #endif
98
99 #include "ubi.h"
100
101 /* Number of physical eraseblocks reserved for wear-leveling purposes */
102 #define WL_RESERVED_PEBS 1
103
104 /*
105  * Maximum difference between two erase counters. If this threshold is
106  * exceeded, the WL sub-system starts moving data from used physical
107  * eraseblocks with low erase counter to free physical eraseblocks with high
108  * erase counter.
109  */
110 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
111
112 /*
113  * When a physical eraseblock is moved, the WL sub-system has to pick the target
114  * physical eraseblock to move to. The simplest way would be just to pick the
115  * one with the highest erase counter. But in certain workloads this could lead
116  * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
117  * situation when the picked physical eraseblock is constantly erased after the
118  * data is written to it. So, we have a constant which limits the highest erase
119  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
120  * does not pick eraseblocks with erase counter greater than the lowest erase
121  * counter plus %WL_FREE_MAX_DIFF.
122  */
123 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
124
125 /*
126  * Maximum number of consecutive background thread failures which is enough to
127  * switch to read-only mode.
128  */
129 #define WL_MAX_FAILURES 32
130
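
/*
 * Editor's sketch, not part of the original file: how the two constants
 * above interact. With the common CONFIG_MTD_UBI_WL_THRESHOLD value of 4096,
 * wear-leveling starts once a free PEB out-wears a used one by 4096 erase
 * cycles, and the free PEB picked as the move target may exceed the lowest
 * free erase counter by at most 8192 (%WL_FREE_MAX_DIFF). The helper name is
 * made up for illustration.
 */
static inline int wl_example_move_needed(int min_used_ec, int max_free_ec)
{
	/* Mirrors the "e2->ec - e1->ec >= UBI_WL_THRESHOLD" checks below */
	return max_free_ec - min_used_ec >= UBI_WL_THRESHOLD;
}
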
131 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
132 static int self_check_in_wl_tree(const struct ubi_device *ubi,
133                                  struct ubi_wl_entry *e, struct rb_root *root);
134 static int self_check_in_pq(const struct ubi_device *ubi,
135                             struct ubi_wl_entry *e);
136
137 #ifdef CONFIG_MTD_UBI_FASTMAP
138 #ifndef __UBOOT__
139 /**
140  * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
141  * @wrk: the work description object
142  */
143 static void update_fastmap_work_fn(struct work_struct *wrk)
144 {
145         struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
146         ubi_update_fastmap(ubi);
147 }
148 #endif
149
150 /**
151  * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
152  * @ubi: UBI device description object
153  * @pnum: the PEB to be checked
154  */
155 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
156 {
157         int i;
158
159         if (!ubi->fm)
160                 return 0;
161
162         for (i = 0; i < ubi->fm->used_blocks; i++)
163                 if (ubi->fm->e[i]->pnum == pnum)
164                         return 1;
165
166         return 0;
167 }
168 #else
169 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
170 {
171         return 0;
172 }
173 #endif
174
175 /**
176  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
177  * @e: the wear-leveling entry to add
178  * @root: the root of the tree
179  *
180  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
181  * the @ubi->used and @ubi->free RB-trees.
182  */
183 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
184 {
185         struct rb_node **p, *parent = NULL;
186
187         p = &root->rb_node;
188         while (*p) {
189                 struct ubi_wl_entry *e1;
190
191                 parent = *p;
192                 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
193
194                 if (e->ec < e1->ec)
195                         p = &(*p)->rb_left;
196                 else if (e->ec > e1->ec)
197                         p = &(*p)->rb_right;
198                 else {
199                         ubi_assert(e->pnum != e1->pnum);
200                         if (e->pnum < e1->pnum)
201                                 p = &(*p)->rb_left;
202                         else
203                                 p = &(*p)->rb_right;
204                 }
205         }
206
207         rb_link_node(&e->u.rb, parent, p);
208         rb_insert_color(&e->u.rb, root);
209 }
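
/*
 * Editor's sketch, not part of the original file: the total order used by
 * wl_tree_add() above, spelled out as a comparator. Entries are keyed by
 * erase counter first and PEB number second, which is what makes (EC, pnum)
 * pairs unique keys in the @ubi->used and @ubi->free trees. The helper name
 * is made up for illustration.
 */
static inline int wl_example_entry_cmp(const struct ubi_wl_entry *a,
				       const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec ? -1 : 1;
	if (a->pnum != b->pnum)
		return a->pnum < b->pnum ? -1 : 1;
	return 0;
}
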
210
211 /**
212  * do_work - do one pending work.
213  * @ubi: UBI device description object
214  *
215  * This function returns zero in case of success and a negative error code in
216  * case of failure.
217  */
218 static int do_work(struct ubi_device *ubi)
219 {
220         int err;
221         struct ubi_work *wrk;
222
223         cond_resched();
224
225         /*
226          * @ubi->work_sem is used to synchronize with the workers. Workers take
227          * it in read mode, so many of them may be doing work at a time. But
228          * the queue flush code has to be sure the whole queue of works is
229          * done, and it takes the semaphore in write mode.
230          */
231         down_read(&ubi->work_sem);
232         spin_lock(&ubi->wl_lock);
233         if (list_empty(&ubi->works)) {
234                 spin_unlock(&ubi->wl_lock);
235                 up_read(&ubi->work_sem);
236                 return 0;
237         }
238
239         wrk = list_entry(ubi->works.next, struct ubi_work, list);
240         list_del(&wrk->list);
241         ubi->works_count -= 1;
242         ubi_assert(ubi->works_count >= 0);
243         spin_unlock(&ubi->wl_lock);
244
245         /*
246          * Call the worker function. Do not touch the work structure
247          * after this call as it will have been freed or reused by that
248          * time by the worker function.
249          */
250         err = wrk->func(ubi, wrk, 0);
251         if (err)
252                 ubi_err("work failed with error code %d", err);
253         up_read(&ubi->work_sem);
254
255         return err;
256 }
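
/*
 * Editor's sketch, not part of the original file: the writer side of the
 * @ubi->work_sem scheme described in do_work() above. A flush path (such as
 * ubi_wl_flush(), which lies outside this excerpt) can wait for all
 * in-flight workers simply by taking the semaphore in write mode. The
 * helper name is made up for illustration.
 */
static inline void wl_example_wait_for_workers(struct ubi_device *ubi)
{
	down_write(&ubi->work_sem);	/* blocks until all readers drop it */
	up_write(&ubi->work_sem);
}
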
257
258 /**
259  * produce_free_peb - produce a free physical eraseblock.
260  * @ubi: UBI device description object
261  *
262  * This function tries to make a free PEB by means of synchronous execution of
263  * pending works. This may be needed if, for example, the background thread is
264  * disabled. Returns zero in case of success and a negative error code in case
265  * of failure.
266  */
267 static int produce_free_peb(struct ubi_device *ubi)
268 {
269         int err;
270
271         while (!ubi->free.rb_node) {
272                 spin_unlock(&ubi->wl_lock);
273
274                 dbg_wl("do one work synchronously");
275                 err = do_work(ubi);
276
277                 spin_lock(&ubi->wl_lock);
278                 if (err)
279                         return err;
280         }
281
282         return 0;
283 }
284
285 /**
286  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
287  * @e: the wear-leveling entry to check
288  * @root: the root of the tree
289  *
290  * This function returns non-zero if @e is in the @root RB-tree and zero if it
291  * is not.
292  */
293 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
294 {
295         struct rb_node *p;
296
297         p = root->rb_node;
298         while (p) {
299                 struct ubi_wl_entry *e1;
300
301                 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
302
303                 if (e->pnum == e1->pnum) {
304                         ubi_assert(e == e1);
305                         return 1;
306                 }
307
308                 if (e->ec < e1->ec)
309                         p = p->rb_left;
310                 else if (e->ec > e1->ec)
311                         p = p->rb_right;
312                 else {
313                         ubi_assert(e->pnum != e1->pnum);
314                         if (e->pnum < e1->pnum)
315                                 p = p->rb_left;
316                         else
317                                 p = p->rb_right;
318                 }
319         }
320
321         return 0;
322 }
323
324 /**
325  * prot_queue_add - add physical eraseblock to the protection queue.
326  * @ubi: UBI device description object
327  * @e: the physical eraseblock to add
328  *
329  * This function adds @e to the tail of the protection queue @ubi->pq, where
330  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has to
332  * be locked.
333  */
334 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
335 {
336         int pq_tail = ubi->pq_head - 1;
337
338         if (pq_tail < 0)
339                 pq_tail = UBI_PROT_QUEUE_LEN - 1;
340         ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
341         list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
342         dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
343 }
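
/*
 * Editor's sketch, not part of the original file: the timing implied by
 * prot_queue_add() above and serve_prot_queue() below. A PEB is enqueued at
 * slot @pq_head - 1 and released once @pq_head has stepped over its slot,
 * i.e. after %UBI_PROT_QUEUE_LEN global erase operations. The helper name
 * is made up for illustration.
 */
static inline int wl_example_prot_cycles_left(int pq_head, int pq_slot)
{
	int dist = pq_slot - pq_head + 1;

	if (dist <= 0)
		dist += UBI_PROT_QUEUE_LEN;
	return dist;	/* erase operations until the slot is served */
}
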
344
345 /**
346  * find_wl_entry - find a wear-leveling entry closest to a certain erase counter.
347  * @ubi: UBI device description object
348  * @root: the RB-tree to search
349  * @diff: maximum possible difference from the smallest erase counter
350  *
351  * This function looks for a wear leveling entry with erase counter closest to
352  * min + @diff, where min is the smallest erase counter.
353  */
354 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
355                                           struct rb_root *root, int diff)
356 {
357         struct rb_node *p;
358         struct ubi_wl_entry *e, *prev_e = NULL;
359         int max;
360
361         e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
362         max = e->ec + diff;
363
364         p = root->rb_node;
365         while (p) {
366                 struct ubi_wl_entry *e1;
367
368                 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
369                 if (e1->ec >= max)
370                         p = p->rb_left;
371                 else {
372                         p = p->rb_right;
373                         prev_e = e;
374                         e = e1;
375                 }
376         }
377
378         /* If no fastmap has been written and this WL entry can be used
379          * as anchor PEB, hold it back and return the second best WL entry
380          * such that fastmap can use the anchor PEB later. */
381         if (prev_e && !ubi->fm_disabled &&
382             !ubi->fm && e->pnum < UBI_FM_MAX_START)
383                 return prev_e;
384
385         return e;
386 }
387
388 /**
389  * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
390  * @ubi: UBI device description object
391  * @root: the RB-tree to search
392  *
393  * This function looks for a wear-leveling entry with a medium erase counter,
394  * but not greater than or equal to the lowest erase counter plus
395  * %WL_FREE_MAX_DIFF/2.
396  */
397 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
398                                                struct rb_root *root)
399 {
400         struct ubi_wl_entry *e, *first, *last;
401
402         first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
403         last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
404
405         if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
406                 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
407
408 #ifdef CONFIG_MTD_UBI_FASTMAP
409                 /* If no fastmap has been written and this WL entry can be used
410                  * as anchor PEB, hold it back and return the second best
411                  * WL entry such that fastmap can use the anchor PEB later. */
412                 if (e && !ubi->fm_disabled && !ubi->fm &&
413                     e->pnum < UBI_FM_MAX_START)
414                         e = rb_entry(rb_next(root->rb_node),
415                                      struct ubi_wl_entry, u.rb);
416 #endif
417         } else
418                 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
419
420         return e;
421 }
422
423 #ifdef CONFIG_MTD_UBI_FASTMAP
424 /**
425  * find_anchor_wl_entry - find a wear-leveling entry to be used as the anchor PEB.
426  * @root: the RB-tree to search
427  */
428 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
429 {
430         struct rb_node *p;
431         struct ubi_wl_entry *e, *victim = NULL;
432         int max_ec = UBI_MAX_ERASECOUNTER;
433
434         ubi_rb_for_each_entry(p, e, root, u.rb) {
435                 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
436                         victim = e;
437                         max_ec = e->ec;
438                 }
439         }
440
441         return victim;
442 }
443
444 static int anchor_pebs_available(struct rb_root *root)
445 {
446         struct rb_node *p;
447         struct ubi_wl_entry *e;
448
449         ubi_rb_for_each_entry(p, e, root, u.rb)
450                 if (e->pnum < UBI_FM_MAX_START)
451                         return 1;
452
453         return 0;
454 }
455
456 /**
457  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
458  * @ubi: UBI device description object
459  * @anchor: This PEB will be used as anchor PEB by fastmap
460  *
461  * The function returns a physical erase block with a given maximal number
462  * and removes it from the wl subsystem.
463  * Must be called with wl_lock held!
464  */
465 struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
466 {
467         struct ubi_wl_entry *e = NULL;
468
469         if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
470                 goto out;
471
472         if (anchor)
473                 e = find_anchor_wl_entry(&ubi->free);
474         else
475                 e = find_mean_wl_entry(ubi, &ubi->free);
476
477         if (!e)
478                 goto out;
479
480         self_check_in_wl_tree(ubi, e, &ubi->free);
481
482         /* remove it from the free list,
483          * the wl subsystem no longer knows this erase block */
484         rb_erase(&e->u.rb, &ubi->free);
485         ubi->free_count--;
486 out:
487         return e;
488 }
489 #endif
490
491 /**
492  * __wl_get_peb - get a physical eraseblock.
493  * @ubi: UBI device description object
494  *
495  * This function returns a physical eraseblock in case of success and a
496  * negative error code in case of failure.
497  */
498 static int __wl_get_peb(struct ubi_device *ubi)
499 {
500         int err;
501         struct ubi_wl_entry *e;
502
503 retry:
504         if (!ubi->free.rb_node) {
505                 if (ubi->works_count == 0) {
506                         ubi_err("no free eraseblocks");
507                         ubi_assert(list_empty(&ubi->works));
508                         return -ENOSPC;
509                 }
510
511                 err = produce_free_peb(ubi);
512                 if (err < 0)
513                         return err;
514                 goto retry;
515         }
516
517         e = find_mean_wl_entry(ubi, &ubi->free);
518         if (!e) {
519                 ubi_err("no free eraseblocks");
520                 return -ENOSPC;
521         }
522
523         self_check_in_wl_tree(ubi, e, &ubi->free);
524
525         /*
526          * Move the physical eraseblock to the protection queue where it will
527          * be protected from being moved for some time.
528          */
529         rb_erase(&e->u.rb, &ubi->free);
530         ubi->free_count--;
531         dbg_wl("PEB %d EC %d", e->pnum, e->ec);
532 #ifndef CONFIG_MTD_UBI_FASTMAP
533         /* We have to enqueue e only if fastmap is disabled;
534          * if fastmap is enabled, prot_queue_add() will be called by
535          * ubi_wl_get_peb() after removing e from the pool. */
536         prot_queue_add(ubi, e);
537 #endif
538         return e->pnum;
539 }
540
541 #ifdef CONFIG_MTD_UBI_FASTMAP
542 /**
543  * return_unused_pool_pebs - returns unused PEBs to the free tree.
544  * @ubi: UBI device description object
545  * @pool: fastmap pool description object
546  */
547 static void return_unused_pool_pebs(struct ubi_device *ubi,
548                                     struct ubi_fm_pool *pool)
549 {
550         int i;
551         struct ubi_wl_entry *e;
552
553         for (i = pool->used; i < pool->size; i++) {
554                 e = ubi->lookuptbl[pool->pebs[i]];
555                 wl_tree_add(e, &ubi->free);
556                 ubi->free_count++;
557         }
558 }
559
560 /**
561  * refill_wl_pool - refills the fastmap pool used by the
562  * WL sub-system.
563  * @ubi: UBI device description object
564  */
565 static void refill_wl_pool(struct ubi_device *ubi)
566 {
567         struct ubi_wl_entry *e;
568         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
569
570         return_unused_pool_pebs(ubi, pool);
571
572         for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
573                 if (!ubi->free.rb_node ||
574                    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
575                         break;
576
577                 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
578                 self_check_in_wl_tree(ubi, e, &ubi->free);
579                 rb_erase(&e->u.rb, &ubi->free);
580                 ubi->free_count--;
581
582                 pool->pebs[pool->size] = e->pnum;
583         }
584         pool->used = 0;
585 }
586
587 /**
588  * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
589  * @ubi: UBI device description object
590  */
591 static void refill_wl_user_pool(struct ubi_device *ubi)
592 {
593         struct ubi_fm_pool *pool = &ubi->fm_pool;
594
595         return_unused_pool_pebs(ubi, pool);
596
597         for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
598                 pool->pebs[pool->size] = __wl_get_peb(ubi);
599                 if (pool->pebs[pool->size] < 0)
600                         break;
601         }
602         pool->used = 0;
603 }
604
605 /**
606  * ubi_refill_pools - refills all fastmap PEB pools.
607  * @ubi: UBI device description object
608  */
609 void ubi_refill_pools(struct ubi_device *ubi)
610 {
611         spin_lock(&ubi->wl_lock);
612         refill_wl_pool(ubi);
613         refill_wl_user_pool(ubi);
614         spin_unlock(&ubi->wl_lock);
615 }
616
617 /* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
618  * the fastmap pool.
619  */
620 int ubi_wl_get_peb(struct ubi_device *ubi)
621 {
622         int ret;
623         struct ubi_fm_pool *pool = &ubi->fm_pool;
624         struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
625
626         if (!pool->size || !wl_pool->size || pool->used == pool->size ||
627             wl_pool->used == wl_pool->size)
628                 ubi_update_fastmap(ubi);
629
630         /* we did not get a single free PEB */
631         if (!pool->size)
632                 ret = -ENOSPC;
633         else {
634                 spin_lock(&ubi->wl_lock);
635                 ret = pool->pebs[pool->used++];
636                 prot_queue_add(ubi, ubi->lookuptbl[ret]);
637                 spin_unlock(&ubi->wl_lock);
638         }
639
640         return ret;
641 }
642
643 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
644  *
645  * @ubi: UBI device description object
646  */
647 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
648 {
649         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
650         int pnum;
651
652         if (pool->used == pool->size || !pool->size) {
653                 /* We cannot update the fastmap here because this
654                  * function is called in atomic context.
655                  * Let's fail here and refill/update it as soon as possible. */
656 #ifndef __UBOOT__
657                 schedule_work(&ubi->fm_work);
658 #else
659                 /* In U-Boot we must call this directly */
660                 ubi_update_fastmap(ubi);
661 #endif
662                 return NULL;
663         } else {
664                 pnum = pool->pebs[pool->used++];
665                 return ubi->lookuptbl[pnum];
666         }
667 }
668 #else
669 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
670 {
671         struct ubi_wl_entry *e;
672
673         e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
674         self_check_in_wl_tree(ubi, e, &ubi->free);
675         ubi->free_count--;
676         ubi_assert(ubi->free_count >= 0);
677         rb_erase(&e->u.rb, &ubi->free);
678
679         return e;
680 }
681
682 int ubi_wl_get_peb(struct ubi_device *ubi)
683 {
684         int peb, err;
685
686         spin_lock(&ubi->wl_lock);
687         peb = __wl_get_peb(ubi);
688         spin_unlock(&ubi->wl_lock);
689
690         if (peb < 0)
691                 return peb;
692
693         err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
694                                     ubi->peb_size - ubi->vid_hdr_aloffset);
695         if (err) {
696                 ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
697                 return err;
698         }
699
700         return peb;
701 }
702 #endif
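
/*
 * Editor's usage sketch, not part of the original file: how a caller (for
 * example the EBA sub-system) might use ubi_wl_get_peb(). The returned PEB
 * carries a valid EC header, contains 0xFF bytes elsewhere, and sits in the
 * protection queue, so the WL worker will not move it right away. The
 * helper name is made up for illustration.
 */
static inline int wl_example_alloc_peb(struct ubi_device *ubi)
{
	int pnum = ubi_wl_get_peb(ubi);

	if (pnum < 0)
		return pnum;	/* e.g. -ENOSPC: no free PEB could be produced */

	/* the caller may now write a VID header and data to @pnum */
	return pnum;
}
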
703
704 /**
705  * prot_queue_del - remove a physical eraseblock from the protection queue.
706  * @ubi: UBI device description object
707  * @pnum: the physical eraseblock to remove
708  *
709  * This function deletes PEB @pnum from the protection queue and returns zero
710  * in case of success and %-ENODEV if the PEB was not found.
711  */
712 static int prot_queue_del(struct ubi_device *ubi, int pnum)
713 {
714         struct ubi_wl_entry *e;
715
716         e = ubi->lookuptbl[pnum];
717         if (!e)
718                 return -ENODEV;
719
720         if (self_check_in_pq(ubi, e))
721                 return -ENODEV;
722
723         list_del(&e->u.list);
724         dbg_wl("deleted PEB %d from the protection queue", e->pnum);
725         return 0;
726 }
727
728 /**
729  * sync_erase - synchronously erase a physical eraseblock.
730  * @ubi: UBI device description object
731  * @e: the physical eraseblock to erase
732  * @torture: if the physical eraseblock has to be tortured
733  *
734  * This function returns zero in case of success and a negative error code in
735  * case of failure.
736  */
737 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
738                       int torture)
739 {
740         int err;
741         struct ubi_ec_hdr *ec_hdr;
742         unsigned long long ec = e->ec;
743
744         dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
745
746         err = self_check_ec(ubi, e->pnum, e->ec);
747         if (err)
748                 return -EINVAL;
749
750         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
751         if (!ec_hdr)
752                 return -ENOMEM;
753
754         err = ubi_io_sync_erase(ubi, e->pnum, torture);
755         if (err < 0)
756                 goto out_free;
757
758         ec += err;
759         if (ec > UBI_MAX_ERASECOUNTER) {
760                 /*
761                  * Erase counter overflow. Upgrade UBI and use 64-bit
762                  * erase counters internally.
763                  */
764                 ubi_err("erase counter overflow at PEB %d, EC %llu",
765                         e->pnum, ec);
766                 err = -EINVAL;
767                 goto out_free;
768         }
769
770         dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
771
772         ec_hdr->ec = cpu_to_be64(ec);
773
774         err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
775         if (err)
776                 goto out_free;
777
778         e->ec = ec;
779         spin_lock(&ubi->wl_lock);
780         if (e->ec > ubi->max_ec)
781                 ubi->max_ec = e->ec;
782         spin_unlock(&ubi->wl_lock);
783
784 out_free:
785         kfree(ec_hdr);
786         return err;
787 }
788
789 /**
790  * serve_prot_queue - check if it is time to stop protecting PEBs.
791  * @ubi: UBI device description object
792  *
793  * This function is called after each erase operation and removes PEBs from the
794  * tail of the protection queue. These PEBs have been protected for long enough
795  * and should be moved to the used tree.
796  */
797 static void serve_prot_queue(struct ubi_device *ubi)
798 {
799         struct ubi_wl_entry *e, *tmp;
800         int count;
801
802         /*
803          * There may be several protected physical eraseblocks to remove,
804          * process them all.
805          */
806 repeat:
807         count = 0;
808         spin_lock(&ubi->wl_lock);
809         list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
810                 dbg_wl("PEB %d EC %d protection over, move to used tree",
811                         e->pnum, e->ec);
812
813                 list_del(&e->u.list);
814                 wl_tree_add(e, &ubi->used);
815                 if (count++ > 32) {
816                         /*
817                          * Let's be nice and avoid holding the spinlock for
818                          * too long.
819                          */
820                         spin_unlock(&ubi->wl_lock);
821                         cond_resched();
822                         goto repeat;
823                 }
824         }
825
826         ubi->pq_head += 1;
827         if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
828                 ubi->pq_head = 0;
829         ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
830         spin_unlock(&ubi->wl_lock);
831 }
832
833 /**
834  * __schedule_ubi_work - schedule a work.
835  * @ubi: UBI device description object
836  * @wrk: the work to schedule
837  *
838  * This function adds a work defined by @wrk to the tail of the pending works
839  * list. Can only be used if @ubi->work_sem is already held in read mode!
840  */
841 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
842 {
843         spin_lock(&ubi->wl_lock);
844         list_add_tail(&wrk->list, &ubi->works);
845         ubi_assert(ubi->works_count >= 0);
846         ubi->works_count += 1;
847 #ifndef __UBOOT__
848         if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
849                 wake_up_process(ubi->bgt_thread);
850 #else
851         /*
852          * U-Boot special: We have no bgt_thread in U-Boot!
853          * So just call do_work() here directly.
854          */
855         do_work(ubi);
856 #endif
857         spin_unlock(&ubi->wl_lock);
858 }
859
860 /**
861  * schedule_ubi_work - schedule a work.
862  * @ubi: UBI device description object
863  * @wrk: the work to schedule
864  *
865  * This function adds a work defined by @wrk to the tail of the pending works
866  * list.
867  */
868 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
869 {
870         down_read(&ubi->work_sem);
871         __schedule_ubi_work(ubi, wrk);
872         up_read(&ubi->work_sem);
873 }
874
875 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
876                         int cancel);
877
878 #ifdef CONFIG_MTD_UBI_FASTMAP
879 /**
880  * ubi_is_erase_work - checks whether a work is erase work.
881  * @wrk: The work object to be checked
882  */
883 int ubi_is_erase_work(struct ubi_work *wrk)
884 {
885         return wrk->func == erase_worker;
886 }
887 #endif
888
889 /**
890  * schedule_erase - schedule an erase work.
891  * @ubi: UBI device description object
892  * @e: the WL entry of the physical eraseblock to erase
893  * @vol_id: the volume ID that last used this PEB
894  * @lnum: the last used logical eraseblock number for the PEB
895  * @torture: if the physical eraseblock has to be tortured
896  *
897  * This function returns zero in case of success and %-ENOMEM in case of
898  * failure.
899  */
900 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
901                           int vol_id, int lnum, int torture)
902 {
903         struct ubi_work *wl_wrk;
904
905         ubi_assert(e);
906         ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
907
908         dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
909                e->pnum, e->ec, torture);
910
911         wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
912         if (!wl_wrk)
913                 return -ENOMEM;
914
915         wl_wrk->func = &erase_worker;
916         wl_wrk->e = e;
917         wl_wrk->vol_id = vol_id;
918         wl_wrk->lnum = lnum;
919         wl_wrk->torture = torture;
920
921         schedule_ubi_work(ubi, wl_wrk);
922         return 0;
923 }
924
925 /**
926  * do_sync_erase - run the erase worker synchronously.
927  * @ubi: UBI device description object
928  * @e: the WL entry of the physical eraseblock to erase
929  * @vol_id: the volume ID that last used this PEB
930  * @lnum: the last used logical eraseblock number for the PEB
931  * @torture: if the physical eraseblock has to be tortured
932  *
933  */
934 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
935                          int vol_id, int lnum, int torture)
936 {
937         struct ubi_work *wl_wrk;
938
939         dbg_wl("sync erase of PEB %i", e->pnum);
940
941         wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
942         if (!wl_wrk)
943                 return -ENOMEM;
944
945         wl_wrk->e = e;
946         wl_wrk->vol_id = vol_id;
947         wl_wrk->lnum = lnum;
948         wl_wrk->torture = torture;
949
950         return erase_worker(ubi, wl_wrk, 0);
951 }
952
953 #ifdef CONFIG_MTD_UBI_FASTMAP
954 /**
955  * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
956  * sub-system.
957  * see: ubi_wl_put_peb()
958  *
959  * @ubi: UBI device description object
960  * @fm_e: physical eraseblock to return
961  * @lnum: the last used logical eraseblock number for the PEB
962  * @torture: if this physical eraseblock has to be tortured
963  */
964 int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
965                       int lnum, int torture)
966 {
967         struct ubi_wl_entry *e;
968         int vol_id, pnum = fm_e->pnum;
969
970         dbg_wl("PEB %d", pnum);
971
972         ubi_assert(pnum >= 0);
973         ubi_assert(pnum < ubi->peb_count);
974
975         spin_lock(&ubi->wl_lock);
976         e = ubi->lookuptbl[pnum];
977
978         /* This can happen if we recovered from a fastmap the very
979          * first time and are now writing a new one. In this case the WL system
980          * has never seen any PEB used by the original fastmap.
981          */
982         if (!e) {
983                 e = fm_e;
984                 ubi_assert(e->ec >= 0);
985                 ubi->lookuptbl[pnum] = e;
986         } else {
987                 e->ec = fm_e->ec;
988                 kfree(fm_e);
989         }
990
991         spin_unlock(&ubi->wl_lock);
992
993         vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
994         return schedule_erase(ubi, e, vol_id, lnum, torture);
995 }
996 #endif
997
998 /**
999  * wear_leveling_worker - wear-leveling worker function.
1000  * @ubi: UBI device description object
1001  * @wrk: the work object
1002  * @cancel: non-zero if the worker has to free memory and exit
1003  *
1004  * This function copies a more worn out physical eraseblock to a less worn out
1005  * one. Returns zero in case of success and a negative error code in case of
1006  * failure.
1007  */
1008 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1009                                 int cancel)
1010 {
1011         int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1012         int vol_id = -1, uninitialized_var(lnum);
1013 #ifdef CONFIG_MTD_UBI_FASTMAP
1014         int anchor = wrk->anchor;
1015 #endif
1016         struct ubi_wl_entry *e1, *e2;
1017         struct ubi_vid_hdr *vid_hdr;
1018
1019         kfree(wrk);
1020         if (cancel)
1021                 return 0;
1022
1023         vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1024         if (!vid_hdr)
1025                 return -ENOMEM;
1026
1027         mutex_lock(&ubi->move_mutex);
1028         spin_lock(&ubi->wl_lock);
1029         ubi_assert(!ubi->move_from && !ubi->move_to);
1030         ubi_assert(!ubi->move_to_put);
1031
1032         if (!ubi->free.rb_node ||
1033             (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
1034                 /*
1035                  * No free physical eraseblocks? Well, they must be waiting in
1036                  * the queue to be erased. Cancel movement - it will be
1037                  * triggered again when a free physical eraseblock appears.
1038                  *
1039                  * No used physical eraseblocks? They must be temporarily
1040                  * protected from being moved. They will be moved to the
1041                  * @ubi->used tree later and the wear-leveling will be
1042                  * triggered again.
1043                  */
1044                 dbg_wl("cancel WL, a list is empty: free %d, used %d",
1045                        !ubi->free.rb_node, !ubi->used.rb_node);
1046                 goto out_cancel;
1047         }
1048
1049 #ifdef CONFIG_MTD_UBI_FASTMAP
1050         /* Check whether we need to produce an anchor PEB */
1051         if (!anchor)
1052                 anchor = !anchor_pebs_available(&ubi->free);
1053
1054         if (anchor) {
1055                 e1 = find_anchor_wl_entry(&ubi->used);
1056                 if (!e1)
1057                         goto out_cancel;
1058                 e2 = get_peb_for_wl(ubi);
1059                 if (!e2)
1060                         goto out_cancel;
1061
1062                 self_check_in_wl_tree(ubi, e1, &ubi->used);
1063                 rb_erase(&e1->u.rb, &ubi->used);
1064                 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
1065         } else if (!ubi->scrub.rb_node) {
1066 #else
1067         if (!ubi->scrub.rb_node) {
1068 #endif
1069                 /*
1070                  * Now pick the least worn-out used physical eraseblock and a
1071                  * highly worn-out free physical eraseblock. If the erase
1072                  * counters differ much enough, start wear-leveling.
1073                  */
1074                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1075                 e2 = get_peb_for_wl(ubi);
1076                 if (!e2)
1077                         goto out_cancel;
1078
1079                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
1080                         dbg_wl("no WL needed: min used EC %d, max free EC %d",
1081                                e1->ec, e2->ec);
1082
1083                         /* Give the unused PEB back */
1084                         wl_tree_add(e2, &ubi->free);
1085                         ubi->free_count++;
1086                         goto out_cancel;
1087                 }
1088                 self_check_in_wl_tree(ubi, e1, &ubi->used);
1089                 rb_erase(&e1->u.rb, &ubi->used);
1090                 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
1091                        e1->pnum, e1->ec, e2->pnum, e2->ec);
1092         } else {
1093                 /* Perform scrubbing */
1094                 scrubbing = 1;
1095                 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1096                 e2 = get_peb_for_wl(ubi);
1097                 if (!e2)
1098                         goto out_cancel;
1099
1100                 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
1101                 rb_erase(&e1->u.rb, &ubi->scrub);
1102                 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
1103         }
1104
1105         ubi->move_from = e1;
1106         ubi->move_to = e2;
1107         spin_unlock(&ubi->wl_lock);
1108
1109         /*
1110          * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
1111          * We so far do not know which logical eraseblock our physical
1112          * eraseblock (@e1) belongs to. We have to read the volume identifier
1113          * header first.
1114          *
1115          * Note, we are protected from this PEB being unmapped and erased. The
1116          * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
1117          * which is being moved was unmapped.
1118          */
1119
1120         err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
1121         if (err && err != UBI_IO_BITFLIPS) {
1122                 if (err == UBI_IO_FF) {
1123                         /*
1124                          * We are trying to move a PEB without a VID header. UBI
1125                          * always writes VID headers shortly after the PEB was
1126                          * given out, so the user has probably not yet had a
1127                          * chance to write one, because it was preempted.
1128                          * So add this PEB to the protection queue for now,
1129                          * because presumably more data will be written there
1130                          * (including the missing VID header), and then we'll
1131                          * move it.
1132                          */
1133                         dbg_wl("PEB %d has no VID header", e1->pnum);
1134                         protect = 1;
1135                         goto out_not_moved;
1136                 } else if (err == UBI_IO_FF_BITFLIPS) {
1137                         /*
1138                          * The same situation as %UBI_IO_FF, but bit-flips were
1139                          * detected. It is better to schedule this PEB for
1140                          * scrubbing.
1141                          */
1142                         dbg_wl("PEB %d has no VID header but has bit-flips",
1143                                e1->pnum);
1144                         scrubbing = 1;
1145                         goto out_not_moved;
1146                 }
1147
1148                 ubi_err("error %d while reading VID header from PEB %d",
1149                         err, e1->pnum);
1150                 goto out_error;
1151         }
1152
1153         vol_id = be32_to_cpu(vid_hdr->vol_id);
1154         lnum = be32_to_cpu(vid_hdr->lnum);
1155
1156         err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
1157         if (err) {
1158                 if (err == MOVE_CANCEL_RACE) {
1159                         /*
1160                          * The LEB has not been moved because the volume is
1161                          * being deleted or the PEB has been put meanwhile. We
1162                          * should prevent this PEB from being selected for
1163                          * wear-leveling movement again, so put it to the
1164                          * protection queue.
1165                          */
1166                         protect = 1;
1167                         goto out_not_moved;
1168                 }
1169                 if (err == MOVE_RETRY) {
1170                         scrubbing = 1;
1171                         goto out_not_moved;
1172                 }
1173                 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
1174                     err == MOVE_TARGET_RD_ERR) {
1175                         /*
1176                          * Target PEB had bit-flips or write error - torture it.
1177                          */
1178                         torture = 1;
1179                         goto out_not_moved;
1180                 }
1181
1182                 if (err == MOVE_SOURCE_RD_ERR) {
1183                         /*
1184                          * An error happened while reading the source PEB. Do
1185                          * not switch to R/O mode in this case, and give the
1186                          * upper layers a possibility to recover from this,
1187                          * e.g. by unmapping corresponding LEB. Instead, just
1188                          * put this PEB to the @ubi->erroneous list to prevent
1189                          * UBI from trying to move it over and over again.
1190                          */
1191                         if (ubi->erroneous_peb_count > ubi->max_erroneous) {
1192                                 ubi_err("too many erroneous eraseblocks (%d)",
1193                                         ubi->erroneous_peb_count);
1194                                 goto out_error;
1195                         }
1196                         erroneous = 1;
1197                         goto out_not_moved;
1198                 }
1199
1200                 if (err < 0)
1201                         goto out_error;
1202
1203                 ubi_assert(0);
1204         }
1205
1206         /* The PEB has been successfully moved */
1207         if (scrubbing)
1208                 ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
1209                         e1->pnum, vol_id, lnum, e2->pnum);
1210         ubi_free_vid_hdr(ubi, vid_hdr);
1211
1212         spin_lock(&ubi->wl_lock);
1213         if (!ubi->move_to_put) {
1214                 wl_tree_add(e2, &ubi->used);
1215                 e2 = NULL;
1216         }
1217         ubi->move_from = ubi->move_to = NULL;
1218         ubi->move_to_put = ubi->wl_scheduled = 0;
1219         spin_unlock(&ubi->wl_lock);
1220
1221         err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1222         if (err) {
1223                 kmem_cache_free(ubi_wl_entry_slab, e1);
1224                 if (e2)
1225                         kmem_cache_free(ubi_wl_entry_slab, e2);
1226                 goto out_ro;
1227         }
1228
1229         if (e2) {
1230                 /*
1231                  * Well, the target PEB was put meanwhile, schedule it for
1232                  * erasure.
1233                  */
1234                 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
1235                        e2->pnum, vol_id, lnum);
1236                 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
1237                 if (err) {
1238                         kmem_cache_free(ubi_wl_entry_slab, e2);
1239                         goto out_ro;
1240                 }
1241         }
1242
1243         dbg_wl("done");
1244         mutex_unlock(&ubi->move_mutex);
1245         return 0;
1246
1247         /*
1248          * For some reason the LEB was not moved; it might be an error or
1249          * something else. @e1 was not changed, so return it back. @e2 might
1250          * have been changed, so schedule it for erasure.
1251          */
1252 out_not_moved:
1253         if (vol_id != -1)
1254                 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
1255                        e1->pnum, vol_id, lnum, e2->pnum, err);
1256         else
1257                 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
1258                        e1->pnum, e2->pnum, err);
1259         spin_lock(&ubi->wl_lock);
1260         if (protect)
1261                 prot_queue_add(ubi, e1);
1262         else if (erroneous) {
1263                 wl_tree_add(e1, &ubi->erroneous);
1264                 ubi->erroneous_peb_count += 1;
1265         } else if (scrubbing)
1266                 wl_tree_add(e1, &ubi->scrub);
1267         else
1268                 wl_tree_add(e1, &ubi->used);
1269         ubi_assert(!ubi->move_to_put);
1270         ubi->move_from = ubi->move_to = NULL;
1271         ubi->wl_scheduled = 0;
1272         spin_unlock(&ubi->wl_lock);
1273
1274         ubi_free_vid_hdr(ubi, vid_hdr);
1275         err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
1276         if (err) {
1277                 kmem_cache_free(ubi_wl_entry_slab, e2);
1278                 goto out_ro;
1279         }
1280         mutex_unlock(&ubi->move_mutex);
1281         return 0;
1282
1283 out_error:
1284         if (vol_id != -1)
1285                 ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
1286                         err, e1->pnum, vol_id, lnum, e2->pnum);
1287         else
1288                 ubi_err("error %d while moving PEB %d to PEB %d",
1289                         err, e1->pnum, e2->pnum);
1290         spin_lock(&ubi->wl_lock);
1291         ubi->move_from = ubi->move_to = NULL;
1292         ubi->move_to_put = ubi->wl_scheduled = 0;
1293         spin_unlock(&ubi->wl_lock);
1294
1295         ubi_free_vid_hdr(ubi, vid_hdr);
1296         kmem_cache_free(ubi_wl_entry_slab, e1);
1297         kmem_cache_free(ubi_wl_entry_slab, e2);
1298
1299 out_ro:
1300         ubi_ro_mode(ubi);
1301         mutex_unlock(&ubi->move_mutex);
1302         ubi_assert(err != 0);
1303         return err < 0 ? err : -EIO;
1304
1305 out_cancel:
1306         ubi->wl_scheduled = 0;
1307         spin_unlock(&ubi->wl_lock);
1308         mutex_unlock(&ubi->move_mutex);
1309         ubi_free_vid_hdr(ubi, vid_hdr);
1310         return 0;
1311 }
1312
1313 /**
1314  * ensure_wear_leveling - schedule wear-leveling if it is needed.
1315  * @ubi: UBI device description object
1316  * @nested: set to non-zero if this function is called from UBI worker
1317  *
1318  * This function checks if it is time to start wear-leveling and schedules it
1319  * if yes. This function returns zero in case of success and a negative error
1320  * code in case of failure.
1321  */
1322 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1323 {
1324         int err = 0;
1325         struct ubi_wl_entry *e1;
1326         struct ubi_wl_entry *e2;
1327         struct ubi_work *wrk;
1328
1329         spin_lock(&ubi->wl_lock);
1330         if (ubi->wl_scheduled)
1331                 /* Wear-leveling is already in the work queue */
1332                 goto out_unlock;
1333
1334         /*
1335          * If the ubi->scrub tree is not empty, scrubbing is needed, and
1336          * the WL worker has to be scheduled anyway.
1337          */
1338         if (!ubi->scrub.rb_node) {
1339                 if (!ubi->used.rb_node || !ubi->free.rb_node)
1340                         /* No physical eraseblocks - no deal */
1341                         goto out_unlock;
1342
1343                 /*
1344                  * We schedule wear-leveling only if the difference between the
1345                  * lowest erase counter of used physical eraseblocks and a high
1346                  * erase counter of free physical eraseblocks is greater than
1347                  * %UBI_WL_THRESHOLD.
1348                  */
1349                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1350                 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1351
1352                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1353                         goto out_unlock;
1354                 dbg_wl("schedule wear-leveling");
1355         } else
1356                 dbg_wl("schedule scrubbing");
1357
1358         ubi->wl_scheduled = 1;
1359         spin_unlock(&ubi->wl_lock);
1360
1361         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1362         if (!wrk) {
1363                 err = -ENOMEM;
1364                 goto out_cancel;
1365         }
1366
1367         wrk->anchor = 0;
1368         wrk->func = &wear_leveling_worker;
1369         if (nested)
1370                 __schedule_ubi_work(ubi, wrk);
1371         else
1372                 schedule_ubi_work(ubi, wrk);
1373         return err;
1374
1375 out_cancel:
1376         spin_lock(&ubi->wl_lock);
1377         ubi->wl_scheduled = 0;
1378 out_unlock:
1379         spin_unlock(&ubi->wl_lock);
1380         return err;
1381 }
1382
1383 #ifdef CONFIG_MTD_UBI_FASTMAP
1384 /**
1385  * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1386  * @ubi: UBI device description object
1387  */
1388 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1389 {
1390         struct ubi_work *wrk;
1391
1392         spin_lock(&ubi->wl_lock);
1393         if (ubi->wl_scheduled) {
1394                 spin_unlock(&ubi->wl_lock);
1395                 return 0;
1396         }
1397         ubi->wl_scheduled = 1;
1398         spin_unlock(&ubi->wl_lock);
1399
1400         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1401         if (!wrk) {
1402                 spin_lock(&ubi->wl_lock);
1403                 ubi->wl_scheduled = 0;
1404                 spin_unlock(&ubi->wl_lock);
1405                 return -ENOMEM;
1406         }
1407
1408         wrk->anchor = 1;
1409         wrk->func = &wear_leveling_worker;
1410         schedule_ubi_work(ubi, wrk);
1411         return 0;
1412 }
1413 #endif
1414
1415 /**
1416  * erase_worker - physical eraseblock erase worker function.
1417  * @ubi: UBI device description object
1418  * @wl_wrk: the work object
1419  * @cancel: non-zero if the worker has to free memory and exit
1420  *
1421  * This function erases a physical eraseblock and performs torture testing if
1422  * needed. It also takes care of marking the physical eraseblock bad if
1423  * needed. Returns zero in case of success and a negative error code in case of
1424  * failure.
1425  */
1426 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1427                         int cancel)
1428 {
1429         struct ubi_wl_entry *e = wl_wrk->e;
1430         int pnum = e->pnum;
1431         int vol_id = wl_wrk->vol_id;
1432         int lnum = wl_wrk->lnum;
1433         int err, available_consumed = 0;
1434
1435         if (cancel) {
1436                 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1437                 kfree(wl_wrk);
1438                 kmem_cache_free(ubi_wl_entry_slab, e);
1439                 return 0;
1440         }
1441
1442         dbg_wl("erase PEB %d EC %d LEB %d:%d",
1443                pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1444
1445         ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1446
1447         err = sync_erase(ubi, e, wl_wrk->torture);
1448         if (!err) {
1449                 /* Fine, we've erased it successfully */
1450                 kfree(wl_wrk);
1451
1452                 spin_lock(&ubi->wl_lock);
1453                 wl_tree_add(e, &ubi->free);
1454                 ubi->free_count++;
1455                 spin_unlock(&ubi->wl_lock);
1456
1457                 /*
1458                  * One more erase operation has happened, take care of
1459                  * protected physical eraseblocks.
1460                  */
1461                 serve_prot_queue(ubi);
1462
1463                 /* And take care about wear-leveling */
1464                 err = ensure_wear_leveling(ubi, 1);
1465                 return err;
1466         }
1467
1468         ubi_err("failed to erase PEB %d, error %d", pnum, err);
1469         kfree(wl_wrk);
1470
1471         if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1472             err == -EBUSY) {
1473                 int err1;
1474
1475                 /* Re-schedule the PEB for erasure */
1476                 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1477                 if (err1) {
1478                         err = err1;
1479                         goto out_ro;
1480                 }
1481                 return err;
1482         }
1483
1484         kmem_cache_free(ubi_wl_entry_slab, e);
1485         if (err != -EIO)
1486                 /*
1487                  * If this is not %-EIO, we have no idea what to do. Scheduling
1488                  * this physical eraseblock for erasure again would cause
1489                  * errors again and again. Well, let's switch to R/O mode.
1490                  */
1491                 goto out_ro;
1492
1493         /* It is %-EIO, the PEB went bad */
1494
1495         if (!ubi->bad_allowed) {
1496                 ubi_err("bad physical eraseblock %d detected", pnum);
1497                 goto out_ro;
1498         }
1499
1500         spin_lock(&ubi->volumes_lock);
1501         if (ubi->beb_rsvd_pebs == 0) {
1502                 if (ubi->avail_pebs == 0) {
1503                         spin_unlock(&ubi->volumes_lock);
1504                         ubi_err("no reserved/available physical eraseblocks");
1505                         goto out_ro;
1506                 }
1507                 ubi->avail_pebs -= 1;
1508                 available_consumed = 1;
1509         }
1510         spin_unlock(&ubi->volumes_lock);
1511
1512         ubi_msg("mark PEB %d as bad", pnum);
1513         err = ubi_io_mark_bad(ubi, pnum);
1514         if (err)
1515                 goto out_ro;
1516
1517         spin_lock(&ubi->volumes_lock);
1518         if (ubi->beb_rsvd_pebs > 0) {
1519                 if (available_consumed) {
1520                         /*
1521                          * The number of reserved PEBs increased since we last
1522                          * checked.
1523                          */
1524                         ubi->avail_pebs += 1;
1525                         available_consumed = 0;
1526                 }
1527                 ubi->beb_rsvd_pebs -= 1;
1528         }
1529         ubi->bad_peb_count += 1;
1530         ubi->good_peb_count -= 1;
1531         ubi_calculate_reserved(ubi);
1532         if (available_consumed)
1533                 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1534         else if (ubi->beb_rsvd_pebs)
1535                 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1536         else
1537                 ubi_warn("last PEB from the reserve was used");
1538         spin_unlock(&ubi->volumes_lock);
1539
1540         return err;
1541
1542 out_ro:
1543         if (available_consumed) {
1544                 spin_lock(&ubi->volumes_lock);
1545                 ubi->avail_pebs += 1;
1546                 spin_unlock(&ubi->volumes_lock);
1547         }
1548         ubi_ro_mode(ubi);
1549         return err;
1550 }
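
/*
 * The error handling above boils down to a simple policy (a condensed
 * sketch, not executable driver code; all identifiers are the real
 * ones used in this function):
 *
 *	if (err is -EINTR, -ENOMEM, -EAGAIN or -EBUSY)
 *		schedule_erase(ubi, e, vol_id, lnum, 0);  transient: retry
 *	else if (err == -EIO && ubi->bad_allowed)
 *		ubi_io_mark_bad(ubi, pnum);               worn out: retire
 *	else if (err)
 *		ubi_ro_mode(ubi);                         otherwise: go R/O
 */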
1551
1552 /**
1553  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1554  * @ubi: UBI device description object
1555  * @vol_id: the volume ID that last used this PEB
1556  * @lnum: the last used logical eraseblock number for the PEB
1557  * @pnum: physical eraseblock to return
1558  * @torture: if this physical eraseblock has to be tortured
1559  *
1560  * This function is called to return physical eraseblock @pnum to the pool of
1561  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1562  * occurred on this @pnum and it has to be tested. This function returns zero
1563  * in case of success, and a negative error code in case of failure.
1564  */
1565 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1566                    int pnum, int torture)
1567 {
1568         int err;
1569         struct ubi_wl_entry *e;
1570
1571         dbg_wl("PEB %d", pnum);
1572         ubi_assert(pnum >= 0);
1573         ubi_assert(pnum < ubi->peb_count);
1574
1575 retry:
1576         spin_lock(&ubi->wl_lock);
1577         e = ubi->lookuptbl[pnum];
1578         if (e == ubi->move_from) {
1579                 /*
1580                  * User is putting the physical eraseblock which was selected to
1581                  * be moved. It will be scheduled for erasure in the
1582                  * wear-leveling worker.
1583                  */
1584                 dbg_wl("PEB %d is being moved, wait", pnum);
1585                 spin_unlock(&ubi->wl_lock);
1586
1587                 /* Wait for the WL worker by taking the @ubi->move_mutex */
1588                 mutex_lock(&ubi->move_mutex);
1589                 mutex_unlock(&ubi->move_mutex);
1590                 goto retry;
1591         } else if (e == ubi->move_to) {
1592                 /*
1593                  * User is putting the physical eraseblock which was selected
1594                  * as the target of data moving. This may happen if the EBA
1595                  * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1596                  * but the WL sub-system has not yet put the PEB to the "used"
1597                  * tree and is about to do so. So we just set a flag which
1598                  * will tell the WL worker that the PEB is not needed anymore
1599                  * and should be scheduled for erasure.
1600                  */
1601                 dbg_wl("PEB %d is the target of data moving", pnum);
1602                 ubi_assert(!ubi->move_to_put);
1603                 ubi->move_to_put = 1;
1604                 spin_unlock(&ubi->wl_lock);
1605                 return 0;
1606         } else {
1607                 if (in_wl_tree(e, &ubi->used)) {
1608                         self_check_in_wl_tree(ubi, e, &ubi->used);
1609                         rb_erase(&e->u.rb, &ubi->used);
1610                 } else if (in_wl_tree(e, &ubi->scrub)) {
1611                         self_check_in_wl_tree(ubi, e, &ubi->scrub);
1612                         rb_erase(&e->u.rb, &ubi->scrub);
1613                 } else if (in_wl_tree(e, &ubi->erroneous)) {
1614                         self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1615                         rb_erase(&e->u.rb, &ubi->erroneous);
1616                         ubi->erroneous_peb_count -= 1;
1617                         ubi_assert(ubi->erroneous_peb_count >= 0);
1618                         /* Erroneous PEBs should be tortured */
1619                         torture = 1;
1620                 } else {
1621                         err = prot_queue_del(ubi, e->pnum);
1622                         if (err) {
1623                                 ubi_err("PEB %d not found", pnum);
1624                                 ubi_ro_mode(ubi);
1625                                 spin_unlock(&ubi->wl_lock);
1626                                 return err;
1627                         }
1628                 }
1629         }
1630         spin_unlock(&ubi->wl_lock);
1631
1632         err = schedule_erase(ubi, e, vol_id, lnum, torture);
1633         if (err) {
1634                 spin_lock(&ubi->wl_lock);
1635                 wl_tree_add(e, &ubi->used);
1636                 spin_unlock(&ubi->wl_lock);
1637         }
1638
1639         return err;
1640 }
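
/*
 * Example usage (a minimal sketch; the real callers live in the EBA
 * sub-system): a PEB that is no longer mapped is handed back, with
 * @torture set when the put was triggered by an I/O error:
 *
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 *
 * If this fails, the PEB is kept in the @ubi->used tree, as the error
 * path above shows.
 */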
1641
1642 /**
1643  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1644  * @ubi: UBI device description object
1645  * @pnum: the physical eraseblock to schedule
1646  *
1647  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1648  * needs scrubbing. This function schedules a physical eraseblock for
1649  * scrubbing, which is done in the background. It returns zero in case of
1650  * success and a negative error code in case of failure.
1651  */
1652 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1653 {
1654         struct ubi_wl_entry *e;
1655
1656         ubi_msg("schedule PEB %d for scrubbing", pnum);
1657
1658 retry:
1659         spin_lock(&ubi->wl_lock);
1660         e = ubi->lookuptbl[pnum];
1661         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1662                                    in_wl_tree(e, &ubi->erroneous)) {
1663                 spin_unlock(&ubi->wl_lock);
1664                 return 0;
1665         }
1666
1667         if (e == ubi->move_to) {
1668                 /*
1669                  * This physical eraseblock was used to move data to. The data
1670                  * was moved but the PEB was not yet inserted into the proper
1671                  * tree. We should just wait a little and let the WL worker
1672                  * proceed.
1673                  */
1674                 spin_unlock(&ubi->wl_lock);
1675                 dbg_wl("PEB %d is not in the proper tree, retry", pnum);
1676                 yield();
1677                 goto retry;
1678         }
1679
1680         if (in_wl_tree(e, &ubi->used)) {
1681                 self_check_in_wl_tree(ubi, e, &ubi->used);
1682                 rb_erase(&e->u.rb, &ubi->used);
1683         } else {
1684                 int err;
1685
1686                 err = prot_queue_del(ubi, e->pnum);
1687                 if (err) {
1688                         ubi_err("PEB %d not found", pnum);
1689                         ubi_ro_mode(ubi);
1690                         spin_unlock(&ubi->wl_lock);
1691                         return err;
1692                 }
1693         }
1694
1695         wl_tree_add(e, &ubi->scrub);
1696         spin_unlock(&ubi->wl_lock);
1697
1698         /*
1699          * Technically scrubbing is the same as wear-leveling, so it is done
1700          * by the WL worker.
1701          */
1702         return ensure_wear_leveling(ubi, 0);
1703 }
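
/*
 * Example usage (a sketch of the canonical pattern, not copied from
 * the EBA code): a reader that sees correctable bit-flips hands the
 * PEB over for scrubbing:
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */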
1704
1705 /**
1706  * ubi_wl_flush - flush all pending works.
1707  * @ubi: UBI device description object
1708  * @vol_id: the volume id to flush for
1709  * @lnum: the logical eraseblock number to flush for
1710  *
1711  * This function executes all pending works for a particular volume id /
1712  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1713  * acts as a wildcard for all of the corresponding volume numbers or logical
1714  * eraseblock numbers. It returns zero in case of success and a negative error
1715  * code in case of failure.
1716  */
1717 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1718 {
1719         int err = 0;
1720         int found = 1;
1721
1722         /*
1723          * Keep picking matching works off the pending queue and executing
1724          * them until no matching work is left.
1725          */
1726         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1727                vol_id, lnum, ubi->works_count);
1728
1729         while (found) {
1730                 struct ubi_work *wrk;
1731                 found = 0;
1732
1733                 down_read(&ubi->work_sem);
1734                 spin_lock(&ubi->wl_lock);
1735                 list_for_each_entry(wrk, &ubi->works, list) {
1736                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1737                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1738                                 list_del(&wrk->list);
1739                                 ubi->works_count -= 1;
1740                                 ubi_assert(ubi->works_count >= 0);
1741                                 spin_unlock(&ubi->wl_lock);
1742
1743                                 err = wrk->func(ubi, wrk, 0);
1744                                 if (err) {
1745                                         up_read(&ubi->work_sem);
1746                                         return err;
1747                                 }
1748
1749                                 spin_lock(&ubi->wl_lock);
1750                                 found = 1;
1751                                 break;
1752                         }
1753                 }
1754                 spin_unlock(&ubi->wl_lock);
1755                 up_read(&ubi->work_sem);
1756         }
1757
1758         /*
1759          * Make sure all the works which have been done in parallel are
1760          * finished.
1761          */
1762         down_write(&ubi->work_sem);
1763         up_write(&ubi->work_sem);
1764
1765         return err;
1766 }
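
/*
 * Example usage (a sketch): %UBI_ALL acts as a wildcard, so both of
 * the following are valid:
 *
 *	err = ubi_wl_flush(ubi, vol_id, UBI_ALL);   flush one volume
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);  flush everything
 */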
1767
1768 /**
1769  * tree_destroy - destroy an RB-tree.
1770  * @root: the root of the tree to destroy
1771  */
1772 static void tree_destroy(struct rb_root *root)
1773 {
1774         struct rb_node *rb;
1775         struct ubi_wl_entry *e;
1776
1777         rb = root->rb_node;
1778         while (rb) {
1779                 if (rb->rb_left)
1780                         rb = rb->rb_left;
1781                 else if (rb->rb_right)
1782                         rb = rb->rb_right;
1783                 else {
1784                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1785
1786                         rb = rb_parent(rb);
1787                         if (rb) {
1788                                 if (rb->rb_left == &e->u.rb)
1789                                         rb->rb_left = NULL;
1790                                 else
1791                                         rb->rb_right = NULL;
1792                         }
1793
1794                         kmem_cache_free(ubi_wl_entry_slab, e);
1795                 }
1796         }
1797 }
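
/*
 * The loop above is an iterative post-order walk: it descends to a
 * leaf, frees it, and clears the parent's child pointer so the node
 * is never visited again. A recursive equivalent (a sketch only; the
 * iterative form avoids recursion on the small kernel stack):
 *
 *	static void tree_destroy_rec(struct rb_node *rb)
 *	{
 *		if (!rb)
 *			return;
 *		tree_destroy_rec(rb->rb_left);
 *		tree_destroy_rec(rb->rb_right);
 *		kmem_cache_free(ubi_wl_entry_slab,
 *				rb_entry(rb, struct ubi_wl_entry, u.rb));
 *	}
 */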
1798
1799 /**
1800  * ubi_thread - UBI background thread.
1801  * @u: the UBI device description object pointer
1802  */
1803 int ubi_thread(void *u)
1804 {
1805         int failures = 0;
1806         struct ubi_device *ubi = u;
1807
1808         ubi_msg("background thread \"%s\" started, PID %d",
1809                 ubi->bgt_name, task_pid_nr(current));
1810
1811         set_freezable();
1812         for (;;) {
1813                 int err;
1814
1815                 if (kthread_should_stop())
1816                         break;
1817
1818                 if (try_to_freeze())
1819                         continue;
1820
1821                 spin_lock(&ubi->wl_lock);
1822                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1823                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1824                         set_current_state(TASK_INTERRUPTIBLE);
1825                         spin_unlock(&ubi->wl_lock);
1826                         schedule();
1827                         continue;
1828                 }
1829                 spin_unlock(&ubi->wl_lock);
1830
1831                 err = do_work(ubi);
1832                 if (err) {
1833                         ubi_err("%s: work failed with error code %d",
1834                                 ubi->bgt_name, err);
1835                         if (failures++ > WL_MAX_FAILURES) {
1836                                 /*
1837                                  * Too many failures, disable the thread and
1838                                  * switch to read-only mode.
1839                                  */
1840                                 ubi_msg("%s: %d consecutive failures",
1841                                         ubi->bgt_name, WL_MAX_FAILURES);
1842                                 ubi_ro_mode(ubi);
1843                                 ubi->thread_enabled = 0;
1844                                 continue;
1845                         }
1846                 } else
1847                         failures = 0;
1848
1849                 cond_resched();
1850         }
1851
1852         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1853         return 0;
1854 }
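
/*
 * Life-cycle sketch (simplified; the thread is created and stopped
 * elsewhere in UBI, see build.c):
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s",
 *					 ubi->bgt_name);
 *	...
 *	wake_up_process(ubi->bgt_thread);    start serving @ubi->works
 *	...
 *	kthread_stop(ubi->bgt_thread);       kthread_should_stop() above
 *					     then returns true
 */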
1855
1856 /**
1857  * cancel_pending - cancel all pending works.
1858  * @ubi: UBI device description object
1859  */
1860 static void cancel_pending(struct ubi_device *ubi)
1861 {
1862         while (!list_empty(&ubi->works)) {
1863                 struct ubi_work *wrk;
1864
1865                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1866                 list_del(&wrk->list);
1867                 wrk->func(ubi, wrk, 1);
1868                 ubi->works_count -= 1;
1869                 ubi_assert(ubi->works_count >= 0);
1870         }
1871 }
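
/*
 * Note that the 1 passed as the last argument asks each work function
 * to free its resources and bail out instead of doing the work;
 * compare the 'if (cancel)' path at the top of erase_worker() above.
 */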
1872
1873 /**
1874  * ubi_wl_init - initialize the WL sub-system using attaching information.
1875  * @ubi: UBI device description object
1876  * @ai: attaching information
1877  *
1878  * This function returns zero in case of success, and a negative error code in
1879  * case of failure.
1880  */
1881 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1882 {
1883         int err, i, reserved_pebs, found_pebs = 0;
1884         struct rb_node *rb1, *rb2;
1885         struct ubi_ainf_volume *av;
1886         struct ubi_ainf_peb *aeb, *tmp;
1887         struct ubi_wl_entry *e;
1888
1889         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1890         spin_lock_init(&ubi->wl_lock);
1891         mutex_init(&ubi->move_mutex);
1892         init_rwsem(&ubi->work_sem);
1893         ubi->max_ec = ai->max_ec;
1894         INIT_LIST_HEAD(&ubi->works);
1895 #ifndef __UBOOT__
1896 #ifdef CONFIG_MTD_UBI_FASTMAP
1897         INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1898 #endif
1899 #endif
1900
1901         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1902
1903         err = -ENOMEM;
1904         ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1905         if (!ubi->lookuptbl)
1906                 return err;
1907
1908         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1909                 INIT_LIST_HEAD(&ubi->pq[i]);
1910         ubi->pq_head = 0;
1911
1912         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1913                 cond_resched();
1914
1915                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1916                 if (!e)
1917                         goto out_free;
1918
1919                 e->pnum = aeb->pnum;
1920                 e->ec = aeb->ec;
1921                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1922                 ubi->lookuptbl[e->pnum] = e;
1923                 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1924                         kmem_cache_free(ubi_wl_entry_slab, e);
1925                         goto out_free;
1926                 }
1927
1928                 found_pebs++;
1929         }
1930
1931         ubi->free_count = 0;
1932         list_for_each_entry(aeb, &ai->free, u.list) {
1933                 cond_resched();
1934
1935                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1936                 if (!e)
1937                         goto out_free;
1938
1939                 e->pnum = aeb->pnum;
1940                 e->ec = aeb->ec;
1941                 ubi_assert(e->ec >= 0);
1942                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1943
1944                 wl_tree_add(e, &ubi->free);
1945                 ubi->free_count++;
1946
1947                 ubi->lookuptbl[e->pnum] = e;
1948
1949                 found_pebs++;
1950         }
1951
1952         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1953                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1954                         cond_resched();
1955
1956                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1957                         if (!e)
1958                                 goto out_free;
1959
1960                         e->pnum = aeb->pnum;
1961                         e->ec = aeb->ec;
1962                         ubi->lookuptbl[e->pnum] = e;
1963
1964                         if (!aeb->scrub) {
1965                                 dbg_wl("add PEB %d EC %d to the used tree",
1966                                        e->pnum, e->ec);
1967                                 wl_tree_add(e, &ubi->used);
1968                         } else {
1969                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1970                                        e->pnum, e->ec);
1971                                 wl_tree_add(e, &ubi->scrub);
1972                         }
1973
1974                         found_pebs++;
1975                 }
1976         }
1977
1978         dbg_wl("found %i PEBs", found_pebs);
1979
1980         if (ubi->fm)
1981                 ubi_assert(ubi->good_peb_count ==
1982                            found_pebs + ubi->fm->used_blocks);
1983         else
1984                 ubi_assert(ubi->good_peb_count == found_pebs);
1985
1986         reserved_pebs = WL_RESERVED_PEBS;
1987 #ifdef CONFIG_MTD_UBI_FASTMAP
1988         /* Reserve enough PEBs to store two fastmaps. */
1989         reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1990 #endif
1991
1992         if (ubi->avail_pebs < reserved_pebs) {
1993                 ubi_err("not enough physical eraseblocks (%d, need %d)",
1994                         ubi->avail_pebs, reserved_pebs);
1995                 if (ubi->corr_peb_count)
1996                         ubi_err("%d PEBs are corrupted and not used",
1997                                 ubi->corr_peb_count);
1998                 goto out_free;
1999         }
2000         ubi->avail_pebs -= reserved_pebs;
2001         ubi->rsvd_pebs += reserved_pebs;
2002
2003         /* Schedule wear-leveling if needed */
2004         err = ensure_wear_leveling(ubi, 0);
2005         if (err)
2006                 goto out_free;
2007
2008         return 0;
2009
2010 out_free:
2011         cancel_pending(ubi);
2012         tree_destroy(&ubi->used);
2013         tree_destroy(&ubi->free);
2014         tree_destroy(&ubi->scrub);
2015         kfree(ubi->lookuptbl);
2016         return err;
2017 }
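
/*
 * A worked example for the reserve computation above (hypothetical
 * numbers): if the fastmap occupies two LEBs, then with
 * CONFIG_MTD_UBI_FASTMAP enabled
 *
 *	reserved_pebs = WL_RESERVED_PEBS + (ubi->fm_size / ubi->leb_size) * 2
 *	              = WL_RESERVED_PEBS + 2 * 2
 *
 * i.e. four extra PEBs are set aside for the two on-flash fastmap
 * copies.
 */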
2018
2019 /**
2020  * protection_queue_destroy - destroy the protection queue.
2021  * @ubi: UBI device description object
2022  */
2023 static void protection_queue_destroy(struct ubi_device *ubi)
2024 {
2025         int i;
2026         struct ubi_wl_entry *e, *tmp;
2027
2028         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2029                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2030                         list_del(&e->u.list);
2031                         kmem_cache_free(ubi_wl_entry_slab, e);
2032                 }
2033         }
2034 }
2035
2036 /**
2037  * ubi_wl_close - close the wear-leveling sub-system.
2038  * @ubi: UBI device description object
2039  */
2040 void ubi_wl_close(struct ubi_device *ubi)
2041 {
2042         dbg_wl("close the WL sub-system");
2043         cancel_pending(ubi);
2044         protection_queue_destroy(ubi);
2045         tree_destroy(&ubi->used);
2046         tree_destroy(&ubi->erroneous);
2047         tree_destroy(&ubi->free);
2048         tree_destroy(&ubi->scrub);
2049         kfree(ubi->lookuptbl);
2050 }
2051
2052 /**
2053  * self_check_ec - make sure that the erase counter of a PEB is correct.
2054  * @ubi: UBI device description object
2055  * @pnum: the physical eraseblock number to check
2056  * @ec: the erase counter to check
2057  *
2058  * This function returns zero if the erase counter of physical eraseblock @pnum
2059  * is equivalent to @ec, and a negative error code if not or if an error
2060  * occurred.
2061  */
2062 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2063 {
2064         int err;
2065         long long read_ec;
2066         struct ubi_ec_hdr *ec_hdr;
2067
2068         if (!ubi_dbg_chk_gen(ubi))
2069                 return 0;
2070
2071         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2072         if (!ec_hdr)
2073                 return -ENOMEM;
2074
2075         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2076         if (err && err != UBI_IO_BITFLIPS) {
2077                 /* The header does not have to exist */
2078                 err = 0;
2079                 goto out_free;
2080         }
2081
2082         read_ec = be64_to_cpu(ec_hdr->ec);
2083         if (ec != read_ec && read_ec - ec > 1) {
2084                 ubi_err("self-check failed for PEB %d", pnum);
2085                 ubi_err("read EC is %lld, should be %d", read_ec, ec);
2086                 dump_stack();
2087                 err = 1;
2088         } else
2089                 err = 0;
2090
2091 out_free:
2092         kfree(ec_hdr);
2093         return err;
2094 }
2095
2096 /**
2097  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2098  * @ubi: UBI device description object
2099  * @e: the wear-leveling entry to check
2100  * @root: the root of the tree
2101  *
2102  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2103  * is not.
2104  */
2105 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2106                                  struct ubi_wl_entry *e, struct rb_root *root)
2107 {
2108         if (!ubi_dbg_chk_gen(ubi))
2109                 return 0;
2110
2111         if (in_wl_tree(e, root))
2112                 return 0;
2113
2114         ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p",
2115                 e->pnum, e->ec, root);
2116         dump_stack();
2117         return -EINVAL;
2118 }
2119
2120 /**
2121  * self_check_in_pq - check if wear-leveling entry is in the protection
2122  * queue.
2123  * @ubi: UBI device description object
2124  * @e: the wear-leveling entry to check
2125  *
2126  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2127  */
2128 static int self_check_in_pq(const struct ubi_device *ubi,
2129                             struct ubi_wl_entry *e)
2130 {
2131         struct ubi_wl_entry *p;
2132         int i;
2133
2134         if (!ubi_dbg_chk_gen(ubi))
2135                 return 0;
2136
2137         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
2138                 list_for_each_entry(p, &ubi->pq[i], u.list)
2139                         if (p == e)
2140                                 return 0;
2141
2142         ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
2143                 e->pnum, e->ec);
2144         dump_stack();
2145         return -EINVAL;
2146 }