1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * SPDX-License-Identifier:     GPL-2.0+
5  *
6  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
7  */
8
9 /*
10  * UBI wear-leveling unit.
11  *
12  * This unit is responsible for wear-leveling. It works in terms of physical
13  * eraseblocks and erase counters and knows nothing about logical eraseblocks,
14  * volumes, etc. From this unit's perspective all physical eraseblocks are of
15  * two types - used and free. Used physical eraseblocks are those that were
16  * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
17  * those that were put by the 'ubi_wl_put_peb()' function.
18  *
19  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
20  * counter header. The rest of the physical eraseblock contains only 0xFF bytes.
21  *
22  * When physical eraseblocks are returned to the WL unit by means of the
23  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
24  * done asynchronously in context of the per-UBI device background thread,
25  * which is also managed by the WL unit.
26  *
27  * The wear-leveling is ensured by means of moving the contents of used
28  * physical eraseblocks with low erase counter to free physical eraseblocks
29  * with high erase counter.
30  *
31  * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
32  * an "optimal" physical eraseblock. For example, when it is known that the
33  * physical eraseblock will be "put" soon because it contains short-term data,
34  * the WL unit may pick a free physical eraseblock with low erase counter, and
35  * so forth.
36  *
37  * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
38  *
39  * This unit is also responsible for scrubbing. If a bit-flip is detected in a
40  * physical eraseblock, it has to be moved. Technically this is the same as
41  * moving it for wear-leveling reasons.
42  *
43  * As was said, for the WL unit all physical eraseblocks are either "free"
44  * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
45  * eraseblocks are kept in a set of different RB-trees: @wl->used,
46  * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
47  *
48  * Note, in this implementation, we keep a small in-RAM object for each physical
49  * eraseblock. This is surely not a scalable solution. But it appears to be good
50  * enough for moderately large flashes and it is simple. In future, one may
51  * re-work this unit and make it more scalable.
52  *
53  * At the moment this unit does not utilize the sequence number, which was
54  * introduced relatively recently. But it would be wise to do this because the
55  * sequence number of a logical eraseblock characterizes how old it is. For
56  * example, when we move a PEB with a low erase counter and we need to pick the
57  * target PEB, we would pick a PEB with the highest EC if our PEB is "old" and
58  * a target PEB with an average EC if our PEB is not very "old". This is room
59  * for future re-work of the WL unit.
60  *
61  * FIXME: looks too complex, should be simplified (later).
62  */
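
/*
 * In short, the core decision this unit makes (a sketch of the check in
 * 'wear_leveling_worker()' below, not additional code):
 *
 *	e1 = least worn-out used PEB, e2 = suitably worn-out free PEB;
 *	if (e2->ec - e1->ec >= UBI_WL_THRESHOLD)
 *		copy the contents of e1 to e2, then erase e1;
 */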
63
64 #ifdef UBI_LINUX
65 #include <linux/slab.h>
66 #include <linux/crc32.h>
67 #include <linux/freezer.h>
68 #include <linux/kthread.h>
69 #endif
70
71 #include <ubi_uboot.h>
72 #include "ubi.h"
73
74 /* Number of physical eraseblocks reserved for wear-leveling purposes */
75 #define WL_RESERVED_PEBS 1
76
77 /*
78  * For how many erase cycles short term, unknown, and long term physical
79  * eraseblocks are protected.
80  */
81 #define ST_PROTECTION 16
82 #define U_PROTECTION  10
83 #define LT_PROTECTION 4
84
85 /*
86  * Maximum difference between two erase counters. If this threshold is
87  * exceeded, the WL unit starts moving data from used physical eraseblocks with
88  * low erase counter to free physical eraseblocks with high erase counter.
89  */
90 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
91
92 /*
93  * When a physical eraseblock is moved, the WL unit has to pick the target
94  * physical eraseblock to move to. The simplest way would be just to pick the
95  * one with the highest erase counter. But in certain workloads this could lead
96  * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
97  * situation where the picked physical eraseblock is constantly erased after
98  * the data is written to it. So, we have a constant which limits the highest
99  * erase counter of the free physical eraseblock to pick. Namely, the WL unit
100  * does not pick eraseblocks with an erase counter greater than the lowest
101  * erase counter plus %WL_FREE_MAX_DIFF.
102  */
103 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
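
/*
 * Illustrative arithmetic (assuming CONFIG_MTD_UBI_WL_THRESHOLD is 4096):
 * WL_FREE_MAX_DIFF is then 8192, so if the least worn-out free PEB has an
 * erase counter of 100, no free PEB with an erase counter of 8292 or more
 * will be picked as a wear-leveling target.
 */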
104
105 /*
106  * Maximum number of consecutive background thread failures which is enough to
107  * switch to read-only mode.
108  */
109 #define WL_MAX_FAILURES 32
110
111 /**
112  * struct ubi_wl_prot_entry - PEB protection entry.
113  * @rb_pnum: link in the @wl->prot.pnum RB-tree
114  * @rb_aec: link in the @wl->prot.aec RB-tree
115  * @abs_ec: the absolute erase counter value when the protection ends
116  * @e: the wear-leveling entry of the physical eraseblock under protection
117  *
118  * When the WL unit returns a physical eraseblock, the physical eraseblock is
119  * protected from being moved for some "time". For this reason, the physical
120  * eraseblock is not directly moved from the @wl->free tree to the @wl->used
121  * tree. There is one more tree in between where this physical eraseblock is
122  * temporarily stored (@wl->prot).
123  *
124  * All this protection stuff is needed because:
125  *  o we don't want to move physical eraseblocks just after we have given them
126  *    to the user; instead, we first want to let users fill them up with data;
127  *
128  *  o there is a chance that the user will put the physical eraseblock very
129  *    soon, so it makes sense not to move it for some time, but wait; this is
130  *    especially important in the case of "short term" physical eraseblocks.
131  *
132  * Physical eraseblocks stay protected only for a limited time. But the "time"
133  * is measured in erase cycles in this case. This is implemented with the help
134  * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
135  * value, the physical eraseblocks are moved from the protection trees
136  * (@wl->prot.*) to the @wl->used tree.
137  *
138  * Protected physical eraseblocks are searched by physical eraseblock number
139  * (when they are put) and by the absolute erase counter (to check if it is
140  * time to move them to the @wl->used tree). So there are actually 2 RB-trees
141  * storing the protected physical eraseblocks: @wl->prot.pnum and
142  * @wl->prot.aec. They are referred to as the "protection" trees. The
143  * first one is indexed by the physical eraseblock number. The second one is
144  * indexed by the absolute erase counter. Both trees store
145  * &struct ubi_wl_prot_entry objects.
146  *
147  * Each physical eraseblock has 2 main states: free and used. The former state
148  * corresponds to the @wl->free tree. The latter state is split up into several
149  * sub-states:
150  * o the WL movement is allowed (@wl->used tree);
151  * o the WL movement is temporarily prohibited (@wl->prot.pnum and
152  * @wl->prot.aec trees);
153  * o scrubbing is needed (@wl->scrub tree).
154  *
155  * Depending on the sub-state, wear-leveling entries of the used physical
156  * eraseblocks may be kept in one of those trees.
157  */
158 struct ubi_wl_prot_entry {
159         struct rb_node rb_pnum;
160         struct rb_node rb_aec;
161         unsigned long long abs_ec;
162         struct ubi_wl_entry *e;
163 };
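
/*
 * A worked example of the protection lifetime (illustrative): suppose
 * @wl->abs_ec is 1000 when a "short term" PEB is handed out. Then
 * 'prot_tree_add()' records pe->abs_ec = 1000 + %ST_PROTECTION = 1016, and
 * after 16 further erase operations anywhere on the device,
 * 'check_protection_over()' moves the PEB to the @wl->used tree.
 */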
164
165 /**
166  * struct ubi_work - UBI work description data structure.
167  * @list: a link in the list of pending works
168  * @func: worker function
169  * @priv: private data of the worker function
170  *
171  * @e: physical eraseblock to erase
172  * @torture: if the physical eraseblock has to be tortured
173  *
174  * The @func pointer points to the worker function. If the @cancel argument is
175  * not zero, the worker has to free the resources and exit immediately. The
176  * worker has to return zero in case of success and a negative error code in
177  * case of failure.
178  */
179 struct ubi_work {
180         struct list_head list;
181         int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
182         /* The below fields are only relevant to erasure works */
183         struct ubi_wl_entry *e;
184         int torture;
185 };
186
187 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
188 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
189 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
190                                      struct rb_root *root);
191 #else
192 #define paranoid_check_ec(ubi, pnum, ec) 0
193 #define paranoid_check_in_wl_tree(e, root)
194 #endif
195
196 /**
197  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
198  * @e: the wear-leveling entry to add
199  * @root: the root of the tree
200  *
201  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
202  * the @ubi->used and @ubi->free RB-trees.
203  */
204 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
205 {
206         struct rb_node **p, *parent = NULL;
207
208         p = &root->rb_node;
209         while (*p) {
210                 struct ubi_wl_entry *e1;
211
212                 parent = *p;
213                 e1 = rb_entry(parent, struct ubi_wl_entry, rb);
214
215                 if (e->ec < e1->ec)
216                         p = &(*p)->rb_left;
217                 else if (e->ec > e1->ec)
218                         p = &(*p)->rb_right;
219                 else {
220                         ubi_assert(e->pnum != e1->pnum);
221                         if (e->pnum < e1->pnum)
222                                 p = &(*p)->rb_left;
223                         else
224                                 p = &(*p)->rb_right;
225                 }
226         }
227
228         rb_link_node(&e->rb, parent, p);
229         rb_insert_color(&e->rb, root);
230 }
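
/*
 * E.g. with the (erase counter, PEB number) ordering above, entries compare
 * as (EC 10, PEB 5) < (EC 10, PEB 7) < (EC 12, PEB 1).
 */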
231
232 /**
233  * do_work - do one pending work.
234  * @ubi: UBI device description object
235  *
236  * This function returns zero in case of success and a negative error code in
237  * case of failure.
238  */
239 static int do_work(struct ubi_device *ubi)
240 {
241         int err;
242         struct ubi_work *wrk;
243
244         cond_resched();
245
246         /*
247          * @ubi->work_sem is used to synchronize with the workers. Workers take
248          * it in read mode, so many of them may be doing works at a time. But
249          * the queue flush code has to be sure the whole queue of works is
250          * done, and it takes the mutex in write mode.
251          */
252         down_read(&ubi->work_sem);
253         spin_lock(&ubi->wl_lock);
254         if (list_empty(&ubi->works)) {
255                 spin_unlock(&ubi->wl_lock);
256                 up_read(&ubi->work_sem);
257                 return 0;
258         }
259
260         wrk = list_entry(ubi->works.next, struct ubi_work, list);
261         list_del(&wrk->list);
262         ubi->works_count -= 1;
263         ubi_assert(ubi->works_count >= 0);
264         spin_unlock(&ubi->wl_lock);
265
266         /*
267          * Call the worker function. Do not touch the work structure
268          * after this call as it will have been freed or reused by that
269          * time by the worker function.
270          */
271         err = wrk->func(ubi, wrk, 0);
272         if (err)
273                 ubi_err("work failed with error code %d", err);
274         up_read(&ubi->work_sem);
275
276         return err;
277 }
278
279 /**
280  * produce_free_peb - produce a free physical eraseblock.
281  * @ubi: UBI device description object
282  *
283  * This function tries to make a free PEB by means of synchronous execution of
284  * pending works. This may be needed if, for example, the background thread is
285  * disabled. Returns zero in case of success and a negative error code in case
286  * of failure.
287  */
288 static int produce_free_peb(struct ubi_device *ubi)
289 {
290         int err;
291
292         spin_lock(&ubi->wl_lock);
293         while (!ubi->free.rb_node) {
294                 spin_unlock(&ubi->wl_lock);
295
296                 dbg_wl("do one work synchronously");
297                 err = do_work(ubi);
298                 if (err)
299                         return err;
300
301                 spin_lock(&ubi->wl_lock);
302         }
303         spin_unlock(&ubi->wl_lock);
304
305         return 0;
306 }
307
308 /**
309  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
310  * @e: the wear-leveling entry to check
311  * @root: the root of the tree
312  *
313  * This function returns non-zero if @e is in the @root RB-tree and zero if it
314  * is not.
315  */
316 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
317 {
318         struct rb_node *p;
319
320         p = root->rb_node;
321         while (p) {
322                 struct ubi_wl_entry *e1;
323
324                 e1 = rb_entry(p, struct ubi_wl_entry, rb);
325
326                 if (e->pnum == e1->pnum) {
327                         ubi_assert(e == e1);
328                         return 1;
329                 }
330
331                 if (e->ec < e1->ec)
332                         p = p->rb_left;
333                 else if (e->ec > e1->ec)
334                         p = p->rb_right;
335                 else {
336                         ubi_assert(e->pnum != e1->pnum);
337                         if (e->pnum < e1->pnum)
338                                 p = p->rb_left;
339                         else
340                                 p = p->rb_right;
341                 }
342         }
343
344         return 0;
345 }
346
347 /**
348  * prot_tree_add - add physical eraseblock to protection trees.
349  * @ubi: UBI device description object
350  * @e: the physical eraseblock to add
351  * @pe: protection entry object to use
352  * @abs_ec: absolute erase counter value when this physical eraseblock has
353  * to be removed from the protection trees.
354  *
355  * @wl->lock has to be locked.
356  */
357 static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
358                           struct ubi_wl_prot_entry *pe, int abs_ec)
359 {
360         struct rb_node **p, *parent = NULL;
361         struct ubi_wl_prot_entry *pe1;
362
363         pe->e = e;
364         pe->abs_ec = ubi->abs_ec + abs_ec;
365
366         p = &ubi->prot.pnum.rb_node;
367         while (*p) {
368                 parent = *p;
369                 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
370
371                 if (e->pnum < pe1->e->pnum)
372                         p = &(*p)->rb_left;
373                 else
374                         p = &(*p)->rb_right;
375         }
376         rb_link_node(&pe->rb_pnum, parent, p);
377         rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
378
379         p = &ubi->prot.aec.rb_node;
380         parent = NULL;
381         while (*p) {
382                 parent = *p;
383                 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
384
385                 if (pe->abs_ec < pe1->abs_ec)
386                         p = &(*p)->rb_left;
387                 else
388                         p = &(*p)->rb_right;
389         }
390         rb_link_node(&pe->rb_aec, parent, p);
391         rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
392 }
393
394 /**
395  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
396  * @root: the RB-tree where to look for
397  * @max: highest possible erase counter
398  *
399  * This function looks for a wear-leveling entry with an erase counter closest
400  * to @max and less than @max.
401  */
402 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
403 {
404         struct rb_node *p;
405         struct ubi_wl_entry *e;
406
407         e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
408         max += e->ec;
409
410         p = root->rb_node;
411         while (p) {
412                 struct ubi_wl_entry *e1;
413
414                 e1 = rb_entry(p, struct ubi_wl_entry, rb);
415                 if (e1->ec >= max)
416                         p = p->rb_left;
417                 else {
418                         p = p->rb_right;
419                         e = e1;
420                 }
421         }
422
423         return e;
424 }
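
/*
 * A worked example (illustrative): if the free tree holds entries with erase
 * counters {100, 150, 200, 300} and @max is 120, the threshold becomes
 * 100 + 120 = 220, and 'find_wl_entry()' returns the EC 200 entry - the most
 * worn-out free PEB still below the limit.
 */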
425
426 /**
427  * ubi_wl_get_peb - get a physical eraseblock.
428  * @ubi: UBI device description object
429  * @dtype: type of data which will be stored in this physical eraseblock
430  *
431  * This function returns a physical eraseblock in case of success and a
432  * negative error code in case of failure. Might sleep.
433  */
434 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
435 {
436         int err, protect, medium_ec;
437         struct ubi_wl_entry *e, *first, *last;
438         struct ubi_wl_prot_entry *pe;
439
440         ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
441                    dtype == UBI_UNKNOWN);
442
443         pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
444         if (!pe)
445                 return -ENOMEM;
446
447 retry:
448         spin_lock(&ubi->wl_lock);
449         if (!ubi->free.rb_node) {
450                 if (ubi->works_count == 0) {
451                         ubi_assert(list_empty(&ubi->works));
452                         ubi_err("no free eraseblocks");
453                         spin_unlock(&ubi->wl_lock);
454                         kfree(pe);
455                         return -ENOSPC;
456                 }
457                 spin_unlock(&ubi->wl_lock);
458
459                 err = produce_free_peb(ubi);
460                 if (err < 0) {
461                         kfree(pe);
462                         return err;
463                 }
464                 goto retry;
465         }
466
467         switch (dtype) {
468                 case UBI_LONGTERM:
469                         /*
470                          * For long term data we pick a physical eraseblock
471                          * with high erase counter. But the highest erase
472                          * counter we can pick is bounded by the lowest
473                          * erase counter plus %WL_FREE_MAX_DIFF.
474                          */
475                         e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
476                         protect = LT_PROTECTION;
477                         break;
478                 case UBI_UNKNOWN:
479                         /*
480                          * For unknown data we pick a physical eraseblock with
481                          * medium erase counter. But we must not pick a
482                          * physical eraseblock with an erase counter greater
483                          * than or equal to the lowest erase counter plus
484                          * %WL_FREE_MAX_DIFF.
485                          */
486                         first = rb_entry(rb_first(&ubi->free),
487                                          struct ubi_wl_entry, rb);
488                         last = rb_entry(rb_last(&ubi->free),
489                                         struct ubi_wl_entry, rb);
490
491                         if (last->ec - first->ec < WL_FREE_MAX_DIFF)
492                                 e = rb_entry(ubi->free.rb_node,
493                                                 struct ubi_wl_entry, rb);
494                         else {
495                                 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
496                                 e = find_wl_entry(&ubi->free, medium_ec);
497                         }
498                         protect = U_PROTECTION;
499                         break;
500                 case UBI_SHORTTERM:
501                         /*
502                          * For short term data we pick a physical eraseblock
503                          * with the lowest erase counter as we expect it will
504                          * be erased soon.
505                          */
506                         e = rb_entry(rb_first(&ubi->free),
507                                      struct ubi_wl_entry, rb);
508                         protect = ST_PROTECTION;
509                         break;
510                 default:
511                         protect = 0;
512                         e = NULL;
513                         BUG();
514         }
515
516         /*
517          * Move the physical eraseblock to the protection trees where it will
518          * be protected from being moved for some time.
519          */
520         paranoid_check_in_wl_tree(e, &ubi->free);
521         rb_erase(&e->rb, &ubi->free);
522         prot_tree_add(ubi, e, pe, protect);
523
524         dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
525         spin_unlock(&ubi->wl_lock);
526
527         return e->pnum;
528 }
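
/*
 * Illustrative use of 'ubi_wl_get_peb()' (a sketch, not code from this
 * driver - in practice the caller is the EBA unit):
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_LONGTERM);
 *	if (pnum < 0)
 *		return pnum;	- no free PEBs, or an I/O problem
 *	... write data to PEB pnum, later return it via 'ubi_wl_put_peb()' ...
 */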
529
530 /**
531  * prot_tree_del - remove a physical eraseblock from the protection trees
532  * @ubi: UBI device description object
533  * @pnum: the physical eraseblock to remove
534  *
535  * This function removes PEB @pnum from the protection trees and returns zero
536  * in case of success and %-ENODEV if the PEB was not found in the protection
537  * trees.
538  */
539 static int prot_tree_del(struct ubi_device *ubi, int pnum)
540 {
541         struct rb_node *p;
542         struct ubi_wl_prot_entry *pe = NULL;
543
544         p = ubi->prot.pnum.rb_node;
545         while (p) {
546
547                 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
548
549                 if (pnum == pe->e->pnum)
550                         goto found;
551
552                 if (pnum < pe->e->pnum)
553                         p = p->rb_left;
554                 else
555                         p = p->rb_right;
556         }
557
558         return -ENODEV;
559
560 found:
561         ubi_assert(pe->e->pnum == pnum);
562         rb_erase(&pe->rb_aec, &ubi->prot.aec);
563         rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
564         kfree(pe);
565         return 0;
566 }
567
568 /**
569  * sync_erase - synchronously erase a physical eraseblock.
570  * @ubi: UBI device description object
571  * @e: the physical eraseblock to erase
572  * @torture: if the physical eraseblock has to be tortured
573  *
574  * This function returns zero in case of success and a negative error code in
575  * case of failure.
576  */
577 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
578 {
579         int err;
580         struct ubi_ec_hdr *ec_hdr;
581         unsigned long long ec = e->ec;
582
583         dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
584
585         err = paranoid_check_ec(ubi, e->pnum, e->ec);
586         if (err > 0)
587                 return -EINVAL;
588
589         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
590         if (!ec_hdr)
591                 return -ENOMEM;
592
593         err = ubi_io_sync_erase(ubi, e->pnum, torture);
594         if (err < 0)
595                 goto out_free;
596
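        /*
         * On success, 'ubi_io_sync_erase()' returns the number of erase
         * operations performed (more than one when torturing), so account
         * for all of them in the erase counter.
         */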
597         ec += err;
598         if (ec > UBI_MAX_ERASECOUNTER) {
599                 /*
600                  * Erase counter overflow. Upgrade UBI and use 64-bit
601                  * erase counters internally.
602                  */
603                 ubi_err("erase counter overflow at PEB %d, EC %llu",
604                         e->pnum, ec);
605                 err = -EINVAL;
606                 goto out_free;
607         }
608
609         dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
610
611         ec_hdr->ec = cpu_to_be64(ec);
612
613         err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
614         if (err)
615                 goto out_free;
616
617         e->ec = ec;
618         spin_lock(&ubi->wl_lock);
619         if (e->ec > ubi->max_ec)
620                 ubi->max_ec = e->ec;
621         spin_unlock(&ubi->wl_lock);
622
623 out_free:
624         kfree(ec_hdr);
625         return err;
626 }
627
628 /**
629  * check_protection_over - check if it is time to stop protecting some
630  * physical eraseblocks.
631  * @ubi: UBI device description object
632  *
633  * This function is called after each erase operation, when the absolute erase
634  * counter is incremented, to check if some physical eraseblocks no longer
635  * have to be protected. These physical eraseblocks are moved from the
636  * protection trees to the used tree.
637  */
638 static void check_protection_over(struct ubi_device *ubi)
639 {
640         struct ubi_wl_prot_entry *pe;
641
642         /*
643          * There may be several protected physical eraseblocks to remove,
644          * process them all.
645          */
646         while (1) {
647                 spin_lock(&ubi->wl_lock);
648                 if (!ubi->prot.aec.rb_node) {
649                         spin_unlock(&ubi->wl_lock);
650                         break;
651                 }
652
653                 pe = rb_entry(rb_first(&ubi->prot.aec),
654                               struct ubi_wl_prot_entry, rb_aec);
655
656                 if (pe->abs_ec > ubi->abs_ec) {
657                         spin_unlock(&ubi->wl_lock);
658                         break;
659                 }
660
661                 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
662                        pe->e->pnum, ubi->abs_ec, pe->abs_ec);
663                 rb_erase(&pe->rb_aec, &ubi->prot.aec);
664                 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
665                 wl_tree_add(pe->e, &ubi->used);
666                 spin_unlock(&ubi->wl_lock);
667
668                 kfree(pe);
669                 cond_resched();
670         }
671 }
672
673 /**
674  * schedule_ubi_work - schedule a work.
675  * @ubi: UBI device description object
676  * @wrk: the work to schedule
677  *
678  * This function enqueues a work defined by @wrk to the tail of the pending
679  * works list.
680  */
681 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
682 {
683         spin_lock(&ubi->wl_lock);
684         list_add_tail(&wrk->list, &ubi->works);
685         ubi_assert(ubi->works_count >= 0);
686         ubi->works_count += 1;
687
688         /*
689          * U-Boot special: We have no bgt_thread in U-Boot!
690          * So just call do_work() here directly.
691          */
692         do_work(ubi);
693
694         spin_unlock(&ubi->wl_lock);
695 }
696
697 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
698                         int cancel);
699
700 /**
701  * schedule_erase - schedule an erase work.
702  * @ubi: UBI device description object
703  * @e: the WL entry of the physical eraseblock to erase
704  * @torture: if the physical eraseblock has to be tortured
705  *
706  * This function returns zero in case of success and %-ENOMEM in case of
707  * failure.
708  */
709 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
710                           int torture)
711 {
712         struct ubi_work *wl_wrk;
713
714         dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
715                e->pnum, e->ec, torture);
716
717         wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
718         if (!wl_wrk)
719                 return -ENOMEM;
720
721         wl_wrk->func = &erase_worker;
722         wl_wrk->e = e;
723         wl_wrk->torture = torture;
724
725         schedule_ubi_work(ubi, wl_wrk);
726         return 0;
727 }
728
729 /**
730  * wear_leveling_worker - wear-leveling worker function.
731  * @ubi: UBI device description object
732  * @wrk: the work object
733  * @cancel: non-zero if the worker has to free memory and exit
734  *
735  * This function copies a more worn out physical eraseblock to a less worn out
736  * one. Returns zero in case of success and a negative error code in case of
737  * failure.
738  */
739 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
740                                 int cancel)
741 {
742         int err, put = 0, scrubbing = 0, protect = 0;
743         struct ubi_wl_prot_entry *uninitialized_var(pe);
744         struct ubi_wl_entry *e1, *e2;
745         struct ubi_vid_hdr *vid_hdr;
746
747         kfree(wrk);
748
749         if (cancel)
750                 return 0;
751
752         vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
753         if (!vid_hdr)
754                 return -ENOMEM;
755
756         mutex_lock(&ubi->move_mutex);
757         spin_lock(&ubi->wl_lock);
758         ubi_assert(!ubi->move_from && !ubi->move_to);
759         ubi_assert(!ubi->move_to_put);
760
761         if (!ubi->free.rb_node ||
762             (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
763                 /*
764                  * No free physical eraseblocks? Well, they must be waiting in
765                  * the queue to be erased. Cancel movement - it will be
766                  * triggered again when a free physical eraseblock appears.
767                  *
768                  * No used physical eraseblocks? They must be temporarily
769                  * protected from being moved. They will be moved to the
770                  * @ubi->used tree later and the wear-leveling will be
771                  * triggered again.
772                  */
773                 dbg_wl("cancel WL, a list is empty: free %d, used %d",
774                        !ubi->free.rb_node, !ubi->used.rb_node);
775                 goto out_cancel;
776         }
777
778         if (!ubi->scrub.rb_node) {
779                 /*
780                  * Now pick the least worn-out used physical eraseblock and a
781                  * highly worn-out free physical eraseblock. If the erase
782                  * counters differ enough, start wear-leveling.
783                  */
784                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
785                 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
786
787                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
788                         dbg_wl("no WL needed: min used EC %d, max free EC %d",
789                                e1->ec, e2->ec);
790                         goto out_cancel;
791                 }
792                 paranoid_check_in_wl_tree(e1, &ubi->used);
793                 rb_erase(&e1->rb, &ubi->used);
794                 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
795                        e1->pnum, e1->ec, e2->pnum, e2->ec);
796         } else {
797                 /* Perform scrubbing */
798                 scrubbing = 1;
799                 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
800                 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
801                 paranoid_check_in_wl_tree(e1, &ubi->scrub);
802                 rb_erase(&e1->rb, &ubi->scrub);
803                 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
804         }
805
806         paranoid_check_in_wl_tree(e2, &ubi->free);
807         rb_erase(&e2->rb, &ubi->free);
808         ubi->move_from = e1;
809         ubi->move_to = e2;
810         spin_unlock(&ubi->wl_lock);
811
812         /*
813          * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
814          * We so far do not know which logical eraseblock our physical
815          * eraseblock (@e1) belongs to. We have to read the volume identifier
816          * header first.
817          *
818          * Note, we are protected from this PEB being unmapped and erased. The
819          * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
820          * which is being moved was unmapped.
821          */
822
823         err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
824         if (err && err != UBI_IO_BITFLIPS) {
825                 if (err == UBI_IO_PEB_FREE) {
826                         /*
827                          * We are trying to move a PEB without a VID header.
828                          * UBI always writes VID headers shortly after the
829                          * PEB was given, so here we have a situation where
830                          * it did not have a chance to write one because it
831                          * was preempted. Just re-schedule the work, so that
832                          * next time it will likely have the VID header in place.
833                          */
834                         dbg_wl("PEB %d has no VID header", e1->pnum);
835                         goto out_not_moved;
836                 }
837
838                 ubi_err("error %d while reading VID header from PEB %d",
839                         err, e1->pnum);
840                 if (err > 0)
841                         err = -EIO;
842                 goto out_error;
843         }
844
845         err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
846         if (err) {
847
848                 if (err < 0)
849                         goto out_error;
850                 if (err == 1)
851                         goto out_not_moved;
852
853                 /*
854                  * For some reason the LEB was not moved - it might be because
855                  * the volume is being deleted. We should prevent this PEB from
856                  * being selected for wear-leveling movement for some "time",
857                  * so put it into the protection tree.
858                  */
859
860                 dbg_wl("cancelled moving PEB %d", e1->pnum);
861                 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
862                 if (!pe) {
863                         err = -ENOMEM;
864                         goto out_error;
865                 }
866
867                 protect = 1;
868         }
869
870         ubi_free_vid_hdr(ubi, vid_hdr);
871         spin_lock(&ubi->wl_lock);
872         if (protect)
873                 prot_tree_add(ubi, e1, pe, protect);
874         if (!ubi->move_to_put)
875                 wl_tree_add(e2, &ubi->used);
876         else
877                 put = 1;
878         ubi->move_from = ubi->move_to = NULL;
879         ubi->move_to_put = ubi->wl_scheduled = 0;
880         spin_unlock(&ubi->wl_lock);
881
882         if (put) {
883                 /*
884                  * Well, the target PEB was put meanwhile, schedule it for
885                  * erasure.
886                  */
887                 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
888                 err = schedule_erase(ubi, e2, 0);
889                 if (err)
890                         goto out_error;
891         }
892
893         if (!protect) {
894                 err = schedule_erase(ubi, e1, 0);
895                 if (err)
896                         goto out_error;
897         }
898
899
900         dbg_wl("done");
901         mutex_unlock(&ubi->move_mutex);
902         return 0;
903
904         /*
905          * For some reason the LEB was not moved - it might be an error or
906          * something else. @e1 was not changed, so put it back. @e2 might
907          * be changed, schedule it for erasure.
908          */
909 out_not_moved:
910         ubi_free_vid_hdr(ubi, vid_hdr);
911         spin_lock(&ubi->wl_lock);
912         if (scrubbing)
913                 wl_tree_add(e1, &ubi->scrub);
914         else
915                 wl_tree_add(e1, &ubi->used);
916         ubi->move_from = ubi->move_to = NULL;
917         ubi->move_to_put = ubi->wl_scheduled = 0;
918         spin_unlock(&ubi->wl_lock);
919
920         err = schedule_erase(ubi, e2, 0);
921         if (err)
922                 goto out_error;
923
924         mutex_unlock(&ubi->move_mutex);
925         return 0;
926
927 out_error:
928         ubi_err("error %d while moving PEB %d to PEB %d",
929                 err, e1->pnum, e2->pnum);
930
931         ubi_free_vid_hdr(ubi, vid_hdr);
932         spin_lock(&ubi->wl_lock);
933         ubi->move_from = ubi->move_to = NULL;
934         ubi->move_to_put = ubi->wl_scheduled = 0;
935         spin_unlock(&ubi->wl_lock);
936
937         kmem_cache_free(ubi_wl_entry_slab, e1);
938         kmem_cache_free(ubi_wl_entry_slab, e2);
939         ubi_ro_mode(ubi);
940
941         mutex_unlock(&ubi->move_mutex);
942         return err;
943
944 out_cancel:
945         ubi->wl_scheduled = 0;
946         spin_unlock(&ubi->wl_lock);
947         mutex_unlock(&ubi->move_mutex);
948         ubi_free_vid_hdr(ubi, vid_hdr);
949         return 0;
950 }
951
952 /**
953  * ensure_wear_leveling - schedule wear-leveling if it is needed.
954  * @ubi: UBI device description object
955  *
956  * This function checks if it is time to start wear-leveling and schedules it
957  * if so. This function returns zero in case of success and a negative error
958  * code in case of failure.
959  */
960 static int ensure_wear_leveling(struct ubi_device *ubi)
961 {
962         int err = 0;
963         struct ubi_wl_entry *e1;
964         struct ubi_wl_entry *e2;
965         struct ubi_work *wrk;
966
967         spin_lock(&ubi->wl_lock);
968         if (ubi->wl_scheduled)
969                 /* Wear-leveling is already in the work queue */
970                 goto out_unlock;
971
972         /*
973          * If the ubi->scrub tree is not empty, scrubbing is needed, and the
974          * WL worker has to be scheduled anyway.
975          */
976         if (!ubi->scrub.rb_node) {
977                 if (!ubi->used.rb_node || !ubi->free.rb_node)
978                         /* No physical eraseblocks - no deal */
979                         goto out_unlock;
980
981                 /*
982                  * We schedule wear-leveling only if the difference between the
983                  * lowest erase counter of used physical eraseblocks and a high
984                  * erase counter of free physical eraseblocks is greater than
985                  * %UBI_WL_THRESHOLD.
986                  */
987                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
988                 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
989
990                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
991                         goto out_unlock;
992                 dbg_wl("schedule wear-leveling");
993         } else
994                 dbg_wl("schedule scrubbing");
995
996         ubi->wl_scheduled = 1;
997         spin_unlock(&ubi->wl_lock);
998
999         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1000         if (!wrk) {
1001                 err = -ENOMEM;
1002                 goto out_cancel;
1003         }
1004
1005         wrk->func = &wear_leveling_worker;
1006         schedule_ubi_work(ubi, wrk);
1007         return err;
1008
1009 out_cancel:
1010         spin_lock(&ubi->wl_lock);
1011         ubi->wl_scheduled = 0;
1012 out_unlock:
1013         spin_unlock(&ubi->wl_lock);
1014         return err;
1015 }
1016
1017 /**
1018  * erase_worker - physical eraseblock erase worker function.
1019  * @ubi: UBI device description object
1020  * @wl_wrk: the work object
1021  * @cancel: non-zero if the worker has to free memory and exit
1022  *
1023  * This function erases a physical eraseblock and performs torture testing if
1024  * needed. It also takes care of marking the physical eraseblock bad if
1025  * needed. Returns zero in case of success and a negative error code in case of
1026  * failure.
1027  */
1028 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1029                         int cancel)
1030 {
1031         struct ubi_wl_entry *e = wl_wrk->e;
1032         int pnum = e->pnum, err, need;
1033
1034         if (cancel) {
1035                 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1036                 kfree(wl_wrk);
1037                 kmem_cache_free(ubi_wl_entry_slab, e);
1038                 return 0;
1039         }
1040
1041         dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1042
1043         err = sync_erase(ubi, e, wl_wrk->torture);
1044         if (!err) {
1045                 /* Fine, we've erased it successfully */
1046                 kfree(wl_wrk);
1047
1048                 spin_lock(&ubi->wl_lock);
1049                 ubi->abs_ec += 1;
1050                 wl_tree_add(e, &ubi->free);
1051                 spin_unlock(&ubi->wl_lock);
1052
1053                 /*
1054                  * One more erase operation has happened, take care of
1055                  * protected physical eraseblocks.
1056                  */
1057                 check_protection_over(ubi);
1058
1059                 /* And take care of wear-leveling */
1060                 err = ensure_wear_leveling(ubi);
1061                 return err;
1062         }
1063
1064         ubi_err("failed to erase PEB %d, error %d", pnum, err);
1065         kfree(wl_wrk);
1066         kmem_cache_free(ubi_wl_entry_slab, e);
1067
1068         if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1069             err == -EBUSY) {
1070                 int err1;
1071
1072                 /* Re-schedule the PEB for erasure */
1073                 err1 = schedule_erase(ubi, e, 0);
1074                 if (err1) {
1075                         err = err1;
1076                         goto out_ro;
1077                 }
1078                 return err;
1079         } else if (err != -EIO) {
1080                 /*
1081                  * If this is not %-EIO, we have no idea what to do. Scheduling
1082                  * this physical eraseblock for erasure again would cause
1083                  * errors again and again. Well, let's switch to RO mode.
1084                  */
1085                 goto out_ro;
1086         }
1087
1088         /* It is %-EIO, the PEB went bad */
1089
1090         if (!ubi->bad_allowed) {
1091                 ubi_err("bad physical eraseblock %d detected", pnum);
1092                 goto out_ro;
1093         }
1094
1095         spin_lock(&ubi->volumes_lock);
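        /*
         * Top up the reserve of PEBs kept for bad block handling; the "+ 1"
         * accounts for the reserved slot which this newly found bad PEB is
         * about to consume.
         */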
1096         need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1097         if (need > 0) {
1098                 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1099                 ubi->avail_pebs -= need;
1100                 ubi->rsvd_pebs += need;
1101                 ubi->beb_rsvd_pebs += need;
1102                 if (need > 0)
1103                         ubi_msg("reserve %d more PEBs", need);
1104         }
1105
1106         if (ubi->beb_rsvd_pebs == 0) {
1107                 spin_unlock(&ubi->volumes_lock);
1108                 ubi_err("no reserved physical eraseblocks");
1109                 goto out_ro;
1110         }
1111
1112         spin_unlock(&ubi->volumes_lock);
1113         ubi_msg("mark PEB %d as bad", pnum);
1114
1115         err = ubi_io_mark_bad(ubi, pnum);
1116         if (err)
1117                 goto out_ro;
1118
1119         spin_lock(&ubi->volumes_lock);
1120         ubi->beb_rsvd_pebs -= 1;
1121         ubi->bad_peb_count += 1;
1122         ubi->good_peb_count -= 1;
1123         ubi_calculate_reserved(ubi);
1124         if (ubi->beb_rsvd_pebs == 0)
1125                 ubi_warn("last PEB from the reserved pool was used");
1126         spin_unlock(&ubi->volumes_lock);
1127
1128         return err;
1129
1130 out_ro:
1131         ubi_ro_mode(ubi);
1132         return err;
1133 }
1134
1135 /**
1136  * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
1137  * @ubi: UBI device description object
1138  * @pnum: physical eraseblock to return
1139  * @torture: if this physical eraseblock has to be tortured
1140  *
1141  * This function is called to return physical eraseblock @pnum to the pool of
1142  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1143  * occurred to this @pnum and it has to be tested. This function returns zero
1144  * in case of success, and a negative error code in case of failure.
1145  */
1146 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1147 {
1148         int err;
1149         struct ubi_wl_entry *e;
1150
1151         dbg_wl("PEB %d", pnum);
1152         ubi_assert(pnum >= 0);
1153         ubi_assert(pnum < ubi->peb_count);
1154
1155 retry:
1156         spin_lock(&ubi->wl_lock);
1157         e = ubi->lookuptbl[pnum];
1158         if (e == ubi->move_from) {
1159                 /*
1160                  * User is putting the physical eraseblock which was selected to
1161                  * be moved. It will be scheduled for erasure in the
1162                  * wear-leveling worker.
1163                  */
1164                 dbg_wl("PEB %d is being moved, wait", pnum);
1165                 spin_unlock(&ubi->wl_lock);
1166
1167                 /* Wait for the WL worker by taking the @ubi->move_mutex */
1168                 mutex_lock(&ubi->move_mutex);
1169                 mutex_unlock(&ubi->move_mutex);
1170                 goto retry;
1171         } else if (e == ubi->move_to) {
1172                 /*
1173                  * User is putting the physical eraseblock which was selected
1174                  * as the target the data is moved to. It may happen if the EBA
1175                  * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
1176                  * the WL unit has not put the PEB to the "used" tree yet, but
1177                  * the WL unit has not put the PEB into the "used" tree yet, but
1178                  * tell the WL worker that the PEB is not needed anymore and
1179                  * should be scheduled for erasure.
1180                  */
1181                 dbg_wl("PEB %d is the target of data moving", pnum);
1182                 ubi_assert(!ubi->move_to_put);
1183                 ubi->move_to_put = 1;
1184                 spin_unlock(&ubi->wl_lock);
1185                 return 0;
1186         } else {
1187                 if (in_wl_tree(e, &ubi->used)) {
1188                         paranoid_check_in_wl_tree(e, &ubi->used);
1189                         rb_erase(&e->rb, &ubi->used);
1190                 } else if (in_wl_tree(e, &ubi->scrub)) {
1191                         paranoid_check_in_wl_tree(e, &ubi->scrub);
1192                         rb_erase(&e->rb, &ubi->scrub);
1193                 } else {
1194                         err = prot_tree_del(ubi, e->pnum);
1195                         if (err) {
1196                                 ubi_err("PEB %d not found", pnum);
1197                                 ubi_ro_mode(ubi);
1198                                 spin_unlock(&ubi->wl_lock);
1199                                 return err;
1200                         }
1201                 }
1202         }
1203         spin_unlock(&ubi->wl_lock);
1204
1205         err = schedule_erase(ubi, e, torture);
1206         if (err) {
1207                 spin_lock(&ubi->wl_lock);
1208                 wl_tree_add(e, &ubi->used);
1209                 spin_unlock(&ubi->wl_lock);
1210         }
1211
1212         return err;
1213 }
1214
1215 /**
1216  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1217  * @ubi: UBI device description object
1218  * @pnum: the physical eraseblock to schedule
1219  *
1220  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1221  * needs scrubbing. This function schedules a physical eraseblock for
1222  * scrubbing which is done in the background. This function returns zero in case
1223  * success and a negative error code in case of failure.
1224  */
1225 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1226 {
1227         struct ubi_wl_entry *e;
1228
1229         ubi_msg("schedule PEB %d for scrubbing", pnum);
1230
1231 retry:
1232         spin_lock(&ubi->wl_lock);
1233         e = ubi->lookuptbl[pnum];
1234         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1235                 spin_unlock(&ubi->wl_lock);
1236                 return 0;
1237         }
1238
1239         if (e == ubi->move_to) {
1240                 /*
1241                  * This physical eraseblock was used to move data to. The data
1242                  * was moved but the PEB was not yet inserted into the proper
1243                  * tree. We should just wait a little and let the WL worker
1244                  * proceed.
1245                  */
1246                 spin_unlock(&ubi->wl_lock);
1247                 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1248                 yield();
1249                 goto retry;
1250         }
1251
1252         if (in_wl_tree(e, &ubi->used)) {
1253                 paranoid_check_in_wl_tree(e, &ubi->used);
1254                 rb_erase(&e->rb, &ubi->used);
1255         } else {
1256                 int err;
1257
1258                 err = prot_tree_del(ubi, e->pnum);
1259                 if (err) {
1260                         ubi_err("PEB %d not found", pnum);
1261                         ubi_ro_mode(ubi);
1262                         spin_unlock(&ubi->wl_lock);
1263                         return err;
1264                 }
1265         }
1266
1267         wl_tree_add(e, &ubi->scrub);
1268         spin_unlock(&ubi->wl_lock);
1269
1270         /*
1271          * Technically scrubbing is the same as wear-leveling, so it is done
1272          * by the WL worker.
1273          */
1274         return ensure_wear_leveling(ubi);
1275 }
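
/*
 * Illustrative trigger (a sketch, not code from this file): a reader which
 * sees a correctable bit-flip would typically schedule scrubbing like
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */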
1276
1277 /**
1278  * ubi_wl_flush - flush all pending works.
1279  * @ubi: UBI device description object
1280  *
1281  * This function returns zero in case of success and a negative error code in
1282  * case of failure.
1283  */
1284 int ubi_wl_flush(struct ubi_device *ubi)
1285 {
1286         int err;
1287
1288         /*
1289          * Erase while the pending works queue is not empty, but not more than
1290          * the number of currently pending works.
1291          */
1292         dbg_wl("flush (%d pending works)", ubi->works_count);
1293         while (ubi->works_count) {
1294                 err = do_work(ubi);
1295                 if (err)
1296                         return err;
1297         }
1298
1299         /*
1300          * Make sure all the works which have been done in parallel are
1301          * finished.
1302          */
1303         down_write(&ubi->work_sem);
1304         up_write(&ubi->work_sem);
1305
1306         /*
1307          * And in case the last one was the WL worker and it cancelled the LEB
1308          * movement, flush again.
1309          */
1310         while (ubi->works_count) {
1311                 dbg_wl("flush more (%d pending works)", ubi->works_count);
1312                 err = do_work(ubi);
1313                 if (err)
1314                         return err;
1315         }
1316
1317         return 0;
1318 }
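
/*
 * Illustrative call sequence (a sketch): a caller which must guarantee that
 * a PEB is actually erased, not merely scheduled for erasure, can combine
 * put and flush:
 *
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *	if (!err)
 *		err = ubi_wl_flush(ubi);
 */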
1319
1320 /**
1321  * tree_destroy - destroy an RB-tree.
1322  * @root: the root of the tree to destroy
1323  */
1324 static void tree_destroy(struct rb_root *root)
1325 {
1326         struct rb_node *rb;
1327         struct ubi_wl_entry *e;
1328
1329         rb = root->rb_node;
1330         while (rb) {
1331                 if (rb->rb_left)
1332                         rb = rb->rb_left;
1333                 else if (rb->rb_right)
1334                         rb = rb->rb_right;
1335                 else {
1336                         e = rb_entry(rb, struct ubi_wl_entry, rb);
1337
1338                         rb = rb_parent(rb);
1339                         if (rb) {
1340                                 if (rb->rb_left == &e->rb)
1341                                         rb->rb_left = NULL;
1342                                 else
1343                                         rb->rb_right = NULL;
1344                         }
1345
1346                         kmem_cache_free(ubi_wl_entry_slab, e);
1347                 }
1348         }
1349 }
1350
1351 /**
1352  * ubi_thread - UBI background thread.
1353  * @u: the UBI device description object pointer
1354  */
1355 int ubi_thread(void *u)
1356 {
1357         int failures = 0;
1358         struct ubi_device *ubi = u;
1359
1360         ubi_msg("background thread \"%s\" started, PID %d",
1361                 ubi->bgt_name, task_pid_nr(current));
1362
1363         set_freezable();
1364         for (;;) {
1365                 int err;
1366
1367                 if (kthread_should_stop())
1368                         break;
1369
1370                 if (try_to_freeze())
1371                         continue;
1372
1373                 spin_lock(&ubi->wl_lock);
1374                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1375                                !ubi->thread_enabled) {
1376                         set_current_state(TASK_INTERRUPTIBLE);
1377                         spin_unlock(&ubi->wl_lock);
1378                         schedule();
1379                         continue;
1380                 }
1381                 spin_unlock(&ubi->wl_lock);
1382
1383                 err = do_work(ubi);
1384                 if (err) {
1385                         ubi_err("%s: work failed with error code %d",
1386                                 ubi->bgt_name, err);
1387                         if (failures++ > WL_MAX_FAILURES) {
1388                                 /*
1389                                  * Too many failures, disable the thread and
1390                                  * switch to read-only mode.
1391                                  */
1392                                 ubi_msg("%s: %d consecutive failures",
1393                                         ubi->bgt_name, WL_MAX_FAILURES);
1394                                 ubi_ro_mode(ubi);
1395                                 break;
1396                         }
1397                 } else
1398                         failures = 0;
1399
1400                 cond_resched();
1401         }
1402
1403         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1404         return 0;
1405 }
1406
1407 /**
1408  * cancel_pending - cancel all pending works.
1409  * @ubi: UBI device description object
1410  */
1411 static void cancel_pending(struct ubi_device *ubi)
1412 {
1413         while (!list_empty(&ubi->works)) {
1414                 struct ubi_work *wrk;
1415
1416                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1417                 list_del(&wrk->list);
1418                 wrk->func(ubi, wrk, 1);
1419                 ubi->works_count -= 1;
1420                 ubi_assert(ubi->works_count >= 0);
1421         }
1422 }
1423
1424 /**
1425  * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
1426  * information.
1427  * @ubi: UBI device description object
1428  * @si: scanning information
1429  *
1430  * This function returns zero in case of success, and a negative error code in
1431  * case of failure.
1432  */
1433 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1434 {
1435         int err;
1436         struct rb_node *rb1, *rb2;
1437         struct ubi_scan_volume *sv;
1438         struct ubi_scan_leb *seb, *tmp;
1439         struct ubi_wl_entry *e;
1440
1441
1442         ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1443         ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1444         spin_lock_init(&ubi->wl_lock);
1445         mutex_init(&ubi->move_mutex);
1446         init_rwsem(&ubi->work_sem);
1447         ubi->max_ec = si->max_ec;
1448         INIT_LIST_HEAD(&ubi->works);
1449
1450         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1451
1452         err = -ENOMEM;
1453         ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1454         if (!ubi->lookuptbl)
1455                 return err;
1456
1457         list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1458                 cond_resched();
1459
1460                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1461                 if (!e)
1462                         goto out_free;
1463
1464                 e->pnum = seb->pnum;
1465                 e->ec = seb->ec;
1466                 ubi->lookuptbl[e->pnum] = e;
1467                 if (schedule_erase(ubi, e, 0)) {
1468                         kmem_cache_free(ubi_wl_entry_slab, e);
1469                         goto out_free;
1470                 }
1471         }
1472
1473         list_for_each_entry(seb, &si->free, u.list) {
1474                 cond_resched();
1475
1476                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1477                 if (!e)
1478                         goto out_free;
1479
1480                 e->pnum = seb->pnum;
1481                 e->ec = seb->ec;
1482                 ubi_assert(e->ec >= 0);
1483                 wl_tree_add(e, &ubi->free);
1484                 ubi->lookuptbl[e->pnum] = e;
1485         }
1486
1487         list_for_each_entry(seb, &si->corr, u.list) {
1488                 cond_resched();
1489
1490                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1491                 if (!e)
1492                         goto out_free;
1493
1494                 e->pnum = seb->pnum;
1495                 e->ec = seb->ec;
1496                 ubi->lookuptbl[e->pnum] = e;
1497                 if (schedule_erase(ubi, e, 0)) {
1498                         kmem_cache_free(ubi_wl_entry_slab, e);
1499                         goto out_free;
1500                 }
1501         }
1502
1503         ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1504                 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1505                         cond_resched();
1506
1507                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1508                         if (!e)
1509                                 goto out_free;
1510
1511                         e->pnum = seb->pnum;
1512                         e->ec = seb->ec;
1513                         ubi->lookuptbl[e->pnum] = e;
1514                         if (!seb->scrub) {
1515                                 dbg_wl("add PEB %d EC %d to the used tree",
1516                                        e->pnum, e->ec);
1517                                 wl_tree_add(e, &ubi->used);
1518                         } else {
1519                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1520                                        e->pnum, e->ec);
1521                                 wl_tree_add(e, &ubi->scrub);
1522                         }
1523                 }
1524         }
1525
1526         if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1527                 ubi_err("not enough physical eraseblocks (%d, need %d)",
1528                         ubi->avail_pebs, WL_RESERVED_PEBS);
1529                 err = -ENOSPC;
1530                 goto out_free;
1531         }
1532         ubi->avail_pebs -= WL_RESERVED_PEBS;
1533         ubi->rsvd_pebs += WL_RESERVED_PEBS;
1534
1535         /* Schedule wear-leveling if needed */
1536         err = ensure_wear_leveling(ubi);
1537         if (err)
1538                 goto out_free;
1539
1540         return 0;
1541
1542 out_free:
1543         cancel_pending(ubi);
1544         tree_destroy(&ubi->used);
1545         tree_destroy(&ubi->free);
1546         tree_destroy(&ubi->scrub);
1547         kfree(ubi->lookuptbl);
1548         return err;
1549 }
1550
1551 /**
1552  * protection_trees_destroy - destroy the protection RB-trees.
1553  * @ubi: UBI device description object
1554  */
1555 static void protection_trees_destroy(struct ubi_device *ubi)
1556 {
1557         struct rb_node *rb;
1558         struct ubi_wl_prot_entry *pe;
1559
1560         rb = ubi->prot.aec.rb_node;
1561         while (rb) {
1562                 if (rb->rb_left)
1563                         rb = rb->rb_left;
1564                 else if (rb->rb_right)
1565                         rb = rb->rb_right;
1566                 else {
1567                         pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1568
1569                         rb = rb_parent(rb);
1570                         if (rb) {
1571                                 if (rb->rb_left == &pe->rb_aec)
1572                                         rb->rb_left = NULL;
1573                                 else
1574                                         rb->rb_right = NULL;
1575                         }
1576
1577                         kmem_cache_free(ubi_wl_entry_slab, pe->e);
1578                         kfree(pe);
1579                 }
1580         }
1581 }
1582
1583 /**
1584  * ubi_wl_close - close the wear-leveling unit.
1585  * @ubi: UBI device description object
1586  */
1587 void ubi_wl_close(struct ubi_device *ubi)
1588 {
1589         dbg_wl("close the UBI wear-leveling unit");
1590
1591         cancel_pending(ubi);
1592         protection_trees_destroy(ubi);
1593         tree_destroy(&ubi->used);
1594         tree_destroy(&ubi->free);
1595         tree_destroy(&ubi->scrub);
1596         kfree(ubi->lookuptbl);
1597 }
1598
1599 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1600
1601 /**
1602  * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
1603  * is correct.
1604  * @ubi: UBI device description object
1605  * @pnum: the physical eraseblock number to check
1606  * @ec: the erase counter to check
1607  *
1608  * This function returns zero if the erase counter of physical eraseblock @pnum
1609  * is equivalent to @ec, %1 if not, and a negative error code if an error
1610  * occurred.
1611  */
1612 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1613 {
1614         int err;
1615         long long read_ec;
1616         struct ubi_ec_hdr *ec_hdr;
1617
1618         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1619         if (!ec_hdr)
1620                 return -ENOMEM;
1621
1622         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1623         if (err && err != UBI_IO_BITFLIPS) {
1624                 /* The header does not have to exist */
1625                 err = 0;
1626                 goto out_free;
1627         }
1628
1629         read_ec = be64_to_cpu(ec_hdr->ec);
1630         if (ec != read_ec) {
1631                 ubi_err("paranoid check failed for PEB %d", pnum);
1632                 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1633                 ubi_dbg_dump_stack();
1634                 err = 1;
1635         } else
1636                 err = 0;
1637
1638 out_free:
1639         kfree(ec_hdr);
1640         return err;
1641 }
1642
1643 /**
1644  * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
1645  * in a WL RB-tree.
1646  * @e: the wear-leveling entry to check
1647  * @root: the root of the tree
1648  *
1649  * This function returns zero if @e is in the @root RB-tree and %1 if it
1650  * is not.
1651  */
1652 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1653                                      struct rb_root *root)
1654 {
1655         if (in_wl_tree(e, root))
1656                 return 0;
1657
1658         ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
1659                 e->pnum, e->ec, root);
1660         ubi_dbg_dump_stack();
1661         return 1;
1662 }
1663
1664 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */