/* drivers/mtd/ubi/fastmap-wl.c — fastmap wear-leveling support
 * (retrieved from karo-tx-linux.git, blob 067aa9042ec11ff113e3e51693cbc056cb8685cd)
 */
1 /*
2  * Copyright (c) 2012 Linutronix GmbH
3  * Copyright (c) 2014 sigma star gmbh
4  * Author: Richard Weinberger <richard@nod.at>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13  * the GNU General Public License for more details.
14  *
15  */
16
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 *
 * Deferred-work counterpart of ubi_update_fastmap() for callers in atomic
 * context (see get_peb_for_wl(), which schedules this via ubi->fm_work).
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	/* Best-effort: the return value is intentionally ignored here; a
	 * failed update will surface on the next synchronous attempt in
	 * ubi_wl_get_peb(). */
	ubi_update_fastmap(ubi);

	/* Clear the flag under wl_lock so get_peb_for_wl() may schedule
	 * this work again. */
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
30
31 /**
32  *  is_fm_block - returns 1 if a PEB is currently used in a fastmap.
33  *  @ubi: UBI device description object
34  *  @pnum: the to be checked PEB
35  */
36 static int is_fm_block(struct ubi_device *ubi, int pnum)
37 {
38         int i;
39
40         if (!ubi->fm)
41                 return 0;
42
43         for (i = 0; i < ubi->fm->used_blocks; i++)
44                 if (ubi->fm->e[i]->pnum == pnum)
45                         return 1;
46
47         return 0;
48 }
49
50 /**
51  * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
52  * @root: the RB-tree where to look for
53  */
54 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
55 {
56         struct rb_node *p;
57         struct ubi_wl_entry *e, *victim = NULL;
58         int max_ec = UBI_MAX_ERASECOUNTER;
59
60         ubi_rb_for_each_entry(p, e, root, u.rb) {
61                 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
62                         victim = e;
63                         max_ec = e->ec;
64                 }
65         }
66
67         return victim;
68 }
69
70 /**
71  * return_unused_pool_pebs - returns unused PEB to the free tree.
72  * @ubi: UBI device description object
73  * @pool: fastmap pool description object
74  */
75 static void return_unused_pool_pebs(struct ubi_device *ubi,
76                                     struct ubi_fm_pool *pool)
77 {
78         int i;
79         struct ubi_wl_entry *e;
80
81         for (i = pool->used; i < pool->size; i++) {
82                 e = ubi->lookuptbl[pool->pebs[i]];
83                 wl_tree_add(e, &ubi->free);
84                 ubi->free_count++;
85         }
86 }
87
88 static int anchor_pebs_avalible(struct rb_root *root)
89 {
90         struct rb_node *p;
91         struct ubi_wl_entry *e;
92
93         ubi_rb_for_each_entry(p, e, root, u.rb)
94                 if (e->pnum < UBI_FM_MAX_START)
95                         return 1;
96
97         return 0;
98 }
99
100 /**
101  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
102  * @ubi: UBI device description object
103  * @anchor: This PEB will be used as anchor PEB by fastmap
104  *
105  * The function returns a physical erase block with a given maximal number
106  * and removes it from the wl subsystem.
107  * Must be called with wl_lock held!
108  */
109 struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
110 {
111         struct ubi_wl_entry *e = NULL;
112
113         if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
114                 goto out;
115
116         if (anchor)
117                 e = find_anchor_wl_entry(&ubi->free);
118         else
119                 e = find_mean_wl_entry(ubi, &ubi->free);
120
121         if (!e)
122                 goto out;
123
124         self_check_in_wl_tree(ubi, e, &ubi->free);
125
126         /* remove it from the free list,
127          * the wl subsystem does no longer know this erase block */
128         rb_erase(&e->u.rb, &ubi->free);
129         ubi->free_count--;
130 out:
131         return e;
132 }
133
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 *
 * First hands every not-yet-used PEB of both pools back to the free tree,
 * then refills both pools from it, alternating one PEB per pool per loop
 * iteration, until both pools are full or the free tree can give no more.
 * Takes and releases ubi->wl_lock itself.
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	/* Unused PEBs go back to the free tree before refilling. */
	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	for (;;) {
		/* Counts how many of the two pools are already full. */
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			/* NOTE(review): keeps 5 PEBs of headroom above the
			 * bad-block reserve — presumably so WL/erase work
			 * cannot exhaust the free tree; confirm against the
			 * WL sub-system's reserve accounting. */
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		/* Both pools full — done. */
		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
192
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 *
 * Note: fm_eba_sem is held on return even on the error paths — callers
 * are expected to release it themselves.
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronous. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		/* Both lock and sem must be dropped: ubi_update_fastmap()
		 * sleeps and takes fm_eba_sem for writing itself. */
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			/* Re-acquire the sem to honor the "held on return"
			 * contract even on failure. */
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			/* Second time around and the pool is still empty:
			 * give up. fm_eba_sem stays held (see above). */
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		/* Pool may have been emptied by a racing getter between the
		 * fastmap update and re-taking wl_lock — retry once. */
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	/* Protect the fresh PEB from immediate reuse by wear-leveling. */
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
245
246 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
247  *
248  * @ubi: UBI device description object
249  */
250 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
251 {
252         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
253         int pnum;
254
255         if (pool->used == pool->size) {
256                 /* We cannot update the fastmap here because this
257                  * function is called in atomic context.
258                  * Let's fail here and refill/update it as soon as possible. */
259                 if (!ubi->fm_work_scheduled) {
260                         ubi->fm_work_scheduled = 1;
261                         schedule_work(&ubi->fm_work);
262                 }
263                 return NULL;
264         }
265
266         pnum = pool->pebs[pool->used++];
267         return ubi->lookuptbl[pnum];
268 }
269
270 /**
271  * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
272  * @ubi: UBI device description object
273  */
274 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
275 {
276         struct ubi_work *wrk;
277
278         spin_lock(&ubi->wl_lock);
279         if (ubi->wl_scheduled) {
280                 spin_unlock(&ubi->wl_lock);
281                 return 0;
282         }
283         ubi->wl_scheduled = 1;
284         spin_unlock(&ubi->wl_lock);
285
286         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
287         if (!wrk) {
288                 spin_lock(&ubi->wl_lock);
289                 ubi->wl_scheduled = 0;
290                 spin_unlock(&ubi->wl_lock);
291                 return -ENOMEM;
292         }
293
294         wrk->anchor = 1;
295         wrk->func = &wear_leveling_worker;
296         schedule_ubi_work(ubi, wrk);
297         return 0;
298 }
299
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * Schedules an erase for the PEB and returns the result of
 * schedule_erase(). @lnum also selects the volume id used for the erase
 * work: 0 means the fastmap superblock volume, anything else the fastmap
 * data volume.
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and writing now a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		/* Adopt the caller's entry into the lookup table so the wl
		 * sub-system tracks this PEB from now on. */
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}
339
340 /**
341  * ubi_is_erase_work - checks whether a work is erase work.
342  * @wrk: The work object to be checked
343  */
344 int ubi_is_erase_work(struct ubi_work *wrk)
345 {
346         return wrk->func == erase_worker;
347 }
348
349 static void ubi_fastmap_close(struct ubi_device *ubi)
350 {
351         int i;
352
353         flush_work(&ubi->fm_work);
354         return_unused_pool_pebs(ubi, &ubi->fm_pool);
355         return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
356
357         if (ubi->fm) {
358                 for (i = 0; i < ubi->fm->used_blocks; i++)
359                         kfree(ubi->fm->e[i]);
360         }
361         kfree(ubi->fm);
362 }
363
364 /**
365  * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
366  * See find_mean_wl_entry()
367  *
368  * @ubi: UBI device description object
369  * @e: physical eraseblock to return
370  * @root: RB tree to test against.
371  */
372 static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
373                                            struct ubi_wl_entry *e,
374                                            struct rb_root *root) {
375         if (e && !ubi->fm_disabled && !ubi->fm &&
376             e->pnum < UBI_FM_MAX_START)
377                 e = rb_entry(rb_next(root->rb_node),
378                              struct ubi_wl_entry, u.rb);
379
380         return e;
381 }