// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 *
 * Returns the entry with the lowest erase counter among PEBs whose number is
 * below UBI_FM_MAX_START, or %NULL if no such entry exists.
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

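/**
 * return_unused_peb - put an unused PEB back into the free tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to return
 */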
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: %1 if the PEB shall be usable as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node)
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/*
 * wait_free_pebs_for_pool - wait until there are enough free PEBs
 * @ubi: UBI device description object
 *
 * Wait and execute do_work until there are enough free PEBs, then fill the
 * pool as much as we can. This reduces how often the pools must be refilled,
 * which in turn reduces the fastmap update frequency.
 */
static void wait_free_pebs_for_pool(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	int free, expect_free, executed;
	/*
	 * UBI reserves at least the following free PEBs:
	 * 1. WL_RESERVED_PEBS[1]
	 * 2. EBA_RESERVED_PEBS[1]
	 * 3. fm pebs - 1: twice the fastmap size, minus the attached
	 *    fastmap's PEBs and the fm_anchor PEB
	 * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
	 */
	int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
		       ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;

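	/*
	 * do_work() executes one pending work item per call; loop until the
	 * free count (including unused pool slots) reaches the expected
	 * level.
	 */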
	do {
		spin_lock(&ubi->wl_lock);
		free = ubi->free_count;
		free += pool->size - pool->used + wl_pool->size - wl_pool->used;
		expect_free = reserved + ubi->beb_rsvd_pebs;
		spin_unlock(&ubi->wl_lock);

		/*
		 * Break out if there is no pending work or if a work failed
		 * to execute, given the fact that erase_worker will
		 * reschedule itself when the mtd layer returns -EBUSY during
		 * system shutdown.
		 */
		if (do_work(ubi, &executed) || !executed)
			break;
	} while (free < expect_free);
}

/*
 * left_free_count - returns the number of free PEBs available to fill the
 * fastmap pools
 * @ubi: UBI device description object
 *
 * This helper function returns the number of free PEBs, minus the PEBs
 * reserved for fastmap, that may be used to fill fm_pool and fm_wl_pool.
 */
static int left_free_count(struct ubi_device *ubi)
{
	int fm_used = 0;	/* fastmap non-anchor PEBs */

	if (!ubi->free.rb_node)
		return 0;

	if (!ubi->ro_mode && !ubi->fm_disabled)
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - fm_used;
}

/*
 * can_fill_pools - whether free PEBs will be left after filling pools
 * @ubi: UBI device description object
 * @free: current number of free PEBs
 *
 * Return %1 if at least one free PEB will remain after filling the pools,
 * otherwise %0 is returned.
 */
static int can_fill_pools(struct ubi_device *ubi, int free)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	int pool_need = pool->max_size - pool->size +
			wl_pool->max_size - wl_pool->size;

	if (free - pool_need < 1)
		return 0;

	return 1;
}

/**
 * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
 * @ubi: UBI device description object
 */
void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	if (!ubi->ro_mode && !ubi->fm_disabled)
		wait_free_pebs_for_pool(ubi);

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
		ubi->fm_anchor = NULL;
	}

	if (!ubi->fm_disabled)
		/*
		 * All available PEBs are in ubi->free, now is the time to get
		 * the best anchor PEBs.
		 */
		ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

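	/*
	 * Fill fm_pool and fm_wl_pool in lockstep: take at most one PEB for
	 * each pool per iteration, and stop once both pools are full or the
	 * free tree cannot spare any more PEBs.
	 */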
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (left_free_count(ubi) <= 0)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			int left_free = left_free_count(ubi);

			if (left_free <= 0)
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
					  !can_fill_pools(ubi, left_free));
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi, NULL);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

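	/*
	 * The user pool is still exhausted after the fastmap update above;
	 * try to produce free PEBs by executing pending work synchronously,
	 * giving up after a bounded number of attempts.
	 */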
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 * @need_fill: whether to fill wear-leveling pool when no PEBs are found
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
					    bool need_fill)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
		if (need_fill && !ubi->fm_work_scheduled) {
			/*
			 * We cannot update the fastmap here because this
			 * function is called in atomic context.
			 * Let's fail here and refill/update it as soon as
			 * possible.
			 */
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from the wl_pool, so we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree may
 * be moved into the 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi, false);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
			ec = max(ec, e->ec);
		}
	}
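	/*
	 * The used tree is sorted by erase counter, so rb_first() yields the
	 * least-worn used PEB; wear leveling is needed once the gap to the
	 * free candidate reaches UBI_WL_THRESHOLD.
	 */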
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap the very first time
	 * and are now writing a new one. In this case the wl system has never
	 * seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

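	/*
	 * In the fastmap layout, LEB 0 holds the fastmap superblock and the
	 * following LEBs hold fastmap data, so a non-zero lnum identifies a
	 * data PEB.
	 */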
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

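/**
 * ubi_fastmap_close - release fastmap resources on detach.
 * @ubi: UBI device description object
 *
 * Returns the PEBs still held in the fastmap pools (and the anchor, if any)
 * to the free tree and frees the in-memory fastmap structure.
 */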
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate to hand out
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
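	/*
	 * If fastmap is enabled but not attached yet and no anchor PEB has
	 * been reserved, keep @e available as a potential anchor (its pnum
	 * is below UBI_FM_MAX_START) and hand out the next entry from the
	 * tree instead.
	 */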
	if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}