xref: /linux/drivers/mtd/ubi/wl.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12  * the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
19  */
20 
21 /*
22  * UBI wear-leveling sub-system.
23  *
24  * This sub-system is responsible for wear-leveling. It works in terms of
25  * physical eraseblocks and erase counters and knows nothing about logical
26  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27  * eraseblocks are of two types - used and free. Used physical eraseblocks are
28  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
29  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
30  *
31  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only an erase counter
32  * header. The rest of the physical eraseblock contains only %0xFF bytes.
33  *
34  * When physical eraseblocks are returned to the WL sub-system by means of the
35  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36  * done asynchronously in context of the per-UBI device background thread,
37  * which is also managed by the WL sub-system.
38  *
39  * The wear-leveling is ensured by means of moving the contents of used
40  * physical eraseblocks with low erase counter to free physical eraseblocks
41  * with high erase counter.
42  *
43  * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44  * an "optimal" physical eraseblock. For example, when it is known that the
45  * physical eraseblock will be "put" soon because it contains short-term data,
46  * the WL sub-system may pick a free physical eraseblock with low erase
47  * counter, and so forth.
48  *
49  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
50  * bad.
51  *
52  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
53  * in a physical eraseblock, it has to be moved. Technically this is the same
54  * as moving it for wear-leveling reasons.
55  *
56  * As it was said, for the UBI sub-system all physical eraseblocks are either
57  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
58  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
59  * RB-trees, as well as (temporarily) in the @wl->pq queue.
60  *
61  * When the WL sub-system returns a physical eraseblock, the physical
62  * eraseblock is protected from being moved for some "time". For this reason,
63  * the physical eraseblock is not directly moved from the @wl->free tree to the
64  * @wl->used tree. There is a protection queue in between where this
65  * physical eraseblock is temporarily stored (@wl->pq).
66  *
67  * All this protection stuff is needed because:
68  *  o we don't want to move physical eraseblocks just after we have given them
69  *    to the user; instead, we first want to let users fill them up with data;
70  *
71  *  o there is a chance that the user will put the physical eraseblock very
72  *    soon, so it makes sense not to move it for some time, but wait; this is
73  *    especially important in case of "short term" physical eraseblocks.
74  *
75  * Physical eraseblocks stay protected only for limited time. But the "time" is
76  * measured in erase cycles in this case. This is implemented with help of the
77  * protection queue. Eraseblocks are put to the tail of this queue when they
78  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79  * head of the queue on each erase operation (for any eraseblock). So the
80  * length of the queue defines how many (global) erase cycles PEBs are protected.
81  *
82  * To put it differently, each physical eraseblock has 2 main states: free and
83  * used. The former state corresponds to the @wl->free tree. The latter state
84  * is split up into several sub-states:
85  * o the WL movement is allowed (@wl->used tree);
86  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
87  *   erroneous - e.g., there was a read error;
88  * o the WL movement is temporarily prohibited (@wl->pq queue);
89  * o scrubbing is needed (@wl->scrub tree).
90  *
91  * Depending on the sub-state, wear-leveling entries of the used physical
92  * eraseblocks may be kept in one of those structures.
93  *
94  * Note, in this implementation, we keep a small in-RAM object for each physical
95  * eraseblock. This is surely not a scalable solution. But it appears to be good
96  * enough for moderately large flashes and it is simple. In future, one may
97  * re-work this sub-system and make it more scalable.
98  *
99  * At the moment this sub-system does not utilize the sequence number, which
100  * was introduced relatively recently. But it would be wise to do this because
101  * the sequence number of a logical eraseblock characterizes how old it is. For
102  * example, when we move a PEB with a low erase counter, and we need to pick the
103  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
104  * pick a target PEB with an average EC if our PEB is not very "old". This is
105  * room for future re-work of the WL sub-system.
106  */
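/*
 * A minimal caller's-eye sketch of the cycle described above (illustrative
 * only, not part of this driver; error handling and the actual I/O step are
 * elided). 'ubi_wl_get_peb()' and 'ubi_wl_put_peb()' are the functions
 * defined below, everything else is assumed caller context:
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *	if (pnum < 0)
 *		return pnum;
 *	... write a VID header and data to PEB pnum via the I/O sub-system ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *
 * The "get" takes the PEB out of @wl->free and into the protection queue;
 * the "put" schedules it for asynchronous erasure, after which it returns
 * to @wl->free.
 */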
107 
108 #include <linux/slab.h>
109 #include <linux/crc32.h>
110 #include <linux/freezer.h>
111 #include <linux/kthread.h>
112 #include "ubi.h"
113 
114 /* Number of physical eraseblocks reserved for wear-leveling purposes */
115 #define WL_RESERVED_PEBS 1
116 
117 /*
118  * Maximum difference between two erase counters. If this threshold is
119  * exceeded, the WL sub-system starts moving data from used physical
120  * eraseblocks with low erase counter to free physical eraseblocks with high
121  * erase counter.
122  */
123 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
124 
125 /*
126  * When a physical eraseblock is moved, the WL sub-system has to pick the target
127  * physical eraseblock to move to. The simplest way would be just to pick the
128  * one with the highest erase counter. But in certain workloads this could lead
129  * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
130  * situation when the picked physical eraseblock is constantly erased after the
131  * data is written to it. So, we have a constant which limits the highest erase
132  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
133  * does not pick eraseblocks with erase counter greater than the lowest erase
134  * counter plus %WL_FREE_MAX_DIFF.
135  */
136 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
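/*
 * A worked example (sketch, assuming the Kconfig default of 4096 for
 * CONFIG_MTD_UBI_WL_THRESHOLD): WL_FREE_MAX_DIFF is then 8192, so if the
 * least worn free PEB has erase counter 100, 'find_wl_entry(&ubi->free,
 * WL_FREE_MAX_DIFF)' will never return a free PEB whose erase counter is
 * 8292 or higher.
 */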
137 
138 /*
139  * Maximum number of consecutive background thread failures which is enough to
140  * switch to read-only mode.
141  */
142 #define WL_MAX_FAILURES 32
143 
144 /**
145  * struct ubi_work - UBI work description data structure.
146  * @list: a link in the list of pending works
147  * @func: worker function
148  * @e: physical eraseblock to erase
149  * @torture: if the physical eraseblock has to be tortured
150  *
151  * The @func pointer points to the worker function. If the @cancel argument is
152  * not zero, the worker has to free the resources and exit immediately. The
153  * worker has to return zero in case of success and a negative error code in
154  * case of failure.
155  */
156 struct ubi_work {
157 	struct list_head list;
158 	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
159 	/* The below fields are only relevant to erasure works */
160 	struct ubi_wl_entry *e;
161 	int torture;
162 };
163 
164 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
165 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
166 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
167 				     struct rb_root *root);
168 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
169 #else
170 #define paranoid_check_ec(ubi, pnum, ec) 0
171 #define paranoid_check_in_wl_tree(e, root)
172 #define paranoid_check_in_pq(ubi, e) 0
173 #endif
174 
175 /**
176  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
177  * @e: the wear-leveling entry to add
178  * @root: the root of the tree
179  *
180  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
181  * the @ubi->used and @ubi->free RB-trees.
182  */
183 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
184 {
185 	struct rb_node **p, *parent = NULL;
186 
187 	p = &root->rb_node;
188 	while (*p) {
189 		struct ubi_wl_entry *e1;
190 
191 		parent = *p;
192 		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
193 
194 		if (e->ec < e1->ec)
195 			p = &(*p)->rb_left;
196 		else if (e->ec > e1->ec)
197 			p = &(*p)->rb_right;
198 		else {
199 			ubi_assert(e->pnum != e1->pnum);
200 			if (e->pnum < e1->pnum)
201 				p = &(*p)->rb_left;
202 			else
203 				p = &(*p)->rb_right;
204 		}
205 	}
206 
207 	rb_link_node(&e->u.rb, parent, p);
208 	rb_insert_color(&e->u.rb, root);
209 }
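/*
 * Example of the resulting ordering (illustrative): (EC 5, PEB 7) sorts
 * before (EC 5, PEB 12), which sorts before (EC 6, PEB 1). Thus rb_first()
 * on such a tree always yields the least worn entry and, among equally
 * worn entries, the one with the lowest physical eraseblock number.
 */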
210 
211 /**
212  * do_work - do one pending work.
213  * @ubi: UBI device description object
214  *
215  * This function returns zero in case of success and a negative error code in
216  * case of failure.
217  */
218 static int do_work(struct ubi_device *ubi)
219 {
220 	int err;
221 	struct ubi_work *wrk;
222 
223 	cond_resched();
224 
225 	/*
226 	 * @ubi->work_sem is used to synchronize with the workers. Workers take
227 	 * it in read mode, so many of them may be doing works at a time. But
228 	 * the queue flush code has to be sure the whole queue of works is
229 	 * done, and it takes the semaphore in write mode.
230 	 */
231 	down_read(&ubi->work_sem);
232 	spin_lock(&ubi->wl_lock);
233 	if (list_empty(&ubi->works)) {
234 		spin_unlock(&ubi->wl_lock);
235 		up_read(&ubi->work_sem);
236 		return 0;
237 	}
238 
239 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
240 	list_del(&wrk->list);
241 	ubi->works_count -= 1;
242 	ubi_assert(ubi->works_count >= 0);
243 	spin_unlock(&ubi->wl_lock);
244 
245 	/*
246 	 * Call the worker function. Do not touch the work structure
247 	 * after this call as it will have been freed or reused by that
248 	 * time by the worker function.
249 	 */
250 	err = wrk->func(ubi, wrk, 0);
251 	if (err)
252 		ubi_err("work failed with error code %d", err);
253 	up_read(&ubi->work_sem);
254 
255 	return err;
256 }
257 
258 /**
259  * produce_free_peb - produce a free physical eraseblock.
260  * @ubi: UBI device description object
261  *
262  * This function tries to make a free PEB by means of synchronous execution of
263  * pending works. This may be needed if, for example, the background thread is
264  * disabled. Returns zero in case of success and a negative error code in case
265  * of failure.
266  */
267 static int produce_free_peb(struct ubi_device *ubi)
268 {
269 	int err;
270 
271 	spin_lock(&ubi->wl_lock);
272 	while (!ubi->free.rb_node) {
273 		spin_unlock(&ubi->wl_lock);
274 
275 		dbg_wl("do one work synchronously");
276 		err = do_work(ubi);
277 		if (err)
278 			return err;
279 
280 		spin_lock(&ubi->wl_lock);
281 	}
282 	spin_unlock(&ubi->wl_lock);
283 
284 	return 0;
285 }
286 
287 /**
288  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
289  * @e: the wear-leveling entry to check
290  * @root: the root of the tree
291  *
292  * This function returns non-zero if @e is in the @root RB-tree and zero if it
293  * is not.
294  */
295 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
296 {
297 	struct rb_node *p;
298 
299 	p = root->rb_node;
300 	while (p) {
301 		struct ubi_wl_entry *e1;
302 
303 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
304 
305 		if (e->pnum == e1->pnum) {
306 			ubi_assert(e == e1);
307 			return 1;
308 		}
309 
310 		if (e->ec < e1->ec)
311 			p = p->rb_left;
312 		else if (e->ec > e1->ec)
313 			p = p->rb_right;
314 		else {
315 			ubi_assert(e->pnum != e1->pnum);
316 			if (e->pnum < e1->pnum)
317 				p = p->rb_left;
318 			else
319 				p = p->rb_right;
320 		}
321 	}
322 
323 	return 0;
324 }
325 
326 /**
327  * prot_queue_add - add physical eraseblock to the protection queue.
328  * @ubi: UBI device description object
329  * @e: the physical eraseblock to add
330  *
331  * This function adds @e to the tail of the protection queue @ubi->pq, where
332  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
333  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has to
334  * be locked.
335  */
336 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
337 {
338 	int pq_tail = ubi->pq_head - 1;
339 
340 	if (pq_tail < 0)
341 		pq_tail = UBI_PROT_QUEUE_LEN - 1;
342 	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
343 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
344 	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
345 }
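/*
 * A worked example of the index arithmetic above (assuming
 * %UBI_PROT_QUEUE_LEN is 10, as in ubi.h): with @ubi->pq_head at 0 the new
 * entry lands in slot 9. 'serve_prot_queue()' empties the slot at @pq_head
 * and then advances it once per erase operation, so this entry is released
 * to the @ubi->used tree by the 10th erase after it was protected.
 */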
346 
347 /**
348  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
349  * @root: the RB-tree where to look for
350  * @max: highest allowed difference from the lowest erase counter
351  *
352  * This function looks for the wear-leveling entry with the highest erase
353  * counter below the lowest erase counter in @root plus @max, and returns it.
354  */
355 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
356 {
357 	struct rb_node *p;
358 	struct ubi_wl_entry *e;
359 
360 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
361 	max += e->ec;
362 
363 	p = root->rb_node;
364 	while (p) {
365 		struct ubi_wl_entry *e1;
366 
367 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
368 		if (e1->ec >= max)
369 			p = p->rb_left;
370 		else {
371 			p = p->rb_right;
372 			e = e1;
373 		}
374 	}
375 
376 	return e;
377 }
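/*
 * Worked example (illustrative): if @root holds entries with erase counters
 * {100, 150, 5000, 9000} and @max is 8192, the loop above returns the entry
 * with EC 5000 - the most worn one still below 100 + 8192 = 8292.
 */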
378 
379 /**
380  * ubi_wl_get_peb - get a physical eraseblock.
381  * @ubi: UBI device description object
382  * @dtype: type of data which will be stored in this physical eraseblock
383  *
384  * This function returns a physical eraseblock number in case of success and a
385  * negative error code in case of failure. Might sleep.
386  */
387 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
388 {
389 	int err, medium_ec;
390 	struct ubi_wl_entry *e, *first, *last;
391 
392 	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
393 		   dtype == UBI_UNKNOWN);
394 
395 retry:
396 	spin_lock(&ubi->wl_lock);
397 	if (!ubi->free.rb_node) {
398 		if (ubi->works_count == 0) {
399 			ubi_assert(list_empty(&ubi->works));
400 			ubi_err("no free eraseblocks");
401 			spin_unlock(&ubi->wl_lock);
402 			return -ENOSPC;
403 		}
404 		spin_unlock(&ubi->wl_lock);
405 
406 		err = produce_free_peb(ubi);
407 		if (err < 0)
408 			return err;
409 		goto retry;
410 	}
411 
412 	switch (dtype) {
413 	case UBI_LONGTERM:
414 		/*
415 		 * For long term data we pick a physical eraseblock with high
416 		 * erase counter. But the highest erase counter we can pick is
417 		 * bounded by the lowest erase counter plus
418 		 * %WL_FREE_MAX_DIFF.
419 		 */
420 		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
421 		break;
422 	case UBI_UNKNOWN:
423 		/*
424 		 * For unknown data we pick a physical eraseblock with medium
425 		 * erase counter. But by no means may we pick a physical
426 		 * eraseblock whose erase counter is greater than or equal to
427 		 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
428 		 */
429 		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
430 					u.rb);
431 		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
432 
433 		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
434 			e = rb_entry(ubi->free.rb_node,
435 					struct ubi_wl_entry, u.rb);
436 		else {
437 			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
438 			e = find_wl_entry(&ubi->free, medium_ec);
439 		}
440 		break;
441 	case UBI_SHORTTERM:
442 		/*
443 		 * For short term data we pick a physical eraseblock with the
444 		 * lowest erase counter as we expect it will be erased soon.
445 		 */
446 		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
447 		break;
448 	default:
449 		BUG();
450 	}
451 
452 	paranoid_check_in_wl_tree(e, &ubi->free);
453 
454 	/*
455 	 * Move the physical eraseblock to the protection queue where it will
456 	 * be protected from being moved for some time.
457 	 */
458 	rb_erase(&e->u.rb, &ubi->free);
459 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
460 	prot_queue_add(ubi, e);
461 	spin_unlock(&ubi->wl_lock);
462 
463 	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
464 				   ubi->peb_size - ubi->vid_hdr_aloffset);
465 	if (err) {
466 		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
467 		return err;
468 	}
469 
470 	return e->pnum;
471 }
472 
473 /**
474  * prot_queue_del - remove a physical eraseblock from the protection queue.
475  * @ubi: UBI device description object
476  * @pnum: the physical eraseblock to remove
477  *
478  * This function deletes PEB @pnum from the protection queue and returns zero
479  * in case of success and %-ENODEV if the PEB was not found.
480  */
481 static int prot_queue_del(struct ubi_device *ubi, int pnum)
482 {
483 	struct ubi_wl_entry *e;
484 
485 	e = ubi->lookuptbl[pnum];
486 	if (!e)
487 		return -ENODEV;
488 
489 	if (paranoid_check_in_pq(ubi, e))
490 		return -ENODEV;
491 
492 	list_del(&e->u.list);
493 	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
494 	return 0;
495 }
496 
497 /**
498  * sync_erase - synchronously erase a physical eraseblock.
499  * @ubi: UBI device description object
500  * @e: the physical eraseblock to erase
501  * @torture: if the physical eraseblock has to be tortured
502  *
503  * This function returns zero in case of success and a negative error code in
504  * case of failure.
505  */
506 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
507 		      int torture)
508 {
509 	int err;
510 	struct ubi_ec_hdr *ec_hdr;
511 	unsigned long long ec = e->ec;
512 
513 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
514 
515 	err = paranoid_check_ec(ubi, e->pnum, e->ec);
516 	if (err)
517 		return -EINVAL;
518 
519 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
520 	if (!ec_hdr)
521 		return -ENOMEM;
522 
523 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
524 	if (err < 0)
525 		goto out_free;
526 
527 	ec += err;
528 	if (ec > UBI_MAX_ERASECOUNTER) {
529 		/*
530 		 * Erase counter overflow. Upgrade UBI and use 64-bit
531 		 * erase counters internally.
532 		 */
533 		ubi_err("erase counter overflow at PEB %d, EC %llu",
534 			e->pnum, ec);
535 		err = -EINVAL;
536 		goto out_free;
537 	}
538 
539 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
540 
541 	ec_hdr->ec = cpu_to_be64(ec);
542 
543 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
544 	if (err)
545 		goto out_free;
546 
547 	e->ec = ec;
548 	spin_lock(&ubi->wl_lock);
549 	if (e->ec > ubi->max_ec)
550 		ubi->max_ec = e->ec;
551 	spin_unlock(&ubi->wl_lock);
552 
553 out_free:
554 	kfree(ec_hdr);
555 	return err;
556 }
557 
558 /**
559  * serve_prot_queue - check if it is time to stop protecting PEBs.
560  * @ubi: UBI device description object
561  *
562  * This function is called after each erase operation and removes PEBs from the
563  * head of the protection queue. These PEBs have been protected for long enough
564  * and should be moved to the used tree.
565  */
566 static void serve_prot_queue(struct ubi_device *ubi)
567 {
568 	struct ubi_wl_entry *e, *tmp;
569 	int count;
570 
571 	/*
572 	 * There may be several protected physical eraseblocks to remove,
573 	 * process them all.
574 	 */
575 repeat:
576 	count = 0;
577 	spin_lock(&ubi->wl_lock);
578 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
579 		dbg_wl("PEB %d EC %d protection over, move to used tree",
580 			e->pnum, e->ec);
581 
582 		list_del(&e->u.list);
583 		wl_tree_add(e, &ubi->used);
584 		if (count++ > 32) {
585 			/*
586 			 * Let's be nice and avoid holding the spinlock for
587 			 * too long.
588 			 */
589 			spin_unlock(&ubi->wl_lock);
590 			cond_resched();
591 			goto repeat;
592 		}
593 	}
594 
595 	ubi->pq_head += 1;
596 	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
597 		ubi->pq_head = 0;
598 	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
599 	spin_unlock(&ubi->wl_lock);
600 }
601 
602 /**
603  * schedule_ubi_work - schedule a work.
604  * @ubi: UBI device description object
605  * @wrk: the work to schedule
606  *
607  * This function adds a work defined by @wrk to the tail of the pending works
608  * list.
609  */
610 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
611 {
612 	spin_lock(&ubi->wl_lock);
613 	list_add_tail(&wrk->list, &ubi->works);
614 	ubi_assert(ubi->works_count >= 0);
615 	ubi->works_count += 1;
616 	if (ubi->thread_enabled)
617 		wake_up_process(ubi->bgt_thread);
618 	spin_unlock(&ubi->wl_lock);
619 }
620 
621 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
622 			int cancel);
623 
624 /**
625  * schedule_erase - schedule an erase work.
626  * @ubi: UBI device description object
627  * @e: the WL entry of the physical eraseblock to erase
628  * @torture: if the physical eraseblock has to be tortured
629  *
630  * This function returns zero in case of success and %-ENOMEM in case of
631  * failure.
632  */
633 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
634 			  int torture)
635 {
636 	struct ubi_work *wl_wrk;
637 
638 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
639 	       e->pnum, e->ec, torture);
640 
641 	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
642 	if (!wl_wrk)
643 		return -ENOMEM;
644 
645 	wl_wrk->func = &erase_worker;
646 	wl_wrk->e = e;
647 	wl_wrk->torture = torture;
648 
649 	schedule_ubi_work(ubi, wl_wrk);
650 	return 0;
651 }
652 
653 /**
654  * wear_leveling_worker - wear-leveling worker function.
655  * @ubi: UBI device description object
656  * @wrk: the work object
657  * @cancel: non-zero if the worker has to free memory and exit
658  *
659  * This function copies a more worn out physical eraseblock to a less worn out
660  * one. Returns zero in case of success and a negative error code in case of
661  * failure.
662  */
663 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
664 				int cancel)
665 {
666 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
667 	int vol_id = -1, uninitialized_var(lnum);
668 	struct ubi_wl_entry *e1, *e2;
669 	struct ubi_vid_hdr *vid_hdr;
670 
671 	kfree(wrk);
672 	if (cancel)
673 		return 0;
674 
675 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
676 	if (!vid_hdr)
677 		return -ENOMEM;
678 
679 	mutex_lock(&ubi->move_mutex);
680 	spin_lock(&ubi->wl_lock);
681 	ubi_assert(!ubi->move_from && !ubi->move_to);
682 	ubi_assert(!ubi->move_to_put);
683 
684 	if (!ubi->free.rb_node ||
685 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
686 		/*
687 		 * No free physical eraseblocks? Well, they must be waiting in
688 		 * the queue to be erased. Cancel movement - it will be
689 		 * triggered again when a free physical eraseblock appears.
690 		 *
691 		 * No used physical eraseblocks? They must be temporarily
692 		 * protected from being moved. They will be moved to the
693 		 * @ubi->used tree later and the wear-leveling will be
694 		 * triggered again.
695 		 */
696 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
697 		       !ubi->free.rb_node, !ubi->used.rb_node);
698 		goto out_cancel;
699 	}
700 
701 	if (!ubi->scrub.rb_node) {
702 		/*
703 		 * Now pick the least worn-out used physical eraseblock and a
704 		 * highly worn-out free physical eraseblock. If the erase
705 		 * counters differ enough, start wear-leveling.
706 		 */
707 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
708 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
709 
710 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
711 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
712 			       e1->ec, e2->ec);
713 			goto out_cancel;
714 		}
715 		paranoid_check_in_wl_tree(e1, &ubi->used);
716 		rb_erase(&e1->u.rb, &ubi->used);
717 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
718 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
719 	} else {
720 		/* Perform scrubbing */
721 		scrubbing = 1;
722 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
723 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
724 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
725 		rb_erase(&e1->u.rb, &ubi->scrub);
726 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
727 	}
728 
729 	paranoid_check_in_wl_tree(e2, &ubi->free);
730 	rb_erase(&e2->u.rb, &ubi->free);
731 	ubi->move_from = e1;
732 	ubi->move_to = e2;
733 	spin_unlock(&ubi->wl_lock);
734 
735 	/*
736 	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
737 	 * We so far do not know which logical eraseblock our physical
738 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
739 	 * header first.
740 	 *
741 	 * Note, we are protected from this PEB being unmapped and erased. The
742 	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
743 	 * which is being moved was unmapped.
744 	 */
745 
746 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
747 	if (err && err != UBI_IO_BITFLIPS) {
748 		if (err == UBI_IO_PEB_FREE) {
749 			/*
750 			 * We are trying to move PEB without a VID header. UBI
751 			 * always writes VID headers shortly after the PEB was
752 			 * given, so we have a situation when it has not yet
753 			 * had a chance to write it, because it was preempted.
754 			 * So add this PEB to the protection queue for now,
755 			 * because presumably more data will be written there
756 			 * (including the missing VID header), and then we'll
757 			 * move it.
758 			 */
759 			dbg_wl("PEB %d has no VID header", e1->pnum);
760 			protect = 1;
761 			goto out_not_moved;
762 		}
763 
764 		ubi_err("error %d while reading VID header from PEB %d",
765 			err, e1->pnum);
766 		goto out_error;
767 	}
768 
769 	vol_id = be32_to_cpu(vid_hdr->vol_id);
770 	lnum = be32_to_cpu(vid_hdr->lnum);
771 
772 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
773 	if (err) {
774 		if (err == MOVE_CANCEL_RACE) {
775 			/*
776 			 * The LEB has not been moved because the volume is
777 			 * being deleted or the PEB has been put meanwhile. We
778 			 * should prevent this PEB from being selected for
779 			 * wear-leveling movement again, so put it to the
780 			 * protection queue.
781 			 */
782 			protect = 1;
783 			goto out_not_moved;
784 		}
785 
786 		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
787 		    err == MOVE_TARGET_RD_ERR) {
788 			/*
789 			 * Target PEB had bit-flips or write error - torture it.
790 			 */
791 			torture = 1;
792 			goto out_not_moved;
793 		}
794 
795 		if (err == MOVE_SOURCE_RD_ERR) {
796 			/*
797 			 * An error happened while reading the source PEB. Do
798 			 * not switch to R/O mode in this case, and give the
799 			 * upper layers a possibility to recover from this,
800 			 * e.g. by unmapping corresponding LEB. Instead, just
801 			 * put this PEB to the @ubi->erroneous list to prevent
802 			 * UBI from trying to move it over and over again.
803 			 */
804 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
805 				ubi_err("too many erroneous eraseblocks (%d)",
806 					ubi->erroneous_peb_count);
807 				goto out_error;
808 			}
809 			erroneous = 1;
810 			goto out_not_moved;
811 		}
812 
813 		if (err < 0)
814 			goto out_error;
815 
816 		ubi_assert(0);
817 	}
818 
819 	/* The PEB has been successfully moved */
820 	if (scrubbing)
821 		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
822 			e1->pnum, vol_id, lnum, e2->pnum);
823 	ubi_free_vid_hdr(ubi, vid_hdr);
824 
825 	spin_lock(&ubi->wl_lock);
826 	if (!ubi->move_to_put) {
827 		wl_tree_add(e2, &ubi->used);
828 		e2 = NULL;
829 	}
830 	ubi->move_from = ubi->move_to = NULL;
831 	ubi->move_to_put = ubi->wl_scheduled = 0;
832 	spin_unlock(&ubi->wl_lock);
833 
834 	err = schedule_erase(ubi, e1, 0);
835 	if (err) {
836 		kmem_cache_free(ubi_wl_entry_slab, e1);
837 		if (e2)
838 			kmem_cache_free(ubi_wl_entry_slab, e2);
839 		goto out_ro;
840 	}
841 
842 	if (e2) {
843 		/*
844 		 * Well, the target PEB was put meanwhile, schedule it for
845 		 * erasure.
846 		 */
847 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
848 		       e2->pnum, vol_id, lnum);
849 		err = schedule_erase(ubi, e2, 0);
850 		if (err) {
851 			kmem_cache_free(ubi_wl_entry_slab, e2);
852 			goto out_ro;
853 		}
854 	}
855 
856 	dbg_wl("done");
857 	mutex_unlock(&ubi->move_mutex);
858 	return 0;
859 
860 	/*
861 	 * For some reason the LEB was not moved; it might be an error or it might be
862 	 * something else. @e1 was not changed, so return it back. @e2 might
863 	 * have been changed, schedule it for erasure.
864 	 */
865 out_not_moved:
866 	if (vol_id != -1)
867 		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
868 		       e1->pnum, vol_id, lnum, e2->pnum, err);
869 	else
870 		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
871 		       e1->pnum, e2->pnum, err);
872 	spin_lock(&ubi->wl_lock);
873 	if (protect)
874 		prot_queue_add(ubi, e1);
875 	else if (erroneous) {
876 		wl_tree_add(e1, &ubi->erroneous);
877 		ubi->erroneous_peb_count += 1;
878 	} else if (scrubbing)
879 		wl_tree_add(e1, &ubi->scrub);
880 	else
881 		wl_tree_add(e1, &ubi->used);
882 	ubi_assert(!ubi->move_to_put);
883 	ubi->move_from = ubi->move_to = NULL;
884 	ubi->wl_scheduled = 0;
885 	spin_unlock(&ubi->wl_lock);
886 
887 	ubi_free_vid_hdr(ubi, vid_hdr);
888 	err = schedule_erase(ubi, e2, torture);
889 	if (err) {
890 		kmem_cache_free(ubi_wl_entry_slab, e2);
891 		goto out_ro;
892 	}
893 	mutex_unlock(&ubi->move_mutex);
894 	return 0;
895 
896 out_error:
897 	if (vol_id != -1)
898 		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
899 			err, e1->pnum, vol_id, lnum, e2->pnum);
900 	else
901 		ubi_err("error %d while moving PEB %d to PEB %d",
902 			err, e1->pnum, e2->pnum);
903 	spin_lock(&ubi->wl_lock);
904 	ubi->move_from = ubi->move_to = NULL;
905 	ubi->move_to_put = ubi->wl_scheduled = 0;
906 	spin_unlock(&ubi->wl_lock);
907 
908 	ubi_free_vid_hdr(ubi, vid_hdr);
909 	kmem_cache_free(ubi_wl_entry_slab, e1);
910 	kmem_cache_free(ubi_wl_entry_slab, e2);
911 
912 out_ro:
913 	ubi_ro_mode(ubi);
914 	mutex_unlock(&ubi->move_mutex);
915 	ubi_assert(err != 0);
916 	return err < 0 ? err : -EIO;
917 
918 out_cancel:
919 	ubi->wl_scheduled = 0;
920 	spin_unlock(&ubi->wl_lock);
921 	mutex_unlock(&ubi->move_mutex);
922 	ubi_free_vid_hdr(ubi, vid_hdr);
923 	return 0;
924 }
925 
926 /**
927  * ensure_wear_leveling - schedule wear-leveling if it is needed.
928  * @ubi: UBI device description object
929  *
930  * This function checks if it is time to start wear-leveling and schedules it
931  * if yes. This function returns zero in case of success and a negative error
932  * code in case of failure.
933  */
934 static int ensure_wear_leveling(struct ubi_device *ubi)
935 {
936 	int err = 0;
937 	struct ubi_wl_entry *e1;
938 	struct ubi_wl_entry *e2;
939 	struct ubi_work *wrk;
940 
941 	spin_lock(&ubi->wl_lock);
942 	if (ubi->wl_scheduled)
943 		/* Wear-leveling is already in the work queue */
944 		goto out_unlock;
945 
946 	/*
947 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
948 	 * WL worker has to be scheduled anyway.
949 	 */
950 	if (!ubi->scrub.rb_node) {
951 		if (!ubi->used.rb_node || !ubi->free.rb_node)
952 			/* No physical eraseblocks - no deal */
953 			goto out_unlock;
954 
955 		/*
956 		 * We schedule wear-leveling only if the difference between the
957 		 * lowest erase counter of used physical eraseblocks and a high
958 		 * erase counter of free physical eraseblocks is greater than
959 		 * %UBI_WL_THRESHOLD.
960 		 */
961 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
962 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
963 
964 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
965 			goto out_unlock;
966 		dbg_wl("schedule wear-leveling");
967 	} else
968 		dbg_wl("schedule scrubbing");
969 
970 	ubi->wl_scheduled = 1;
971 	spin_unlock(&ubi->wl_lock);
972 
973 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
974 	if (!wrk) {
975 		err = -ENOMEM;
976 		goto out_cancel;
977 	}
978 
979 	wrk->func = &wear_leveling_worker;
980 	schedule_ubi_work(ubi, wrk);
981 	return err;
982 
983 out_cancel:
984 	spin_lock(&ubi->wl_lock);
985 	ubi->wl_scheduled = 0;
986 out_unlock:
987 	spin_unlock(&ubi->wl_lock);
988 	return err;
989 }
990 
991 /**
992  * erase_worker - physical eraseblock erase worker function.
993  * @ubi: UBI device description object
994  * @wl_wrk: the work object
995  * @cancel: non-zero if the worker has to free memory and exit
996  *
997  * This function erases a physical eraseblock and performs torture testing if
998  * needed. It also takes care of marking the physical eraseblock bad if
999  * needed. Returns zero in case of success and a negative error code in case of
1000  * failure.
1001  */
1002 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1003 			int cancel)
1004 {
1005 	struct ubi_wl_entry *e = wl_wrk->e;
1006 	int pnum = e->pnum, err, need;
1007 
1008 	if (cancel) {
1009 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1010 		kfree(wl_wrk);
1011 		kmem_cache_free(ubi_wl_entry_slab, e);
1012 		return 0;
1013 	}
1014 
1015 	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1016 
1017 	err = sync_erase(ubi, e, wl_wrk->torture);
1018 	if (!err) {
1019 		/* Fine, we've erased it successfully */
1020 		kfree(wl_wrk);
1021 
1022 		spin_lock(&ubi->wl_lock);
1023 		wl_tree_add(e, &ubi->free);
1024 		spin_unlock(&ubi->wl_lock);
1025 
1026 		/*
1027 		 * One more erase operation has happened, take care of
1028 		 * protected physical eraseblocks.
1029 		 */
1030 		serve_prot_queue(ubi);
1031 
1032 		/* And take care of wear-leveling */
1033 		err = ensure_wear_leveling(ubi);
1034 		return err;
1035 	}
1036 
1037 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
1038 	kfree(wl_wrk);
1039 	kmem_cache_free(ubi_wl_entry_slab, e);
1040 
1041 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1042 	    err == -EBUSY) {
1043 		int err1;
1044 
1045 		/* Re-schedule the PEB for erasure */
1046 		err1 = schedule_erase(ubi, e, 0);
1047 		if (err1) {
1048 			err = err1;
1049 			goto out_ro;
1050 		}
1051 		return err;
1052 	} else if (err != -EIO) {
1053 		/*
1054 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1055 		 * this physical eraseblock for erasure again would cause
1056 		 * errors again and again. Well, let's switch to R/O mode.
1057 		 */
1058 		goto out_ro;
1059 	}
1060 
1061 	/* It is %-EIO, the PEB went bad */
1062 
1063 	if (!ubi->bad_allowed) {
1064 		ubi_err("bad physical eraseblock %d detected", pnum);
1065 		goto out_ro;
1066 	}
1067 
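	/*
	 * A worked numeric example of the reserve top-up below (values are
	 * assumed for illustration): if @ubi->beb_rsvd_level is 20 and
	 * @ubi->beb_rsvd_pebs is also 20 when a PEB goes bad, 'need' is
	 * 20 - 20 + 1 = 1, so one PEB is moved from @ubi->avail_pebs into the
	 * reserve; once the bad PEB is marked, @ubi->beb_rsvd_pebs drops back
	 * to 20 and the reserve is again at its target level.
	 */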
1068 	spin_lock(&ubi->volumes_lock);
1069 	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1070 	if (need > 0) {
1071 		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1072 		ubi->avail_pebs -= need;
1073 		ubi->rsvd_pebs += need;
1074 		ubi->beb_rsvd_pebs += need;
1075 		if (need > 0)
1076 			ubi_msg("reserve more %d PEBs", need);
1077 	}
1078 
1079 	if (ubi->beb_rsvd_pebs == 0) {
1080 		spin_unlock(&ubi->volumes_lock);
1081 		ubi_err("no reserved physical eraseblocks");
1082 		goto out_ro;
1083 	}
1084 	spin_unlock(&ubi->volumes_lock);
1085 
1086 	ubi_msg("mark PEB %d as bad", pnum);
1087 	err = ubi_io_mark_bad(ubi, pnum);
1088 	if (err)
1089 		goto out_ro;
1090 
1091 	spin_lock(&ubi->volumes_lock);
1092 	ubi->beb_rsvd_pebs -= 1;
1093 	ubi->bad_peb_count += 1;
1094 	ubi->good_peb_count -= 1;
1095 	ubi_calculate_reserved(ubi);
1096 	if (ubi->beb_rsvd_pebs)
1097 		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1098 	else
1099 		ubi_warn("last PEB from the reserved pool was used");
1100 	spin_unlock(&ubi->volumes_lock);
1101 
1102 	return err;
1103 
1104 out_ro:
1105 	ubi_ro_mode(ubi);
1106 	return err;
1107 }
1108 
1109 /**
1110  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1111  * @ubi: UBI device description object
1112  * @pnum: physical eraseblock to return
1113  * @torture: if this physical eraseblock has to be tortured
1114  *
1115  * This function is called to return physical eraseblock @pnum to the pool of
1116  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1117  * occurred to this @pnum and it has to be tested. This function returns zero
1118  * in case of success, and a negative error code in case of failure.
1119  */
1120 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1121 {
1122 	int err;
1123 	struct ubi_wl_entry *e;
1124 
1125 	dbg_wl("PEB %d", pnum);
1126 	ubi_assert(pnum >= 0);
1127 	ubi_assert(pnum < ubi->peb_count);
1128 
1129 retry:
1130 	spin_lock(&ubi->wl_lock);
1131 	e = ubi->lookuptbl[pnum];
1132 	if (e == ubi->move_from) {
1133 		/*
1134 		 * User is putting the physical eraseblock which was selected to
1135 		 * be moved. It will be scheduled for erasure in the
1136 		 * wear-leveling worker.
1137 		 */
1138 		dbg_wl("PEB %d is being moved, wait", pnum);
1139 		spin_unlock(&ubi->wl_lock);
1140 
1141 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1142 		mutex_lock(&ubi->move_mutex);
1143 		mutex_unlock(&ubi->move_mutex);
1144 		goto retry;
1145 	} else if (e == ubi->move_to) {
1146 		/*
1147 		 * User is putting the physical eraseblock which was selected
1148 		 * as the target of the data move. This may happen if the EBA
1149 		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1150 		 * but the WL sub-system has not put the PEB to the "used" tree
1151 		 * yet, but it is about to do this. So we just set a flag which
1152 		 * will tell the WL worker that the PEB is not needed anymore
1153 		 * and should be scheduled for erasure.
1154 		 */
1155 		dbg_wl("PEB %d is the target of data moving", pnum);
1156 		ubi_assert(!ubi->move_to_put);
1157 		ubi->move_to_put = 1;
1158 		spin_unlock(&ubi->wl_lock);
1159 		return 0;
1160 	} else {
1161 		if (in_wl_tree(e, &ubi->used)) {
1162 			paranoid_check_in_wl_tree(e, &ubi->used);
1163 			rb_erase(&e->u.rb, &ubi->used);
1164 		} else if (in_wl_tree(e, &ubi->scrub)) {
1165 			paranoid_check_in_wl_tree(e, &ubi->scrub);
1166 			rb_erase(&e->u.rb, &ubi->scrub);
1167 		} else if (in_wl_tree(e, &ubi->erroneous)) {
1168 			paranoid_check_in_wl_tree(e, &ubi->erroneous);
1169 			rb_erase(&e->u.rb, &ubi->erroneous);
1170 			ubi->erroneous_peb_count -= 1;
1171 			ubi_assert(ubi->erroneous_peb_count >= 0);
1172 			/* Erroneous PEBs should be tortured */
1173 			torture = 1;
1174 		} else {
1175 			err = prot_queue_del(ubi, e->pnum);
1176 			if (err) {
1177 				ubi_err("PEB %d not found", pnum);
1178 				ubi_ro_mode(ubi);
1179 				spin_unlock(&ubi->wl_lock);
1180 				return err;
1181 			}
1182 		}
1183 	}
1184 	spin_unlock(&ubi->wl_lock);
1185 
1186 	err = schedule_erase(ubi, e, torture);
1187 	if (err) {
1188 		spin_lock(&ubi->wl_lock);
1189 		wl_tree_add(e, &ubi->used);
1190 		spin_unlock(&ubi->wl_lock);
1191 	}
1192 
1193 	return err;
1194 }
1195 
1196 /**
1197  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1198  * @ubi: UBI device description object
1199  * @pnum: the physical eraseblock to schedule
1200  *
1201  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1202  * needs scrubbing. This function schedules a physical eraseblock for
1203  * scrubbing which is done in background. This function returns zero in case of
1204  * success and a negative error code in case of failure.
1205  */
1206 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1207 {
1208 	struct ubi_wl_entry *e;
1209 
1210 	dbg_msg("schedule PEB %d for scrubbing", pnum);
1211 
1212 retry:
1213 	spin_lock(&ubi->wl_lock);
1214 	e = ubi->lookuptbl[pnum];
1215 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1216 		spin_unlock(&ubi->wl_lock);
1217 		return 0;
1218 	}
1219 
1220 	if (e == ubi->move_to) {
1221 		/*
1222 		 * This physical eraseblock was used to move data to. The data
1223 		 * was moved but the PEB was not yet inserted into the proper
1224 		 * tree. We should just wait a little and let the WL worker
1225 		 * proceed.
1226 		 */
1227 		spin_unlock(&ubi->wl_lock);
1228 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1229 		yield();
1230 		goto retry;
1231 	}
1232 
1233 	if (in_wl_tree(e, &ubi->used)) {
1234 		paranoid_check_in_wl_tree(e, &ubi->used);
1235 		rb_erase(&e->u.rb, &ubi->used);
1236 	} else {
1237 		int err;
1238 
1239 		err = prot_queue_del(ubi, e->pnum);
1240 		if (err) {
1241 			ubi_err("PEB %d not found", pnum);
1242 			ubi_ro_mode(ubi);
1243 			spin_unlock(&ubi->wl_lock);
1244 			return err;
1245 		}
1246 	}
1247 
1248 	wl_tree_add(e, &ubi->scrub);
1249 	spin_unlock(&ubi->wl_lock);
1250 
1251 	/*
1252 	 * Technically scrubbing is the same as wear-leveling, so it is done
1253 	 * by the WL worker.
1254 	 */
1255 	return ensure_wear_leveling(ubi);
1256 }
1257 
1258 /**
1259  * ubi_wl_flush - flush all pending works.
1260  * @ubi: UBI device description object
1261  *
1262  * This function returns zero in case of success and a negative error code in
1263  * case of failure.
1264  */
1265 int ubi_wl_flush(struct ubi_device *ubi)
1266 {
1267 	int err;
1268 
1269 	/*
1270 	 * Erase while the pending works queue is not empty, but not more than
1271 	 * the number of currently pending works.
1272 	 */
1273 	dbg_wl("flush (%d pending works)", ubi->works_count);
1274 	while (ubi->works_count) {
1275 		err = do_work(ubi);
1276 		if (err)
1277 			return err;
1278 	}
1279 
1280 	/*
1281 	 * Make sure all the works which have been done in parallel are
1282 	 * finished.
1283 	 */
1284 	down_write(&ubi->work_sem);
1285 	up_write(&ubi->work_sem);
1286 
1287 	/*
1288 	 * And in case the last work was the WL worker and it canceled the LEB
1289 	 * movement, flush again.
1290 	 */
1291 	while (ubi->works_count) {
1292 		dbg_wl("flush more (%d pending works)", ubi->works_count);
1293 		err = do_work(ubi);
1294 		if (err)
1295 			return err;
1296 	}
1297 
1298 	return 0;
1299 }
1300 
1301 /**
1302  * tree_destroy - destroy an RB-tree.
1303  * @root: the root of the tree to destroy
1304  */
1305 static void tree_destroy(struct rb_root *root)
1306 {
1307 	struct rb_node *rb;
1308 	struct ubi_wl_entry *e;
1309 
1310 	rb = root->rb_node;
1311 	while (rb) {
1312 		if (rb->rb_left)
1313 			rb = rb->rb_left;
1314 		else if (rb->rb_right)
1315 			rb = rb->rb_right;
1316 		else {
1317 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1318 
1319 			rb = rb_parent(rb);
1320 			if (rb) {
1321 				if (rb->rb_left == &e->u.rb)
1322 					rb->rb_left = NULL;
1323 				else
1324 					rb->rb_right = NULL;
1325 			}
1326 
1327 			kmem_cache_free(ubi_wl_entry_slab, e);
1328 		}
1329 	}
1330 }
1331 
1332 /**
1333  * ubi_thread - UBI background thread.
1334  * @u: the UBI device description object pointer
1335  */
1336 int ubi_thread(void *u)
1337 {
1338 	int failures = 0;
1339 	struct ubi_device *ubi = u;
1340 
1341 	ubi_msg("background thread \"%s\" started, PID %d",
1342 		ubi->bgt_name, task_pid_nr(current));
1343 
1344 	set_freezable();
1345 	for (;;) {
1346 		int err;
1347 
1348 		if (kthread_should_stop())
1349 			break;
1350 
1351 		if (try_to_freeze())
1352 			continue;
1353 
1354 		spin_lock(&ubi->wl_lock);
1355 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1356 			       !ubi->thread_enabled) {
1357 			set_current_state(TASK_INTERRUPTIBLE);
1358 			spin_unlock(&ubi->wl_lock);
1359 			schedule();
1360 			continue;
1361 		}
1362 		spin_unlock(&ubi->wl_lock);
1363 
1364 		err = do_work(ubi);
1365 		if (err) {
1366 			ubi_err("%s: work failed with error code %d",
1367 				ubi->bgt_name, err);
1368 			if (failures++ > WL_MAX_FAILURES) {
1369 				/*
1370 				 * Too many failures, disable the thread and
1371 				 * switch to read-only mode.
1372 				 */
1373 				ubi_msg("%s: %d consecutive failures",
1374 					ubi->bgt_name, WL_MAX_FAILURES);
1375 				ubi_ro_mode(ubi);
1376 				ubi->thread_enabled = 0;
1377 				continue;
1378 			}
1379 		} else
1380 			failures = 0;
1381 
1382 		cond_resched();
1383 	}
1384 
1385 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1386 	return 0;
1387 }
1388 
1389 /**
1390  * cancel_pending - cancel all pending works.
1391  * @ubi: UBI device description object
1392  */
1393 static void cancel_pending(struct ubi_device *ubi)
1394 {
1395 	while (!list_empty(&ubi->works)) {
1396 		struct ubi_work *wrk;
1397 
1398 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1399 		list_del(&wrk->list);
1400 		wrk->func(ubi, wrk, 1);
1401 		ubi->works_count -= 1;
1402 		ubi_assert(ubi->works_count >= 0);
1403 	}
1404 }
1405 
1406 /**
1407  * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1408  * @ubi: UBI device description object
1409  * @si: scanning information
1410  *
1411  * This function returns zero in case of success, and a negative error code in
1412  * case of failure.
1413  */
1414 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1415 {
1416 	int err, i;
1417 	struct rb_node *rb1, *rb2;
1418 	struct ubi_scan_volume *sv;
1419 	struct ubi_scan_leb *seb, *tmp;
1420 	struct ubi_wl_entry *e;
1421 
1422 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1423 	spin_lock_init(&ubi->wl_lock);
1424 	mutex_init(&ubi->move_mutex);
1425 	init_rwsem(&ubi->work_sem);
1426 	ubi->max_ec = si->max_ec;
1427 	INIT_LIST_HEAD(&ubi->works);
1428 
1429 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1430 
1431 	err = -ENOMEM;
1432 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1433 	if (!ubi->lookuptbl)
1434 		return err;
1435 
1436 	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1437 		INIT_LIST_HEAD(&ubi->pq[i]);
1438 	ubi->pq_head = 0;
1439 
1440 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1441 		cond_resched();
1442 
1443 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1444 		if (!e)
1445 			goto out_free;
1446 
1447 		e->pnum = seb->pnum;
1448 		e->ec = seb->ec;
1449 		ubi->lookuptbl[e->pnum] = e;
1450 		if (schedule_erase(ubi, e, 0)) {
1451 			kmem_cache_free(ubi_wl_entry_slab, e);
1452 			goto out_free;
1453 		}
1454 	}
1455 
1456 	list_for_each_entry(seb, &si->free, u.list) {
1457 		cond_resched();
1458 
1459 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1460 		if (!e)
1461 			goto out_free;
1462 
1463 		e->pnum = seb->pnum;
1464 		e->ec = seb->ec;
1465 		ubi_assert(e->ec >= 0);
1466 		wl_tree_add(e, &ubi->free);
1467 		ubi->lookuptbl[e->pnum] = e;
1468 	}
1469 
1470 	list_for_each_entry(seb, &si->corr, u.list) {
1471 		cond_resched();
1472 
1473 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1474 		if (!e)
1475 			goto out_free;
1476 
1477 		e->pnum = seb->pnum;
1478 		e->ec = seb->ec;
1479 		ubi->lookuptbl[e->pnum] = e;
1480 		if (schedule_erase(ubi, e, 0)) {
1481 			kmem_cache_free(ubi_wl_entry_slab, e);
1482 			goto out_free;
1483 		}
1484 	}
1485 
1486 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1487 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1488 			cond_resched();
1489 
1490 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1491 			if (!e)
1492 				goto out_free;
1493 
1494 			e->pnum = seb->pnum;
1495 			e->ec = seb->ec;
1496 			ubi->lookuptbl[e->pnum] = e;
1497 			if (!seb->scrub) {
1498 				dbg_wl("add PEB %d EC %d to the used tree",
1499 				       e->pnum, e->ec);
1500 				wl_tree_add(e, &ubi->used);
1501 			} else {
1502 				dbg_wl("add PEB %d EC %d to the scrub tree",
1503 				       e->pnum, e->ec);
1504 				wl_tree_add(e, &ubi->scrub);
1505 			}
1506 		}
1507 	}
1508 
1509 	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1510 		ubi_err("not enough physical eraseblocks (%d, need %d)",
1511 			ubi->avail_pebs, WL_RESERVED_PEBS);
1512 		goto out_free;
1513 	}
1514 	ubi->avail_pebs -= WL_RESERVED_PEBS;
1515 	ubi->rsvd_pebs += WL_RESERVED_PEBS;
1516 
1517 	/* Schedule wear-leveling if needed */
1518 	err = ensure_wear_leveling(ubi);
1519 	if (err)
1520 		goto out_free;
1521 
1522 	return 0;
1523 
1524 out_free:
1525 	cancel_pending(ubi);
1526 	tree_destroy(&ubi->used);
1527 	tree_destroy(&ubi->free);
1528 	tree_destroy(&ubi->scrub);
1529 	kfree(ubi->lookuptbl);
1530 	return err;
1531 }
1532 
1533 /**
1534  * protection_queue_destroy - destroy the protection queue.
1535  * @ubi: UBI device description object
1536  */
1537 static void protection_queue_destroy(struct ubi_device *ubi)
1538 {
1539 	int i;
1540 	struct ubi_wl_entry *e, *tmp;
1541 
1542 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1543 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1544 			list_del(&e->u.list);
1545 			kmem_cache_free(ubi_wl_entry_slab, e);
1546 		}
1547 	}
1548 }
1549 
1550 /**
1551  * ubi_wl_close - close the wear-leveling sub-system.
1552  * @ubi: UBI device description object
1553  */
1554 void ubi_wl_close(struct ubi_device *ubi)
1555 {
1556 	dbg_wl("close the WL sub-system");
1557 	cancel_pending(ubi);
1558 	protection_queue_destroy(ubi);
1559 	tree_destroy(&ubi->used);
1560 	tree_destroy(&ubi->erroneous);
1561 	tree_destroy(&ubi->free);
1562 	tree_destroy(&ubi->scrub);
1563 	kfree(ubi->lookuptbl);
1564 }
1565 
1566 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1567 
1568 /**
1569  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1570  * @ubi: UBI device description object
1571  * @pnum: the physical eraseblock number to check
1572  * @ec: the erase counter to check
1573  *
1574  * This function returns zero if the erase counter of physical eraseblock @pnum
1575  * is equivalent to @ec, and non-zero if not or if an error occurred.
1576  */
1577 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1578 {
1579 	int err;
1580 	long long read_ec;
1581 	struct ubi_ec_hdr *ec_hdr;
1582 
1583 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1584 	if (!ec_hdr)
1585 		return -ENOMEM;
1586 
1587 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1588 	if (err && err != UBI_IO_BITFLIPS) {
1589 		/* The header does not have to exist */
1590 		err = 0;
1591 		goto out_free;
1592 	}
1593 
1594 	read_ec = be64_to_cpu(ec_hdr->ec);
1595 	if (ec != read_ec) {
1596 		ubi_err("paranoid check failed for PEB %d", pnum);
1597 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
1598 		ubi_dbg_dump_stack();
1599 		err = 1;
1600 	} else
1601 		err = 0;
1602 
1603 out_free:
1604 	kfree(ec_hdr);
1605 	return err;
1606 }
1607 
1608 /**
1609  * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1610  * @e: the wear-leveling entry to check
1611  * @root: the root of the tree
1612  *
1613  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1614  * is not.
1615  */
1616 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1617 				     struct rb_root *root)
1618 {
1619 	if (in_wl_tree(e, root))
1620 		return 0;
1621 
1622 	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1623 		e->pnum, e->ec, root);
1624 	ubi_dbg_dump_stack();
1625 	return -EINVAL;
1626 }
1627 
1628 /**
1629  * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1630  *                        queue.
1631  * @ubi: UBI device description object
1632  * @e: the wear-leveling entry to check
1633  *
1634  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1635  */
1636 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1637 {
1638 	struct ubi_wl_entry *p;
1639 	int i;
1640 
1641 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1642 		list_for_each_entry(p, &ubi->pq[i], u.list)
1643 			if (p == e)
1644 				return 0;
1645 
1646 	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1647 		e->pnum, e->ec);
1648 	ubi_dbg_dump_stack();
1649 	return -EINVAL;
1650 }
1651 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
1652