// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate a bitmap of seen PEBs, used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the bitmap of seen PEBs.
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void free_seen(unsigned long *seen)
{
	bitmap_free(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}
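
/*
 * Usage sketch (derived from ubi_write_fastmap() below): the writer
 * allocates the bitmap with init_seen(), marks every PEB it serializes
 * via set_seen() and finally runs self_check_seen() to catch PEBs known
 * to the WL sub-system (ubi->lookuptbl) that the fastmap failed to
 * record:
 *
 *	seen_pebs = init_seen(ubi);
 *	set_seen(ubi, pnum, seen_pebs);    <-- once per serialized PEB
 *	ret = self_check_seen(ubi, seen_pebs);
 *	free_seen(seen_pebs);
 */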

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for a UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       ((sizeof(struct ubi_fm_eba) +
		 sizeof(struct ubi_fm_volhdr)) *
		(UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
	       (ubi->peb_count * sizeof(__be32));
	return roundup(size, ubi->leb_size);
}
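
/*
 * Rough worked example, with made-up numbers (real devices differ): for
 * peb_count = 1024 the two per-PEB arrays dominate, contributing
 * 1024 * (sizeof(struct ubi_fm_ec) + sizeof(__be32)) bytes, i.e. about
 * 12 KiB assuming an 8-byte ubi_fm_ec; roundup() then pads the total to
 * a whole number of LEBs, so even the smallest fastmap occupies one
 * complete LEB.
 */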


/**
 * new_fm_vbuf() - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_io_buf on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}
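
/*
 * The write path below creates two such buffers: one stamped with
 * UBI_FM_SB_VOLUME_ID for the anchor (super block) PEB, and one with
 * UBI_FM_DATA_VOLUME_ID that gets a fresh sqnum and lnum for every
 * additional fastmap data PEB (see ubi_write_fastmap()).
 */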

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * An ERR_PTR value indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}
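
/*
 * Unlike update_vol() below, this helper assumes the LEB is not yet
 * present in @av's RB-tree: the EBA tables parsed by
 * ubi_attach_fastmap() reference each logical eraseblock at most once,
 * so no conflict resolution is needed here.
 */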

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
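
/*
 * The conflict resolution above hinges on ubi_compare_lebs(): bit 0 of
 * a non-negative return value indicates that the pool copy (new_aeb) is
 * newer than the PEB already referenced by the EBA table; whichever
 * copy loses the comparison is queued on ai->erase.
 */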

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}
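
/*
 * Per-PEB outcomes in scan_pool() above: an empty VID header (FF) sends
 * the PEB to the @free list, a readable one is fed into
 * process_pool_aeb() for conflict resolution against the EBA table, and
 * anything else aborts the fast attach with UBI_BAD_FASTMAP so that a
 * full scan takes over.
 */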

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;
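
	/*
	 * On-flash layout parsed below, in this order: super block, fastmap
	 * header, the two scan pools (normal and WL), the EC entries of the
	 * free, used, scrub and erase lists, then one volume header plus
	 * EBA table per volume.  Every fm_pos advance is bounds-checked
	 * against fm_size because the buffer contents come from flash and
	 * cannot be trusted.
	 */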

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}
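
/*
 * Note that @ai->fastmap may hold anchor PEBs from several fastmap
 * generations, e.g. after an interrupted update; picking the highest
 * sequence number selects the most recently written superblock.
 */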

static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
				      struct ubi_ainf_peb *old)
{
	struct ubi_ainf_peb *new;

	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
	if (!new)
		return NULL;

	new->vol_id = old->vol_id;
	new->sqnum = old->sqnum;
	new->lnum = old->lnum;
	new->scrub = old->scrub;
	new->copy_flag = old->copy_flag;

	return new;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Copy all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
		struct ubi_ainf_peb *new;

		new = clone_aeb(ai, aeb);
		if (!new)
			return -ENOMEM;

		list_add(&new->u.list, &ai->fastmap);
	}

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}
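
	/*
	 * Note: the CRC just checked covers the whole fm_size buffer with
	 * data_crc zeroed, matching how ubi_write_fastmap() computes it
	 * before writing, so the padding behind the last used byte is
	 * protected as well.
	 */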

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
	struct ubi_device *ubi = vol->ubi;

	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

	return 0;
}

void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	bitmap_free(vol->checkmap);
}
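
/*
 * Fastmap trusts the EBA tables it read from flash, so LEB mappings are
 * verified lazily on first access; vol->checkmap records which LEBs
 * have already been checked so that the verification runs only once per
 * LEB.  After a full scan (!ubi->fast_attach) no checkmap is needed.
 */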

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_free_avbuf;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_free_dvbuf;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_free_seen;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_free_seen:
	free_seen(seen_pebs);
out_free_dvbuf:
	ubi_free_vid_buf(dvbuf);
out_free_avbuf:
	ubi_free_vid_buf(avbuf);
out:
	return ret;
}
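
/*
 * A note on write ordering above: the anchor VID header (with a fresh
 * sequence number) goes out first, then the data block VID headers, and
 * only afterwards the payload including its CRC.  A power cut in
 * between leaves a fastmap whose CRC check fails on the next attach,
 * which safely degrades into a full scan.
 */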

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_NOFS);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
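
/*
 * The "fake fastmap" written above is just an anchor VID header with a
 * fresh sequence number and no payload: the next attach picks it as the
 * newest anchor, fails to validate the fastmap data behind it and falls
 * back to scanning mode, which is exactly the intent.
 */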

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	ubi_refill_pools_and_lock(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi->fm_anchor;
	ubi->fm_anchor = NULL;
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);

	ubi_ensure_anchor_pebs(ubi);

	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}