// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate memory used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(unsigned long *seen)
{
	bitmap_free(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for a UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vbuf() - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
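		 * In that case both entries describe the same PEB and the
		 * duplicate coming from the pool is simply dropped.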
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success. If the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

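	/*
	 * The free, used, scrub and erase PEB lists are stored back to back
	 * in the fastmap; each entry is a struct ubi_fm_ec carrying a PEB
	 * number and its erase counter.
	 */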
	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					fmvhdr->vol_id);

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}
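
	/*
	 * The PEBs in the two pools were written after the fastmap itself,
	 * so scan them last and let their contents override the EBA state
	 * read above.
	 */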
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
				      struct ubi_ainf_peb *old)
{
	struct ubi_ainf_peb *new;

	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
	if (!new)
		return NULL;

	new->vol_id = old->vol_id;
	new->sqnum = old->sqnum;
	new->lnum = old->lnum;
	new->scrub = old->scrub;
	new->copy_flag = old->copy_flag;

	return new;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Copy all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
		struct ubi_ainf_peb *new;

		new = clone_aeb(ai, aeb);
		if (!new)
			return -ENOMEM;

		list_add(&new->u.list, &ai->fastmap);
	}

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
	struct ubi_device *ubi = vol->ubi;

	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

	return 0;
}

void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	bitmap_free(vol->checkmap);
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_free_avbuf;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_free_dvbuf;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_free_seen;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_free_seen:
	free_seen(seen_pebs);
out_free_dvbuf:
	ubi_free_vid_buf(dvbuf);
out_free_avbuf:
	ubi_free_vid_buf(avbuf);

out:
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_NOFS);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	ubi_refill_pools_and_lock(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi->fm_anchor;
	ubi->fm_anchor = NULL;
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);

	ubi_ensure_anchor_pebs(ubi);

	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
err=%i", ret); 1632 1633 ret = invalidate_fastmap(ubi); 1634 if (ret < 0) { 1635 ubi_err(ubi, "Unable to invalidate current fastmap!"); 1636 ubi_ro_mode(ubi); 1637 } else { 1638 return_fm_pebs(ubi, old_fm); 1639 return_fm_pebs(ubi, new_fm); 1640 ret = 0; 1641 } 1642 1643 kfree(new_fm); 1644 goto out_unlock; 1645 } 1646