/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of the
 * journal.
 */

#include "ubifs.h"

#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_check_bud_bytes(struct ubifs_info *c);
#else
#define dbg_check_bud_bytes(c) 0
#endif

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h >= t)
		return c->log_bytes - h + t;
	else
		return t - h;
}

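/*
 * To illustrate the calculation above with hypothetical numbers (illustrative
 * only, not taken from any real medium): assume 128KiB LEBs and a 4-LEB log
 * spanning LEBs 3-6, so @c->log_bytes is 512KiB. With the head at 5:4096 and
 * the tail at LEB 3, h >= t and the empty space is
 * c->log_bytes - (h - t) = 512KiB - (256KiB + 4096). If the head later wraps
 * around to 3:0 while the tail sits at LEB 5, then h < t and the empty space
 * is simply t - h = 256KiB. Only the difference between @h and @t matters, so
 * it does not matter that both are computed from absolute LEB numbers.
 */
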
/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we account this space now, before
	 * any data has been written to it, because this is needed to
	 * guarantee a bounded mount time, and this bud will anyway be read
	 * and scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

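/*
 * Example of the accounting above (illustrative numbers): a bud which starts
 * at offset 2048 of a 128KiB LEB adds 128KiB - 2048 bytes to @c->bud_bytes at
 * the moment it is attached, even though nothing beyond offset 2048 has been
 * written yet. The limit checks in ubifs_add_bud_to_log() below therefore
 * bound the worst-case amount of bud space replay may have to scan, not the
 * amount of data actually written so far.
 */
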
/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds the bud to the buds tree. It also makes sure that the amount of
 * space used by buds does not exceed the 'c->max_bud_bytes' limit. Returns
 * zero in case of success, %-EAGAIN if commit is required, and a negative
 * error code in case of failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All changes to @c->bud_bytes
	 * take place while both @c->log_mutex and @c->buds_lock are held.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk to refer an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs, UBI_SHORTTERM);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}

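/*
 * A caller which gets %-EAGAIN from ubifs_add_bud_to_log() is expected to let
 * the requested commit run and then retry. A simplified sketch of that
 * pattern (not a copy of the real journal code):
 *
 *	again:
 *		err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
 *		if (err == -EAGAIN) {
 *			err = ubifs_run_commit(c);
 *			if (err)
 *				return err;
 *			goto again;
 *		}
 */
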
/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), wbuf->offs - bud->start,
				c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), c->leb_size - bud->start,
				c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds must be unchanged. Do not release them until
			 * post commit, i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing the "commit start" node to the log,
 * along with reference nodes for all journal heads, which will define the new
 * journal after the commit has been finished. The commit start and reference
 * nodes are written in one go to the nearest empty log LEB (hence, when commit
 * is finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

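/*
 * For example (illustrative numbers), with a 2KiB min. I/O unit and two busy
 * journal heads, the buffer prepared above holds one CS node followed by two
 * reference nodes, padded up to 2KiB and written at offset 0 of the new log
 * head LEB by a single ubifs_leb_write() call. Because everything is written
 * in one go, all log LEBs preceding this one become obsolete once the commit
 * finishes and may safely be unmapped in ubifs_log_post_commit().
 */
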
/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves the
 * log tail to the new position and unmaps LEBs which contain obsolete data.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only the short "commit start" phase
	 * during which writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed
 * for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done already, %0 if not,
 * and a negative error code in case of failure.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

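/*
 * The three-way return value of done_already() is consumed by
 * ubifs_consolidate_log() below: a negative value aborts the scan, %1 means
 * a reference to this LEB has already been copied and the duplicate is
 * dropped, and %0 means this is the first reference to the LEB, so it is
 * copied into the consolidated log via add_node().
 */
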
/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct rb_node *this = done_tree->rb_node;
	struct done_ref *dr;

	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		dr = rb_entry(this, struct done_ref, rb);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &dr->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(dr);
	}
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}

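/*
 * Example of the packing done by add_node() (illustrative numbers): if @offs
 * is close enough to the end of a 128KiB LEB that a 2KiB node no longer fits,
 * the buffer is padded up to the min. I/O unit, written out to the current
 * @lnum with ubifs_leb_change(), and the node is then placed at offset 0 of
 * the next log LEB. Within the buffer, nodes are 8-byte aligned, matching the
 * node alignment UBIFS uses everywhere else.
 */
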
/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB
 * is needed for commit. This function rewrites the reference nodes in the log,
 * omitting duplicates and failed CS nodes, and leaving no gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err("log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

#ifdef CONFIG_UBIFS_FS_DEBUG

/**
 * dbg_check_bud_bytes - make sure the bud bytes calculation is all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL in
 * case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err("bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}

#endif /* CONFIG_UBIFS_FS_DEBUG */