/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions that manage the running of the commit process.
 * Each affected module has its own functions to accomplish their part in the
 * commit and those functions are called here.
 *
 * The commit is the process whereby all updates to the index and LEB properties
 * are written out together and the journal becomes empty. This keeps the
 * file system consistent - at all times the state can be recreated by reading
 * the index and LEB properties and then replaying the journal.
 *
 * The commit is split into two parts named "commit start" and "commit end".
 * During commit start, the commit process has exclusive access to the journal
 * by holding the commit semaphore down for writing. As few I/O operations as
 * possible are performed during commit start, instead the nodes that are to be
 * written are merely identified. During commit end, the commit semaphore is no
 * longer held and the journal is again in operation, allowing users to continue
 * to use the file system while the bulk of the commit I/O is performed. The
 * purpose of this two-step approach is to prevent the commit from causing any
 * latency blips. Note that in any case, the commit does not prevent lookups
 * (as permitted by the TNC mutex), or access to VFS data structures e.g. page
 * cache.
 */

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * nothing_to_commit - check if there is nothing to commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which checks if there is anything to commit. It is
 * used as an optimization to avoid starting the commit if it is not really
 * necessary. Indeed, the commit operation always assumes flash I/O (e.g.,
 * writing the commit start node to the log), and it is better to avoid doing
 * this unnecessarily. E.g., 'ubifs_sync_fs()' runs the commit, but if there is
 * nothing to commit, it is more optimal to avoid any flash I/O.
 *
 * This function has to be called with @c->commit_sem locked for writing -
 * this function does not take LPT/TNC locks because the @c->commit_sem
 * guarantees that we have exclusive access to the TNC and LPT data structures.
 *
 * This function returns %1 if there is nothing to commit and %0 otherwise.
 */
static int nothing_to_commit(struct ubifs_info *c)
{
	/*
	 * During mounting or remounting from R/O mode to R/W mode we may
	 * commit for various recovery-related reasons.
	 */
	if (c->mounting || c->remounting_rw)
		return 0;

	/*
	 * If the root TNC node is dirty, we definitely have something to
	 * commit.
	 */
	if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
		return 0;

	/*
	 * Even though the TNC is clean, the LPT tree may have dirty nodes. For
	 * example, this may happen if the budgeting subsystem invoked GC to
	 * make some free space, and the GC found an LEB with only dirty and
	 * free space. In this case GC would just change the lprops of this
	 * LEB (by turning all space into free space) and unmap it.
	 */
	if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
		return 0;

	/* Clean TNC and LPT imply the dirty-node counters must all be zero */
	ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
	ubifs_assert(c->dirty_pn_cnt == 0);
	ubifs_assert(c->dirty_nn_cnt == 0);

	return 1;
}

/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit. It has to be called with commit lock
 * locked. Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note on locking: the caller holds @c->commit_sem for writing; this function
 * releases it on every path (either directly or via the error labels) before
 * returning.
 */
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	/* Skip the flash I/O entirely if there is nothing to write out */
	if (nothing_to_commit(c)) {
		up_write(&c->commit_sem);
		err = 0;
		goto out_cancel;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	/*
	 * "Commit start" phase: performed under the commit semaphore. Each
	 * subsystem identifies what it will write, but does as little I/O as
	 * possible.
	 */
	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	ubifs_get_lp_stats(c, &lst);

	/*
	 * "Commit end" phase: the semaphore is dropped so the journal can be
	 * used again while the bulk of the commit I/O is done.
	 */
	up_write(&c->commit_sem);

	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;

	/*
	 * Update the master node so it points at the new index root, log tail,
	 * LPT position and LEB-properties statistics of this commit.
	 */
	c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs = cpu_to_le32(zroot.offs);
	c->mst_node->root_len = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size = cpu_to_le64(c->bi.old_idx_sz);
	c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
	err = ubifs_write_master(c);
	if (err)
		goto out;

	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

out_cancel:
	/* Success (or nothing to do): rest and wake anybody waiting on us */
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);
	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	/* A failed commit is fatal - mark the FS broken and go read-only */
	ubifs_err("commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}

/**
 * run_bg_commit - run background commit if it is needed.
 * @c: UBIFS file-system description object
 *
 * This function runs background commit if it is needed. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int run_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	/*
	 * Run background commit only if background commit was requested or if
	 * commit is required.
	 */
	if (c->cmt_state != COMMIT_BACKGROUND &&
	    c->cmt_state != COMMIT_REQUIRED)
		goto out;
	spin_unlock(&c->cs_lock);

	down_write(&c->commit_sem);
	/* Re-check the state - it may have changed while cs_lock was dropped */
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_REQUIRED)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
	else if (c->cmt_state == COMMIT_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_BACKGROUND;
	else
		goto out_cmt_unlock;
	spin_unlock(&c->cs_lock);

	/* do_commit() releases the commit semaphore itself */
	return do_commit(c);

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return 0;
}

/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements various file-system background activities:
 * o when a write-buffer timer expires it synchronizes the appropriate
 *   write-buffer;
 * o when the journal is about to be full, it starts in-advance commit.
 *
 * Note, other stuff like background garbage collection may be added here in
 * future.
 */
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	ubifs_msg("background thread \"%s\" started, PID %d",
		  c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Check if there is something to do */
		if (!c->need_bgt) {
			/*
			 * Nothing prevents us from going sleep now and
			 * be never woken up and block the task which
			 * could wait in 'kthread_stop()' forever.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	ubifs_msg("background thread \"%s\" stops", c->bgt_name);
	return 0;
}

/**
 * ubifs_commit_required - set commit state to "required".
 * @c: UBIFS file-system description object
 *
 * This function is called if a commit is required but cannot be done from the
 * calling function, so it is just flagged instead.
 */
void ubifs_commit_required(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	switch (c->cmt_state) {
	case COMMIT_RESTING:
	case COMMIT_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_REQUIRED));
		c->cmt_state = COMMIT_REQUIRED;
		break;
	case COMMIT_RUNNING_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_RUNNING_REQUIRED));
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
		break;
	case COMMIT_REQUIRED:
	case COMMIT_RUNNING_REQUIRED:
	case COMMIT_BROKEN:
		/* Already required, running as required, or unrecoverable */
		break;
	}
	spin_unlock(&c->cs_lock);
}

/**
 * ubifs_request_bg_commit - notify the background thread to do a commit.
 * @c: UBIFS file-system description object
 *
 * This function is called if the journal is full enough to make a commit
 * worthwhile, so background thread is kicked to start it.
 */
void ubifs_request_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_RESTING) {
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_BACKGROUND));
		c->cmt_state = COMMIT_BACKGROUND;
		spin_unlock(&c->cs_lock);
		ubifs_wake_up_bgt(c);
	} else
		spin_unlock(&c->cs_lock);
}

/**
 * wait_for_commit - wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function sleeps until the commit operation is no longer running.
 */
static int wait_for_commit(struct ubifs_info *c)
{
	dbg_cmt("pid %d goes sleep", current->pid);

	/*
	 * The following sleeps if the condition is false, and will be woken
	 * when the commit ends. It is possible, although very unlikely, that we
	 * will wake up and see the subsequent commit running, rather than the
	 * one we were waiting for, and go back to sleep. However, we will be
	 * woken again, so there is no danger of sleeping forever.
	 */
	wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
			      c->cmt_state != COMMIT_RUNNING_REQUIRED);
	dbg_cmt("commit finished, pid %d woke up", current->pid);
	return 0;
}

/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * We set the commit state to 'running required' to indicate
		 * that we want it to complete as quickly as possible.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Since we unlocked 'c->cs_lock', the state may have changed, so
	 * re-check it.
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	/* Nobody else is committing - do it ourselves */
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}

/**
 * ubifs_gc_should_commit - determine if it is time for GC to run commit.
 * @c: UBIFS file-system description object
 *
 * This function is called by garbage collection to determine if commit should
 * be run. If commit state is @COMMIT_BACKGROUND, which means that the journal
 * is full enough to start commit, this function returns true. It is not
 * absolutely necessary to commit yet, but it feels like this should be better
 * then to keep doing GC. This function returns %1 if GC has to initiate commit
 * and %0 if not.
 */
int ubifs_gc_should_commit(struct ubifs_info *c)
{
	int ret = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BACKGROUND) {
		dbg_cmt("commit required now");
		c->cmt_state = COMMIT_REQUIRED;
	} else
		dbg_cmt("commit not requested");
	if (c->cmt_state == COMMIT_REQUIRED)
		ret = 1;
	spin_unlock(&c->cs_lock);
	return ret;
}

/*
 * Everything below is related to debugging.
 */

/**
 * struct idx_node - hold index nodes during index tree traversal.
 * @list: list
 * @iip: index in parent (slot number of this indexing node in the parent
 *       indexing node)
 * @upper_key: all keys in this indexing node have to be less or equivalent to
 *             this key
 * @idx: index node (8-byte aligned because all node structures must be 8-byte
 *       aligned)
 */
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __aligned(8);
};

/**
 * dbg_old_index_check_init - get information for the next old index check.
 * @c: UBIFS file-system description object
 * @zroot: root of the index
 *
 * This function records information about the index that will be needed for the
 * next old index check i.e. 'dbg_check_old_index()'.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	struct ubifs_idx_node *idx;
	int lnum, offs, len, err = 0;
	struct ubifs_debug_info *d = c->dbg;

	/* Remember where the index root of this commit lives on flash */
	d->old_zroot = *zroot;
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;

	idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
	if (!idx)
		return -ENOMEM;

	err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
	if (err)
		goto out;

	/* Cache root level and sqnum so the next check can validate them */
	d->old_zroot_level = le16_to_cpu(idx->level);
	d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
out:
	kfree(idx);
	return err;
}

/**
 * dbg_check_old_index - check the old copy of the index.
 * @c: UBIFS file-system description object
 * @zroot: root of the new index
 *
 * In order to be able to recover from an unclean unmount, a complete copy of
 * the index must exist on flash. This is the "old" index. The commit process
 * must write the "new" index to flash without overwriting or destroying any
 * part of the old index. This function is run at commit end in order to check
 * that the old index does indeed exist completely intact.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
	int first = 1, iip;
	struct ubifs_debug_info *d = c->dbg;
	union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
	unsigned long long uninitialized_var(last_sqnum);
	struct ubifs_idx_node *idx;
	struct list_head list;
	struct idx_node *i;
	size_t sz;

	if (!dbg_is_chk_index(c))
		return 0;

	INIT_LIST_HEAD(&list);

	/* Room for one idx_node holding a maximum-fanout index node */
	sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
	     UBIFS_IDX_NODE_SZ;

	/* Start at the old zroot */
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;
	iip = 0;

	/*
	 * Traverse the index tree preorder depth-first i.e. do a node and then
	 * its subtrees from left to right.
	 */
	while (1) {
		struct ubifs_branch *br;

		/* Get the next index node */
		i = kmalloc(sz, GFP_NOFS);
		if (!i) {
			err = -ENOMEM;
			goto out_free;
		}
		i->iip = iip;
		/* Keep the index nodes on our path in a linked list */
		list_add_tail(&i->list, &list);
		/* Read the index node */
		idx = &i->idx;
		err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
		if (err)
			goto out_free;
		/* Validate index node */
		child_cnt = le16_to_cpu(idx->child_cnt);
		if (child_cnt < 1 || child_cnt > c->fanout) {
			err = 1;
			goto out_dump;
		}
		if (first) {
			first = 0;
			/* Check root level and sqnum */
			if (le16_to_cpu(idx->level) != d->old_zroot_level) {
				err = 2;
				goto out_dump;
			}
			if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
				err = 3;
				goto out_dump;
			}
			/* Set last values as though root had a parent */
			last_level = le16_to_cpu(idx->level) + 1;
			last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
			key_read(c, ubifs_idx_key(c, idx), &lower_key);
			highest_ino_key(c, &upper_key, INUM_WATERMARK);
		}
		key_copy(c, &upper_key, &i->upper_key);
		/* NOTE(review): error code 3 is also used for the sqnum check above */
		if (le16_to_cpu(idx->level) != last_level - 1) {
			err = 3;
			goto out_dump;
		}
		/*
		 * The index is always written bottom up hence a child's sqnum
		 * is always less than the parents.
		 */
		if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
			err = 4;
			goto out_dump;
		}
		/* Check key range */
		key_read(c, ubifs_idx_key(c, idx), &l_key);
		br = ubifs_idx_branch(c, idx, child_cnt - 1);
		key_read(c, &br->key, &u_key);
		if (keys_cmp(c, &lower_key, &l_key) > 0) {
			err = 5;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) < 0) {
			err = 6;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) == 0)
			if (!is_hash_key(c, &u_key)) {
				err = 7;
				goto out_dump;
			}
		/* Go to next index node */
		if (le16_to_cpu(idx->level) == 0) {
			/* At the bottom, so go up until can go right */
			while (1) {
				/* Drop the bottom of the list */
				list_del(&i->list);
				kfree(i);
				/* No more list means we are done */
				if (list_empty(&list))
					goto out;
				/* Look at the new bottom */
				i = list_entry(list.prev, struct idx_node,
					       list);
				idx = &i->idx;
				/* Can we go right */
				if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
					iip = iip + 1;
					break;
				} else
					/* Nope, so go up again */
					iip = i->iip;
			}
		} else
			/* Go down left */
			iip = 0;
		/*
		 * We have the parent in 'idx' and now we set up for reading the
		 * child pointed to by slot 'iip'.
		 */
		last_level = le16_to_cpu(idx->level);
		last_sqnum = le64_to_cpu(idx->ch.sqnum);
		br = ubifs_idx_branch(c, idx, iip);
		lnum = le32_to_cpu(br->lnum);
		offs = le32_to_cpu(br->offs);
		len = le32_to_cpu(br->len);
		key_read(c, &br->key, &lower_key);
		if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
			br = ubifs_idx_branch(c, idx, iip + 1);
			key_read(c, &br->key, &upper_key);
		} else
			key_copy(c, &i->upper_key, &upper_key);
	}
out:
	/* Old index is intact - record info for the next check */
	err = dbg_old_index_check_init(c, zroot);
	if (err)
		goto out_free;

	return 0;

out_dump:
	ubifs_err("dumping index node (iip=%d)", i->iip);
	ubifs_dump_node(c, idx);
	list_del(&i->list);
	kfree(i);
	if (!list_empty(&list)) {
		i = list_entry(list.prev, struct idx_node, list);
		ubifs_err("dumping parent index node");
		ubifs_dump_node(c, &i->idx);
	}
out_free:
	/* Release any index nodes still held on the traversal path */
	while (!list_empty(&list)) {
		i = list_entry(list.next, struct idx_node, list);
		list_del(&i->list);
		kfree(i);
	}
	ubifs_err("failed, error %d", err);
	if (err > 0)
		err = -EINVAL;
	return err;
}