/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

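/*
 * In-memory NAT cache layout (see the structures in node.h): nat_entry
 * objects are indexed by nid in nm_i->nat_root and, while clean, strung
 * on nm_i->nat_entries in roughly LRU order so try_to_free_nats() can
 * reclaim from the head.  Once dirtied, an entry moves onto the
 * per-NAT-block nat_entry_set found via nm_i->nat_set_root, so all dirty
 * entries of one NAT block can be flushed together at checkpoint time.
 */
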
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

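/*
 * The predicates below are driven by per-entry nat flags:
 * IS_CHECKPOINTED means the entry's address was still valid as of the
 * last checkpoint, while HAS_FSYNCED_INODE and HAS_LAST_FSYNC record
 * whether the inode and its last dnode were written out by fsync.
 * fsync uses these answers to decide whether an inode block must be
 * rewritten or a dentry mark set for later roll-forward recovery.
 */
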
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry
		 * can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	down_write(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr_shrink;
}

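/*
 * get_node_info() resolves a nid in increasing order of cost: first the
 * in-memory nat cache, then the NAT journal kept in the hot data summary
 * block, and finally the on-disk NAT block itself.  Whatever the slower
 * paths find is inserted back into the cache for next time.
 */
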
/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

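/*
 * Worked example for get_node_path(), assuming the common 4KB-block
 * geometry (ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK = 1018,
 * NIDS_PER_BLOCK = 1018; direct_index shrinks when inline xattrs
 * reserve part of the inode):
 *
 *   block 100                -> level 0, offset[0] = 100 (in the inode)
 *   block 923                -> level 1, NODE_DIR1_BLOCK, offset[1] = 0
 *   block 2959 (923 + 2*1018) -> level 2, NODE_IND1_BLOCK,
 *                                offset[1] = 0, offset[2] = 0
 *
 * noffset[] carries the logical node index of each node on the path;
 * get_dnode_of_data() feeds it to new_node_page() for the node footer,
 * and truncate_inode_blocks() seeds its nofs counter from it.
 */
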
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE; the lookup modes
 * don't need to care about that rwsem.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

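/*
 * The truncate_* helpers below return how many node slots they covered
 * at their level (a direct node counts as 1, a fully-freed indirect
 * subtree as NIDS_PER_BLOCK + 1), or a negative errno on failure.
 * truncate_inode_blocks() adds these counts to its running node
 * offset, nofs.
 */
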
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

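/*
 * truncate_inode_blocks() first trims the partially-truncated subtree
 * with truncate_partial_nodes(), then walks the remaining i_nid[] slots
 * in order, dropping each fully-truncated subtree and clearing the slot
 * in the inode page as it goes.
 */
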
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Callers of read_node_page() should clean up based on its return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = rw,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.blk_addr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err != LOCKED_PAGE)
		lock_page(page);

	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

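/*
 * get_node_page_ra() wraps the sibling readahead below in a block plug,
 * so the READA bios for up to MAX_RA_NODE neighbouring node pages can be
 * merged and issued as one batch when the plug is released.
 */
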
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		/* don't clobber nid: the repeat path above reuses it */
		nid_t tnid = get_nid(parent, i, false);

		if (!tnid)
			continue;
		ra_node_page(sbi, tnid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

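/*
 * f2fs_write_node_page() takes sbi->node_write for reading so a node
 * block write cannot race with checkpointing, which holds the same
 * rwsem for writing while it freezes node updates.
 */
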
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	set_page_writeback(page);
	fio.blk_addr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

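/*
 * Free nid bookkeeping: candidate nids live in nm_i->free_nid_root and
 * on nm_i->free_nid_list under free_nid_list_lock.  add_free_nid() adds
 * them in the NID_NEW state, alloc_nid() moves one to NID_ALLOC, and
 * alloc_nid_done()/alloc_nid_failed() either retire the nid for good or
 * return it to NID_NEW for reuse.
 */
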
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_meta_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

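/*
 * At checkpoint time, dirty nat entries are flushed either into the NAT
 * journal of the hot data summary block (cheap, but of bounded size) or
 * back into NAT pages.  When the journal cannot hold them all, the
 * helper below first migrates the journalled entries into the dirty nat
 * sets so that whole NAT blocks can be rewritten instead.
 */
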
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);

		down_write(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}
		__set_nat_cache_dirty(nm_i, ne);
		up_write(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	down_write(&nm_i->nat_tree_lock);
	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	up_write(&nm_i->nat_tree_lock);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into nat entry sets.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}
	up_write(&nm_i->nat_tree_lock);

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
			nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}