// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the valid node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
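/*
 * NAT blocks are kept in two alternating copies: current_nat_addr()
 * points at the copy valid as of the last checkpoint, next_nat_addr()
 * at its shadow.  A rough sketch of the update flow used by the NAT
 * flush path later in this file (illustrative only):
 *
 *	dst = get_next_nat_page(sbi, nid);  // current copy -> shadow
 *	...update raw entries in dst...     // dst is a dirty meta page
 *	                                    // written back at checkpoint
 *
 * get_next_nat_page() also calls set_to_next_nat(), which flips the
 * per-block version bit so readers pick the shadow copy after the
 * checkpoint commits.
 */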
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
						  struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in below condition:
	 * 1. update NEW_ADDR to valid block address;
	 * 2. update old block address to new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}
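/*
 * How the fsync node list above is used (all the pieces are in this
 * file): __write_node_page() calls f2fs_add_fsync_node_entry() for warm
 * dnodes and hands the returned seq_id back to the fsync caller, which
 * later passes it to f2fs_wait_on_node_pages_writeback() to wait for
 * exactly the node pages submitted up to that sequence number.
 */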
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
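/*
 * set_node_addr() below is the single point where a node's block
 * address transitions in the NAT cache.  A rough state sketch, mirroring
 * the f2fs_bug_on() checks inside (not exhaustive):
 *
 *	NULL_ADDR -> NEW_ADDR       node just allocated (f2fs_new_node_page)
 *	NEW_ADDR  -> valid blkaddr  first writeback of the node
 *	valid     -> valid          node rewritten to a new block
 *	non-NEW   -> NULL_ADDR      node truncated; the version is bumped
 *	                            so a reused nid is distinguishable
 */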
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry may
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
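/*
 * f2fs_get_node_info() resolves a nid from the cheapest source first:
 * the in-memory nat cache, then the NAT journal kept in the hot-data
 * curseg summary, and finally the on-disk NAT block.  A typical caller
 * (see read_node_page() later in this file) only needs ni.blk_addr:
 *
 *	struct node_info ni;
 *
 *	err = f2fs_get_node_info(sbi, nid, &ni);
 *	if (err)
 *		return err;
 *	// ni.ino, ni.blk_addr and ni.version are now valid
 */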
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
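/*
 * Worked example of the index geometry below, assuming the default 4KB
 * block layout (ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK = NIDS_PER_BLOCK
 * = 1018; the exact constants shrink with inode features such as
 * inline_xattr, so treat these numbers as illustrative):
 *
 *	block <  923                      -> level 0, inside the inode
 *	block <  923 + 2*1018             -> level 1, via a direct node
 *	block <  923 + 2*1018 + 2*1018^2  -> level 2, via an indirect node
 *	larger offsets, up to the double-indirect limit -> level 3
 */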
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
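/*
 * Illustrative use of the dnode lookup API (mirroring callers such as
 * f2fs_remove_inode_page() later in this file):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	// dn.node_page is the locked dnode, dn.data_blkaddr the block
 *	f2fs_put_dnode(&dn);
 *
 * With ALLOC_NODE the call may allocate missing node pages on the way
 * down, which is why the comment below requires f2fs_lock_op().
 */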
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
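/*
 * The truncation helpers below form a small recursion: truncate_dnode()
 * frees one direct node, truncate_nodes() walks an (double-)indirect
 * node freeing its children, and truncate_partial_nodes() handles the
 * partially-truncated edge of the tree.  Positive return values count
 * freed nodes (so callers can advance nofs); negative values are errors.
 */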
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
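/*
 * Note: the xattr node is addressed by i_xattr_nid and sits outside the
 * block tree walked above, so it needs the dedicated helper below
 * rather than f2fs_truncate_inode_blocks().
 */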
/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}
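/*
 * Sketch of the allocation protocol around f2fs_new_node_page(), as
 * used by f2fs_get_dnode_of_data() above (illustrative, error handling
 * trimmed):
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = f2fs_new_node_page(dn, ofs);
 *	if (IS_ERR(page))
 *		f2fs_alloc_nid_failed(sbi, nid);  // give the nid back
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);    // commit the nid
 */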
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Callers should release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}
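/*
 * Typical node page access pattern (as in callers throughout f2fs):
 *
 *	page = f2fs_get_node_page(sbi, nid);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// page is locked and verified (uptodate, checksum, footer nid)
 *	f2fs_put_page(page, 1);
 *
 * __get_node_page() below retries the grab if the page was truncated
 * from the mapping while it slept on lock_page().
 */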
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			ClearPageUptodate(page);
			dec_page_count(sbi, F2FS_DIRTY_NODES);
			unlock_page(page);
			return 0;
		}
		goto redirty_out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
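/*
 * When __write_node_page() is called with atomic=true (the fsync path
 * writing its last dnode), it tags the bio with REQ_PREFLUSH|REQ_FUA so
 * the node reaches stable storage before fsync returns, unless the
 * "nobarrier" mount option promises that ordering elsewhere.
 */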
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}
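/*
 * f2fs_write_node_pages() below is the ->writepages entry point for the
 * node mapping.  wb_sync_req[NODE] gives WB_SYNC_ALL writers priority:
 * while a sync writeback is in flight, background (WB_SYNC_NONE)
 * writeback skips node pages entirely rather than competing for them.
 */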
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}
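/*
 * Free nids are tracked with one bitmap per NAT block.  With 4KB blocks
 * a struct f2fs_nat_entry is 9 bytes, so NAT_ENTRY_PER_BLOCK is 455 and
 * a nid maps to its bitmap slot as (illustrative arithmetic):
 *
 *	nat_ofs = nid / 455;	// NAT_BLOCK_OFFSET(nid)
 *	nid_ofs = nid % 455;	// nid - START_NID(nid)
 *
 * free_nid_bitmap[nat_ofs] then records which of those nids are free,
 * and free_nid_count[nat_ofs] caches the population count.
 */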
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return true if the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                         - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

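/*
 * Scan one on-disk NAT block and record which of its nids are free
 * (block_addr == NULL_ADDR). Finding NEW_ADDR on disk means the NAT
 * is inconsistent, which the caller reports as corruption.
 */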
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}

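/*
 * Refill the FREE_NID pool from three sources, cheapest first: the
 * in-memory free_nid_bitmap, then up to FREE_NID_PAGES raw NAT blocks
 * starting at next_scan_nid, and finally the NAT journal in the
 * current hot data summary block.
 */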
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* move the scan cursor to the next NAT pages to find more free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns true, the caller can obtain a new nid from
 * the second parameter. The returned nid can be used as an ino as well
 * as a nid when an inode is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

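/*
 * Every successful f2fs_alloc_nid() must be paired with exactly one of
 * f2fs_alloc_nid_done() or f2fs_alloc_nid_failed(). A typical caller
 * looks like this (illustrative sketch only; use_the_nid() stands for
 * whatever consumes the nid, e.g. f2fs_new_node_page()):
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	err = use_the_nid(...);
 *	if (err)
 *		f2fs_alloc_nid_failed(sbi, nid);  (nid goes back to the pool)
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);    (nid is consumed)
 *
 * f2fs_recover_xattr_data() below follows exactly this pattern.
 */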
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

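/*
 * Roll-forward recovery of an xattr node: drop the stale pre-crash
 * xattr block (step 1), allocate a fresh nid and node page for it
 * (step 2), then copy the recovered xattr payload in and dirty the
 * page (step 3). The numbered comments below mark each step.
 */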
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}

int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

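/*
 * Rebuild the summary entries of a node segment by reading every node
 * block in it and recording the nid from each block's footer; version
 * and ofs_in_node are meaningless for node blocks and are zeroed.
 */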
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in the journal has not been used since the
		 * last checkpoint, remove it from the available nids here,
		 * since we will add it back when the entry is flushed.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}
	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}

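/*
 * A dirty set goes to the NAT journal only when it still fits there
 * and nat_bits are not being maintained; otherwise the whole set is
 * rewritten into its on-disk NAT block and the full/empty nat_bits
 * for that block are refreshed via __update_nat_bits().
 */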
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to the journal in the current hot data
	 *     summary block.
	 * #2, flush nat entries to the nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

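/*
 * __adjust_nat_entry_set() keeps the pending list sorted by entry_cnt,
 * smallest set first, so that as many small sets as possible fit into
 * the journal before the remainder falls back to rewriting whole NAT
 * blocks in the flush loop below.
 */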
/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (enabled_nat_bits(sbi, cpc)) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store the dirty
	 * nat entries, remove all entries from the journal and merge them
	 * into the nat entry sets.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

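/*
 * NAT geometry, derived from the raw superblock (sketch):
 *
 *	nat_blocks = (segment_count_nat / 2) << log_blocks_per_seg;
 *	max_nid    = NAT_ENTRY_PER_BLOCK * nat_blocks;
 *
 * Only half of the NAT segments back the live copy; the other half is
 * the shadow copy that get_next_nat_page() toggles between.
 */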
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

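/*
 * Teardown mirrors f2fs_build_node_manager(): drain the free nid list,
 * then the nat cache, then the nat set cache, and finally free the
 * bitmaps allocated by init_node_manager()/init_free_nid_cache().
 */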
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}