// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compress
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page_folio(page));
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}
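/*
 * Dirty nat entries are grouped into nat_entry_set objects, one per
 * on-disk NAT block, so that checkpoint can write back a whole NAT
 * block (or journal a small set of entries) at a time.
 */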
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
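/*
 * Each node page written for fsync gets a monotonically increasing
 * seq_id from the entry above; f2fs_wait_on_node_pages_writeback()
 * later waits only for entries up to a given seq_id, so an fsync
 * blocks just on its own node pages.
 */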
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
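/*
 * Note that cache_nat_entry() above deliberately does nothing while a
 * checkpoint holds cp_global_sem, and tolerates allocation failure:
 * the entry can always be re-read from the journal or the NAT page.
 */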
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
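/*
 * f2fs_get_node_info() below resolves a node address in three steps:
 * the in-memory nat cache first, then the NAT journal kept in the hot
 * data curseg summary, and finally the on-disk NAT page; a hit from
 * the latter two is cached for the next lookup.
 */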
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first. This rwsem is on the checkpoint's critical path together
	 * with the nat_tree_lock taken above, so if we fail to grab it here,
	 * drop nat_tree_lock and retry rather than stalling the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * Readahead MAX_RA_NODE node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, (int)NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
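/*
 * Worked example (an assumption for illustration: 4KB blocks with the
 * default layout, i.e. direct_index = 923 and direct_blks =
 * dptrs_per_blk = 1018): for block 923, the first block past the
 * in-inode pointers, get_node_path() returns level 1 with
 * offset[0] = NODE_DIR1_BLOCK, offset[1] = 0 and noffset[1] = 1,
 * i.e. the first node block behind the inode itself.
 */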
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
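/*
 * On -ENOENT the level reached so far is recorded in dn->cur_level and
 * dn->max_level, which lets callers skip the whole missing subtree in
 * one step via f2fs_get_next_page_offset() instead of probing every
 * block offset of a hole.
 */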
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	if (ni.blk_addr != NEW_ADDR &&
	    !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
		f2fs_err_ratelimited(sbi,
			"nat entry is corrupted, run fsck to fix it, ino:%u, "
			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = page_folio(dn->node_page)->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}
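/*
 * Return convention of the truncate helpers: truncate_dnode() returns
 * 1 once the direct node is gone, treating an already-missing node
 * (-ENOENT) the same way, and a negative errno otherwise;
 * truncate_nodes() below returns the number of node blocks it freed,
 * which is NIDS_PER_BLOCK + 1 for a fully freed indirect subtree.
 */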
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = get_nid(dn->inode_page, offset[0], true);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = get_nid(page, offset[0], true);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err == -ENOENT) {
			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_err_ratelimited(sbi,
				"truncate node fail, ino:%lu, nid:%u, "
				"offset[0]:%d, offset[1]:%d, nofs:%d",
				inode->i_ino, dn.nid, offset[0],
				offset[1], nofs);
			err = 0;
		}
		if (err < 0)
			goto fail;
		if (offset[1] == 0 && get_nid(page, offset[0], true)) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			set_nid(page, offset[0], 0, true);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn_ratelimited(sbi,
			"f2fs_new_node_page: inconsistent nat entry, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			new_ni.ino, new_ni.nid, new_ni.blk_addr,
			new_ni.version, new_ni.flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;
fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct folio *folio = page_folio(page);
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (folio_test_uptodate(folio)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			folio_clear_uptodate(folio);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, folio->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		folio_clear_uptodate(folio);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page_folio(page), NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page_folio(page));
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}
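/*
 * __write_node_page() below returns 0 when the page was written out or
 * safely dropped, and AOP_WRITEPAGE_ACTIVATE after redirtying it (e.g.
 * during recovery, or when background writeback meets a cold dnode
 * that must wait for a checkpoint); the page stays locked in the
 * redirty case and callers are expected to unlock it.
 */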
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct folio *folio = page_folio(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(folio, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		folio_unlock(folio);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, folio->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		folio_unlock(folio);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	folio_start_writeback(folio);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	folio_unlock(folio);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	folio_redirty_for_writepage(wbc, folio);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!folio_test_writeback(page_folio(node_page)))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, page_folio(last_page)->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
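/*
 * Note the retry above: if the dnode chosen to carry the fsync mark
 * was written out by somebody else before we could mark it, last_page
 * is redirtied and the scan restarts, so the final write of the chain
 * always carries the mark that roll-forward recovery looks for.
 */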
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct folio_batch fbatch;
	int nr_folios;

	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (!IS_INODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_folios, done = 0;

	folio_batch_init(&fbatch);

next_step:
	index = 0;

	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && flush_dirty_inode(page))
				goto lock_node;
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);

		put_page(page);
	}

	return filemap_check_errors(NODE_MAPPING(sbi));
}
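/*
 * wb_sync_req[NODE] gives WB_SYNC_ALL writers priority: while one is
 * in flight, background (WB_SYNC_NONE) writeback backs off, both in
 * f2fs_sync_node_pages() above and in f2fs_write_node_pages() below.
 */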
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static bool f2fs_dirty_node_folio(struct address_space *mapping,
		struct folio *folio)
{
	trace_f2fs_set_page_dirty(folio, NODE);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(&folio->page))
		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
		set_page_private_reference(&folio->page);
		return true;
	}
	return false;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.dirty_folio	= f2fs_dirty_node_folio,
	.invalidate_folio	= f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

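/*
 * Return true only when every NAT block has been loaded into the free
 * nid bitmap, i.e. the in-memory bitmaps cover the whole on-disk NAT.
 */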
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;
	bool ret = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
			ret = false;
			break;
		}
	}
	f2fs_up_read(&nm_i->nat_tree_lock);

	return ret;
}

static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 * Thread A                Thread B
		 * - f2fs_create
		 *  - f2fs_new_inode
		 *   - f2fs_alloc_nid
		 *    - __insert_nid_to_list(PREALLOC_NID)
		 *                          - f2fs_balance_fs_bg
		 *                           - f2fs_build_free_nids
		 *                            - __f2fs_build_free_nids
		 *                             - scan_nat_page
		 *                              - add_free_nid
		 *                               - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 * - f2fs_alloc_nid_done
		 *  - __remove_nid_from_list(PREALLOC_NID)
		 *                          - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

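/*
 * Scan one on-disk NAT block: entries with NULL_ADDR denote free nids,
 * while a NEW_ADDR entry must never appear on disk and is reported as
 * corruption.
 */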
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EFSCORRUPTED;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);
}

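/*
 * Refill the free nid list: prefer the cached free nid bitmap and the
 * current summary journal, and fall back to scanning up to
 * FREE_NID_PAGES NAT blocks on disk starting at next_scan_nid.
 */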
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	f2fs_down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				f2fs_up_read(&nm_i->nat_tree_lock);

				if (ret == -EFSCORRUPTED) {
					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
					set_sbi_flag(sbi, SBI_NEED_FSCK);
					f2fs_handle_error(sbi,
						ERROR_INCONSISTENT_NAT);
				}

				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* remember where to resume scanning for more free nids next time */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

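/*
 * nid allocation protocol: f2fs_alloc_nid() moves a free nid into the
 * PREALLOC_NID state; the caller must later release it with either
 * f2fs_alloc_nid_done() on success or f2fs_alloc_nid_failed() on failure.
 */
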
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID))
		return false;

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

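/*
 * Roll-forward recovery of the xattr block: invalidate the stale xattr
 * node recorded in the inode, allocate a fresh nid for it, and copy the
 * xattr payload from the fsynced node page.
 */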
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	if (page) {
		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
				VALID_XATTR_BLOCK_SIZE);
		set_page_dirty(xpage);
	}
	f2fs_put_page(xpage, 1);

	return 0;
}

int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		memalloc_retry_wait(GFP_NOFS);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

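/*
 * Rebuild the node summary entries of a segment by reading its node
 * blocks back in and taking the nid from each block's footer.
 */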
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = BLKS_PER_SEG(sbi);
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in the journal has not been used after the
		 * last checkpoint, we should remove it from the available
		 * nids, since we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
							unsigned int valid)
{
	if (valid == 0) {
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
}

static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
			struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}

	__update_nat_bits(nm_i, nat_index, valid);
}

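/*
 * Recompute the full/empty nat_bits of every NAT block from the free nid
 * bitmap, so that an up-to-date copy can be written with the checkpoint.
 */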
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
		unsigned int valid = 0, nid_ofs = 0;

		/* handle nid zero specially because it should never be used */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	f2fs_up_read(&nm_i->nat_tree_lock);
}

static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

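/*
 * Seed the free nid bitmap from the checkpointed nat_bits: all nids in
 * an empty NAT block are free, and both empty and full NAT blocks need
 * no on-disk scan.
 */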
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

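/*
 * Tear down the node manager: drop every cached free nid, nat entry and
 * nat entry set, then free the bitmaps allocated at mount time.
 */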
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}