// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
				PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}
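
/*
 * Write a copy of the current NAT block into the next-version location
 * and switch the nid over to it via set_to_next_nat().
 */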
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in below condition:
	 * 1. update NEW_ADDR to valid block address;
	 * 2. update old block address to new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
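	/* convert the raw on-disk NAT entry into the in-memory node_info */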
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
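 * offset[n] keeps the slot index chosen within each node on the path,
 * while noffset[n] keeps the sequential node block number on that path.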
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
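 * mode is one of LOOKUP_NODE, LOOKUP_NODE_RA and ALLOC_NODE.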
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);
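
	/* the page is gone from the node mapping; clear the stale pointer */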
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
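		/* a nid of zero means this slot holds no child node */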
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;
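
	/* an xattr node records its nid in the owner inode's i_xattr_nid */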
	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act as follows after getting the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}
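
/*
 * Return a locked, up-to-date node page; re-grab the page when it was
 * truncated from the node mapping while we slept. @parent, if given,
 * is only used to readahead sibling node pages.
 */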
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};
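
		/* wait for any in-flight write on this node page to finish */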
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;
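
	/* WB_SYNC_ALL writers get priority; background writeback backs off */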
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
	if (err)
		return err;

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]++;
	if (state == FREE_NID)
		list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);
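
	/* free nid bitmaps are maintained only for NAT blocks scanned so far */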
	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *   - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
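			/* NULL_ADDR means the nid is unallocated and reusable */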

static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}

static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}
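
/*
 * The wrapper below serializes scans with build_lock; the same mutex backs
 * on_f2fs_build_free_nids(), which f2fs_alloc_nid() checks so that it does
 * not pick up stale free nids while a scan is in flight.
 */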

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan the nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
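
/*
 * Illustrative caller pattern for the allocation API above; a sketch only,
 * mirroring f2fs_recover_xattr_data() below. use_nid() is a hypothetical
 * placeholder for whatever consumes the nid:
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	if (use_nid(sbi, nid)) {
 *		f2fs_alloc_nid_failed(sbi, nid);	(puts the nid back)
 *		return -EIO;
 *	}
 *	f2fs_alloc_nid_done(sbi, nid);			(consumes it for good)
 */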

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
			break;

		__remove_free_nid(sbi, i, FREE_NID);
		kmem_cache_free(free_nid_slab, i);
		nr_shrink--;
	}
	spin_unlock(&nm_i->nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		set_inode_flag(inode, FI_INLINE_XATTR);
	} else {
		clear_inode_flag(inode, FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}
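
/*
 * f2fs_recover_inode_page() below rebuilds a minimal on-disk inode during
 * roll-forward recovery: it copies the header of the source inode, resets
 * size/links/xattr, and reserves the nid as an in-use node.
 */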

int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, BIO_MAX_PAGES);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
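
/*
 * Everything from here down to f2fs_flush_nat_entries() implements the
 * checkpoint-time NAT writeback: remove_nats_in_journal() pulls journalled
 * entries back into the dirty NAT cache, __adjust_nat_entry_set() orders
 * dirty sets by size, and __flush_nat_entry_set() writes one set either to
 * the journal or to its NAT block.
 */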

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in the journal has not been used since the
		 * last checkpoint, we should remove it from the available
		 * nids, since we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}
	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
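
/*
 * nat_bits keeps one "full" and one "empty" bit per NAT block; checkpoints
 * persist them so that load_free_nid_bitmap() can mark fully free or fully
 * allocated blocks at mount time without reading the blocks themselves.
 */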

static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two ways to flush nat entries:
	 * #1, flush them to the journal in the current hot data summary block.
	 * #2, flush them to the nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
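
/*
 * Dirty sets are flushed in ascending entry_cnt order (maintained by
 * __adjust_nat_entry_set()), so the sets small enough for the remaining
 * journal space are written there first and only the rest touch NAT pages.
 */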

/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
	if (enabled_nat_bits(sbi, cpc)) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->dirty_nat_cnt)
		return 0;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store the dirty
	 * nat entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
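
/*
 * Layout of the nat_bits area read by __get_nat_bitmaps() above, as
 * reconstructed from the offsets it uses:
 *
 *	+0                  : __le64 checkpoint version | (crc << 32)
 *	+8                  : full_nat_bits  (nat_blocks / 8 bytes)
 *	+8 + nat_bits_bytes : empty_nat_bits (nat_blocks / 8 bytes)
 *
 * The whole area occupies nat_bits_blocks blocks at the end of the
 * checkpoint segment.
 */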

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
					     nm_i->nat_blocks),
			     GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}
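
/*
 * Teardown mirrors f2fs_build_node_manager() in reverse: drain the free
 * nid list, the NAT cache and the NAT set cache, then release the bitmaps
 * allocated by init_free_nid_cache() and init_node_manager().
 */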

void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kvfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}