/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
		if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

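/*
 * Worked example for the thresholds above (illustrative numbers only):
 * with 1 GiB of low memory (avail_ram = 262144 4 KiB pages) and
 * ram_thresh = 10, the budget is 262144 * 10 / 100 = 26214 pages;
 * FREE_NIDS and NAT_ENTRIES may then each occupy 26214 >> 2 = 6553
 * pages (25%), while DIRTY_DENTS, INO_ENTRIES and EXTENT_CACHE may
 * each occupy 26214 >> 1 = 13107 pages (50%).
 */
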
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	percpu_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	percpu_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	percpu_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	percpu_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

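/*
 * A short summary of the NAT flags consulted above and below, as they
 * are used in this file: IS_CHECKPOINTED means the entry's address is
 * already covered by the last checkpoint; HAS_FSYNCED_INODE means the
 * inode itself was written out by the last fsync; HAS_LAST_FSYNC tracks
 * whether the most recent node write for the inode came from an fsync.
 */
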
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	percpu_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	percpu_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	} else {
		f2fs_bug_on(sbi, nat_get_ino(e) != ne->ino ||
				nat_get_blkaddr(e) != ne->block_addr ||
				nat_get_version(e) != ne->version);
	}
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	percpu_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	percpu_up_write(&nm_i->nat_tree_lock);
}

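/*
 * The address transitions enforced by the sanity checks above form a
 * simple lifecycle (summarized here for reference): NULL_ADDR (free)
 * -> NEW_ADDR (allocated but not yet written) -> a real block address
 * (written) -> NULL_ADDR again (truncated). Returning to NULL_ADDR
 * bumps the node version, so a reused nid gets a different version and
 * stale node blocks left on disk can be told apart during recovery.
 */
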
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	percpu_down_write(&nm_i->nat_tree_lock);

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	percpu_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	percpu_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		percpu_up_read(&nm_i->nat_tree_lock);
		return;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	percpu_up_read(&nm_i->nat_tree_lock);
	/* cache nat entry */
	percpu_down_write(&nm_i->nat_tree_lock);
	cache_nat_entry(sbi, nid, &ne);
	percpu_up_write(&nm_i->nat_tree_lock);
}

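/*
 * get_node_info() resolves a nid in three stages, cheapest first: the
 * in-memory NAT cache, then the NAT journal kept in the hot-data
 * summary block, then the on-disk NAT block; the result is cached for
 * the next lookup. A typical caller only needs the address (sketch):
 *
 *	struct node_info ni;
 *
 *	get_node_info(sbi, nid, &ni);
 *	if (ni.blk_addr == NULL_ADDR)
 *		return -ENOENT;		(node freed or never written)
 */
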
/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
	case 2:
		base += 2 * direct_blks;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

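/*
 * A worked example of the mapping above, assuming the common 4 KiB
 * block layout (923 data pointers in the inode, 1018 pointers per
 * direct or indirect node):
 *  - block 500: held by the inode itself; level 0, offset[0] = 500.
 *  - block 923: first block behind NODE_DIR1_BLOCK; level 1,
 *    offset[1] = 0.
 *  - block 2959 (923 + 2 * 1018): first block behind NODE_IND1_BLOCK;
 *    level 2, offset[1] picks the direct node, offset[2] the block.
 */
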
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is not RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

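/*
 * Typical usage of get_dnode_of_data() (a sketch mirroring its callers
 * elsewhere in f2fs): resolve the dnode covering one file offset, read
 * the block address, then release the pages:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 *
 * With ALLOC_NODE, missing node blocks on the path are allocated, so
 * such callers must hold f2fs_lock_op() across the call.
 */
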
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

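/*
 * Return-value convention in the truncation helpers above, noted here
 * because it is easy to misread: a positive return is the number of
 * node blocks consumed, including the node itself, and NIDS_PER_BLOCK
 * + 1 means a whole level-2 subtree (or a hole) was covered; negative
 * values are errors. truncate_inode_blocks() below advances its node
 * offset cursor by these counts.
 */
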
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	f2fs_i_xnid_write(inode, 0);

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The caller should put the page as follows, depending on the return
 * value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};

	if (PageUptodate(page))
		return LOCKED_PAGE;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page)))
		goto out_err;
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_bug_on(sbi, 1);
		ClearPageUptodate(page);
out_err:
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

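/*
 * Design note on __get_node_page() (summarizing the flow above): the
 * page is grabbed locked and read with READ_SYNC; LOCKED_PAGE short-
 * circuits the I/O when the page is already uptodate. While the read
 * is in flight, siblings of the node are readahead through the parent,
 * and the lookup restarts from scratch if the page was truncated from
 * the node mapping in the meantime.
 */
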
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

void move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(sbi, PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index, end;
	struct pagevec pvec;
	struct page *last_page = NULL;

	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

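/*
 * For an atomic fsync (below), only the last dirty dnode of the inode
 * carries the fsync mark; every earlier dnode is written without it,
 * so that roll-forward recovery replays the whole set or nothing. The
 * helper above pins that last page in advance so the marking survives
 * concurrent writeback.
 */
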
int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return -EIO;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						update_inode(inode, page);
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			}
			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
	return ret ? -EIO : 0;
}

int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				return -EIO;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = ULONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.old_blkaddr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	blk_start_plug(&plug);
	sync_node_pages(sbi, wbc);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

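/*
 * Free nid bookkeeping in the functions below (a brief map, for
 * orientation): a candidate nid found by scanning NAT blocks or the
 * NAT journal enters the list as NID_NEW; alloc_nid() hands it out and
 * marks it NID_ALLOC; alloc_nid_done() retires it, while
 * alloc_nid_failed() returns it to NID_NEW (or frees it entirely under
 * memory pressure).
 */
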
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

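/*
 * build_free_nids() below scans FREE_NID_PAGES NAT blocks per call,
 * starting at next_scan_nid, then reconciles the result against the
 * NAT journal, since entries freed there have not reached the NAT
 * blocks yet. The readahead for the next scan window is issued at the
 * end so the following call finds its NAT pages already cached.
 */
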
void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	percpu_down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	up_read(&curseg->journal_rwsem);
	percpu_up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_ALLOC_NID))
		return false;
#endif
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

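/*
 * Typical allocation pattern (a sketch; get_dnode_of_data() above is a
 * real caller following it):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	(nid returns to the list)
 *	else
 *		alloc_nid_done(sbi, nid);	(nid is consumed)
 */
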
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (nm_i->fcnt <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
			break;
		if (i->state == NID_ALLOC)
			continue;
		__del_from_free_nid_list(nm_i, i);
		kmem_cache_free(free_nid_slab, i);
		nm_i->fcnt--;
		nr_shrink--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(inode, FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	f2fs_i_xnid_write(inode, new_xnid);

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_tmp_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}
		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * A dirty set is flushed to one of two places:
	 * #1, the nat journal in the current hot data summary block, if
	 *     the whole set still fits there;
	 * #2, the on-disk nat page otherwise.
	 */
	if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		up_write(&curseg->journal_rwsem);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;

	percpu_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store all dirty
	 * nat entries, remove them all from the journal and merge them
	 * into the nat entry sets.
	 */
	if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	percpu_up_write(&nm_i->nat_tree_lock);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
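/*
 * Editor's note: an illustrative userspace sketch (not kernel code) of
 * the capacity check behind __has_cursum_space() as used above: dirty
 * nat entries go to the journal in the hot data summary block only when
 * they all fit, since that avoids rewriting whole NAT blocks; otherwise
 * the caller falls back to the on-disk nat pages. journal_fits() is a
 * hypothetical helper, and the capacity value is assumed for a 4KB
 * summary block layout.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NAT_JOURNAL_ENTRIES	38	/* assumed journal capacity */

static bool journal_fits(int used, int incoming)
{
	return used + incoming <= MODEL_NAT_JOURNAL_ENTRIES;
}

int main(void)
{
	int used = 30;	/* entries already sitting in the journal */

	/* 5 more entries still fit; 20 force a flush to nat pages */
	printf("+5  -> %s\n", journal_fits(used, 5) ? "journal" : "nat page");
	printf("+20 -> %s\n", journal_fits(used, 20) ? "journal" : "nat page");
	return 0;
}
#endif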
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta (and root, counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	if (percpu_init_rwsem(&nm_i->nat_tree_lock))
		return -ENOMEM;

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	percpu_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt may be non-zero only if a checkpoint error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	percpu_up_write(&nm_i->nat_tree_lock);

	percpu_free_rwsem(&nm_i->nat_tree_lock);
	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
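/*
 * Editor's note: a worked example (not kernel code) of the geometry
 * arithmetic in init_node_manager() above. NAT segments are laid out in
 * pairs (a primary and a shadow copy for the two-copy NAT scheme), so
 * segment_count_nat is halved before deriving block and nid counts. The
 * superblock values below are hypothetical; the per-block entry count
 * assumes a 4KB block and the 9-byte on-disk nat entry.
 */
#if 0
#include <stdio.h>

#define MODEL_NAT_ENTRY_PER_BLOCK	455	/* 4096 / 9 */

int main(void)
{
	unsigned int segment_count_nat = 8;	/* assumed superblock field */
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned int nat_segs, nat_blocks, max_nid;

	nat_segs = segment_count_nat >> 1;		/* 4 usable segments */
	nat_blocks = nat_segs << log_blocks_per_seg;	/* 2048 NAT blocks */
	max_nid = MODEL_NAT_ENTRY_PER_BLOCK * nat_blocks;

	printf("max_nid = %u\n", max_nid);		/* 931840 */
	return 0;
}
#endif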
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}
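/*
 * Editor's note: a minimal userspace sketch (not kernel code) of the
 * goto-unwind ladder used by create_node_manager_caches() above:
 * resources are released in reverse order of acquisition, so a failure
 * part-way through never leaks an earlier allocation, and the teardown
 * path mirrors destroy_node_manager_caches(). The cache_a/b/c names and
 * malloc stand-ins are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void *cache_a, *cache_b, *cache_c;

static int create_caches(void)
{
	cache_a = malloc(64);
	if (!cache_a)
		goto fail;
	cache_b = malloc(64);
	if (!cache_b)
		goto destroy_a;
	cache_c = malloc(64);
	if (!cache_c)
		goto destroy_b;
	return 0;

destroy_b:
	free(cache_b);
destroy_a:
	free(cache_a);
fail:
	return -1;
}

static void destroy_caches(void)
{
	/* reverse order of creation */
	free(cache_c);
	free(cache_b);
	free(cache_a);
}

int main(void)
{
	if (create_caches())
		return 1;
	destroy_caches();
	printf("caches created and destroyed cleanly\n");
	return 0;
}
#endif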