/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (err) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

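/*
 * Submit any bio left merged in the per-type write queue (or the read
 * queue). In the checkpoint path, META is upgraded to META_FLUSH so the
 * request carries the flush/FUA semantics selected by the mount options.
 */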
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
				fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

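/*
 * Reserve one new block at @index: look up (or allocate) the dnode and, if
 * the slot is still NULL_ADDR, mark it NEW_ADDR via reserve_new_block().
 * On error, or when the caller did not pass in its own inode page, the
 * dnode is put before returning.
 */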
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}

static bool update_extent_info(struct inode *inode, pgoff_t fofs,
								block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 && fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}

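/*
 * The functions below implement the rb-tree based extent cache (mount
 * option "extent_cache"): each inode may own an extent_tree, keyed by file
 * offset, whose extent_nodes record contiguous fofs -> blkaddr mappings.
 * Nodes also sit on the global sbi->extent_list in LRU order so the
 * shrinker can reclaim them under memory pressure.
 */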
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
{
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return NULL;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	return et;
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	return et;
}

static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}

static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}

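/*
 * Insert @ei into the tree, merging with an adjacent node on either side
 * when the offsets and block addresses line up. If a merge collapses two
 * nodes into one, the detached node is handed back through @den so the
 * caller can drop it from the LRU list and free it outside the rb-tree
 * walk.
 */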
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);
	if (et->count)
		goto out;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		et->cached_en = en;

		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		return false;

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}

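/*
 * Record the new fofs -> blkaddr mapping in the tree. Any extent covering
 * @fofs is removed first and its remaining left/right parts are re-inserted
 * (only if each part is still at least F2FS_MIN_EXTENT_LEN blocks long);
 * then a one-block extent for @fofs is inserted, which may merge with its
 * neighbors. The LRU list is updated accordingly under sbi->extent_lock.
 */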
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk + 1, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 are split from en; they become smaller and smaller
	 * fragments after several splits, so extents shorter than
	 * F2FS_MIN_EXTENT_LEN are not re-inserted into the extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

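/*
 * Copy the most representative extent (the cached one, or else an end of
 * the tree) back into the inode's single in-memory extent so it can be
 * written to the on-disk inode; clear it when the tree is empty. The inode
 * page is updated only when something actually changed.
 */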
void f2fs_preserve_extent_tree(struct inode *inode)
{
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;
	bool sync = false;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return;

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
	if (!et) {
		if (ext->len) {
			ext->len = 0;
			update_inode_page(inode);
		}
		return;
	}

	read_lock(&et->lock);
	if (et->count) {
		struct extent_node *en;

		if (et->cached_en) {
			en = et->cached_en;
		} else {
			struct rb_node *node = rb_first(&et->root);

			if (!node)
				node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);
		}

		if (__is_extent_same(ext, &en->ei))
			goto out;

		*ext = en->ei;
		sync = true;
	} else if (ext->len) {
		ext->len = 0;
		sync = true;
	}
out:
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	if (sync)
		update_inode_page(inode);
}

void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		goto out;

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
	return;
}

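/*
 * The wrappers below select between the two extent cache implementations:
 * with the "extent_cache" mount option the per-inode rb-tree is used,
 * otherwise lookups and updates fall back to the legacy single largest
 * extent kept in f2fs_inode_info.
 */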
void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
}

static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

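/*
 * Grab the page cache page at @index and start a read for it if needed.
 * The block address comes from the extent cache when possible, otherwise
 * from the dnode; a hole returns -ENOENT and a NEW_ADDR block is simply
 * zero-filled. If a real read was submitted, the page comes back locked
 * and the read end_io handler unlocks it on completion.
 */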
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr stays NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		return ERR_PTR(err);
	return page;
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}

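/*
 * Preallocate on-disk blocks for a direct IO write over [offset,
 * offset + count): walk the dnodes covering the range and allocate a new
 * block for every NULL_ADDR/NEW_ADDR slot, so the actual direct IO can be
 * issued against already-mapped addresses.
 */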
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO through the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, bool fiemap)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
		if (dn.data_blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	map->m_len = 1;
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

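/*
 * Translate an f2fs_map_blocks() result into the buffer_head interface
 * used by the generic block helpers (direct IO, bmap, fiemap): map the
 * physical block, copy the F2FS_MAP_* flags into b_state, and clamp
 * b_size to the mapped length.
 */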
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, bool fiemap)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, fiemap);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

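/*
 * Walk the file with get_data_block_fiemap() and emit one fiemap extent
 * per run of mapped blocks: holes end the current extent, NEW_ADDR ranges
 * are reported as FIEMAP_EXTENT_UNWRITTEN, and the last extent before EOF
 * is marked with FIEMAP_EXTENT_LAST.
 */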
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for
 * f2fs. The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

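/*
 * Write one data page to its final location. When SSR allocation is in
 * effect and the block already exists on disk, the page is rewritten in
 * place (IPU); otherwise a new block is allocated out-of-place (OPU) and
 * the extent cache is updated with the new address.
 */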
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR, it is better to do in-place
	 * writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed with the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);

	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				goto fail;
			}
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

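/*
 * Direct IO entry point: convert inline data first, fall back to buffered
 * IO for encrypted files and misaligned writes, preallocate blocks for
 * writes, and then hand the request to blockdev_direct_IO() backed by
 * get_data_block().
 */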
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	if (check_direct_IO(inode, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);

		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};