1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/fs.h> 20 #include <linux/pagemap.h> 21 #include <linux/highmem.h> 22 #include <linux/time.h> 23 #include <linux/init.h> 24 #include <linux/string.h> 25 #include <linux/backing-dev.h> 26 #include <linux/mpage.h> 27 #include <linux/falloc.h> 28 #include <linux/swap.h> 29 #include <linux/writeback.h> 30 #include <linux/statfs.h> 31 #include <linux/compat.h> 32 #include <linux/slab.h> 33 #include "ctree.h" 34 #include "disk-io.h" 35 #include "transaction.h" 36 #include "btrfs_inode.h" 37 #include "ioctl.h" 38 #include "print-tree.h" 39 #include "tree-log.h" 40 #include "locking.h" 41 #include "compat.h" 42 43 /* 44 * when auto defrag is enabled we 45 * queue up these defrag structs to remember which 46 * inodes need defragging passes 47 */ 48 struct inode_defrag { 49 struct rb_node rb_node; 50 /* objectid */ 51 u64 ino; 52 /* 53 * transid where the defrag was added, we search for 54 * extents newer than this 55 */ 56 u64 transid; 57 58 /* root objectid */ 59 u64 root; 60 61 /* last offset we were able to defrag */ 62 u64 last_offset; 63 64 /* if we've wrapped around back to zero once already */ 65 int cycled; 66 }; 67 68 /* pop a record for an inode into the defrag tree. The lock 69 * must be held already 70 * 71 * If you're inserting a record for an older transid than an 72 * existing record, the transid already in the tree is lowered 73 * 74 * If an existing record is found the defrag item you 75 * pass in is freed 76 */ 77 static void __btrfs_add_inode_defrag(struct inode *inode, 78 struct inode_defrag *defrag) 79 { 80 struct btrfs_root *root = BTRFS_I(inode)->root; 81 struct inode_defrag *entry; 82 struct rb_node **p; 83 struct rb_node *parent = NULL; 84 85 p = &root->fs_info->defrag_inodes.rb_node; 86 while (*p) { 87 parent = *p; 88 entry = rb_entry(parent, struct inode_defrag, rb_node); 89 90 if (defrag->ino < entry->ino) 91 p = &parent->rb_left; 92 else if (defrag->ino > entry->ino) 93 p = &parent->rb_right; 94 else { 95 /* if we're reinserting an entry for 96 * an old defrag run, make sure to 97 * lower the transid of our existing record 98 */ 99 if (defrag->transid < entry->transid) 100 entry->transid = defrag->transid; 101 if (defrag->last_offset > entry->last_offset) 102 entry->last_offset = defrag->last_offset; 103 goto exists; 104 } 105 } 106 BTRFS_I(inode)->in_defrag = 1; 107 rb_link_node(&defrag->rb_node, parent, p); 108 rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); 109 return; 110 111 exists: 112 kfree(defrag); 113 return; 114 115 } 116 117 /* 118 * insert a defrag record for this inode if auto defrag is 119 * enabled 120 */ 121 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, 122 struct inode *inode) 123 { 124 struct btrfs_root *root = BTRFS_I(inode)->root; 125 struct inode_defrag *defrag; 126 u64 transid; 127 128 if (!btrfs_test_opt(root, AUTO_DEFRAG)) 129 return 0; 130 131 if (btrfs_fs_closing(root->fs_info)) 132 return 0; 133 134 if (BTRFS_I(inode)->in_defrag) 135 return 0; 136 137 if (trans) 138 transid = trans->transid; 139 else 140 transid = BTRFS_I(inode)->root->last_trans; 141 142 defrag = kzalloc(sizeof(*defrag), GFP_NOFS); 143 if (!defrag) 144 return -ENOMEM; 145 146 defrag->ino = btrfs_ino(inode); 147 defrag->transid = transid; 148 defrag->root = root->root_key.objectid; 149 150 spin_lock(&root->fs_info->defrag_inodes_lock); 151 if (!BTRFS_I(inode)->in_defrag) 152 __btrfs_add_inode_defrag(inode, defrag); 153 else 154 kfree(defrag); 155 spin_unlock(&root->fs_info->defrag_inodes_lock); 156 return 0; 157 } 158 159 /* 160 * must be called with the defrag_inodes lock held 161 */ 162 struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, 163 struct rb_node **next) 164 { 165 struct inode_defrag *entry = NULL; 166 struct rb_node *p; 167 struct rb_node *parent = NULL; 168 169 p = info->defrag_inodes.rb_node; 170 while (p) { 171 parent = p; 172 entry = rb_entry(parent, struct inode_defrag, rb_node); 173 174 if (ino < entry->ino) 175 p = parent->rb_left; 176 else if (ino > entry->ino) 177 p = parent->rb_right; 178 else 179 return entry; 180 } 181 182 if (next) { 183 while (parent && ino > entry->ino) { 184 parent = rb_next(parent); 185 entry = rb_entry(parent, struct inode_defrag, rb_node); 186 } 187 *next = parent; 188 } 189 return NULL; 190 } 191 192 /* 193 * run through the list of inodes in the FS that need 194 * defragging 195 */ 196 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) 197 { 198 struct inode_defrag *defrag; 199 struct btrfs_root *inode_root; 200 struct inode *inode; 201 struct rb_node *n; 202 struct btrfs_key key; 203 struct btrfs_ioctl_defrag_range_args range; 204 u64 first_ino = 0; 205 int num_defrag; 206 int defrag_batch = 1024; 207 208 memset(&range, 0, sizeof(range)); 209 range.len = (u64)-1; 210 211 atomic_inc(&fs_info->defrag_running); 212 spin_lock(&fs_info->defrag_inodes_lock); 213 while(1) { 214 n = NULL; 215 216 /* find an inode to defrag */ 217 defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); 218 if (!defrag) { 219 if (n) 220 defrag = rb_entry(n, struct inode_defrag, rb_node); 221 else if (first_ino) { 222 first_ino = 0; 223 continue; 224 } else { 225 break; 226 } 227 } 228 229 /* remove it from the rbtree */ 230 first_ino = defrag->ino + 1; 231 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); 232 233 if (btrfs_fs_closing(fs_info)) 234 goto next_free; 235 236 spin_unlock(&fs_info->defrag_inodes_lock); 237 238 /* get the inode */ 239 key.objectid = defrag->root; 240 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 241 key.offset = (u64)-1; 242 inode_root = btrfs_read_fs_root_no_name(fs_info, &key); 243 if (IS_ERR(inode_root)) 244 goto next; 245 246 key.objectid = defrag->ino; 247 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 248 key.offset = 0; 249 250 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); 251 if (IS_ERR(inode)) 252 goto next; 253 254 /* do a chunk of defrag */ 255 BTRFS_I(inode)->in_defrag = 0; 256 range.start = defrag->last_offset; 257 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, 258 defrag_batch); 259 /* 260 * if we filled the whole defrag batch, there 261 * must be more work to do. Queue this defrag 262 * again 263 */ 264 if (num_defrag == defrag_batch) { 265 defrag->last_offset = range.start; 266 __btrfs_add_inode_defrag(inode, defrag); 267 /* 268 * we don't want to kfree defrag, we added it back to 269 * the rbtree 270 */ 271 defrag = NULL; 272 } else if (defrag->last_offset && !defrag->cycled) { 273 /* 274 * we didn't fill our defrag batch, but 275 * we didn't start at zero. Make sure we loop 276 * around to the start of the file. 277 */ 278 defrag->last_offset = 0; 279 defrag->cycled = 1; 280 __btrfs_add_inode_defrag(inode, defrag); 281 defrag = NULL; 282 } 283 284 iput(inode); 285 next: 286 spin_lock(&fs_info->defrag_inodes_lock); 287 next_free: 288 kfree(defrag); 289 } 290 spin_unlock(&fs_info->defrag_inodes_lock); 291 292 atomic_dec(&fs_info->defrag_running); 293 294 /* 295 * during unmount, we use the transaction_wait queue to 296 * wait for the defragger to stop 297 */ 298 wake_up(&fs_info->transaction_wait); 299 return 0; 300 } 301 302 /* simple helper to fault in pages and copy. This should go away 303 * and be replaced with calls into generic code. 304 */ 305 static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, 306 size_t write_bytes, 307 struct page **prepared_pages, 308 struct iov_iter *i) 309 { 310 size_t copied = 0; 311 size_t total_copied = 0; 312 int pg = 0; 313 int offset = pos & (PAGE_CACHE_SIZE - 1); 314 315 while (write_bytes > 0) { 316 size_t count = min_t(size_t, 317 PAGE_CACHE_SIZE - offset, write_bytes); 318 struct page *page = prepared_pages[pg]; 319 /* 320 * Copy data from userspace to the current page 321 * 322 * Disable pagefault to avoid recursive lock since 323 * the pages are already locked 324 */ 325 pagefault_disable(); 326 copied = iov_iter_copy_from_user_atomic(page, i, offset, count); 327 pagefault_enable(); 328 329 /* Flush processor's dcache for this page */ 330 flush_dcache_page(page); 331 332 /* 333 * if we get a partial write, we can end up with 334 * partially up to date pages. These add 335 * a lot of complexity, so make sure they don't 336 * happen by forcing this copy to be retried. 337 * 338 * The rest of the btrfs_file_write code will fall 339 * back to page at a time copies after we return 0. 340 */ 341 if (!PageUptodate(page) && copied < count) 342 copied = 0; 343 344 iov_iter_advance(i, copied); 345 write_bytes -= copied; 346 total_copied += copied; 347 348 /* Return to btrfs_file_aio_write to fault page */ 349 if (unlikely(copied == 0)) 350 break; 351 352 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { 353 offset += copied; 354 } else { 355 pg++; 356 offset = 0; 357 } 358 } 359 return total_copied; 360 } 361 362 /* 363 * unlocks pages after btrfs_file_write is done with them 364 */ 365 void btrfs_drop_pages(struct page **pages, size_t num_pages) 366 { 367 size_t i; 368 for (i = 0; i < num_pages; i++) { 369 /* page checked is some magic around finding pages that 370 * have been modified without going through btrfs_set_page_dirty 371 * clear it here 372 */ 373 ClearPageChecked(pages[i]); 374 unlock_page(pages[i]); 375 mark_page_accessed(pages[i]); 376 page_cache_release(pages[i]); 377 } 378 } 379 380 /* 381 * after copy_from_user, pages need to be dirtied and we need to make 382 * sure holes are created between the current EOF and the start of 383 * any next extents (if required). 384 * 385 * this also makes the decision about creating an inline extent vs 386 * doing real data extents, marking pages dirty and delalloc as required. 387 */ 388 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, 389 struct page **pages, size_t num_pages, 390 loff_t pos, size_t write_bytes, 391 struct extent_state **cached) 392 { 393 int err = 0; 394 int i; 395 u64 num_bytes; 396 u64 start_pos; 397 u64 end_of_last_block; 398 u64 end_pos = pos + write_bytes; 399 loff_t isize = i_size_read(inode); 400 401 start_pos = pos & ~((u64)root->sectorsize - 1); 402 num_bytes = (write_bytes + pos - start_pos + 403 root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 404 405 end_of_last_block = start_pos + num_bytes - 1; 406 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 407 cached); 408 if (err) 409 return err; 410 411 for (i = 0; i < num_pages; i++) { 412 struct page *p = pages[i]; 413 SetPageUptodate(p); 414 ClearPageChecked(p); 415 set_page_dirty(p); 416 } 417 418 /* 419 * we've only changed i_size in ram, and we haven't updated 420 * the disk i_size. There is no need to log the inode 421 * at this time. 422 */ 423 if (end_pos > isize) 424 i_size_write(inode, end_pos); 425 return 0; 426 } 427 428 /* 429 * this drops all the extents in the cache that intersect the range 430 * [start, end]. Existing extents are split as required. 431 */ 432 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 433 int skip_pinned) 434 { 435 struct extent_map *em; 436 struct extent_map *split = NULL; 437 struct extent_map *split2 = NULL; 438 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 439 u64 len = end - start + 1; 440 int ret; 441 int testend = 1; 442 unsigned long flags; 443 int compressed = 0; 444 445 WARN_ON(end < start); 446 if (end == (u64)-1) { 447 len = (u64)-1; 448 testend = 0; 449 } 450 while (1) { 451 if (!split) 452 split = alloc_extent_map(); 453 if (!split2) 454 split2 = alloc_extent_map(); 455 BUG_ON(!split || !split2); 456 457 write_lock(&em_tree->lock); 458 em = lookup_extent_mapping(em_tree, start, len); 459 if (!em) { 460 write_unlock(&em_tree->lock); 461 break; 462 } 463 flags = em->flags; 464 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { 465 if (testend && em->start + em->len >= start + len) { 466 free_extent_map(em); 467 write_unlock(&em_tree->lock); 468 break; 469 } 470 start = em->start + em->len; 471 if (testend) 472 len = start + len - (em->start + em->len); 473 free_extent_map(em); 474 write_unlock(&em_tree->lock); 475 continue; 476 } 477 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 478 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 479 remove_extent_mapping(em_tree, em); 480 481 if (em->block_start < EXTENT_MAP_LAST_BYTE && 482 em->start < start) { 483 split->start = em->start; 484 split->len = start - em->start; 485 split->orig_start = em->orig_start; 486 split->block_start = em->block_start; 487 488 if (compressed) 489 split->block_len = em->block_len; 490 else 491 split->block_len = split->len; 492 493 split->bdev = em->bdev; 494 split->flags = flags; 495 split->compress_type = em->compress_type; 496 ret = add_extent_mapping(em_tree, split); 497 BUG_ON(ret); 498 free_extent_map(split); 499 split = split2; 500 split2 = NULL; 501 } 502 if (em->block_start < EXTENT_MAP_LAST_BYTE && 503 testend && em->start + em->len > start + len) { 504 u64 diff = start + len - em->start; 505 506 split->start = start + len; 507 split->len = em->start + em->len - (start + len); 508 split->bdev = em->bdev; 509 split->flags = flags; 510 split->compress_type = em->compress_type; 511 512 if (compressed) { 513 split->block_len = em->block_len; 514 split->block_start = em->block_start; 515 split->orig_start = em->orig_start; 516 } else { 517 split->block_len = split->len; 518 split->block_start = em->block_start + diff; 519 split->orig_start = split->start; 520 } 521 522 ret = add_extent_mapping(em_tree, split); 523 BUG_ON(ret); 524 free_extent_map(split); 525 split = NULL; 526 } 527 write_unlock(&em_tree->lock); 528 529 /* once for us */ 530 free_extent_map(em); 531 /* once for the tree*/ 532 free_extent_map(em); 533 } 534 if (split) 535 free_extent_map(split); 536 if (split2) 537 free_extent_map(split2); 538 return 0; 539 } 540 541 /* 542 * this is very complex, but the basic idea is to drop all extents 543 * in the range start - end. hint_block is filled in with a block number 544 * that would be a good hint to the block allocator for this file. 545 * 546 * If an extent intersects the range but is not entirely inside the range 547 * it is either truncated or split. Anything entirely inside the range 548 * is deleted from the tree. 549 */ 550 int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, 551 u64 start, u64 end, u64 *hint_byte, int drop_cache) 552 { 553 struct btrfs_root *root = BTRFS_I(inode)->root; 554 struct extent_buffer *leaf; 555 struct btrfs_file_extent_item *fi; 556 struct btrfs_path *path; 557 struct btrfs_key key; 558 struct btrfs_key new_key; 559 u64 ino = btrfs_ino(inode); 560 u64 search_start = start; 561 u64 disk_bytenr = 0; 562 u64 num_bytes = 0; 563 u64 extent_offset = 0; 564 u64 extent_end = 0; 565 int del_nr = 0; 566 int del_slot = 0; 567 int extent_type; 568 int recow; 569 int ret; 570 571 if (drop_cache) 572 btrfs_drop_extent_cache(inode, start, end - 1, 0); 573 574 path = btrfs_alloc_path(); 575 if (!path) 576 return -ENOMEM; 577 578 while (1) { 579 recow = 0; 580 ret = btrfs_lookup_file_extent(trans, root, path, ino, 581 search_start, -1); 582 if (ret < 0) 583 break; 584 if (ret > 0 && path->slots[0] > 0 && search_start == start) { 585 leaf = path->nodes[0]; 586 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 587 if (key.objectid == ino && 588 key.type == BTRFS_EXTENT_DATA_KEY) 589 path->slots[0]--; 590 } 591 ret = 0; 592 next_slot: 593 leaf = path->nodes[0]; 594 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 595 BUG_ON(del_nr > 0); 596 ret = btrfs_next_leaf(root, path); 597 if (ret < 0) 598 break; 599 if (ret > 0) { 600 ret = 0; 601 break; 602 } 603 leaf = path->nodes[0]; 604 recow = 1; 605 } 606 607 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 608 if (key.objectid > ino || 609 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) 610 break; 611 612 fi = btrfs_item_ptr(leaf, path->slots[0], 613 struct btrfs_file_extent_item); 614 extent_type = btrfs_file_extent_type(leaf, fi); 615 616 if (extent_type == BTRFS_FILE_EXTENT_REG || 617 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 618 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 619 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 620 extent_offset = btrfs_file_extent_offset(leaf, fi); 621 extent_end = key.offset + 622 btrfs_file_extent_num_bytes(leaf, fi); 623 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 624 extent_end = key.offset + 625 btrfs_file_extent_inline_len(leaf, fi); 626 } else { 627 WARN_ON(1); 628 extent_end = search_start; 629 } 630 631 if (extent_end <= search_start) { 632 path->slots[0]++; 633 goto next_slot; 634 } 635 636 search_start = max(key.offset, start); 637 if (recow) { 638 btrfs_release_path(path); 639 continue; 640 } 641 642 /* 643 * | - range to drop - | 644 * | -------- extent -------- | 645 */ 646 if (start > key.offset && end < extent_end) { 647 BUG_ON(del_nr > 0); 648 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); 649 650 memcpy(&new_key, &key, sizeof(new_key)); 651 new_key.offset = start; 652 ret = btrfs_duplicate_item(trans, root, path, 653 &new_key); 654 if (ret == -EAGAIN) { 655 btrfs_release_path(path); 656 continue; 657 } 658 if (ret < 0) 659 break; 660 661 leaf = path->nodes[0]; 662 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, 663 struct btrfs_file_extent_item); 664 btrfs_set_file_extent_num_bytes(leaf, fi, 665 start - key.offset); 666 667 fi = btrfs_item_ptr(leaf, path->slots[0], 668 struct btrfs_file_extent_item); 669 670 extent_offset += start - key.offset; 671 btrfs_set_file_extent_offset(leaf, fi, extent_offset); 672 btrfs_set_file_extent_num_bytes(leaf, fi, 673 extent_end - start); 674 btrfs_mark_buffer_dirty(leaf); 675 676 if (disk_bytenr > 0) { 677 ret = btrfs_inc_extent_ref(trans, root, 678 disk_bytenr, num_bytes, 0, 679 root->root_key.objectid, 680 new_key.objectid, 681 start - extent_offset); 682 BUG_ON(ret); 683 *hint_byte = disk_bytenr; 684 } 685 key.offset = start; 686 } 687 /* 688 * | ---- range to drop ----- | 689 * | -------- extent -------- | 690 */ 691 if (start <= key.offset && end < extent_end) { 692 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); 693 694 memcpy(&new_key, &key, sizeof(new_key)); 695 new_key.offset = end; 696 btrfs_set_item_key_safe(trans, root, path, &new_key); 697 698 extent_offset += end - key.offset; 699 btrfs_set_file_extent_offset(leaf, fi, extent_offset); 700 btrfs_set_file_extent_num_bytes(leaf, fi, 701 extent_end - end); 702 btrfs_mark_buffer_dirty(leaf); 703 if (disk_bytenr > 0) { 704 inode_sub_bytes(inode, end - key.offset); 705 *hint_byte = disk_bytenr; 706 } 707 break; 708 } 709 710 search_start = extent_end; 711 /* 712 * | ---- range to drop ----- | 713 * | -------- extent -------- | 714 */ 715 if (start > key.offset && end >= extent_end) { 716 BUG_ON(del_nr > 0); 717 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); 718 719 btrfs_set_file_extent_num_bytes(leaf, fi, 720 start - key.offset); 721 btrfs_mark_buffer_dirty(leaf); 722 if (disk_bytenr > 0) { 723 inode_sub_bytes(inode, extent_end - start); 724 *hint_byte = disk_bytenr; 725 } 726 if (end == extent_end) 727 break; 728 729 path->slots[0]++; 730 goto next_slot; 731 } 732 733 /* 734 * | ---- range to drop ----- | 735 * | ------ extent ------ | 736 */ 737 if (start <= key.offset && end >= extent_end) { 738 if (del_nr == 0) { 739 del_slot = path->slots[0]; 740 del_nr = 1; 741 } else { 742 BUG_ON(del_slot + del_nr != path->slots[0]); 743 del_nr++; 744 } 745 746 if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 747 inode_sub_bytes(inode, 748 extent_end - key.offset); 749 extent_end = ALIGN(extent_end, 750 root->sectorsize); 751 } else if (disk_bytenr > 0) { 752 ret = btrfs_free_extent(trans, root, 753 disk_bytenr, num_bytes, 0, 754 root->root_key.objectid, 755 key.objectid, key.offset - 756 extent_offset); 757 BUG_ON(ret); 758 inode_sub_bytes(inode, 759 extent_end - key.offset); 760 *hint_byte = disk_bytenr; 761 } 762 763 if (end == extent_end) 764 break; 765 766 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) { 767 path->slots[0]++; 768 goto next_slot; 769 } 770 771 ret = btrfs_del_items(trans, root, path, del_slot, 772 del_nr); 773 BUG_ON(ret); 774 775 del_nr = 0; 776 del_slot = 0; 777 778 btrfs_release_path(path); 779 continue; 780 } 781 782 BUG_ON(1); 783 } 784 785 if (del_nr > 0) { 786 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 787 BUG_ON(ret); 788 } 789 790 btrfs_free_path(path); 791 return ret; 792 } 793 794 static int extent_mergeable(struct extent_buffer *leaf, int slot, 795 u64 objectid, u64 bytenr, u64 orig_offset, 796 u64 *start, u64 *end) 797 { 798 struct btrfs_file_extent_item *fi; 799 struct btrfs_key key; 800 u64 extent_end; 801 802 if (slot < 0 || slot >= btrfs_header_nritems(leaf)) 803 return 0; 804 805 btrfs_item_key_to_cpu(leaf, &key, slot); 806 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY) 807 return 0; 808 809 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 810 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || 811 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr || 812 btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset || 813 btrfs_file_extent_compression(leaf, fi) || 814 btrfs_file_extent_encryption(leaf, fi) || 815 btrfs_file_extent_other_encoding(leaf, fi)) 816 return 0; 817 818 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 819 if ((*start && *start != key.offset) || (*end && *end != extent_end)) 820 return 0; 821 822 *start = key.offset; 823 *end = extent_end; 824 return 1; 825 } 826 827 /* 828 * Mark extent in the range start - end as written. 829 * 830 * This changes extent type from 'pre-allocated' to 'regular'. If only 831 * part of extent is marked as written, the extent will be split into 832 * two or three. 833 */ 834 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, 835 struct inode *inode, u64 start, u64 end) 836 { 837 struct btrfs_root *root = BTRFS_I(inode)->root; 838 struct extent_buffer *leaf; 839 struct btrfs_path *path; 840 struct btrfs_file_extent_item *fi; 841 struct btrfs_key key; 842 struct btrfs_key new_key; 843 u64 bytenr; 844 u64 num_bytes; 845 u64 extent_end; 846 u64 orig_offset; 847 u64 other_start; 848 u64 other_end; 849 u64 split; 850 int del_nr = 0; 851 int del_slot = 0; 852 int recow; 853 int ret; 854 u64 ino = btrfs_ino(inode); 855 856 btrfs_drop_extent_cache(inode, start, end - 1, 0); 857 858 path = btrfs_alloc_path(); 859 if (!path) 860 return -ENOMEM; 861 again: 862 recow = 0; 863 split = start; 864 key.objectid = ino; 865 key.type = BTRFS_EXTENT_DATA_KEY; 866 key.offset = split; 867 868 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 869 if (ret < 0) 870 goto out; 871 if (ret > 0 && path->slots[0] > 0) 872 path->slots[0]--; 873 874 leaf = path->nodes[0]; 875 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 876 BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); 877 fi = btrfs_item_ptr(leaf, path->slots[0], 878 struct btrfs_file_extent_item); 879 BUG_ON(btrfs_file_extent_type(leaf, fi) != 880 BTRFS_FILE_EXTENT_PREALLOC); 881 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 882 BUG_ON(key.offset > start || extent_end < end); 883 884 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 885 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 886 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi); 887 memcpy(&new_key, &key, sizeof(new_key)); 888 889 if (start == key.offset && end < extent_end) { 890 other_start = 0; 891 other_end = start; 892 if (extent_mergeable(leaf, path->slots[0] - 1, 893 ino, bytenr, orig_offset, 894 &other_start, &other_end)) { 895 new_key.offset = end; 896 btrfs_set_item_key_safe(trans, root, path, &new_key); 897 fi = btrfs_item_ptr(leaf, path->slots[0], 898 struct btrfs_file_extent_item); 899 btrfs_set_file_extent_num_bytes(leaf, fi, 900 extent_end - end); 901 btrfs_set_file_extent_offset(leaf, fi, 902 end - orig_offset); 903 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, 904 struct btrfs_file_extent_item); 905 btrfs_set_file_extent_num_bytes(leaf, fi, 906 end - other_start); 907 btrfs_mark_buffer_dirty(leaf); 908 goto out; 909 } 910 } 911 912 if (start > key.offset && end == extent_end) { 913 other_start = end; 914 other_end = 0; 915 if (extent_mergeable(leaf, path->slots[0] + 1, 916 ino, bytenr, orig_offset, 917 &other_start, &other_end)) { 918 fi = btrfs_item_ptr(leaf, path->slots[0], 919 struct btrfs_file_extent_item); 920 btrfs_set_file_extent_num_bytes(leaf, fi, 921 start - key.offset); 922 path->slots[0]++; 923 new_key.offset = start; 924 btrfs_set_item_key_safe(trans, root, path, &new_key); 925 926 fi = btrfs_item_ptr(leaf, path->slots[0], 927 struct btrfs_file_extent_item); 928 btrfs_set_file_extent_num_bytes(leaf, fi, 929 other_end - start); 930 btrfs_set_file_extent_offset(leaf, fi, 931 start - orig_offset); 932 btrfs_mark_buffer_dirty(leaf); 933 goto out; 934 } 935 } 936 937 while (start > key.offset || end < extent_end) { 938 if (key.offset == start) 939 split = end; 940 941 new_key.offset = split; 942 ret = btrfs_duplicate_item(trans, root, path, &new_key); 943 if (ret == -EAGAIN) { 944 btrfs_release_path(path); 945 goto again; 946 } 947 BUG_ON(ret < 0); 948 949 leaf = path->nodes[0]; 950 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, 951 struct btrfs_file_extent_item); 952 btrfs_set_file_extent_num_bytes(leaf, fi, 953 split - key.offset); 954 955 fi = btrfs_item_ptr(leaf, path->slots[0], 956 struct btrfs_file_extent_item); 957 958 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset); 959 btrfs_set_file_extent_num_bytes(leaf, fi, 960 extent_end - split); 961 btrfs_mark_buffer_dirty(leaf); 962 963 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 964 root->root_key.objectid, 965 ino, orig_offset); 966 BUG_ON(ret); 967 968 if (split == start) { 969 key.offset = start; 970 } else { 971 BUG_ON(start != key.offset); 972 path->slots[0]--; 973 extent_end = end; 974 } 975 recow = 1; 976 } 977 978 other_start = end; 979 other_end = 0; 980 if (extent_mergeable(leaf, path->slots[0] + 1, 981 ino, bytenr, orig_offset, 982 &other_start, &other_end)) { 983 if (recow) { 984 btrfs_release_path(path); 985 goto again; 986 } 987 extent_end = other_end; 988 del_slot = path->slots[0] + 1; 989 del_nr++; 990 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 991 0, root->root_key.objectid, 992 ino, orig_offset); 993 BUG_ON(ret); 994 } 995 other_start = 0; 996 other_end = start; 997 if (extent_mergeable(leaf, path->slots[0] - 1, 998 ino, bytenr, orig_offset, 999 &other_start, &other_end)) { 1000 if (recow) { 1001 btrfs_release_path(path); 1002 goto again; 1003 } 1004 key.offset = other_start; 1005 del_slot = path->slots[0]; 1006 del_nr++; 1007 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1008 0, root->root_key.objectid, 1009 ino, orig_offset); 1010 BUG_ON(ret); 1011 } 1012 if (del_nr == 0) { 1013 fi = btrfs_item_ptr(leaf, path->slots[0], 1014 struct btrfs_file_extent_item); 1015 btrfs_set_file_extent_type(leaf, fi, 1016 BTRFS_FILE_EXTENT_REG); 1017 btrfs_mark_buffer_dirty(leaf); 1018 } else { 1019 fi = btrfs_item_ptr(leaf, del_slot - 1, 1020 struct btrfs_file_extent_item); 1021 btrfs_set_file_extent_type(leaf, fi, 1022 BTRFS_FILE_EXTENT_REG); 1023 btrfs_set_file_extent_num_bytes(leaf, fi, 1024 extent_end - key.offset); 1025 btrfs_mark_buffer_dirty(leaf); 1026 1027 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 1028 BUG_ON(ret); 1029 } 1030 out: 1031 btrfs_free_path(path); 1032 return 0; 1033 } 1034 1035 /* 1036 * on error we return an unlocked page and the error value 1037 * on success we return a locked page and 0 1038 */ 1039 static int prepare_uptodate_page(struct page *page, u64 pos, 1040 bool force_uptodate) 1041 { 1042 int ret = 0; 1043 1044 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && 1045 !PageUptodate(page)) { 1046 ret = btrfs_readpage(NULL, page); 1047 if (ret) 1048 return ret; 1049 lock_page(page); 1050 if (!PageUptodate(page)) { 1051 unlock_page(page); 1052 return -EIO; 1053 } 1054 } 1055 return 0; 1056 } 1057 1058 /* 1059 * this gets pages into the page cache and locks them down, it also properly 1060 * waits for data=ordered extents to finish before allowing the pages to be 1061 * modified. 1062 */ 1063 static noinline int prepare_pages(struct btrfs_root *root, struct file *file, 1064 struct page **pages, size_t num_pages, 1065 loff_t pos, unsigned long first_index, 1066 size_t write_bytes, bool force_uptodate) 1067 { 1068 struct extent_state *cached_state = NULL; 1069 int i; 1070 unsigned long index = pos >> PAGE_CACHE_SHIFT; 1071 struct inode *inode = fdentry(file)->d_inode; 1072 int err = 0; 1073 int faili = 0; 1074 u64 start_pos; 1075 u64 last_pos; 1076 1077 start_pos = pos & ~((u64)root->sectorsize - 1); 1078 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; 1079 1080 again: 1081 for (i = 0; i < num_pages; i++) { 1082 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1083 GFP_NOFS); 1084 if (!pages[i]) { 1085 faili = i - 1; 1086 err = -ENOMEM; 1087 goto fail; 1088 } 1089 1090 if (i == 0) 1091 err = prepare_uptodate_page(pages[i], pos, 1092 force_uptodate); 1093 if (i == num_pages - 1) 1094 err = prepare_uptodate_page(pages[i], 1095 pos + write_bytes, false); 1096 if (err) { 1097 page_cache_release(pages[i]); 1098 faili = i - 1; 1099 goto fail; 1100 } 1101 wait_on_page_writeback(pages[i]); 1102 } 1103 err = 0; 1104 if (start_pos < inode->i_size) { 1105 struct btrfs_ordered_extent *ordered; 1106 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1107 start_pos, last_pos - 1, 0, &cached_state, 1108 GFP_NOFS); 1109 ordered = btrfs_lookup_first_ordered_extent(inode, 1110 last_pos - 1); 1111 if (ordered && 1112 ordered->file_offset + ordered->len > start_pos && 1113 ordered->file_offset < last_pos) { 1114 btrfs_put_ordered_extent(ordered); 1115 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1116 start_pos, last_pos - 1, 1117 &cached_state, GFP_NOFS); 1118 for (i = 0; i < num_pages; i++) { 1119 unlock_page(pages[i]); 1120 page_cache_release(pages[i]); 1121 } 1122 btrfs_wait_ordered_range(inode, start_pos, 1123 last_pos - start_pos); 1124 goto again; 1125 } 1126 if (ordered) 1127 btrfs_put_ordered_extent(ordered); 1128 1129 clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, 1130 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | 1131 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, 1132 GFP_NOFS); 1133 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1134 start_pos, last_pos - 1, &cached_state, 1135 GFP_NOFS); 1136 } 1137 for (i = 0; i < num_pages; i++) { 1138 clear_page_dirty_for_io(pages[i]); 1139 set_page_extent_mapped(pages[i]); 1140 WARN_ON(!PageLocked(pages[i])); 1141 } 1142 return 0; 1143 fail: 1144 while (faili >= 0) { 1145 unlock_page(pages[faili]); 1146 page_cache_release(pages[faili]); 1147 faili--; 1148 } 1149 return err; 1150 1151 } 1152 1153 static noinline ssize_t __btrfs_buffered_write(struct file *file, 1154 struct iov_iter *i, 1155 loff_t pos) 1156 { 1157 struct inode *inode = fdentry(file)->d_inode; 1158 struct btrfs_root *root = BTRFS_I(inode)->root; 1159 struct page **pages = NULL; 1160 unsigned long first_index; 1161 size_t num_written = 0; 1162 int nrptrs; 1163 int ret = 0; 1164 bool force_page_uptodate = false; 1165 1166 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / 1167 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / 1168 (sizeof(struct page *))); 1169 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 1170 if (!pages) 1171 return -ENOMEM; 1172 1173 first_index = pos >> PAGE_CACHE_SHIFT; 1174 1175 while (iov_iter_count(i) > 0) { 1176 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1177 size_t write_bytes = min(iov_iter_count(i), 1178 nrptrs * (size_t)PAGE_CACHE_SIZE - 1179 offset); 1180 size_t num_pages = (write_bytes + offset + 1181 PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1182 size_t dirty_pages; 1183 size_t copied; 1184 1185 WARN_ON(num_pages > nrptrs); 1186 1187 /* 1188 * Fault pages before locking them in prepare_pages 1189 * to avoid recursive lock 1190 */ 1191 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) { 1192 ret = -EFAULT; 1193 break; 1194 } 1195 1196 ret = btrfs_delalloc_reserve_space(inode, 1197 num_pages << PAGE_CACHE_SHIFT); 1198 if (ret) 1199 break; 1200 1201 /* 1202 * This is going to setup the pages array with the number of 1203 * pages we want, so we don't really need to worry about the 1204 * contents of pages from loop to loop 1205 */ 1206 ret = prepare_pages(root, file, pages, num_pages, 1207 pos, first_index, write_bytes, 1208 force_page_uptodate); 1209 if (ret) { 1210 btrfs_delalloc_release_space(inode, 1211 num_pages << PAGE_CACHE_SHIFT); 1212 break; 1213 } 1214 1215 copied = btrfs_copy_from_user(pos, num_pages, 1216 write_bytes, pages, i); 1217 1218 /* 1219 * if we have trouble faulting in the pages, fall 1220 * back to one page at a time 1221 */ 1222 if (copied < write_bytes) 1223 nrptrs = 1; 1224 1225 if (copied == 0) { 1226 force_page_uptodate = true; 1227 dirty_pages = 0; 1228 } else { 1229 force_page_uptodate = false; 1230 dirty_pages = (copied + offset + 1231 PAGE_CACHE_SIZE - 1) >> 1232 PAGE_CACHE_SHIFT; 1233 } 1234 1235 /* 1236 * If we had a short copy we need to release the excess delaloc 1237 * bytes we reserved. We need to increment outstanding_extents 1238 * because btrfs_delalloc_release_space will decrement it, but 1239 * we still have an outstanding extent for the chunk we actually 1240 * managed to copy. 1241 */ 1242 if (num_pages > dirty_pages) { 1243 if (copied > 0) { 1244 spin_lock(&BTRFS_I(inode)->lock); 1245 BTRFS_I(inode)->outstanding_extents++; 1246 spin_unlock(&BTRFS_I(inode)->lock); 1247 } 1248 btrfs_delalloc_release_space(inode, 1249 (num_pages - dirty_pages) << 1250 PAGE_CACHE_SHIFT); 1251 } 1252 1253 if (copied > 0) { 1254 ret = btrfs_dirty_pages(root, inode, pages, 1255 dirty_pages, pos, copied, 1256 NULL); 1257 if (ret) { 1258 btrfs_delalloc_release_space(inode, 1259 dirty_pages << PAGE_CACHE_SHIFT); 1260 btrfs_drop_pages(pages, num_pages); 1261 break; 1262 } 1263 } 1264 1265 btrfs_drop_pages(pages, num_pages); 1266 1267 cond_resched(); 1268 1269 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1270 dirty_pages); 1271 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1) 1272 btrfs_btree_balance_dirty(root, 1); 1273 btrfs_throttle(root); 1274 1275 pos += copied; 1276 num_written += copied; 1277 } 1278 1279 kfree(pages); 1280 1281 return num_written ? num_written : ret; 1282 } 1283 1284 static ssize_t __btrfs_direct_write(struct kiocb *iocb, 1285 const struct iovec *iov, 1286 unsigned long nr_segs, loff_t pos, 1287 loff_t *ppos, size_t count, size_t ocount) 1288 { 1289 struct file *file = iocb->ki_filp; 1290 struct inode *inode = fdentry(file)->d_inode; 1291 struct iov_iter i; 1292 ssize_t written; 1293 ssize_t written_buffered; 1294 loff_t endbyte; 1295 int err; 1296 1297 written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, 1298 count, ocount); 1299 1300 /* 1301 * the generic O_DIRECT will update in-memory i_size after the 1302 * DIOs are done. But our endio handlers that update the on 1303 * disk i_size never update past the in memory i_size. So we 1304 * need one more update here to catch any additions to the 1305 * file 1306 */ 1307 if (inode->i_size != BTRFS_I(inode)->disk_i_size) { 1308 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 1309 mark_inode_dirty(inode); 1310 } 1311 1312 if (written < 0 || written == count) 1313 return written; 1314 1315 pos += written; 1316 count -= written; 1317 iov_iter_init(&i, iov, nr_segs, count, written); 1318 written_buffered = __btrfs_buffered_write(file, &i, pos); 1319 if (written_buffered < 0) { 1320 err = written_buffered; 1321 goto out; 1322 } 1323 endbyte = pos + written_buffered - 1; 1324 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); 1325 if (err) 1326 goto out; 1327 written += written_buffered; 1328 *ppos = pos + written_buffered; 1329 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, 1330 endbyte >> PAGE_CACHE_SHIFT); 1331 out: 1332 return written ? written : err; 1333 } 1334 1335 static ssize_t btrfs_file_aio_write(struct kiocb *iocb, 1336 const struct iovec *iov, 1337 unsigned long nr_segs, loff_t pos) 1338 { 1339 struct file *file = iocb->ki_filp; 1340 struct inode *inode = fdentry(file)->d_inode; 1341 struct btrfs_root *root = BTRFS_I(inode)->root; 1342 loff_t *ppos = &iocb->ki_pos; 1343 u64 start_pos; 1344 ssize_t num_written = 0; 1345 ssize_t err = 0; 1346 size_t count, ocount; 1347 1348 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 1349 1350 mutex_lock(&inode->i_mutex); 1351 1352 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); 1353 if (err) { 1354 mutex_unlock(&inode->i_mutex); 1355 goto out; 1356 } 1357 count = ocount; 1358 1359 current->backing_dev_info = inode->i_mapping->backing_dev_info; 1360 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 1361 if (err) { 1362 mutex_unlock(&inode->i_mutex); 1363 goto out; 1364 } 1365 1366 if (count == 0) { 1367 mutex_unlock(&inode->i_mutex); 1368 goto out; 1369 } 1370 1371 err = file_remove_suid(file); 1372 if (err) { 1373 mutex_unlock(&inode->i_mutex); 1374 goto out; 1375 } 1376 1377 /* 1378 * If BTRFS flips readonly due to some impossible error 1379 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), 1380 * although we have opened a file as writable, we have 1381 * to stop this write operation to ensure FS consistency. 1382 */ 1383 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 1384 mutex_unlock(&inode->i_mutex); 1385 err = -EROFS; 1386 goto out; 1387 } 1388 1389 file_update_time(file); 1390 BTRFS_I(inode)->sequence++; 1391 1392 start_pos = round_down(pos, root->sectorsize); 1393 if (start_pos > i_size_read(inode)) { 1394 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); 1395 if (err) { 1396 mutex_unlock(&inode->i_mutex); 1397 goto out; 1398 } 1399 } 1400 1401 if (unlikely(file->f_flags & O_DIRECT)) { 1402 num_written = __btrfs_direct_write(iocb, iov, nr_segs, 1403 pos, ppos, count, ocount); 1404 } else { 1405 struct iov_iter i; 1406 1407 iov_iter_init(&i, iov, nr_segs, count, num_written); 1408 1409 num_written = __btrfs_buffered_write(file, &i, pos); 1410 if (num_written > 0) 1411 *ppos = pos + num_written; 1412 } 1413 1414 mutex_unlock(&inode->i_mutex); 1415 1416 /* 1417 * we want to make sure fsync finds this change 1418 * but we haven't joined a transaction running right now. 1419 * 1420 * Later on, someone is sure to update the inode and get the 1421 * real transid recorded. 1422 * 1423 * We set last_trans now to the fs_info generation + 1, 1424 * this will either be one more than the running transaction 1425 * or the generation used for the next transaction if there isn't 1426 * one running right now. 1427 */ 1428 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; 1429 if (num_written > 0 || num_written == -EIOCBQUEUED) { 1430 err = generic_write_sync(file, pos, num_written); 1431 if (err < 0 && num_written > 0) 1432 num_written = err; 1433 } 1434 out: 1435 current->backing_dev_info = NULL; 1436 return num_written ? num_written : err; 1437 } 1438 1439 int btrfs_release_file(struct inode *inode, struct file *filp) 1440 { 1441 /* 1442 * ordered_data_close is set by settattr when we are about to truncate 1443 * a file from a non-zero size to a zero size. This tries to 1444 * flush down new bytes that may have been written if the 1445 * application were using truncate to replace a file in place. 1446 */ 1447 if (BTRFS_I(inode)->ordered_data_close) { 1448 BTRFS_I(inode)->ordered_data_close = 0; 1449 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode); 1450 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 1451 filemap_flush(inode->i_mapping); 1452 } 1453 if (filp->private_data) 1454 btrfs_ioctl_trans_end(filp); 1455 return 0; 1456 } 1457 1458 /* 1459 * fsync call for both files and directories. This logs the inode into 1460 * the tree log instead of forcing full commits whenever possible. 1461 * 1462 * It needs to call filemap_fdatawait so that all ordered extent updates are 1463 * in the metadata btree are up to date for copying to the log. 1464 * 1465 * It drops the inode mutex before doing the tree log commit. This is an 1466 * important optimization for directories because holding the mutex prevents 1467 * new operations on the dir while we write to disk. 1468 */ 1469 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 1470 { 1471 struct dentry *dentry = file->f_path.dentry; 1472 struct inode *inode = dentry->d_inode; 1473 struct btrfs_root *root = BTRFS_I(inode)->root; 1474 int ret = 0; 1475 struct btrfs_trans_handle *trans; 1476 1477 trace_btrfs_sync_file(file, datasync); 1478 1479 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1480 if (ret) 1481 return ret; 1482 mutex_lock(&inode->i_mutex); 1483 1484 /* we wait first, since the writeback may change the inode */ 1485 root->log_batch++; 1486 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1487 root->log_batch++; 1488 1489 /* 1490 * check the transaction that last modified this inode 1491 * and see if its already been committed 1492 */ 1493 if (!BTRFS_I(inode)->last_trans) { 1494 mutex_unlock(&inode->i_mutex); 1495 goto out; 1496 } 1497 1498 /* 1499 * if the last transaction that changed this file was before 1500 * the current transaction, we can bail out now without any 1501 * syncing 1502 */ 1503 smp_mb(); 1504 if (BTRFS_I(inode)->last_trans <= 1505 root->fs_info->last_trans_committed) { 1506 BTRFS_I(inode)->last_trans = 0; 1507 mutex_unlock(&inode->i_mutex); 1508 goto out; 1509 } 1510 1511 /* 1512 * ok we haven't committed the transaction yet, lets do a commit 1513 */ 1514 if (file->private_data) 1515 btrfs_ioctl_trans_end(file); 1516 1517 trans = btrfs_start_transaction(root, 0); 1518 if (IS_ERR(trans)) { 1519 ret = PTR_ERR(trans); 1520 mutex_unlock(&inode->i_mutex); 1521 goto out; 1522 } 1523 1524 ret = btrfs_log_dentry_safe(trans, root, dentry); 1525 if (ret < 0) { 1526 mutex_unlock(&inode->i_mutex); 1527 goto out; 1528 } 1529 1530 /* we've logged all the items and now have a consistent 1531 * version of the file in the log. It is possible that 1532 * someone will come in and modify the file, but that's 1533 * fine because the log is consistent on disk, and we 1534 * have references to all of the file's extents 1535 * 1536 * It is possible that someone will come in and log the 1537 * file again, but that will end up using the synchronization 1538 * inside btrfs_sync_log to keep things safe. 1539 */ 1540 mutex_unlock(&inode->i_mutex); 1541 1542 if (ret != BTRFS_NO_LOG_SYNC) { 1543 if (ret > 0) { 1544 ret = btrfs_commit_transaction(trans, root); 1545 } else { 1546 ret = btrfs_sync_log(trans, root); 1547 if (ret == 0) 1548 ret = btrfs_end_transaction(trans, root); 1549 else 1550 ret = btrfs_commit_transaction(trans, root); 1551 } 1552 } else { 1553 ret = btrfs_end_transaction(trans, root); 1554 } 1555 out: 1556 return ret > 0 ? -EIO : ret; 1557 } 1558 1559 static const struct vm_operations_struct btrfs_file_vm_ops = { 1560 .fault = filemap_fault, 1561 .page_mkwrite = btrfs_page_mkwrite, 1562 }; 1563 1564 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) 1565 { 1566 struct address_space *mapping = filp->f_mapping; 1567 1568 if (!mapping->a_ops->readpage) 1569 return -ENOEXEC; 1570 1571 file_accessed(filp); 1572 vma->vm_ops = &btrfs_file_vm_ops; 1573 vma->vm_flags |= VM_CAN_NONLINEAR; 1574 1575 return 0; 1576 } 1577 1578 static long btrfs_fallocate(struct file *file, int mode, 1579 loff_t offset, loff_t len) 1580 { 1581 struct inode *inode = file->f_path.dentry->d_inode; 1582 struct extent_state *cached_state = NULL; 1583 u64 cur_offset; 1584 u64 last_byte; 1585 u64 alloc_start; 1586 u64 alloc_end; 1587 u64 alloc_hint = 0; 1588 u64 locked_end; 1589 u64 mask = BTRFS_I(inode)->root->sectorsize - 1; 1590 struct extent_map *em; 1591 int ret; 1592 1593 alloc_start = offset & ~mask; 1594 alloc_end = (offset + len + mask) & ~mask; 1595 1596 /* We only support the FALLOC_FL_KEEP_SIZE mode */ 1597 if (mode & ~FALLOC_FL_KEEP_SIZE) 1598 return -EOPNOTSUPP; 1599 1600 /* 1601 * wait for ordered IO before we have any locks. We'll loop again 1602 * below with the locks held. 1603 */ 1604 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); 1605 1606 mutex_lock(&inode->i_mutex); 1607 ret = inode_newsize_ok(inode, alloc_end); 1608 if (ret) 1609 goto out; 1610 1611 if (alloc_start > inode->i_size) { 1612 ret = btrfs_cont_expand(inode, i_size_read(inode), 1613 alloc_start); 1614 if (ret) 1615 goto out; 1616 } 1617 1618 ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); 1619 if (ret) 1620 goto out; 1621 1622 locked_end = alloc_end - 1; 1623 while (1) { 1624 struct btrfs_ordered_extent *ordered; 1625 1626 /* the extent lock is ordered inside the running 1627 * transaction 1628 */ 1629 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, 1630 locked_end, 0, &cached_state, GFP_NOFS); 1631 ordered = btrfs_lookup_first_ordered_extent(inode, 1632 alloc_end - 1); 1633 if (ordered && 1634 ordered->file_offset + ordered->len > alloc_start && 1635 ordered->file_offset < alloc_end) { 1636 btrfs_put_ordered_extent(ordered); 1637 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1638 alloc_start, locked_end, 1639 &cached_state, GFP_NOFS); 1640 /* 1641 * we can't wait on the range with the transaction 1642 * running or with the extent lock held 1643 */ 1644 btrfs_wait_ordered_range(inode, alloc_start, 1645 alloc_end - alloc_start); 1646 } else { 1647 if (ordered) 1648 btrfs_put_ordered_extent(ordered); 1649 break; 1650 } 1651 } 1652 1653 cur_offset = alloc_start; 1654 while (1) { 1655 u64 actual_end; 1656 1657 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 1658 alloc_end - cur_offset, 0); 1659 BUG_ON(IS_ERR_OR_NULL(em)); 1660 last_byte = min(extent_map_end(em), alloc_end); 1661 actual_end = min_t(u64, extent_map_end(em), offset + len); 1662 last_byte = (last_byte + mask) & ~mask; 1663 1664 if (em->block_start == EXTENT_MAP_HOLE || 1665 (cur_offset >= inode->i_size && 1666 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 1667 ret = btrfs_prealloc_file_range(inode, mode, cur_offset, 1668 last_byte - cur_offset, 1669 1 << inode->i_blkbits, 1670 offset + len, 1671 &alloc_hint); 1672 if (ret < 0) { 1673 free_extent_map(em); 1674 break; 1675 } 1676 } else if (actual_end > inode->i_size && 1677 !(mode & FALLOC_FL_KEEP_SIZE)) { 1678 /* 1679 * We didn't need to allocate any more space, but we 1680 * still extended the size of the file so we need to 1681 * update i_size. 1682 */ 1683 inode->i_ctime = CURRENT_TIME; 1684 i_size_write(inode, actual_end); 1685 btrfs_ordered_update_i_size(inode, actual_end, NULL); 1686 } 1687 free_extent_map(em); 1688 1689 cur_offset = last_byte; 1690 if (cur_offset >= alloc_end) { 1691 ret = 0; 1692 break; 1693 } 1694 } 1695 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 1696 &cached_state, GFP_NOFS); 1697 1698 btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); 1699 out: 1700 mutex_unlock(&inode->i_mutex); 1701 return ret; 1702 } 1703 1704 static int find_desired_extent(struct inode *inode, loff_t *offset, int origin) 1705 { 1706 struct btrfs_root *root = BTRFS_I(inode)->root; 1707 struct extent_map *em; 1708 struct extent_state *cached_state = NULL; 1709 u64 lockstart = *offset; 1710 u64 lockend = i_size_read(inode); 1711 u64 start = *offset; 1712 u64 orig_start = *offset; 1713 u64 len = i_size_read(inode); 1714 u64 last_end = 0; 1715 int ret = 0; 1716 1717 lockend = max_t(u64, root->sectorsize, lockend); 1718 if (lockend <= lockstart) 1719 lockend = lockstart + root->sectorsize; 1720 1721 len = lockend - lockstart + 1; 1722 1723 len = max_t(u64, len, root->sectorsize); 1724 if (inode->i_size == 0) 1725 return -ENXIO; 1726 1727 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, 1728 &cached_state, GFP_NOFS); 1729 1730 /* 1731 * Delalloc is such a pain. If we have a hole and we have pending 1732 * delalloc for a portion of the hole we will get back a hole that 1733 * exists for the entire range since it hasn't been actually written 1734 * yet. So to take care of this case we need to look for an extent just 1735 * before the position we want in case there is outstanding delalloc 1736 * going on here. 1737 */ 1738 if (origin == SEEK_HOLE && start != 0) { 1739 if (start <= root->sectorsize) 1740 em = btrfs_get_extent_fiemap(inode, NULL, 0, 0, 1741 root->sectorsize, 0); 1742 else 1743 em = btrfs_get_extent_fiemap(inode, NULL, 0, 1744 start - root->sectorsize, 1745 root->sectorsize, 0); 1746 if (IS_ERR(em)) { 1747 ret = -ENXIO; 1748 goto out; 1749 } 1750 last_end = em->start + em->len; 1751 if (em->block_start == EXTENT_MAP_DELALLOC) 1752 last_end = min_t(u64, last_end, inode->i_size); 1753 free_extent_map(em); 1754 } 1755 1756 while (1) { 1757 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); 1758 if (IS_ERR(em)) { 1759 ret = -ENXIO; 1760 break; 1761 } 1762 1763 if (em->block_start == EXTENT_MAP_HOLE) { 1764 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { 1765 if (last_end <= orig_start) { 1766 free_extent_map(em); 1767 ret = -ENXIO; 1768 break; 1769 } 1770 } 1771 1772 if (origin == SEEK_HOLE) { 1773 *offset = start; 1774 free_extent_map(em); 1775 break; 1776 } 1777 } else { 1778 if (origin == SEEK_DATA) { 1779 if (em->block_start == EXTENT_MAP_DELALLOC) { 1780 if (start >= inode->i_size) { 1781 free_extent_map(em); 1782 ret = -ENXIO; 1783 break; 1784 } 1785 } 1786 1787 *offset = start; 1788 free_extent_map(em); 1789 break; 1790 } 1791 } 1792 1793 start = em->start + em->len; 1794 last_end = em->start + em->len; 1795 1796 if (em->block_start == EXTENT_MAP_DELALLOC) 1797 last_end = min_t(u64, last_end, inode->i_size); 1798 1799 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { 1800 free_extent_map(em); 1801 ret = -ENXIO; 1802 break; 1803 } 1804 free_extent_map(em); 1805 cond_resched(); 1806 } 1807 if (!ret) 1808 *offset = min(*offset, inode->i_size); 1809 out: 1810 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 1811 &cached_state, GFP_NOFS); 1812 return ret; 1813 } 1814 1815 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) 1816 { 1817 struct inode *inode = file->f_mapping->host; 1818 int ret; 1819 1820 mutex_lock(&inode->i_mutex); 1821 switch (origin) { 1822 case SEEK_END: 1823 case SEEK_CUR: 1824 offset = generic_file_llseek(file, offset, origin); 1825 goto out; 1826 case SEEK_DATA: 1827 case SEEK_HOLE: 1828 if (offset >= i_size_read(inode)) { 1829 mutex_unlock(&inode->i_mutex); 1830 return -ENXIO; 1831 } 1832 1833 ret = find_desired_extent(inode, &offset, origin); 1834 if (ret) { 1835 mutex_unlock(&inode->i_mutex); 1836 return ret; 1837 } 1838 } 1839 1840 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { 1841 offset = -EINVAL; 1842 goto out; 1843 } 1844 if (offset > inode->i_sb->s_maxbytes) { 1845 offset = -EINVAL; 1846 goto out; 1847 } 1848 1849 /* Special lock needed here? */ 1850 if (offset != file->f_pos) { 1851 file->f_pos = offset; 1852 file->f_version = 0; 1853 } 1854 out: 1855 mutex_unlock(&inode->i_mutex); 1856 return offset; 1857 } 1858 1859 const struct file_operations btrfs_file_operations = { 1860 .llseek = btrfs_file_llseek, 1861 .read = do_sync_read, 1862 .write = do_sync_write, 1863 .aio_read = generic_file_aio_read, 1864 .splice_read = generic_file_splice_read, 1865 .aio_write = btrfs_file_aio_write, 1866 .mmap = btrfs_file_mmap, 1867 .open = generic_file_open, 1868 .release = btrfs_release_file, 1869 .fsync = btrfs_sync_file, 1870 .fallocate = btrfs_fallocate, 1871 .unlocked_ioctl = btrfs_ioctl, 1872 #ifdef CONFIG_COMPAT 1873 .compat_ioctl = btrfs_ioctl, 1874 #endif 1875 }; 1876