/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
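
/*
 * inode_defrag records live in fs_info->defrag_inodes, an rbtree keyed
 * by inode number and protected by fs_info->defrag_inodes_lock.  If an
 * inode is queued twice, the two records are merged below by keeping
 * the lower transid and the higher last_offset.
 */
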
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (defrag->ino < entry->ino)
			p = &parent->rb_left;
		else if (defrag->ino > entry->ino)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	BTRFS_I(inode)->in_defrag = 1;
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;

}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (BTRFS_I(inode)->in_defrag)
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!BTRFS_I(inode)->in_defrag)
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct rb_node *p;
	struct rb_node *parent = NULL;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (ino < entry->ino)
			p = parent->rb_left;
		else if (ino > entry->ino)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && ino > entry->ino) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
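
/*
 * btrfs_run_defrag_inodes() below walks this tree from the smallest
 * inode number upwards.  Each pass defrags at most defrag_batch
 * extents for an inode; if the batch fills up, or if the pass started
 * at a non-zero offset, the record is requeued so the rest of the file
 * (or the beginning we skipped) gets another pass later.
 */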
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
		if (!defrag) {
			if (n)
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			else if (first_ino) {
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		BTRFS_I(inode)->in_defrag = 0;
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range,
					       defrag->transid, defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
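
/*
 * Worked example for the sector alignment done in btrfs_dirty_pages():
 * with a 4K sectorsize, a 100 byte write at pos 5000 gives
 * start_pos = 4096 and num_bytes = 4096, so the whole sector containing
 * the write is marked delalloc.
 */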
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
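/*
 * If skip_pinned is set, extent maps flagged EXTENT_FLAG_PINNED are
 * left in the tree; the walk simply steps over them instead of
 * dropping or splitting them.
 */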
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
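
/*
 * Check whether the file extent item in 'slot' refers to the same
 * on-disk extent (bytenr/orig_offset) as its neighbour, with no
 * compression or encryption, so the two items can be merged.  On
 * success the item's start/end offsets are returned through *start
 * and *end.
 */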
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}
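
	/*
	 * Couldn't merge with a neighbour, so carve [start, end) out of the
	 * preallocated extent: duplicate the item at the split point and
	 * take an extra reference on the disk extent for the new item,
	 * repeating until the range sits in an item of its own.
	 */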
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
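
/*
 * If a running ordered extent overlaps the range being prepared, the
 * pages are unlocked and released, the ordered IO is waited on and the
 * whole prepare is retried from scratch.
 */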
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;

}
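
/*
 * Buffered write loop: reserve delalloc space for up to nrptrs pages,
 * lock the pages with prepare_pages(), copy the user data in, mark the
 * copied range dirty/delalloc and release whatever part of the
 * reservation a short copy didn't use.  If faulting the user buffer in
 * keeps failing, fall back to copying a single page at a time.
 */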
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding extent
		 * for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
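
/*
 * O_DIRECT writes go through generic_file_direct_write() first; anything
 * that couldn't be written directly is retried through the buffered path
 * and then flushed and invalidated so the on-disk result looks like a
 * pure direct write.
 */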
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
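
/*
 * Write entry point used for both buffered and O_DIRECT writes: performs
 * the generic checks under i_mutex, expands the file if the write starts
 * past EOF, dispatches to the direct or buffered path and finally updates
 * last_trans so a following fsync knows this inode changed.
 */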
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all the ordered extent
 * updates in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
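/*
 * The return value of btrfs_log_dentry_safe() decides what happens at the
 * end: BTRFS_NO_LOG_SYNC means nothing needs syncing and the transaction
 * is simply ended, a positive value means the change can't be logged and
 * a full transaction commit is done instead, and zero means the tree log
 * itself is synced, falling back to a commit if that fails.
 */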
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
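
/*
 * btrfs_fallocate() below rounds the requested range out to sector
 * boundaries: for example, with a 4K sectorsize, offset 5000 and len
 * 3000 give alloc_start = 4096 and alloc_end = 8192, so whole sectors
 * are preallocated.
 */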
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR_OR_NULL(em));
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {

			/*
			 * Make sure we have enough space before we do the
			 * allocation.
			 */
			ret = btrfs_check_data_free_space(inode, last_byte -
							  cur_offset);
			if (ret) {
				free_extent_map(em);
				break;
			}

			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			/* Let go of our reservation. */
			btrfs_free_reserved_data_space(inode, last_byte -
						       cur_offset);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
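
/*
 * Helper for the SEEK_DATA/SEEK_HOLE cases of btrfs_file_llseek(): walks
 * the extent maps starting at *offset and stores the first offset that
 * matches the requested seek type back into *offset, or returns -ENXIO
 * if there is none before EOF.
 */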
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state, GFP_NOFS);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = -ENXIO;
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = -ENXIO;
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};