/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0)) {
			break;
		}

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
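 *
 * note: the buffered write path calls this with a NULL transaction handle.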
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct file *file,
					    struct page **pages,
					    size_t num_pages,
					    loff_t pos,
					    size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	BUG_ON(err);

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
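				/*
				 * compressed extents are referenced as a
				 * whole, so the tail split keeps the full
				 * on-disk extent
				 */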
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

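		/*
		 * at this point the extent overlaps [start, end); the four
		 * cases below handle each way the ranges can intersect
		 */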
		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							     last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pinned[2];
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	pinned[0] = NULL;
	pinned[1] = NULL;

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done.  But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size.  So we
		 * need one more update here to catch any additions to the
		 * file
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
				   PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);
		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
			      PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;


	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

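	/* release the data space reserved before the allocation loop */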
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};