/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         int write_bytes,
                                         struct page **prepared_pages,
                                         const char __user *buf)
{
        long page_fault = 0;
        int i;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[i];
                fault_in_pages_readable(buf, count);

                /* Copy data from userspace to the current page */
                kmap(page);
                page_fault = __copy_from_user(page_address(page) + offset,
                                              buf, count);
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
                kunmap(page);
                buf += count;
                write_bytes -= count;

                if (page_fault)
                        break;
        }
        return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
                /* PageChecked is some magic around finding pages that
                 * have been modified without going through
                 * btrfs_set_page_dirty; clear it here
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
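 *
 * the dirtied range is rounded out to the root's sectorsize, and i_size
 * is only bumped in memory here; the on-disk i_size is updated later.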
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct file *file,
                                   struct page **pages,
                                   size_t num_pages,
                                   loff_t pos,
                                   size_t write_bytes)
{
        int err = 0;
        int i;
        struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
                /* we've only changed i_size in ram, and we haven't updated
                 * the disk i_size.  There is no need to log the inode
                 * at this time.
                 */
        }
        return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                if (!split)
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;

                        split->bdev = em->bdev;
                        split->flags = flags;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;

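                        /*
                         * compressed extents can't be split on disk, so
                         * the new mapping keeps the whole compressed
                         * block range
                         */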
                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == inode->i_ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid > inode->i_ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                } else {
                        WARN_ON(1);
                        extent_end = search_start;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                search_start = max(key.offset, start);
                if (recow) {
                        btrfs_release_path(root, path);
                        continue;
                }
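
                /*
                 * the cases below cover every way this extent can overlap
                 * the range: the range strictly inside the extent,
                 * covering its front, covering its tail, or swallowing
                 * it whole
                 */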
                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(root, path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, end - key.offset);
                                *hint_byte = disk_bytenr;
                        }
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (disk_bytenr > 0) {
                                inode_sub_bytes(inode, extent_end - start);
                                *hint_byte = disk_bytenr;
                        }
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                *hint_byte = disk_bytenr;
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        BUG_ON(ret);

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(root, path);
                        continue;
                }

                BUG_ON(1);
        }
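
        /*
         * deletions can still be queued up if we broke out of the loop
         * with del_nr != 0; apply them before giving back the path
         */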
        if (del_nr > 0) {
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }

        btrfs_free_path(path);
        return ret;
}

/*
 * helper to check whether the extent item at 'slot' refers to the same
 * disk extent (bytenr/orig_offset) so the caller can merge with it; on
 * success *start/*end are set to the neighbour's file range
 */
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three extents.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;

        btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        BUG_ON(!path);
again:
        recow = 0;
        split = start;
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != inode->i_ino ||
               key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));
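
        /*
         * first see if the written range lines up against a neighbouring
         * extent from the same disk extent; if so we can merge with it
         * instead of splitting
         */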
        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     inode->i_ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                BUG_ON(ret < 0);

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           inode->i_ino, orig_offset);
                BUG_ON(ret);

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             inode->i_ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(root, path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        inode->i_ino, orig_offset);
                BUG_ON(ret);
        }
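        /*
         * if no neighbours were absorbed, just flip this extent from
         * prealloc to regular; otherwise grow the surviving item over
         * the range of the items being deleted
         */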
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         unsigned long last_index, size_t write_bytes)
{
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

        if (start_pos > inode->i_size) {
                err = btrfs_cont_expand(inode, start_pos);
                if (err)
                        return err;
        }

        memset(pages, 0, num_pages * sizeof(struct page *));
again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        err = -ENOMEM;
                        BUG_ON(1);
                }
                wait_on_page_writeback(pages[i]);
        }
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent(&BTRFS_I(inode)->io_tree,
                            start_pos, last_pos - 1, GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent(&BTRFS_I(inode)->io_tree,
                                      start_pos, last_pos - 1, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY |
                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
                                  GFP_NOFS);
                unlock_extent(&BTRFS_I(inode)->io_tree,
                              start_pos, last_pos - 1, GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
}

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        loff_t pos;
        loff_t start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        int ret = 0;
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        int nrptrs;
        struct page *pinned[2];
        unsigned long first_index;
        unsigned long last_index;
        int will_write;

        will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));

        nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
                     PAGE_CACHE_SIZE / (sizeof(struct page *)));
        pinned[0] = NULL;
        pinned[1] = NULL;

        pos = *ppos;
        start_pos = pos;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* do the reserve before the mutex lock in case we have to do some
         * flushing.  We wouldn't deadlock, but this is more polite.
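         * (doing the reserve can flush delalloc, and we'd rather not
         * hold i_mutex across that)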
852 */ 853 err = btrfs_reserve_metadata_for_delalloc(root, inode, 1); 854 if (err) 855 goto out_nolock; 856 857 mutex_lock(&inode->i_mutex); 858 859 current->backing_dev_info = inode->i_mapping->backing_dev_info; 860 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 861 if (err) 862 goto out; 863 864 if (count == 0) 865 goto out; 866 867 err = file_remove_suid(file); 868 if (err) 869 goto out; 870 871 file_update_time(file); 872 873 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 874 875 /* generic_write_checks can change our pos */ 876 start_pos = pos; 877 878 BTRFS_I(inode)->sequence++; 879 first_index = pos >> PAGE_CACHE_SHIFT; 880 last_index = (pos + count) >> PAGE_CACHE_SHIFT; 881 882 /* 883 * there are lots of better ways to do this, but this code 884 * makes sure the first and last page in the file range are 885 * up to date and ready for cow 886 */ 887 if ((pos & (PAGE_CACHE_SIZE - 1))) { 888 pinned[0] = grab_cache_page(inode->i_mapping, first_index); 889 if (!PageUptodate(pinned[0])) { 890 ret = btrfs_readpage(NULL, pinned[0]); 891 BUG_ON(ret); 892 wait_on_page_locked(pinned[0]); 893 } else { 894 unlock_page(pinned[0]); 895 } 896 } 897 if ((pos + count) & (PAGE_CACHE_SIZE - 1)) { 898 pinned[1] = grab_cache_page(inode->i_mapping, last_index); 899 if (!PageUptodate(pinned[1])) { 900 ret = btrfs_readpage(NULL, pinned[1]); 901 BUG_ON(ret); 902 wait_on_page_locked(pinned[1]); 903 } else { 904 unlock_page(pinned[1]); 905 } 906 } 907 908 while (count > 0) { 909 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 910 size_t write_bytes = min(count, nrptrs * 911 (size_t)PAGE_CACHE_SIZE - 912 offset); 913 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> 914 PAGE_CACHE_SHIFT; 915 916 WARN_ON(num_pages > nrptrs); 917 memset(pages, 0, sizeof(struct page *) * nrptrs); 918 919 ret = btrfs_check_data_free_space(root, inode, write_bytes); 920 if (ret) 921 goto out; 922 923 ret = prepare_pages(root, file, pages, num_pages, 924 pos, first_index, last_index, 925 write_bytes); 926 if (ret) { 927 btrfs_free_reserved_data_space(root, inode, 928 write_bytes); 929 goto out; 930 } 931 932 ret = btrfs_copy_from_user(pos, num_pages, 933 write_bytes, pages, buf); 934 if (ret) { 935 btrfs_free_reserved_data_space(root, inode, 936 write_bytes); 937 btrfs_drop_pages(pages, num_pages); 938 goto out; 939 } 940 941 ret = dirty_and_release_pages(NULL, root, file, pages, 942 num_pages, pos, write_bytes); 943 btrfs_drop_pages(pages, num_pages); 944 if (ret) { 945 btrfs_free_reserved_data_space(root, inode, 946 write_bytes); 947 goto out; 948 } 949 950 if (will_write) { 951 filemap_fdatawrite_range(inode->i_mapping, pos, 952 pos + write_bytes - 1); 953 } else { 954 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 955 num_pages); 956 if (num_pages < 957 (root->leafsize >> PAGE_CACHE_SHIFT) + 1) 958 btrfs_btree_balance_dirty(root, 1); 959 btrfs_throttle(root); 960 } 961 962 buf += write_bytes; 963 count -= write_bytes; 964 pos += write_bytes; 965 num_written += write_bytes; 966 967 cond_resched(); 968 } 969 out: 970 mutex_unlock(&inode->i_mutex); 971 if (ret) 972 err = ret; 973 btrfs_unreserve_metadata_for_delalloc(root, inode, 1); 974 975 out_nolock: 976 kfree(pages); 977 if (pinned[0]) 978 page_cache_release(pinned[0]); 979 if (pinned[1]) 980 page_cache_release(pinned[1]); 981 *ppos = pos; 982 983 /* 984 * we want to make sure fsync finds this change 985 * but we haven't joined a transaction running right now. 
986 * 987 * Later on, someone is sure to update the inode and get the 988 * real transid recorded. 989 * 990 * We set last_trans now to the fs_info generation + 1, 991 * this will either be one more than the running transaction 992 * or the generation used for the next transaction if there isn't 993 * one running right now. 994 */ 995 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; 996 997 if (num_written > 0 && will_write) { 998 struct btrfs_trans_handle *trans; 999 1000 err = btrfs_wait_ordered_range(inode, start_pos, num_written); 1001 if (err) 1002 num_written = err; 1003 1004 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { 1005 trans = btrfs_start_transaction(root, 1); 1006 ret = btrfs_log_dentry_safe(trans, root, 1007 file->f_dentry); 1008 if (ret == 0) { 1009 ret = btrfs_sync_log(trans, root); 1010 if (ret == 0) 1011 btrfs_end_transaction(trans, root); 1012 else 1013 btrfs_commit_transaction(trans, root); 1014 } else if (ret != BTRFS_NO_LOG_SYNC) { 1015 btrfs_commit_transaction(trans, root); 1016 } else { 1017 btrfs_end_transaction(trans, root); 1018 } 1019 } 1020 if (file->f_flags & O_DIRECT) { 1021 invalidate_mapping_pages(inode->i_mapping, 1022 start_pos >> PAGE_CACHE_SHIFT, 1023 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); 1024 } 1025 } 1026 current->backing_dev_info = NULL; 1027 return num_written ? num_written : err; 1028 } 1029 1030 int btrfs_release_file(struct inode *inode, struct file *filp) 1031 { 1032 /* 1033 * ordered_data_close is set by settattr when we are about to truncate 1034 * a file from a non-zero size to a zero size. This tries to 1035 * flush down new bytes that may have been written if the 1036 * application were using truncate to replace a file in place. 1037 */ 1038 if (BTRFS_I(inode)->ordered_data_close) { 1039 BTRFS_I(inode)->ordered_data_close = 0; 1040 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode); 1041 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 1042 filemap_flush(inode->i_mapping); 1043 } 1044 if (filp->private_data) 1045 btrfs_ioctl_trans_end(filp); 1046 return 0; 1047 } 1048 1049 /* 1050 * fsync call for both files and directories. This logs the inode into 1051 * the tree log instead of forcing full commits whenever possible. 1052 * 1053 * It needs to call filemap_fdatawait so that all ordered extent updates are 1054 * in the metadata btree are up to date for copying to the log. 1055 * 1056 * It drops the inode mutex before doing the tree log commit. This is an 1057 * important optimization for directories because holding the mutex prevents 1058 * new operations on the dir while we write to disk. 
1059 */ 1060 int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) 1061 { 1062 struct inode *inode = dentry->d_inode; 1063 struct btrfs_root *root = BTRFS_I(inode)->root; 1064 int ret = 0; 1065 struct btrfs_trans_handle *trans; 1066 1067 1068 /* we wait first, since the writeback may change the inode */ 1069 root->log_batch++; 1070 /* the VFS called filemap_fdatawrite for us */ 1071 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1072 root->log_batch++; 1073 1074 /* 1075 * check the transaction that last modified this inode 1076 * and see if its already been committed 1077 */ 1078 if (!BTRFS_I(inode)->last_trans) 1079 goto out; 1080 1081 /* 1082 * if the last transaction that changed this file was before 1083 * the current transaction, we can bail out now without any 1084 * syncing 1085 */ 1086 mutex_lock(&root->fs_info->trans_mutex); 1087 if (BTRFS_I(inode)->last_trans <= 1088 root->fs_info->last_trans_committed) { 1089 BTRFS_I(inode)->last_trans = 0; 1090 mutex_unlock(&root->fs_info->trans_mutex); 1091 goto out; 1092 } 1093 mutex_unlock(&root->fs_info->trans_mutex); 1094 1095 /* 1096 * ok we haven't committed the transaction yet, lets do a commit 1097 */ 1098 if (file && file->private_data) 1099 btrfs_ioctl_trans_end(file); 1100 1101 trans = btrfs_start_transaction(root, 1); 1102 if (!trans) { 1103 ret = -ENOMEM; 1104 goto out; 1105 } 1106 1107 ret = btrfs_log_dentry_safe(trans, root, dentry); 1108 if (ret < 0) 1109 goto out; 1110 1111 /* we've logged all the items and now have a consistent 1112 * version of the file in the log. It is possible that 1113 * someone will come in and modify the file, but that's 1114 * fine because the log is consistent on disk, and we 1115 * have references to all of the file's extents 1116 * 1117 * It is possible that someone will come in and log the 1118 * file again, but that will end up using the synchronization 1119 * inside btrfs_sync_log to keep things safe. 1120 */ 1121 mutex_unlock(&dentry->d_inode->i_mutex); 1122 1123 if (ret != BTRFS_NO_LOG_SYNC) { 1124 if (ret > 0) { 1125 ret = btrfs_commit_transaction(trans, root); 1126 } else { 1127 ret = btrfs_sync_log(trans, root); 1128 if (ret == 0) 1129 ret = btrfs_end_transaction(trans, root); 1130 else 1131 ret = btrfs_commit_transaction(trans, root); 1132 } 1133 } else { 1134 ret = btrfs_end_transaction(trans, root); 1135 } 1136 mutex_lock(&dentry->d_inode->i_mutex); 1137 out: 1138 return ret > 0 ? -EIO : ret; 1139 } 1140 1141 static const struct vm_operations_struct btrfs_file_vm_ops = { 1142 .fault = filemap_fault, 1143 .page_mkwrite = btrfs_page_mkwrite, 1144 }; 1145 1146 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) 1147 { 1148 vma->vm_ops = &btrfs_file_vm_ops; 1149 file_accessed(filp); 1150 return 0; 1151 } 1152 1153 const struct file_operations btrfs_file_operations = { 1154 .llseek = generic_file_llseek, 1155 .read = do_sync_read, 1156 .aio_read = generic_file_aio_read, 1157 .splice_read = generic_file_splice_read, 1158 .write = btrfs_file_write, 1159 .mmap = btrfs_file_mmap, 1160 .open = generic_file_open, 1161 .release = btrfs_release_file, 1162 .fsync = btrfs_sync_file, 1163 .unlocked_ioctl = btrfs_ioctl, 1164 #ifdef CONFIG_COMPAT 1165 .compat_ioctl = btrfs_ioctl, 1166 #endif 1167 }; 1168