/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
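
/*
 * Editor's illustration (not part of the original code): with 4KB pages,
 * a 5000 byte write starting at pos 6000 goes through the copy loop in
 * btrfs_copy_from_user() above in two chunks.  The first iteration uses
 * offset = 6000 & 4095 = 1904 and copies min(4096 - 1904, 5000) = 2192
 * bytes; the second iteration restarts at offset 0 and copies the
 * remaining 2808 bytes into the next page.
 */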

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return err;
}
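
/*
 * Editor's illustration (not from the original source): with a 4KB
 * sectorsize, a 100 byte write at pos 4000 rounds out as
 *	start_pos = 4000 & ~4095                    = 0
 *	num_bytes = (100 + 4000 - 0 + 4095) & ~4095 = 8192
 * so dirty_and_release_pages() above sets delalloc on the two full
 * sectors [0, 8191] that the write touches.
 */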

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
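
/*
 * Editor's illustration (not in the original): dropping [4k, 8k) from a
 * cached mapping that covers [0, 12k) removes the original extent_map
 * and re-inserts two pieces, [0, 4k) and [8k, 12k).  For an uncompressed
 * mapping the second piece's block_start is shifted by diff = 8k into
 * the on-disk extent; compressed mappings keep the full on-disk range
 * in both pieces instead.
 */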

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
		       u64 inline_limit, u64 *hint_byte, int drop_cache)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 ram_bytes = 0;
	u64 disk_bytenr = 0;
	u64 orig_locked_end = locked_end;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes), 0,
					   root->root_key.objectid,
					   key.objectid, key.offset -
					   le64_to_cpu(old.offset));
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
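		/*
		 * Editor's illustration (not in the original): "bookend"
		 * means the tail of an overlapping extent survives the
		 * drop.  Dropping [4k, 8k) from an extent at [0, 12k)
		 * truncates it to [0, 4k) above, then re-inserts the
		 * [8k, 12k) tail in the two branches below.
		 */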
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						0, root->root_key.objectid,
						key.objectid, key.offset -
						le64_to_cpu(old.offset));
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > orig_locked_end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
			      locked_end - 1, GFP_NOFS);
	}
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
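
/*
 * Editor's note (not in the original): extent_mergeable()'s *start and
 * *end act as in/out constraints.  A nonzero *start or *end on entry
 * must match the neighbouring extent's bounds exactly; on success both
 * are set to the neighbour's [key.offset, extent_end) so callers can
 * extend an item into it.
 */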

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						0, root->root_key.objectid,
						inode->i_ino, orig_offset);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						0, root->root_key.objectid,
						inode->i_ino, orig_offset);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto release;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
	} else {
		BUG_ON(key.offset != start);
		key.offset = split;
		btrfs_set_file_extent_offset(leaf, fi, key.offset -
					     orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, key.offset -
						     orig_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
				   root->root_key.objectid,
				   inode->i_ino, orig_offset);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
done:
	btrfs_mark_buffer_dirty(leaf);

release:
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}
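
/*
 * Editor's illustration (not part of the original): marking [4k, 8k)
 * written inside a preallocated extent covering [0, 12k) takes two
 * passes through the code above.  The first pass (split == start) trims
 * the original item to [0, 4k) and re-inserts the remainder [4k, 12k),
 * still preallocated; the second pass (split == end) shrinks that
 * remainder to [8k, 12k) and inserts a regular extent item for the
 * written [4k, 8k), leaving up to three items sharing one disk extent.
 */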

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
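
/*
 * Editor's note (not in the original): in btrfs_file_write() below,
 * nrptrs caps the pages array at one page worth of pointers.  On a
 * 64-bit machine with 4KB pages that is at most 4096 / 8 = 512 pages,
 * i.e. up to 2MB of file data copied per trip through the write loop.
 */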
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	if (datasync && !(inode->i_state & I_DIRTY_PAGES))
		goto out;
	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};