/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
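
/*
 * Example: for a directory inode, i_mode carries S_IFDIR (0040000 octal),
 * so (i_mode & S_IFMT) >> S_SHIFT == 4 and btrfs_type_by_mode[4] yields
 * BTRFS_FT_DIR, the on-disk directory entry type.
 */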

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}

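/*
 * A worked example of the compressed copy loop above, assuming 4K pages:
 * with compressed_size == 10000, write_extent_buffer() is called three
 * times, copying 4096, 4096 and 1808 bytes from three consecutive pages
 * in compressed_pages into the inline item.
 */
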
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	     (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
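
/*
 * A worked example of the checks above, assuming a 4K sectorsize and the
 * default max_inline: a 3000 byte write at offset 0 of a 3000 byte file
 * passes every test (start == 0, actual_end < PAGE_CACHE_SIZE, the data
 * fits under the inline limits, the end isn't sector aligned and the
 * range reaches i_size), so the bytes are stored inline in the btree.
 * The same write starting at offset 4096 fails the start > 0 check and
 * falls back to a regular extent.
 */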

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}

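/*
 * A worked example of the "is compression a win" check above, assuming
 * 4K pages and a 4K blocksize: if 64K of input (total_in) compresses to
 * 13000 bytes, total_compressed rounds up to 16384 < 65536 and the
 * compressed copy is kept; had it only shrunk to 63000 bytes, rounding
 * gives 65536 >= 65536 and will_compress is cleared.
 */
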
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}

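/*
 * Note on the add_extent_mapping() loop above (an idiom repeated in this
 * file): if a stale cached mapping still overlaps the range, the insert
 * fails with -EEXIST, btrfs_drop_extent_cache() evicts the overlap, and
 * the insert is retried until it succeeds.
 */
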
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

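/*
 * Example: if a nearby mapping points at real disk bytes, its block_start
 * is returned as the hint, so the allocator tries to place the new extent
 * right next to existing data and sequential writes stay physically
 * contiguous.  Mappings whose block_start is at or past
 * EXTENT_MAP_LAST_BYTE are sentinel values rather than real block
 * numbers and are skipped.
 */
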
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, NULL,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}

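/*
 * A worked example of the num_bytes rounding above: with a 4K blocksize,
 * a delalloc range of start == 0, end == 6143 covers 6144 bytes, and
 * (end - start + blocksize) & ~(blocksize - 1) rounds that up to 8192,
 * so two full blocks are allocated to cover the partial tail.
 */
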
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

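/*
 * Example of the slicing above: a 2MB delalloc range is cut into four
 * async_cow units of 512K each (cur_end = start + 512 * 1024 - 1), so
 * compression of one large write can be spread across several workers
 * while the ordered_func still submits the results in file order.
 */
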
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * callback for nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nolock = btrfs_is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	if (!ret)
		ret = err;

	btrfs_free_path(path);
	return ret;
}

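/*
 * The out_check ladder above refuses nocow for anything unsafe: holes
 * (disk_bytenr == 0), compressed or encrypted extents, plain REG extents
 * when not forced, extents in read-only block groups, extents shared
 * with another reference, and ranges that already carry checksums, since
 * overwriting those in place would leave stale csums behind.
 */
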
/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

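/*
 * Summary of the dispatch above: NODATACOW inodes try to overwrite
 * existing extents in place (force == 1), PREALLOC inodes only reuse
 * preallocated extents, plain inodes on an uncompressed mount take the
 * synchronous cow_file_range() path, and anything that might compress
 * is handed to the async workers via cow_file_range_async().
 */
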
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

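/*
 * Example: bio->bi_sector counts 512 byte sectors, so bi_sector << 9 is
 * the byte offset in the logical address space.  If mapping `length'
 * bytes at that offset comes back shorter than length + size, adding
 * this page would make the bio cross a stripe or chunk boundary, and
 * the caller must start a new bio instead.
 */
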
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(root, inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			return ret;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered?  We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	if (ret)
		goto out;

	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	if (ret)
		goto out;
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct inode *inode = ordered_extent->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock;

	nolock = btrfs_is_free_space_inode(root, inode);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root);
			else
				trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode_fallback(trans, root, inode);
			if (ret) /* -ENOMEM or corruption */
				btrfs_abort_transaction(trans, root, ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
	}

	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_unlock;
	}

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) { /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}
	ret = 0;
out_unlock:
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans) {
		if (nolock)
			btrfs_end_transaction_nolock(trans, root);
		else
			btrfs_end_transaction(trans, root);
	}

	if (ret)
		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
				      ordered_extent->file_offset +
				      ordered_extent->len - 1, NULL, GFP_NOFS);

	/*
	 * This needs to be done to make sure anybody waiting knows we are
	 * done updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

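/*
 * The two completion cases above: a write into preallocated space just
 * flips the existing extent item from PREALLOC to regular via
 * btrfs_mark_extent_written(), while a normal or compressed write
 * inserts a brand new file extent item pointing at the freshly written
 * disk bytes and then unpins the cached mapping.
 */
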
ordered_extent->file_offset + 1929 ordered_extent->len - 1, NULL, GFP_NOFS); 1930 1931 /* 1932 * This needs to be done to make sure anybody waiting knows we are done 1933 * updating everything for this ordered extent. 1934 */ 1935 btrfs_remove_ordered_extent(inode, ordered_extent); 1936 1937 /* once for us */ 1938 btrfs_put_ordered_extent(ordered_extent); 1939 /* once for the tree */ 1940 btrfs_put_ordered_extent(ordered_extent); 1941 1942 return ret; 1943 } 1944 1945 static void finish_ordered_fn(struct btrfs_work *work) 1946 { 1947 struct btrfs_ordered_extent *ordered_extent; 1948 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 1949 btrfs_finish_ordered_io(ordered_extent); 1950 } 1951 1952 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1953 struct extent_state *state, int uptodate) 1954 { 1955 struct inode *inode = page->mapping->host; 1956 struct btrfs_root *root = BTRFS_I(inode)->root; 1957 struct btrfs_ordered_extent *ordered_extent = NULL; 1958 struct btrfs_workers *workers; 1959 1960 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 1961 1962 ClearPagePrivate2(page); 1963 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 1964 end - start + 1, uptodate)) 1965 return 0; 1966 1967 ordered_extent->work.func = finish_ordered_fn; 1968 ordered_extent->work.flags = 0; 1969 1970 if (btrfs_is_free_space_inode(root, inode)) 1971 workers = &root->fs_info->endio_freespace_worker; 1972 else 1973 workers = &root->fs_info->endio_write_workers; 1974 btrfs_queue_worker(workers, &ordered_extent->work); 1975 1976 return 0; 1977 } 1978 1979 /* 1980 * when reads are done, we need to check csums to verify the data is correct. 1981 * If there's a match, we allow the bio to finish. If not, the code in 1982 * extent_io.c will try to find good copies for us.
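 *
 * In outline (note added for clarity): we fetch the expected csum
 * stashed as the io_tree "private" for this range, run the page data
 * through btrfs_csum_data()/btrfs_csum_final() (crc32c, the only csum
 * type btrfs has at this point), and compare.  On a mismatch the page
 * is filled with non-zero poison so stale data is never treated as
 * good.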
1983 */ 1984 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, 1985 struct extent_state *state, int mirror) 1986 { 1987 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); 1988 struct inode *inode = page->mapping->host; 1989 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1990 char *kaddr; 1991 u64 private = ~(u32)0; 1992 int ret; 1993 struct btrfs_root *root = BTRFS_I(inode)->root; 1994 u32 csum = ~(u32)0; 1995 1996 if (PageChecked(page)) { 1997 ClearPageChecked(page); 1998 goto good; 1999 } 2000 2001 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 2002 goto good; 2003 2004 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 2005 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 2006 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 2007 GFP_NOFS); 2008 return 0; 2009 } 2010 2011 if (state && state->start == start) { 2012 private = state->private; 2013 ret = 0; 2014 } else { 2015 ret = get_state_private(io_tree, start, &private); 2016 } 2017 kaddr = kmap_atomic(page); 2018 if (ret) 2019 goto zeroit; 2020 2021 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); 2022 btrfs_csum_final(csum, (char *)&csum); 2023 if (csum != private) 2024 goto zeroit; 2025 2026 kunmap_atomic(kaddr); 2027 good: 2028 return 0; 2029 2030 zeroit: 2031 printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " 2032 "private %llu\n", 2033 (unsigned long long)btrfs_ino(page->mapping->host), 2034 (unsigned long long)start, csum, 2035 (unsigned long long)private); 2036 memset(kaddr + offset, 1, end - start + 1); 2037 flush_dcache_page(page); 2038 kunmap_atomic(kaddr); 2039 if (private == 0) 2040 return 0; 2041 return -EIO; 2042 } 2043 2044 struct delayed_iput { 2045 struct list_head list; 2046 struct inode *inode; 2047 }; 2048 2049 /* JDM: If this is fs-wide, why can't we add a pointer to 2050 * btrfs_inode instead and avoid the allocation? */ 2051 void btrfs_add_delayed_iput(struct inode *inode) 2052 { 2053 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2054 struct delayed_iput *delayed; 2055 2056 if (atomic_add_unless(&inode->i_count, -1, 1)) 2057 return; 2058 2059 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); 2060 delayed->inode = inode; 2061 2062 spin_lock(&fs_info->delayed_iput_lock); 2063 list_add_tail(&delayed->list, &fs_info->delayed_iputs); 2064 spin_unlock(&fs_info->delayed_iput_lock); 2065 } 2066 2067 void btrfs_run_delayed_iputs(struct btrfs_root *root) 2068 { 2069 LIST_HEAD(list); 2070 struct btrfs_fs_info *fs_info = root->fs_info; 2071 struct delayed_iput *delayed; 2072 int empty; 2073 2074 spin_lock(&fs_info->delayed_iput_lock); 2075 empty = list_empty(&fs_info->delayed_iputs); 2076 spin_unlock(&fs_info->delayed_iput_lock); 2077 if (empty) 2078 return; 2079 2080 down_read(&root->fs_info->cleanup_work_sem); 2081 spin_lock(&fs_info->delayed_iput_lock); 2082 list_splice_init(&fs_info->delayed_iputs, &list); 2083 spin_unlock(&fs_info->delayed_iput_lock); 2084 2085 while (!list_empty(&list)) { 2086 delayed = list_entry(list.next, struct delayed_iput, list); 2087 list_del(&delayed->list); 2088 iput(delayed->inode); 2089 kfree(delayed); 2090 } 2091 up_read(&root->fs_info->cleanup_work_sem); 2092 } 2093 2094 enum btrfs_orphan_cleanup_state { 2095 ORPHAN_CLEANUP_STARTED = 1, 2096 ORPHAN_CLEANUP_DONE = 2, 2097 }; 2098 2099 /* 2100 * This is called at transaction commit time.
If there are no orphan 2101 * files in the subvolume, it removes the orphan item and frees the 2102 * block_rsv structure. 2103 */ 2104 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2105 struct btrfs_root *root) 2106 { 2107 struct btrfs_block_rsv *block_rsv; 2108 int ret; 2109 2110 if (atomic_read(&root->orphan_inodes) || 2111 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 2112 return; 2113 2114 spin_lock(&root->orphan_lock); 2115 if (atomic_read(&root->orphan_inodes)) { 2116 spin_unlock(&root->orphan_lock); 2117 return; 2118 } 2119 2120 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 2121 spin_unlock(&root->orphan_lock); 2122 return; 2123 } 2124 2125 block_rsv = root->orphan_block_rsv; 2126 root->orphan_block_rsv = NULL; 2127 spin_unlock(&root->orphan_lock); 2128 2129 if (root->orphan_item_inserted && 2130 btrfs_root_refs(&root->root_item) > 0) { 2131 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 2132 root->root_key.objectid); 2133 BUG_ON(ret); 2134 root->orphan_item_inserted = 0; 2135 } 2136 2137 if (block_rsv) { 2138 WARN_ON(block_rsv->size > 0); 2139 btrfs_free_block_rsv(root, block_rsv); 2140 } 2141 } 2142 2143 /* 2144 * This creates an orphan entry for the given inode in case something goes 2145 * wrong in the middle of an unlink/truncate. 2146 * 2147 * NOTE: the caller of this function must reserve 5 units of metadata 2148 * before calling it. 2149 */ 2150 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 2151 { 2152 struct btrfs_root *root = BTRFS_I(inode)->root; 2153 struct btrfs_block_rsv *block_rsv = NULL; 2154 int reserve = 0; 2155 int insert = 0; 2156 int ret; 2157 2158 if (!root->orphan_block_rsv) { 2159 block_rsv = btrfs_alloc_block_rsv(root); 2160 if (!block_rsv) 2161 return -ENOMEM; 2162 } 2163 2164 spin_lock(&root->orphan_lock); 2165 if (!root->orphan_block_rsv) { 2166 root->orphan_block_rsv = block_rsv; 2167 } else if (block_rsv) { 2168 btrfs_free_block_rsv(root, block_rsv); 2169 block_rsv = NULL; 2170 } 2171 2172 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 2173 &BTRFS_I(inode)->runtime_flags)) { 2174 #if 0 2175 /* 2176 * For proper ENOSPC handling, we should do orphan 2177 * cleanup when mounting. But this introduces a backward 2178 * compatibility issue. 2179 */ 2180 if (!xchg(&root->orphan_item_inserted, 1)) 2181 insert = 2; 2182 else 2183 insert = 1; 2184 #endif 2185 insert = 1; 2186 atomic_inc(&root->orphan_inodes); 2187 } 2188 2189 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 2190 &BTRFS_I(inode)->runtime_flags)) 2191 reserve = 1; 2192 spin_unlock(&root->orphan_lock); 2193 2194 /* grab metadata reservation from transaction handle */ 2195 if (reserve) { 2196 ret = btrfs_orphan_reserve_metadata(trans, inode); 2197 BUG_ON(ret); /* -ENOSPC in reservation; Logic error?
JDM */ 2198 } 2199 2200 /* insert an orphan item to track this unlinked/truncated file */ 2201 if (insert >= 1) { 2202 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2203 if (ret && ret != -EEXIST) { 2204 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 2205 &BTRFS_I(inode)->runtime_flags); 2206 btrfs_abort_transaction(trans, root, ret); 2207 return ret; 2208 } 2209 ret = 0; 2210 } 2211 2212 /* insert an orphan item to track that the subvolume contains orphan files */ 2213 if (insert >= 2) { 2214 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 2215 root->root_key.objectid); 2216 if (ret && ret != -EEXIST) { 2217 btrfs_abort_transaction(trans, root, ret); 2218 return ret; 2219 } 2220 } 2221 return 0; 2222 } 2223 2224 /* 2225 * We have done the truncate/delete so we can go ahead and remove the orphan 2226 * item for this particular inode. 2227 */ 2228 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) 2229 { 2230 struct btrfs_root *root = BTRFS_I(inode)->root; 2231 int delete_item = 0; 2232 int release_rsv = 0; 2233 int ret = 0; 2234 2235 spin_lock(&root->orphan_lock); 2236 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 2237 &BTRFS_I(inode)->runtime_flags)) 2238 delete_item = 1; 2239 2240 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 2241 &BTRFS_I(inode)->runtime_flags)) 2242 release_rsv = 1; 2243 spin_unlock(&root->orphan_lock); 2244 2245 if (trans && delete_item) { 2246 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); 2247 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2248 } 2249 2250 if (release_rsv) { 2251 btrfs_orphan_release_metadata(inode); 2252 atomic_dec(&root->orphan_inodes); 2253 } 2254 2255 return 0; 2256 } 2257 2258 /* 2259 * this cleans up any orphans that may be left on the list from the last use 2260 * of this root. 2261 */ 2262 int btrfs_orphan_cleanup(struct btrfs_root *root) 2263 { 2264 struct btrfs_path *path; 2265 struct extent_buffer *leaf; 2266 struct btrfs_key key, found_key; 2267 struct btrfs_trans_handle *trans; 2268 struct inode *inode; 2269 u64 last_objectid = 0; 2270 int ret = 0, nr_unlink = 0, nr_truncate = 0; 2271 2272 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 2273 return 0; 2274 2275 path = btrfs_alloc_path(); 2276 if (!path) { 2277 ret = -ENOMEM; 2278 goto out; 2279 } 2280 path->reada = -1; 2281 2282 key.objectid = BTRFS_ORPHAN_OBJECTID; 2283 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 2284 key.offset = (u64)-1; 2285 2286 while (1) { 2287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2288 if (ret < 0) 2289 goto out; 2290 2291 /* 2292 * ret == 0 means we found what we were searching for, which 2293 * is weird, but possible, so only screw with the path if we 2294 * didn't find the key, and see if we have stuff that matches 2295 */ 2296 if (ret > 0) { 2297 ret = 0; 2298 if (path->slots[0] == 0) 2299 break; 2300 path->slots[0]--; 2301 } 2302 2303 /* pull out the item */ 2304 leaf = path->nodes[0]; 2305 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2306 2307 /* make sure the item matches what we want */ 2308 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 2309 break; 2310 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) 2311 break; 2312 2313 /* release the path since we're done with it */ 2314 btrfs_release_path(path); 2315 2316 /* 2317 * this is where we are basically btrfs_lookup, without the 2318 * crossing root thing. we store the inode number in the 2319 * offset of the orphan item.
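 *
 * i.e. (sketch added for clarity) the orphan items walked here look
 * like:
 *
 *	key.objectid = BTRFS_ORPHAN_OBJECTID;
 *	key.type     = BTRFS_ORPHAN_ITEM_KEY;
 *	key.offset   = inode number of the orphaned file;
 *
 * which is why found_key.offset becomes the objectid we btrfs_iget()
 * below.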
2320 */ 2321 2322 if (found_key.offset == last_objectid) { 2323 printk(KERN_ERR "btrfs: Error removing orphan entry, " 2324 "stopping orphan cleanup\n"); 2325 ret = -EINVAL; 2326 goto out; 2327 } 2328 2329 last_objectid = found_key.offset; 2330 2331 found_key.objectid = found_key.offset; 2332 found_key.type = BTRFS_INODE_ITEM_KEY; 2333 found_key.offset = 0; 2334 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 2335 ret = PTR_RET(inode); 2336 if (ret && ret != -ESTALE) 2337 goto out; 2338 2339 if (ret == -ESTALE && root == root->fs_info->tree_root) { 2340 struct btrfs_root *dead_root; 2341 struct btrfs_fs_info *fs_info = root->fs_info; 2342 int is_dead_root = 0; 2343 2344 /* 2345 * this is an orphan in the tree root. Currently these 2346 * could come from 2 sources: 2347 * a) a snapshot deletion in progress 2348 * b) a free space cache inode 2349 * We need to distinguish those two, as the snapshot 2350 * orphan must not get deleted. 2351 * find_dead_roots already ran before us, so if this 2352 * is a snapshot deletion, we should find the root 2353 * in the dead_roots list 2354 */ 2355 spin_lock(&fs_info->trans_lock); 2356 list_for_each_entry(dead_root, &fs_info->dead_roots, 2357 root_list) { 2358 if (dead_root->root_key.objectid == 2359 found_key.objectid) { 2360 is_dead_root = 1; 2361 break; 2362 } 2363 } 2364 spin_unlock(&fs_info->trans_lock); 2365 if (is_dead_root) { 2366 /* prevent this orphan from being found again */ 2367 key.offset = found_key.objectid - 1; 2368 continue; 2369 } 2370 } 2371 /* 2372 * Inode is already gone but the orphan item is still there, 2373 * kill the orphan item. 2374 */ 2375 if (ret == -ESTALE) { 2376 trans = btrfs_start_transaction(root, 1); 2377 if (IS_ERR(trans)) { 2378 ret = PTR_ERR(trans); 2379 goto out; 2380 } 2381 printk(KERN_ERR "auto deleting %Lu\n", 2382 found_key.objectid); 2383 ret = btrfs_del_orphan_item(trans, root, 2384 found_key.objectid); 2385 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2386 btrfs_end_transaction(trans, root); 2387 continue; 2388 } 2389 2390 /* 2391 * add this inode to the orphan list so btrfs_orphan_del does 2392 * the proper thing when we hit it 2393 */ 2394 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 2395 &BTRFS_I(inode)->runtime_flags); 2396 2397 /* if we have links, this was a truncate, let's do that */ 2398 if (inode->i_nlink) { 2399 if (!S_ISREG(inode->i_mode)) { 2400 WARN_ON(1); 2401 iput(inode); 2402 continue; 2403 } 2404 nr_truncate++; 2405 ret = btrfs_truncate(inode); 2406 } else { 2407 nr_unlink++; 2408 } 2409 2410 /* this will do delete_inode and everything for us */ 2411 iput(inode); 2412 if (ret) 2413 goto out; 2414 } 2415 /* release the path since we're done with it */ 2416 btrfs_release_path(path); 2417 2418 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2419 2420 if (root->orphan_block_rsv) 2421 btrfs_block_rsv_release(root, root->orphan_block_rsv, 2422 (u64)-1); 2423 2424 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2425 trans = btrfs_join_transaction(root); 2426 if (!IS_ERR(trans)) 2427 btrfs_end_transaction(trans, root); 2428 } 2429 2430 if (nr_unlink) 2431 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); 2432 if (nr_truncate) 2433 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); 2434 2435 out: 2436 if (ret) 2437 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); 2438 btrfs_free_path(path); 2439 return ret; 2440 } 2441 2442 /* 2443 * very simple check to peek ahead in the leaf looking for xattrs.
If we 2444 * don't find any xattrs, we know there can't be any acls. 2445 * 2446 * slot is the slot the inode is in, objectid is the objectid of the inode 2447 */ 2448 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 2449 int slot, u64 objectid) 2450 { 2451 u32 nritems = btrfs_header_nritems(leaf); 2452 struct btrfs_key found_key; 2453 int scanned = 0; 2454 2455 slot++; 2456 while (slot < nritems) { 2457 btrfs_item_key_to_cpu(leaf, &found_key, slot); 2458 2459 /* we found a different objectid, there must not be acls */ 2460 if (found_key.objectid != objectid) 2461 return 0; 2462 2463 /* we found an xattr, assume we've got an acl */ 2464 if (found_key.type == BTRFS_XATTR_ITEM_KEY) 2465 return 1; 2466 2467 /* 2468 * we found a key greater than an xattr key, there can't 2469 * be any acls later on 2470 */ 2471 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 2472 return 0; 2473 2474 slot++; 2475 scanned++; 2476 2477 /* 2478 * it goes inode, inode backrefs, xattrs, extents, 2479 * so if there are a ton of hard links to an inode there can 2480 * be a lot of backrefs. Don't waste time searching too hard, 2481 * this is just an optimization 2482 */ 2483 if (scanned >= 8) 2484 break; 2485 } 2486 /* we hit the end of the leaf before we found an xattr or 2487 * something larger than an xattr. We have to assume the inode 2488 * has acls 2489 */ 2490 return 1; 2491 } 2492 2493 /* 2494 * read an inode from the btree into the in-memory inode 2495 */ 2496 static void btrfs_read_locked_inode(struct inode *inode) 2497 { 2498 struct btrfs_path *path; 2499 struct extent_buffer *leaf; 2500 struct btrfs_inode_item *inode_item; 2501 struct btrfs_timespec *tspec; 2502 struct btrfs_root *root = BTRFS_I(inode)->root; 2503 struct btrfs_key location; 2504 int maybe_acls; 2505 u32 rdev; 2506 int ret; 2507 bool filled = false; 2508 2509 ret = btrfs_fill_inode(inode, &rdev); 2510 if (!ret) 2511 filled = true; 2512 2513 path = btrfs_alloc_path(); 2514 if (!path) 2515 goto make_bad; 2516 2517 path->leave_spinning = 1; 2518 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2519 2520 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2521 if (ret) 2522 goto make_bad; 2523 2524 leaf = path->nodes[0]; 2525 2526 if (filled) 2527 goto cache_acl; 2528 2529 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2530 struct btrfs_inode_item); 2531 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2532 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 2533 inode->i_uid = btrfs_inode_uid(leaf, inode_item); 2534 inode->i_gid = btrfs_inode_gid(leaf, inode_item); 2535 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 2536 2537 tspec = btrfs_inode_atime(inode_item); 2538 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2539 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2540 2541 tspec = btrfs_inode_mtime(inode_item); 2542 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2543 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2544 2545 tspec = btrfs_inode_ctime(inode_item); 2546 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2547 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2548 2549 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 2550 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 2551 inode->i_version = btrfs_inode_sequence(leaf, inode_item); 2552 inode->i_generation = BTRFS_I(inode)->generation; 2553 inode->i_rdev = 0; 2554 rdev = btrfs_inode_rdev(leaf, inode_item); 2555 2556 
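/*
 * (note added for clarity; index_cnt semantics assumed from its users
 * elsewhere in btrfs) index_cnt is the in-memory counter used to hand
 * out directory index numbers; (u64)-1 marks it as not yet read, so it
 * is initialized lazily on first use.
 */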
BTRFS_I(inode)->index_cnt = (u64)-1; 2557 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2558 cache_acl: 2559 /* 2560 * try to precache a NULL acl entry for files that don't have 2561 * any xattrs or acls 2562 */ 2563 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 2564 btrfs_ino(inode)); 2565 if (!maybe_acls) 2566 cache_no_acl(inode); 2567 2568 btrfs_free_path(path); 2569 2570 switch (inode->i_mode & S_IFMT) { 2571 case S_IFREG: 2572 inode->i_mapping->a_ops = &btrfs_aops; 2573 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2574 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 2575 inode->i_fop = &btrfs_file_operations; 2576 inode->i_op = &btrfs_file_inode_operations; 2577 break; 2578 case S_IFDIR: 2579 inode->i_fop = &btrfs_dir_file_operations; 2580 if (root == root->fs_info->tree_root) 2581 inode->i_op = &btrfs_dir_ro_inode_operations; 2582 else 2583 inode->i_op = &btrfs_dir_inode_operations; 2584 break; 2585 case S_IFLNK: 2586 inode->i_op = &btrfs_symlink_inode_operations; 2587 inode->i_mapping->a_ops = &btrfs_symlink_aops; 2588 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2589 break; 2590 default: 2591 inode->i_op = &btrfs_special_inode_operations; 2592 init_special_inode(inode, inode->i_mode, rdev); 2593 break; 2594 } 2595 2596 btrfs_update_iflags(inode); 2597 return; 2598 2599 make_bad: 2600 btrfs_free_path(path); 2601 make_bad_inode(inode); 2602 } 2603 2604 /* 2605 * given a leaf and an inode, copy the inode fields into the leaf 2606 */ 2607 static void fill_inode_item(struct btrfs_trans_handle *trans, 2608 struct extent_buffer *leaf, 2609 struct btrfs_inode_item *item, 2610 struct inode *inode) 2611 { 2612 btrfs_set_inode_uid(leaf, item, inode->i_uid); 2613 btrfs_set_inode_gid(leaf, item, inode->i_gid); 2614 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2615 btrfs_set_inode_mode(leaf, item, inode->i_mode); 2616 btrfs_set_inode_nlink(leaf, item, inode->i_nlink); 2617 2618 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), 2619 inode->i_atime.tv_sec); 2620 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), 2621 inode->i_atime.tv_nsec); 2622 2623 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), 2624 inode->i_mtime.tv_sec); 2625 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), 2626 inode->i_mtime.tv_nsec); 2627 2628 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), 2629 inode->i_ctime.tv_sec); 2630 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), 2631 inode->i_ctime.tv_nsec); 2632 2633 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); 2634 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); 2635 btrfs_set_inode_sequence(leaf, item, inode->i_version); 2636 btrfs_set_inode_transid(leaf, item, trans->transid); 2637 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2638 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2639 btrfs_set_inode_block_group(leaf, item, 0); 2640 } 2641 2642 /* 2643 * copy everything in the in-memory inode into the btree. 
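 *
 * (note added for clarity) This is the direct btree update; the
 * btrfs_update_inode() wrapper below prefers the delayed-inode path
 * and only comes here for inodes that must not use the delayed code
 * (the free space and data reloc inodes) or via the -ENOSPC fallback.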
2644 */ 2645 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 2646 struct btrfs_root *root, struct inode *inode) 2647 { 2648 struct btrfs_inode_item *inode_item; 2649 struct btrfs_path *path; 2650 struct extent_buffer *leaf; 2651 int ret; 2652 2653 path = btrfs_alloc_path(); 2654 if (!path) 2655 return -ENOMEM; 2656 2657 path->leave_spinning = 1; 2658 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 2659 1); 2660 if (ret) { 2661 if (ret > 0) 2662 ret = -ENOENT; 2663 goto failed; 2664 } 2665 2666 btrfs_unlock_up_safe(path, 1); 2667 leaf = path->nodes[0]; 2668 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2669 struct btrfs_inode_item); 2670 2671 fill_inode_item(trans, leaf, inode_item, inode); 2672 btrfs_mark_buffer_dirty(leaf); 2673 btrfs_set_inode_last_trans(trans, inode); 2674 ret = 0; 2675 failed: 2676 btrfs_free_path(path); 2677 return ret; 2678 } 2679 2680 /* 2681 * copy everything in the in-memory inode into the btree. 2682 */ 2683 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2684 struct btrfs_root *root, struct inode *inode) 2685 { 2686 int ret; 2687 2688 /* 2689 * If the inode is a free space inode, we can deadlock during commit 2690 * if we put it into the delayed code. 2691 * 2692 * The data relocation inode should also be directly updated 2693 * without delay 2694 */ 2695 if (!btrfs_is_free_space_inode(root, inode) 2696 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { 2697 ret = btrfs_delayed_update_inode(trans, root, inode); 2698 if (!ret) 2699 btrfs_set_inode_last_trans(trans, inode); 2700 return ret; 2701 } 2702 2703 return btrfs_update_inode_item(trans, root, inode); 2704 } 2705 2706 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 2707 struct btrfs_root *root, struct inode *inode) 2708 { 2709 int ret; 2710 2711 ret = btrfs_update_inode(trans, root, inode); 2712 if (ret == -ENOSPC) 2713 return btrfs_update_inode_item(trans, root, inode); 2714 return ret; 2715 } 2716 2717 /* 2718 * unlink helper that gets used here in inode.c and in the tree logging 2719 * recovery code. 
It removes a link in a directory with a given name, and 2720 * also drops the back refs in the inode to the directory 2721 */ 2722 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2723 struct btrfs_root *root, 2724 struct inode *dir, struct inode *inode, 2725 const char *name, int name_len) 2726 { 2727 struct btrfs_path *path; 2728 int ret = 0; 2729 struct extent_buffer *leaf; 2730 struct btrfs_dir_item *di; 2731 struct btrfs_key key; 2732 u64 index; 2733 u64 ino = btrfs_ino(inode); 2734 u64 dir_ino = btrfs_ino(dir); 2735 2736 path = btrfs_alloc_path(); 2737 if (!path) { 2738 ret = -ENOMEM; 2739 goto out; 2740 } 2741 2742 path->leave_spinning = 1; 2743 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2744 name, name_len, -1); 2745 if (IS_ERR(di)) { 2746 ret = PTR_ERR(di); 2747 goto err; 2748 } 2749 if (!di) { 2750 ret = -ENOENT; 2751 goto err; 2752 } 2753 leaf = path->nodes[0]; 2754 btrfs_dir_item_key_to_cpu(leaf, di, &key); 2755 ret = btrfs_delete_one_dir_name(trans, root, path, di); 2756 if (ret) 2757 goto err; 2758 btrfs_release_path(path); 2759 2760 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 2761 dir_ino, &index); 2762 if (ret) { 2763 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2764 "inode %llu parent %llu\n", name_len, name, 2765 (unsigned long long)ino, (unsigned long long)dir_ino); 2766 btrfs_abort_transaction(trans, root, ret); 2767 goto err; 2768 } 2769 2770 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 2771 if (ret) { 2772 btrfs_abort_transaction(trans, root, ret); 2773 goto err; 2774 } 2775 2776 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2777 inode, dir_ino); 2778 if (ret != 0 && ret != -ENOENT) { 2779 btrfs_abort_transaction(trans, root, ret); 2780 goto err; 2781 } 2782 2783 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2784 dir, index); 2785 if (ret == -ENOENT) 2786 ret = 0; 2787 err: 2788 btrfs_free_path(path); 2789 if (ret) 2790 goto out; 2791 2792 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 2793 inode_inc_iversion(inode); 2794 inode_inc_iversion(dir); 2795 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 2796 btrfs_update_inode(trans, root, dir); 2797 out: 2798 return ret; 2799 } 2800 2801 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2802 struct btrfs_root *root, 2803 struct inode *dir, struct inode *inode, 2804 const char *name, int name_len) 2805 { 2806 int ret; 2807 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 2808 if (!ret) { 2809 btrfs_drop_nlink(inode); 2810 ret = btrfs_update_inode(trans, root, inode); 2811 } 2812 return ret; 2813 } 2814 2815 2816 /* helper to check if there is any shared block in the path */ 2817 static int check_path_shared(struct btrfs_root *root, 2818 struct btrfs_path *path) 2819 { 2820 struct extent_buffer *eb; 2821 int level; 2822 u64 refs = 1; 2823 2824 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2825 int ret; 2826 2827 if (!path->nodes[level]) 2828 break; 2829 eb = path->nodes[level]; 2830 if (!btrfs_block_can_be_shared(root, eb)) 2831 continue; 2832 ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, 2833 &refs, NULL); 2834 if (refs > 1) 2835 return 1; 2836 } 2837 return 0; 2838 } 2839 2840 /* 2841 * helper to start transaction for unlink and rmdir. 2842 * 2843 * unlink and rmdir are special in btrfs: they do not always free space,
so in the ENOSPC case, we should make sure they will free space before 2845 * allowing them to use the global metadata reservation. 2846 */ 2847 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, 2848 struct dentry *dentry) 2849 { 2850 struct btrfs_trans_handle *trans; 2851 struct btrfs_root *root = BTRFS_I(dir)->root; 2852 struct btrfs_path *path; 2853 struct btrfs_inode_ref *ref; 2854 struct btrfs_dir_item *di; 2855 struct inode *inode = dentry->d_inode; 2856 u64 index; 2857 int check_link = 1; 2858 int err = -ENOSPC; 2859 int ret; 2860 u64 ino = btrfs_ino(inode); 2861 u64 dir_ino = btrfs_ino(dir); 2862 2863 /* 2864 * 1 for the possible orphan item 2865 * 1 for the dir item 2866 * 1 for the dir index 2867 * 1 for the inode ref 2868 * 1 for the inode ref in the tree log 2869 * 2 for the dir entries in the log 2870 * 1 for the inode 2871 */ 2872 trans = btrfs_start_transaction(root, 8); 2873 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 2874 return trans; 2875 2876 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 2877 return ERR_PTR(-ENOSPC); 2878 2879 /* check if someone else holds a reference */ 2880 if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) 2881 return ERR_PTR(-ENOSPC); 2882 2883 if (atomic_read(&inode->i_count) > 2) 2884 return ERR_PTR(-ENOSPC); 2885 2886 if (xchg(&root->fs_info->enospc_unlink, 1)) 2887 return ERR_PTR(-ENOSPC); 2888 2889 path = btrfs_alloc_path(); 2890 if (!path) { 2891 root->fs_info->enospc_unlink = 0; 2892 return ERR_PTR(-ENOMEM); 2893 } 2894 2895 /* 1 for the orphan item */ 2896 trans = btrfs_start_transaction(root, 1); 2897 if (IS_ERR(trans)) { 2898 btrfs_free_path(path); 2899 root->fs_info->enospc_unlink = 0; 2900 return trans; 2901 } 2902 2903 path->skip_locking = 1; 2904 path->search_commit_root = 1; 2905 2906 ret = btrfs_lookup_inode(trans, root, path, 2907 &BTRFS_I(dir)->location, 0); 2908 if (ret < 0) { 2909 err = ret; 2910 goto out; 2911 } 2912 if (ret == 0) { 2913 if (check_path_shared(root, path)) 2914 goto out; 2915 } else { 2916 check_link = 0; 2917 } 2918 btrfs_release_path(path); 2919 2920 ret = btrfs_lookup_inode(trans, root, path, 2921 &BTRFS_I(inode)->location, 0); 2922 if (ret < 0) { 2923 err = ret; 2924 goto out; 2925 } 2926 if (ret == 0) { 2927 if (check_path_shared(root, path)) 2928 goto out; 2929 } else { 2930 check_link = 0; 2931 } 2932 btrfs_release_path(path); 2933 2934 if (ret == 0 && S_ISREG(inode->i_mode)) { 2935 ret = btrfs_lookup_file_extent(trans, root, path, 2936 ino, (u64)-1, 0); 2937 if (ret < 0) { 2938 err = ret; 2939 goto out; 2940 } 2941 BUG_ON(ret == 0); /* Corruption */ 2942 if (check_path_shared(root, path)) 2943 goto out; 2944 btrfs_release_path(path); 2945 } 2946 2947 if (!check_link) { 2948 err = 0; 2949 goto out; 2950 } 2951 2952 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2953 dentry->d_name.name, dentry->d_name.len, 0); 2954 if (IS_ERR(di)) { 2955 err = PTR_ERR(di); 2956 goto out; 2957 } 2958 if (di) { 2959 if (check_path_shared(root, path)) 2960 goto out; 2961 } else { 2962 err = 0; 2963 goto out; 2964 } 2965 btrfs_release_path(path); 2966 2967 ref = btrfs_lookup_inode_ref(trans, root, path, 2968 dentry->d_name.name, dentry->d_name.len, 2969 ino, dir_ino, 0); 2970 if (IS_ERR(ref)) { 2971 err = PTR_ERR(ref); 2972 goto out; 2973 } 2974 BUG_ON(!ref); /* Logic error */ 2975 if (check_path_shared(root, path)) 2976 goto out; 2977 index = btrfs_inode_ref_index(path->nodes[0], ref); 2978 btrfs_release_path(path); 2979 2980 /* 2981 * This is a commit root search, if
we can look up the inode item and other 2982 * related items in the commit root, it means the transaction of 2983 * dir/file creation has been committed, and the dir index item that we 2984 * delayed inserting has also been inserted into the commit root. So 2985 * we needn't worry about the delayed insertion of the dir index item 2986 * here. 2987 */ 2988 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, 2989 dentry->d_name.name, dentry->d_name.len, 0); 2990 if (IS_ERR(di)) { 2991 err = PTR_ERR(di); 2992 goto out; 2993 } 2994 BUG_ON(ret == -ENOENT); 2995 if (check_path_shared(root, path)) 2996 goto out; 2997 2998 err = 0; 2999 out: 3000 btrfs_free_path(path); 3001 /* Migrate the orphan reservation over */ 3002 if (!err) 3003 err = btrfs_block_rsv_migrate(trans->block_rsv, 3004 &root->fs_info->global_block_rsv, 3005 trans->bytes_reserved); 3006 3007 if (err) { 3008 btrfs_end_transaction(trans, root); 3009 root->fs_info->enospc_unlink = 0; 3010 return ERR_PTR(err); 3011 } 3012 3013 trans->block_rsv = &root->fs_info->global_block_rsv; 3014 return trans; 3015 } 3016 3017 static void __unlink_end_trans(struct btrfs_trans_handle *trans, 3018 struct btrfs_root *root) 3019 { 3020 if (trans->block_rsv == &root->fs_info->global_block_rsv) { 3021 btrfs_block_rsv_release(root, trans->block_rsv, 3022 trans->bytes_reserved); 3023 trans->block_rsv = &root->fs_info->trans_block_rsv; 3024 BUG_ON(!root->fs_info->enospc_unlink); 3025 root->fs_info->enospc_unlink = 0; 3026 } 3027 btrfs_end_transaction(trans, root); 3028 } 3029 3030 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 3031 { 3032 struct btrfs_root *root = BTRFS_I(dir)->root; 3033 struct btrfs_trans_handle *trans; 3034 struct inode *inode = dentry->d_inode; 3035 int ret; 3036 unsigned long nr = 0; 3037 3038 trans = __unlink_start_trans(dir, dentry); 3039 if (IS_ERR(trans)) 3040 return PTR_ERR(trans); 3041 3042 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3043 3044 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3045 dentry->d_name.name, dentry->d_name.len); 3046 if (ret) 3047 goto out; 3048 3049 if (inode->i_nlink == 0) { 3050 ret = btrfs_orphan_add(trans, inode); 3051 if (ret) 3052 goto out; 3053 } 3054 3055 out: 3056 nr = trans->blocks_used; 3057 __unlink_end_trans(trans, root); 3058 btrfs_btree_balance_dirty(root, nr); 3059 return ret; 3060 } 3061 3062 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3063 struct btrfs_root *root, 3064 struct inode *dir, u64 objectid, 3065 const char *name, int name_len) 3066 { 3067 struct btrfs_path *path; 3068 struct extent_buffer *leaf; 3069 struct btrfs_dir_item *di; 3070 struct btrfs_key key; 3071 u64 index; 3072 int ret; 3073 u64 dir_ino = btrfs_ino(dir); 3074 3075 path = btrfs_alloc_path(); 3076 if (!path) 3077 return -ENOMEM; 3078 3079 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3080 name, name_len, -1); 3081 if (IS_ERR_OR_NULL(di)) { 3082 if (!di) 3083 ret = -ENOENT; 3084 else 3085 ret = PTR_ERR(di); 3086 goto out; 3087 } 3088 3089 leaf = path->nodes[0]; 3090 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3091 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3092 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3093 if (ret) { 3094 btrfs_abort_transaction(trans, root, ret); 3095 goto out; 3096 } 3097 btrfs_release_path(path); 3098 3099 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3100 objectid, root->root_key.objectid, 3101 dir_ino, &index, name, name_len); 3102 if (ret < 0) { 3103 if (ret !=
-ENOENT) { 3104 btrfs_abort_transaction(trans, root, ret); 3105 goto out; 3106 } 3107 di = btrfs_search_dir_index_item(root, path, dir_ino, 3108 name, name_len); 3109 if (IS_ERR_OR_NULL(di)) { 3110 if (!di) 3111 ret = -ENOENT; 3112 else 3113 ret = PTR_ERR(di); 3114 btrfs_abort_transaction(trans, root, ret); 3115 goto out; 3116 } 3117 3118 leaf = path->nodes[0]; 3119 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3120 btrfs_release_path(path); 3121 index = key.offset; 3122 } 3123 btrfs_release_path(path); 3124 3125 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3126 if (ret) { 3127 btrfs_abort_transaction(trans, root, ret); 3128 goto out; 3129 } 3130 3131 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3132 inode_inc_iversion(dir); 3133 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3134 ret = btrfs_update_inode(trans, root, dir); 3135 if (ret) 3136 btrfs_abort_transaction(trans, root, ret); 3137 out: 3138 btrfs_free_path(path); 3139 return ret; 3140 } 3141 3142 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 3143 { 3144 struct inode *inode = dentry->d_inode; 3145 int err = 0; 3146 struct btrfs_root *root = BTRFS_I(dir)->root; 3147 struct btrfs_trans_handle *trans; 3148 unsigned long nr = 0; 3149 3150 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || 3151 btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) 3152 return -ENOTEMPTY; 3153 3154 trans = __unlink_start_trans(dir, dentry); 3155 if (IS_ERR(trans)) 3156 return PTR_ERR(trans); 3157 3158 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3159 err = btrfs_unlink_subvol(trans, root, dir, 3160 BTRFS_I(inode)->location.objectid, 3161 dentry->d_name.name, 3162 dentry->d_name.len); 3163 goto out; 3164 } 3165 3166 err = btrfs_orphan_add(trans, inode); 3167 if (err) 3168 goto out; 3169 3170 /* now the directory is empty */ 3171 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3172 dentry->d_name.name, dentry->d_name.len); 3173 if (!err) 3174 btrfs_i_size_write(inode, 0); 3175 out: 3176 nr = trans->blocks_used; 3177 __unlink_end_trans(trans, root); 3178 btrfs_btree_balance_dirty(root, nr); 3179 3180 return err; 3181 } 3182 3183 /* 3184 * this can truncate away extent items, csum items and directory items. 3185 * It starts at a high offset and removes keys until it can't find 3186 * any higher than new_size 3187 * 3188 * csum items that cross the new i_size are truncated to the new size 3189 * as well. 3190 * 3191 * min_type is the minimum key type to truncate down to. If set to 0, this 3192 * will kill all the items on this inode, including the INODE_ITEM_KEY. 
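 *
 * For example (note added for clarity): btrfs_evict_inode() below
 * calls this with new_size == 0 and min_type == 0 to drop every item,
 * while a regular truncate passes BTRFS_EXTENT_DATA_KEY, as the
 * BUG_ON at the top of the function enforces, so the inode item and
 * xattrs survive.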
3193 */ 3194 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3195 struct btrfs_root *root, 3196 struct inode *inode, 3197 u64 new_size, u32 min_type) 3198 { 3199 struct btrfs_path *path; 3200 struct extent_buffer *leaf; 3201 struct btrfs_file_extent_item *fi; 3202 struct btrfs_key key; 3203 struct btrfs_key found_key; 3204 u64 extent_start = 0; 3205 u64 extent_num_bytes = 0; 3206 u64 extent_offset = 0; 3207 u64 item_end = 0; 3208 u64 mask = root->sectorsize - 1; 3209 u32 found_type = (u8)-1; 3210 int found_extent; 3211 int del_item; 3212 int pending_del_nr = 0; 3213 int pending_del_slot = 0; 3214 int extent_type = -1; 3215 int ret; 3216 int err = 0; 3217 u64 ino = btrfs_ino(inode); 3218 3219 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3220 3221 path = btrfs_alloc_path(); 3222 if (!path) 3223 return -ENOMEM; 3224 path->reada = -1; 3225 3226 if (root->ref_cows || root == root->fs_info->tree_root) 3227 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 3228 3229 /* 3230 * This function is also used to drop the items in the log tree before 3231 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 3232 * it is used to drop the logged items. So we shouldn't kill the delayed 3233 * items. 3234 */ 3235 if (min_type == 0 && root == BTRFS_I(inode)->root) 3236 btrfs_kill_delayed_inode_items(inode); 3237 3238 key.objectid = ino; 3239 key.offset = (u64)-1; 3240 key.type = (u8)-1; 3241 3242 search_again: 3243 path->leave_spinning = 1; 3244 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3245 if (ret < 0) { 3246 err = ret; 3247 goto out; 3248 } 3249 3250 if (ret > 0) { 3251 /* there are no items in the tree for us to truncate, we're 3252 * done 3253 */ 3254 if (path->slots[0] == 0) 3255 goto out; 3256 path->slots[0]--; 3257 } 3258 3259 while (1) { 3260 fi = NULL; 3261 leaf = path->nodes[0]; 3262 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3263 found_type = btrfs_key_type(&found_key); 3264 3265 if (found_key.objectid != ino) 3266 break; 3267 3268 if (found_type < min_type) 3269 break; 3270 3271 item_end = found_key.offset; 3272 if (found_type == BTRFS_EXTENT_DATA_KEY) { 3273 fi = btrfs_item_ptr(leaf, path->slots[0], 3274 struct btrfs_file_extent_item); 3275 extent_type = btrfs_file_extent_type(leaf, fi); 3276 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3277 item_end += 3278 btrfs_file_extent_num_bytes(leaf, fi); 3279 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3280 item_end += btrfs_file_extent_inline_len(leaf, 3281 fi); 3282 } 3283 item_end--; 3284 } 3285 if (found_type > min_type) { 3286 del_item = 1; 3287 } else { 3288 if (item_end < new_size) 3289 break; 3290 if (found_key.offset >= new_size) 3291 del_item = 1; 3292 else 3293 del_item = 0; 3294 } 3295 found_extent = 0; 3296 /* FIXME, shrink the extent if the ref count is only 1 */ 3297 if (found_type != BTRFS_EXTENT_DATA_KEY) 3298 goto delete; 3299 3300 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3301 u64 num_dec; 3302 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 3303 if (!del_item) { 3304 u64 orig_num_bytes = 3305 btrfs_file_extent_num_bytes(leaf, fi); 3306 extent_num_bytes = new_size - 3307 found_key.offset + root->sectorsize - 1; 3308 extent_num_bytes = extent_num_bytes & 3309 ~((u64)root->sectorsize - 1); 3310 btrfs_set_file_extent_num_bytes(leaf, fi, 3311 extent_num_bytes); 3312 num_dec = (orig_num_bytes - 3313 extent_num_bytes); 3314 if (root->ref_cows && extent_start != 0) 3315 inode_sub_bytes(inode, num_dec); 3316
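/*
 * the extent item was shrunk in place above (note added for
 * clarity); dirty the leaf so the new num_bytes reaches disk
 */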
btrfs_mark_buffer_dirty(leaf); 3317 } else { 3318 extent_num_bytes = 3319 btrfs_file_extent_disk_num_bytes(leaf, 3320 fi); 3321 extent_offset = found_key.offset - 3322 btrfs_file_extent_offset(leaf, fi); 3323 3324 /* FIXME blocksize != 4096 */ 3325 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 3326 if (extent_start != 0) { 3327 found_extent = 1; 3328 if (root->ref_cows) 3329 inode_sub_bytes(inode, num_dec); 3330 } 3331 } 3332 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3333 /* 3334 * we can't truncate inline items that have had 3335 * special encodings 3336 */ 3337 if (!del_item && 3338 btrfs_file_extent_compression(leaf, fi) == 0 && 3339 btrfs_file_extent_encryption(leaf, fi) == 0 && 3340 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 3341 u32 size = new_size - found_key.offset; 3342 3343 if (root->ref_cows) { 3344 inode_sub_bytes(inode, item_end + 1 - 3345 new_size); 3346 } 3347 size = 3348 btrfs_file_extent_calc_inline_size(size); 3349 btrfs_truncate_item(trans, root, path, 3350 size, 1); 3351 } else if (root->ref_cows) { 3352 inode_sub_bytes(inode, item_end + 1 - 3353 found_key.offset); 3354 } 3355 } 3356 delete: 3357 if (del_item) { 3358 if (!pending_del_nr) { 3359 /* no pending yet, add ourselves */ 3360 pending_del_slot = path->slots[0]; 3361 pending_del_nr = 1; 3362 } else if (pending_del_nr && 3363 path->slots[0] + 1 == pending_del_slot) { 3364 /* hop on the pending chunk */ 3365 pending_del_nr++; 3366 pending_del_slot = path->slots[0]; 3367 } else { 3368 BUG(); 3369 } 3370 } else { 3371 break; 3372 } 3373 if (found_extent && (root->ref_cows || 3374 root == root->fs_info->tree_root)) { 3375 btrfs_set_path_blocking(path); 3376 ret = btrfs_free_extent(trans, root, extent_start, 3377 extent_num_bytes, 0, 3378 btrfs_header_owner(leaf), 3379 ino, extent_offset, 0); 3380 BUG_ON(ret); 3381 } 3382 3383 if (found_type == BTRFS_INODE_ITEM_KEY) 3384 break; 3385 3386 if (path->slots[0] == 0 || 3387 path->slots[0] != pending_del_slot) { 3388 if (root->ref_cows && 3389 BTRFS_I(inode)->location.objectid != 3390 BTRFS_FREE_INO_OBJECTID) { 3391 err = -EAGAIN; 3392 goto out; 3393 } 3394 if (pending_del_nr) { 3395 ret = btrfs_del_items(trans, root, path, 3396 pending_del_slot, 3397 pending_del_nr); 3398 if (ret) { 3399 btrfs_abort_transaction(trans, 3400 root, ret); 3401 goto error; 3402 } 3403 pending_del_nr = 0; 3404 } 3405 btrfs_release_path(path); 3406 goto search_again; 3407 } else { 3408 path->slots[0]--; 3409 } 3410 } 3411 out: 3412 if (pending_del_nr) { 3413 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3414 pending_del_nr); 3415 if (ret) 3416 btrfs_abort_transaction(trans, root, ret); 3417 } 3418 error: 3419 btrfs_free_path(path); 3420 return err; 3421 } 3422 3423 /* 3424 * taken from block_truncate_page, but does cow as it zeros out 3425 * any bytes left in the last page in the file. 
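 *
 * Worked example (added for clarity, assuming 4K pages): from == 5000
 * gives index == 1 and offset == 904, so bytes 904..4095 of the
 * second page are zeroed while bytes 0..903 are left alone.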
3426 */ 3427 static int btrfs_truncate_page(struct address_space *mapping, loff_t from) 3428 { 3429 struct inode *inode = mapping->host; 3430 struct btrfs_root *root = BTRFS_I(inode)->root; 3431 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3432 struct btrfs_ordered_extent *ordered; 3433 struct extent_state *cached_state = NULL; 3434 char *kaddr; 3435 u32 blocksize = root->sectorsize; 3436 pgoff_t index = from >> PAGE_CACHE_SHIFT; 3437 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3438 struct page *page; 3439 gfp_t mask = btrfs_alloc_write_mask(mapping); 3440 int ret = 0; 3441 u64 page_start; 3442 u64 page_end; 3443 3444 if ((offset & (blocksize - 1)) == 0) 3445 goto out; 3446 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 3447 if (ret) 3448 goto out; 3449 3450 ret = -ENOMEM; 3451 again: 3452 page = find_or_create_page(mapping, index, mask); 3453 if (!page) { 3454 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3455 goto out; 3456 } 3457 3458 page_start = page_offset(page); 3459 page_end = page_start + PAGE_CACHE_SIZE - 1; 3460 3461 if (!PageUptodate(page)) { 3462 ret = btrfs_readpage(NULL, page); 3463 lock_page(page); 3464 if (page->mapping != mapping) { 3465 unlock_page(page); 3466 page_cache_release(page); 3467 goto again; 3468 } 3469 if (!PageUptodate(page)) { 3470 ret = -EIO; 3471 goto out_unlock; 3472 } 3473 } 3474 wait_on_page_writeback(page); 3475 3476 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 3477 set_page_extent_mapped(page); 3478 3479 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3480 if (ordered) { 3481 unlock_extent_cached(io_tree, page_start, page_end, 3482 &cached_state, GFP_NOFS); 3483 unlock_page(page); 3484 page_cache_release(page); 3485 btrfs_start_ordered_extent(inode, ordered, 1); 3486 btrfs_put_ordered_extent(ordered); 3487 goto again; 3488 } 3489 3490 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 3491 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 3492 0, 0, &cached_state, GFP_NOFS); 3493 3494 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 3495 &cached_state); 3496 if (ret) { 3497 unlock_extent_cached(io_tree, page_start, page_end, 3498 &cached_state, GFP_NOFS); 3499 goto out_unlock; 3500 } 3501 3502 ret = 0; 3503 if (offset != PAGE_CACHE_SIZE) { 3504 kaddr = kmap(page); 3505 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); 3506 flush_dcache_page(page); 3507 kunmap(page); 3508 } 3509 ClearPageChecked(page); 3510 set_page_dirty(page); 3511 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, 3512 GFP_NOFS); 3513 3514 out_unlock: 3515 if (ret) 3516 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3517 unlock_page(page); 3518 page_cache_release(page); 3519 out: 3520 return ret; 3521 } 3522 3523 /* 3524 * This function puts in dummy file extents for the area we're creating a hole 3525 * for. 
So if we are truncating this file to a larger size, we need to insert 3526 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for 3527 * the range between oldsize and size. 3528 */ 3529 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 3530 { 3531 struct btrfs_trans_handle *trans; 3532 struct btrfs_root *root = BTRFS_I(inode)->root; 3533 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3534 struct extent_map *em = NULL; 3535 struct extent_state *cached_state = NULL; 3536 u64 mask = root->sectorsize - 1; 3537 u64 hole_start = (oldsize + mask) & ~mask; 3538 u64 block_end = (size + mask) & ~mask; 3539 u64 last_byte; 3540 u64 cur_offset; 3541 u64 hole_size; 3542 int err = 0; 3543 3544 if (size <= hole_start) 3545 return 0; 3546 3547 while (1) { 3548 struct btrfs_ordered_extent *ordered; 3549 btrfs_wait_ordered_range(inode, hole_start, 3550 block_end - hole_start); 3551 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 3552 &cached_state); 3553 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3554 if (!ordered) 3555 break; 3556 unlock_extent_cached(io_tree, hole_start, block_end - 1, 3557 &cached_state, GFP_NOFS); 3558 btrfs_put_ordered_extent(ordered); 3559 } 3560 3561 cur_offset = hole_start; 3562 while (1) { 3563 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3564 block_end - cur_offset, 0); 3565 if (IS_ERR(em)) { 3566 err = PTR_ERR(em); 3567 break; 3568 } 3569 last_byte = min(extent_map_end(em), block_end); 3570 last_byte = (last_byte + mask) & ~mask; 3571 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3572 u64 hint_byte = 0; 3573 hole_size = last_byte - cur_offset; 3574 3575 trans = btrfs_start_transaction(root, 3); 3576 if (IS_ERR(trans)) { 3577 err = PTR_ERR(trans); 3578 break; 3579 } 3580 3581 err = btrfs_drop_extents(trans, inode, cur_offset, 3582 cur_offset + hole_size, 3583 &hint_byte, 1); 3584 if (err) { 3585 btrfs_abort_transaction(trans, root, err); 3586 btrfs_end_transaction(trans, root); 3587 break; 3588 } 3589 3590 err = btrfs_insert_file_extent(trans, root, 3591 btrfs_ino(inode), cur_offset, 0, 3592 0, hole_size, 0, hole_size, 3593 0, 0, 0); 3594 if (err) { 3595 btrfs_abort_transaction(trans, root, err); 3596 btrfs_end_transaction(trans, root); 3597 break; 3598 } 3599 3600 btrfs_drop_extent_cache(inode, hole_start, 3601 last_byte - 1, 0); 3602 3603 btrfs_update_inode(trans, root, inode); 3604 btrfs_end_transaction(trans, root); 3605 } 3606 free_extent_map(em); 3607 em = NULL; 3608 cur_offset = last_byte; 3609 if (cur_offset >= block_end) 3610 break; 3611 } 3612 3613 free_extent_map(em); 3614 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 3615 GFP_NOFS); 3616 return err; 3617 } 3618 3619 static int btrfs_setsize(struct inode *inode, loff_t newsize) 3620 { 3621 struct btrfs_root *root = BTRFS_I(inode)->root; 3622 struct btrfs_trans_handle *trans; 3623 loff_t oldsize = i_size_read(inode); 3624 int ret; 3625 3626 if (newsize == oldsize) 3627 return 0; 3628 3629 if (newsize > oldsize) { 3630 truncate_pagecache(inode, oldsize, newsize); 3631 ret = btrfs_cont_expand(inode, oldsize, newsize); 3632 if (ret) 3633 return ret; 3634 3635 trans = btrfs_start_transaction(root, 1); 3636 if (IS_ERR(trans)) 3637 return PTR_ERR(trans); 3638 3639 i_size_write(inode, newsize); 3640 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 3641 ret = btrfs_update_inode(trans, root, inode); 3642 btrfs_end_transaction(trans, root); 3643 } else { 3644 3645 /* 3646 * We're truncating a file that
used to have good data down to 3647 * zero. Make sure it gets into the ordered flush list so that 3648 * any new writes get down to disk quickly. 3649 */ 3650 if (newsize == 0) 3651 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 3652 &BTRFS_I(inode)->runtime_flags); 3653 3654 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3655 truncate_setsize(inode, newsize); 3656 ret = btrfs_truncate(inode); 3657 } 3658 3659 return ret; 3660 } 3661 3662 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 3663 { 3664 struct inode *inode = dentry->d_inode; 3665 struct btrfs_root *root = BTRFS_I(inode)->root; 3666 int err; 3667 3668 if (btrfs_root_readonly(root)) 3669 return -EROFS; 3670 3671 err = inode_change_ok(inode, attr); 3672 if (err) 3673 return err; 3674 3675 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3676 err = btrfs_setsize(inode, attr->ia_size); 3677 if (err) 3678 return err; 3679 } 3680 3681 if (attr->ia_valid) { 3682 setattr_copy(inode, attr); 3683 inode_inc_iversion(inode); 3684 err = btrfs_dirty_inode(inode); 3685 3686 if (!err && attr->ia_valid & ATTR_MODE) 3687 err = btrfs_acl_chmod(inode); 3688 } 3689 3690 return err; 3691 } 3692 3693 void btrfs_evict_inode(struct inode *inode) 3694 { 3695 struct btrfs_trans_handle *trans; 3696 struct btrfs_root *root = BTRFS_I(inode)->root; 3697 struct btrfs_block_rsv *rsv, *global_rsv; 3698 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 3699 unsigned long nr; 3700 int ret; 3701 3702 trace_btrfs_inode_evict(inode); 3703 3704 truncate_inode_pages(&inode->i_data, 0); 3705 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3706 btrfs_is_free_space_inode(root, inode))) 3707 goto no_delete; 3708 3709 if (is_bad_inode(inode)) { 3710 btrfs_orphan_del(NULL, inode); 3711 goto no_delete; 3712 } 3713 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ 3714 btrfs_wait_ordered_range(inode, 0, (u64)-1); 3715 3716 if (root->fs_info->log_root_recovering) { 3717 BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3718 &BTRFS_I(inode)->runtime_flags)); 3719 goto no_delete; 3720 } 3721 3722 if (inode->i_nlink > 0) { 3723 BUG_ON(btrfs_root_refs(&root->root_item) != 0); 3724 goto no_delete; 3725 } 3726 3727 rsv = btrfs_alloc_block_rsv(root); 3728 if (!rsv) { 3729 btrfs_orphan_del(NULL, inode); 3730 goto no_delete; 3731 } 3732 rsv->size = min_size; 3733 global_rsv = &root->fs_info->global_block_rsv; 3734 3735 btrfs_i_size_write(inode, 0); 3736 3737 /* 3738 * This is a bit simpler than btrfs_truncate since 3739 * 3740 * 1) We've already reserved our space for our orphan item in the 3741 * unlink. 3742 * 2) We're going to delete the inode item, so we don't need to update 3743 * it at all. 3744 * 3745 * So we just need to reserve some slack space in case we add bytes when 3746 * doing the truncate. 3747 */ 3748 while (1) { 3749 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); 3750 3751 /* 3752 * Try and steal from the global reserve since we will 3753 * likely not use this space anyway, we want to try as 3754 * hard as possible to get this to work. 
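 *
 * The fallback order (spelled out for clarity): refill the rsv
 * without flushing first, then migrate min_size bytes out of the
 * global reserve; if both fail we give up, drop the in-memory orphan
 * state and leave the actual truncate for the next mount.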
3755 */ 3756 if (ret) 3757 ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size); 3758 3759 if (ret) { 3760 printk(KERN_WARNING "Could not get space for a " 3761 "delete, will truncate on mount %d\n", ret); 3762 btrfs_orphan_del(NULL, inode); 3763 btrfs_free_block_rsv(root, rsv); 3764 goto no_delete; 3765 } 3766 3767 trans = btrfs_start_transaction(root, 0); 3768 if (IS_ERR(trans)) { 3769 btrfs_orphan_del(NULL, inode); 3770 btrfs_free_block_rsv(root, rsv); 3771 goto no_delete; 3772 } 3773 3774 trans->block_rsv = rsv; 3775 3776 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 3777 if (ret != -EAGAIN) 3778 break; 3779 3780 nr = trans->blocks_used; 3781 btrfs_end_transaction(trans, root); 3782 trans = NULL; 3783 btrfs_btree_balance_dirty(root, nr); 3784 } 3785 3786 btrfs_free_block_rsv(root, rsv); 3787 3788 if (ret == 0) { 3789 trans->block_rsv = root->orphan_block_rsv; 3790 ret = btrfs_orphan_del(trans, inode); 3791 BUG_ON(ret); 3792 } 3793 3794 trans->block_rsv = &root->fs_info->trans_block_rsv; 3795 if (!(root == root->fs_info->tree_root || 3796 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 3797 btrfs_return_ino(root, btrfs_ino(inode)); 3798 3799 nr = trans->blocks_used; 3800 btrfs_end_transaction(trans, root); 3801 btrfs_btree_balance_dirty(root, nr); 3802 no_delete: 3803 clear_inode(inode); 3804 return; 3805 } 3806 3807 /* 3808 * this returns the key found in the dir entry in the location pointer. 3809 * If no dir entries were found, location->objectid is 0. 3810 */ 3811 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 3812 struct btrfs_key *location) 3813 { 3814 const char *name = dentry->d_name.name; 3815 int namelen = dentry->d_name.len; 3816 struct btrfs_dir_item *di; 3817 struct btrfs_path *path; 3818 struct btrfs_root *root = BTRFS_I(dir)->root; 3819 int ret = 0; 3820 3821 path = btrfs_alloc_path(); 3822 if (!path) 3823 return -ENOMEM; 3824 3825 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 3826 namelen, 0); 3827 if (IS_ERR(di)) 3828 ret = PTR_ERR(di); 3829 3830 if (IS_ERR_OR_NULL(di)) 3831 goto out_err; 3832 3833 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 3834 out: 3835 btrfs_free_path(path); 3836 return ret; 3837 out_err: 3838 location->objectid = 0; 3839 goto out; 3840 } 3841 3842 /* 3843 * when we hit a tree root in a directory, the btrfs part of the inode 3844 * needs to be changed to reflect the root directory of the tree root. This 3845 * is kind of like crossing a mount point. 
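 *
 * Roughly (summary added for clarity): look up the ROOT_REF item for
 * (parent root, subvolume id) in the tree of tree roots, verify that
 * its dirid and name match the dentry being resolved, then read the
 * referenced root and point the location at its root directory item.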
3846 */ 3847 static int fixup_tree_root_location(struct btrfs_root *root, 3848 struct inode *dir, 3849 struct dentry *dentry, 3850 struct btrfs_key *location, 3851 struct btrfs_root **sub_root) 3852 { 3853 struct btrfs_path *path; 3854 struct btrfs_root *new_root; 3855 struct btrfs_root_ref *ref; 3856 struct extent_buffer *leaf; 3857 int ret; 3858 int err = 0; 3859 3860 path = btrfs_alloc_path(); 3861 if (!path) { 3862 err = -ENOMEM; 3863 goto out; 3864 } 3865 3866 err = -ENOENT; 3867 ret = btrfs_find_root_ref(root->fs_info->tree_root, path, 3868 BTRFS_I(dir)->root->root_key.objectid, 3869 location->objectid); 3870 if (ret) { 3871 if (ret < 0) 3872 err = ret; 3873 goto out; 3874 } 3875 3876 leaf = path->nodes[0]; 3877 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 3878 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 3879 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 3880 goto out; 3881 3882 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 3883 (unsigned long)(ref + 1), 3884 dentry->d_name.len); 3885 if (ret) 3886 goto out; 3887 3888 btrfs_release_path(path); 3889 3890 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 3891 if (IS_ERR(new_root)) { 3892 err = PTR_ERR(new_root); 3893 goto out; 3894 } 3895 3896 if (btrfs_root_refs(&new_root->root_item) == 0) { 3897 err = -ENOENT; 3898 goto out; 3899 } 3900 3901 *sub_root = new_root; 3902 location->objectid = btrfs_root_dirid(&new_root->root_item); 3903 location->type = BTRFS_INODE_ITEM_KEY; 3904 location->offset = 0; 3905 err = 0; 3906 out: 3907 btrfs_free_path(path); 3908 return err; 3909 } 3910 3911 static void inode_tree_add(struct inode *inode) 3912 { 3913 struct btrfs_root *root = BTRFS_I(inode)->root; 3914 struct btrfs_inode *entry; 3915 struct rb_node **p; 3916 struct rb_node *parent; 3917 u64 ino = btrfs_ino(inode); 3918 again: 3919 p = &root->inode_tree.rb_node; 3920 parent = NULL; 3921 3922 if (inode_unhashed(inode)) 3923 return; 3924 3925 spin_lock(&root->inode_lock); 3926 while (*p) { 3927 parent = *p; 3928 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3929 3930 if (ino < btrfs_ino(&entry->vfs_inode)) 3931 p = &parent->rb_left; 3932 else if (ino > btrfs_ino(&entry->vfs_inode)) 3933 p = &parent->rb_right; 3934 else { 3935 WARN_ON(!(entry->vfs_inode.i_state & 3936 (I_WILL_FREE | I_FREEING))); 3937 rb_erase(parent, &root->inode_tree); 3938 RB_CLEAR_NODE(parent); 3939 spin_unlock(&root->inode_lock); 3940 goto again; 3941 } 3942 } 3943 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); 3944 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3945 spin_unlock(&root->inode_lock); 3946 } 3947 3948 static void inode_tree_del(struct inode *inode) 3949 { 3950 struct btrfs_root *root = BTRFS_I(inode)->root; 3951 int empty = 0; 3952 3953 spin_lock(&root->inode_lock); 3954 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 3955 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3956 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 3957 empty = RB_EMPTY_ROOT(&root->inode_tree); 3958 } 3959 spin_unlock(&root->inode_lock); 3960 3961 /* 3962 * Free space cache has inodes in the tree root, but the tree root has a 3963 * root_refs of 0, so this could end up dropping the tree root as a 3964 * snapshot, so we need the extra root != root->fs_info->tree_root check 3965 * below to make sure we don't drop it.
3966 */ 3967 if (empty && btrfs_root_refs(&root->root_item) == 0 && 3968 root != root->fs_info->tree_root) { 3969 synchronize_srcu(&root->fs_info->subvol_srcu); 3970 spin_lock(&root->inode_lock); 3971 empty = RB_EMPTY_ROOT(&root->inode_tree); 3972 spin_unlock(&root->inode_lock); 3973 if (empty) 3974 btrfs_add_dead_root(root); 3975 } 3976 } 3977 3978 void btrfs_invalidate_inodes(struct btrfs_root *root) 3979 { 3980 struct rb_node *node; 3981 struct rb_node *prev; 3982 struct btrfs_inode *entry; 3983 struct inode *inode; 3984 u64 objectid = 0; 3985 3986 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 3987 3988 spin_lock(&root->inode_lock); 3989 again: 3990 node = root->inode_tree.rb_node; 3991 prev = NULL; 3992 while (node) { 3993 prev = node; 3994 entry = rb_entry(node, struct btrfs_inode, rb_node); 3995 3996 if (objectid < btrfs_ino(&entry->vfs_inode)) 3997 node = node->rb_left; 3998 else if (objectid > btrfs_ino(&entry->vfs_inode)) 3999 node = node->rb_right; 4000 else 4001 break; 4002 } 4003 if (!node) { 4004 while (prev) { 4005 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4006 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 4007 node = prev; 4008 break; 4009 } 4010 prev = rb_next(prev); 4011 } 4012 } 4013 while (node) { 4014 entry = rb_entry(node, struct btrfs_inode, rb_node); 4015 objectid = btrfs_ino(&entry->vfs_inode) + 1; 4016 inode = igrab(&entry->vfs_inode); 4017 if (inode) { 4018 spin_unlock(&root->inode_lock); 4019 if (atomic_read(&inode->i_count) > 1) 4020 d_prune_aliases(inode); 4021 /* 4022 * btrfs_drop_inode will have it removed from 4023 * the inode cache when its usage count 4024 * hits zero. 4025 */ 4026 iput(inode); 4027 cond_resched(); 4028 spin_lock(&root->inode_lock); 4029 goto again; 4030 } 4031 4032 if (cond_resched_lock(&root->inode_lock)) 4033 goto again; 4034 4035 node = rb_next(node); 4036 } 4037 spin_unlock(&root->inode_lock); 4038 } 4039 4040 static int btrfs_init_locked_inode(struct inode *inode, void *p) 4041 { 4042 struct btrfs_iget_args *args = p; 4043 inode->i_ino = args->ino; 4044 BTRFS_I(inode)->root = args->root; 4045 btrfs_set_inode_space_info(args->root, inode); 4046 return 0; 4047 } 4048 4049 static int btrfs_find_actor(struct inode *inode, void *opaque) 4050 { 4051 struct btrfs_iget_args *args = opaque; 4052 return args->ino == btrfs_ino(inode) && 4053 args->root == BTRFS_I(inode)->root; 4054 } 4055 4056 static struct inode *btrfs_iget_locked(struct super_block *s, 4057 u64 objectid, 4058 struct btrfs_root *root) 4059 { 4060 struct inode *inode; 4061 struct btrfs_iget_args args; 4062 args.ino = objectid; 4063 args.root = root; 4064 4065 inode = iget5_locked(s, objectid, btrfs_find_actor, 4066 btrfs_init_locked_inode, 4067 (void *)&args); 4068 return inode; 4069 } 4070 4071 /* Get an inode object given its location and corresponding root. 
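 * A cached inode is returned as-is by iget5_locked; only a freshly
 * allocated (I_NEW) inode is filled from the btree and linked into the
 * per-root inode rb-tree.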
 * Returns in *new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(-ESTALE);
		}
	}

	return inode;
}

static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	if (unlikely(d_need_lookup(dentry))) {
		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
		kfree(dentry->d_fsdata);
		dentry->d_fsdata = NULL;
		/* This thing is hashed, drop it for now */
		d_drop(dentry);
	} else {
		ret = btrfs_inode_by_name(dir, dentry, &location);
	}

	if (ret < 0)
		return ERR_PTR(ret);

	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	index = srcu_read_lock(&root->fs_info->subvol_srcu);
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
		if (ret)
			inode = ERR_PTR(ret);
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = dentry->d_inode;

	if (!inode && !IS_ROOT(dentry))
		inode = dentry->d_parent->d_inode;

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
4195 return 1; 4196 4197 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 4198 return 1; 4199 } 4200 return 0; 4201 } 4202 4203 static void btrfs_dentry_release(struct dentry *dentry) 4204 { 4205 if (dentry->d_fsdata) 4206 kfree(dentry->d_fsdata); 4207 } 4208 4209 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4210 struct nameidata *nd) 4211 { 4212 struct dentry *ret; 4213 4214 ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4215 if (unlikely(d_need_lookup(dentry))) { 4216 spin_lock(&dentry->d_lock); 4217 dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 4218 spin_unlock(&dentry->d_lock); 4219 } 4220 return ret; 4221 } 4222 4223 unsigned char btrfs_filetype_table[] = { 4224 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 4225 }; 4226 4227 static int btrfs_real_readdir(struct file *filp, void *dirent, 4228 filldir_t filldir) 4229 { 4230 struct inode *inode = filp->f_dentry->d_inode; 4231 struct btrfs_root *root = BTRFS_I(inode)->root; 4232 struct btrfs_item *item; 4233 struct btrfs_dir_item *di; 4234 struct btrfs_key key; 4235 struct btrfs_key found_key; 4236 struct btrfs_path *path; 4237 struct list_head ins_list; 4238 struct list_head del_list; 4239 int ret; 4240 struct extent_buffer *leaf; 4241 int slot; 4242 unsigned char d_type; 4243 int over = 0; 4244 u32 di_cur; 4245 u32 di_total; 4246 u32 di_len; 4247 int key_type = BTRFS_DIR_INDEX_KEY; 4248 char tmp_name[32]; 4249 char *name_ptr; 4250 int name_len; 4251 int is_curr = 0; /* filp->f_pos points to the current index? */ 4252 4253 /* FIXME, use a real flag for deciding about the key type */ 4254 if (root->fs_info->tree_root == root) 4255 key_type = BTRFS_DIR_ITEM_KEY; 4256 4257 /* special case for "." */ 4258 if (filp->f_pos == 0) { 4259 over = filldir(dirent, ".", 1, 4260 filp->f_pos, btrfs_ino(inode), DT_DIR); 4261 if (over) 4262 return 0; 4263 filp->f_pos = 1; 4264 } 4265 /* special case for .., just use the back ref */ 4266 if (filp->f_pos == 1) { 4267 u64 pino = parent_ino(filp->f_path.dentry); 4268 over = filldir(dirent, "..", 2, 4269 filp->f_pos, pino, DT_DIR); 4270 if (over) 4271 return 0; 4272 filp->f_pos = 2; 4273 } 4274 path = btrfs_alloc_path(); 4275 if (!path) 4276 return -ENOMEM; 4277 4278 path->reada = 1; 4279 4280 if (key_type == BTRFS_DIR_INDEX_KEY) { 4281 INIT_LIST_HEAD(&ins_list); 4282 INIT_LIST_HEAD(&del_list); 4283 btrfs_get_delayed_items(inode, &ins_list, &del_list); 4284 } 4285 4286 btrfs_set_key_type(&key, key_type); 4287 key.offset = filp->f_pos; 4288 key.objectid = btrfs_ino(inode); 4289 4290 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4291 if (ret < 0) 4292 goto err; 4293 4294 while (1) { 4295 leaf = path->nodes[0]; 4296 slot = path->slots[0]; 4297 if (slot >= btrfs_header_nritems(leaf)) { 4298 ret = btrfs_next_leaf(root, path); 4299 if (ret < 0) 4300 goto err; 4301 else if (ret > 0) 4302 break; 4303 continue; 4304 } 4305 4306 item = btrfs_item_nr(leaf, slot); 4307 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4308 4309 if (found_key.objectid != key.objectid) 4310 break; 4311 if (btrfs_key_type(&found_key) != key_type) 4312 break; 4313 if (found_key.offset < filp->f_pos) 4314 goto next; 4315 if (key_type == BTRFS_DIR_INDEX_KEY && 4316 btrfs_should_delete_dir_index(&del_list, 4317 found_key.offset)) 4318 goto next; 4319 4320 filp->f_pos = found_key.offset; 4321 is_curr = 1; 4322 4323 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 4324 di_cur = 0; 4325 di_total = btrfs_item_size(leaf, item); 4326 4327 while (di_cur < di_total) 
{ 4328 struct btrfs_key location; 4329 4330 if (verify_dir_item(root, leaf, di)) 4331 break; 4332 4333 name_len = btrfs_dir_name_len(leaf, di); 4334 if (name_len <= sizeof(tmp_name)) { 4335 name_ptr = tmp_name; 4336 } else { 4337 name_ptr = kmalloc(name_len, GFP_NOFS); 4338 if (!name_ptr) { 4339 ret = -ENOMEM; 4340 goto err; 4341 } 4342 } 4343 read_extent_buffer(leaf, name_ptr, 4344 (unsigned long)(di + 1), name_len); 4345 4346 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 4347 btrfs_dir_item_key_to_cpu(leaf, di, &location); 4348 4349 4350 /* is this a reference to our own snapshot? If so 4351 * skip it. 4352 * 4353 * In contrast to old kernels, we insert the snapshot's 4354 * dir item and dir index after it has been created, so 4355 * we won't find a reference to our own snapshot. We 4356 * still keep the following code for backward 4357 * compatibility. 4358 */ 4359 if (location.type == BTRFS_ROOT_ITEM_KEY && 4360 location.objectid == root->root_key.objectid) { 4361 over = 0; 4362 goto skip; 4363 } 4364 over = filldir(dirent, name_ptr, name_len, 4365 found_key.offset, location.objectid, 4366 d_type); 4367 4368 skip: 4369 if (name_ptr != tmp_name) 4370 kfree(name_ptr); 4371 4372 if (over) 4373 goto nopos; 4374 di_len = btrfs_dir_name_len(leaf, di) + 4375 btrfs_dir_data_len(leaf, di) + sizeof(*di); 4376 di_cur += di_len; 4377 di = (struct btrfs_dir_item *)((char *)di + di_len); 4378 } 4379 next: 4380 path->slots[0]++; 4381 } 4382 4383 if (key_type == BTRFS_DIR_INDEX_KEY) { 4384 if (is_curr) 4385 filp->f_pos++; 4386 ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, 4387 &ins_list); 4388 if (ret) 4389 goto nopos; 4390 } 4391 4392 /* Reached end of directory/root. Bump pos past the last item. */ 4393 if (key_type == BTRFS_DIR_INDEX_KEY) 4394 /* 4395 * 32-bit glibc will use getdents64, but then strtol - 4396 * so the last number we can serve is this. 4397 */ 4398 filp->f_pos = 0x7fffffff; 4399 else 4400 filp->f_pos++; 4401 nopos: 4402 ret = 0; 4403 err: 4404 if (key_type == BTRFS_DIR_INDEX_KEY) 4405 btrfs_put_delayed_items(&ins_list, &del_list); 4406 btrfs_free_path(path); 4407 return ret; 4408 } 4409 4410 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 4411 { 4412 struct btrfs_root *root = BTRFS_I(inode)->root; 4413 struct btrfs_trans_handle *trans; 4414 int ret = 0; 4415 bool nolock = false; 4416 4417 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 4418 return 0; 4419 4420 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode)) 4421 nolock = true; 4422 4423 if (wbc->sync_mode == WB_SYNC_ALL) { 4424 if (nolock) 4425 trans = btrfs_join_transaction_nolock(root); 4426 else 4427 trans = btrfs_join_transaction(root); 4428 if (IS_ERR(trans)) 4429 return PTR_ERR(trans); 4430 if (nolock) 4431 ret = btrfs_end_transaction_nolock(trans, root); 4432 else 4433 ret = btrfs_commit_transaction(trans, root); 4434 } 4435 return ret; 4436 } 4437 4438 /* 4439 * This is somewhat expensive, updating the tree every time the 4440 * inode changes. But, it is most likely to find the inode in cache. 4441 * FIXME, needs more benchmarking...there are no reasons other than performance 4442 * to keep or drop this code. 
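 * The cheap btrfs_join_transaction is tried first; only on -ENOSPC do
 * we fall back to btrfs_start_transaction, which may reserve space.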
 */
int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret == -ENOSPC) {
		/* whoops, let's try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans, root);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);

	return ret;
}

/*
 * This is a copy of file_update_time. We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, struct timespec *now,
			     int flags)
{
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return btrfs_dirty_inode(inode);
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory. This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	if (S_ISDIR(mode))
		owner = 0;
	else
		owner = 1;

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	return inode;
fail:
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, root, name, name_len,
				    parent_inode, &key,
				    btrfs_inode_type(inode), index);
	if (ret == -EEXIST)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
			   name_len * 2);
	inode_inc_iversion(parent_inode);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;
		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, &local_index, name, name_len);

	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (err > 0)
		err = -EEXIST;
	return err;
}

static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
		       umode_t mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root =
BTRFS_I(dir)->root; 4791 struct inode *inode = NULL; 4792 int err; 4793 int drop_inode = 0; 4794 u64 objectid; 4795 unsigned long nr = 0; 4796 u64 index = 0; 4797 4798 if (!new_valid_dev(rdev)) 4799 return -EINVAL; 4800 4801 /* 4802 * 2 for inode item and ref 4803 * 2 for dir items 4804 * 1 for xattr if selinux is on 4805 */ 4806 trans = btrfs_start_transaction(root, 5); 4807 if (IS_ERR(trans)) 4808 return PTR_ERR(trans); 4809 4810 err = btrfs_find_free_ino(root, &objectid); 4811 if (err) 4812 goto out_unlock; 4813 4814 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4815 dentry->d_name.len, btrfs_ino(dir), objectid, 4816 mode, &index); 4817 if (IS_ERR(inode)) { 4818 err = PTR_ERR(inode); 4819 goto out_unlock; 4820 } 4821 4822 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4823 if (err) { 4824 drop_inode = 1; 4825 goto out_unlock; 4826 } 4827 4828 /* 4829 * If the active LSM wants to access the inode during 4830 * d_instantiate it needs these. Smack checks to see 4831 * if the filesystem supports xattrs by looking at the 4832 * ops vector. 4833 */ 4834 4835 inode->i_op = &btrfs_special_inode_operations; 4836 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4837 if (err) 4838 drop_inode = 1; 4839 else { 4840 init_special_inode(inode, inode->i_mode, rdev); 4841 btrfs_update_inode(trans, root, inode); 4842 d_instantiate(dentry, inode); 4843 } 4844 out_unlock: 4845 nr = trans->blocks_used; 4846 btrfs_end_transaction(trans, root); 4847 btrfs_btree_balance_dirty(root, nr); 4848 if (drop_inode) { 4849 inode_dec_link_count(inode); 4850 iput(inode); 4851 } 4852 return err; 4853 } 4854 4855 static int btrfs_create(struct inode *dir, struct dentry *dentry, 4856 umode_t mode, struct nameidata *nd) 4857 { 4858 struct btrfs_trans_handle *trans; 4859 struct btrfs_root *root = BTRFS_I(dir)->root; 4860 struct inode *inode = NULL; 4861 int drop_inode = 0; 4862 int err; 4863 unsigned long nr = 0; 4864 u64 objectid; 4865 u64 index = 0; 4866 4867 /* 4868 * 2 for inode item and ref 4869 * 2 for dir items 4870 * 1 for xattr if selinux is on 4871 */ 4872 trans = btrfs_start_transaction(root, 5); 4873 if (IS_ERR(trans)) 4874 return PTR_ERR(trans); 4875 4876 err = btrfs_find_free_ino(root, &objectid); 4877 if (err) 4878 goto out_unlock; 4879 4880 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4881 dentry->d_name.len, btrfs_ino(dir), objectid, 4882 mode, &index); 4883 if (IS_ERR(inode)) { 4884 err = PTR_ERR(inode); 4885 goto out_unlock; 4886 } 4887 4888 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4889 if (err) { 4890 drop_inode = 1; 4891 goto out_unlock; 4892 } 4893 4894 /* 4895 * If the active LSM wants to access the inode during 4896 * d_instantiate it needs these. Smack checks to see 4897 * if the filesystem supports xattrs by looking at the 4898 * ops vector. 
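	 *
	 * Setting the ops vectors before btrfs_add_nondir and d_instantiate
	 * below keeps that window closed.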
4899 */ 4900 inode->i_fop = &btrfs_file_operations; 4901 inode->i_op = &btrfs_file_inode_operations; 4902 4903 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4904 if (err) 4905 drop_inode = 1; 4906 else { 4907 inode->i_mapping->a_ops = &btrfs_aops; 4908 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 4909 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4910 d_instantiate(dentry, inode); 4911 } 4912 out_unlock: 4913 nr = trans->blocks_used; 4914 btrfs_end_transaction(trans, root); 4915 if (drop_inode) { 4916 inode_dec_link_count(inode); 4917 iput(inode); 4918 } 4919 btrfs_btree_balance_dirty(root, nr); 4920 return err; 4921 } 4922 4923 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 4924 struct dentry *dentry) 4925 { 4926 struct btrfs_trans_handle *trans; 4927 struct btrfs_root *root = BTRFS_I(dir)->root; 4928 struct inode *inode = old_dentry->d_inode; 4929 u64 index; 4930 unsigned long nr = 0; 4931 int err; 4932 int drop_inode = 0; 4933 4934 /* do not allow sys_link's with other subvols of the same device */ 4935 if (root->objectid != BTRFS_I(inode)->root->objectid) 4936 return -EXDEV; 4937 4938 if (inode->i_nlink == ~0U) 4939 return -EMLINK; 4940 4941 err = btrfs_set_inode_index(dir, &index); 4942 if (err) 4943 goto fail; 4944 4945 /* 4946 * 2 items for inode and inode ref 4947 * 2 items for dir items 4948 * 1 item for parent inode 4949 */ 4950 trans = btrfs_start_transaction(root, 5); 4951 if (IS_ERR(trans)) { 4952 err = PTR_ERR(trans); 4953 goto fail; 4954 } 4955 4956 btrfs_inc_nlink(inode); 4957 inode_inc_iversion(inode); 4958 inode->i_ctime = CURRENT_TIME; 4959 ihold(inode); 4960 4961 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4962 4963 if (err) { 4964 drop_inode = 1; 4965 } else { 4966 struct dentry *parent = dentry->d_parent; 4967 err = btrfs_update_inode(trans, root, inode); 4968 if (err) 4969 goto fail; 4970 d_instantiate(dentry, inode); 4971 btrfs_log_new_name(trans, inode, NULL, parent); 4972 } 4973 4974 nr = trans->blocks_used; 4975 btrfs_end_transaction(trans, root); 4976 fail: 4977 if (drop_inode) { 4978 inode_dec_link_count(inode); 4979 iput(inode); 4980 } 4981 btrfs_btree_balance_dirty(root, nr); 4982 return err; 4983 } 4984 4985 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 4986 { 4987 struct inode *inode = NULL; 4988 struct btrfs_trans_handle *trans; 4989 struct btrfs_root *root = BTRFS_I(dir)->root; 4990 int err = 0; 4991 int drop_on_err = 0; 4992 u64 objectid = 0; 4993 u64 index = 0; 4994 unsigned long nr = 1; 4995 4996 /* 4997 * 2 items for inode and ref 4998 * 2 items for dir items 4999 * 1 for xattr if selinux is on 5000 */ 5001 trans = btrfs_start_transaction(root, 5); 5002 if (IS_ERR(trans)) 5003 return PTR_ERR(trans); 5004 5005 err = btrfs_find_free_ino(root, &objectid); 5006 if (err) 5007 goto out_fail; 5008 5009 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 5010 dentry->d_name.len, btrfs_ino(dir), objectid, 5011 S_IFDIR | mode, &index); 5012 if (IS_ERR(inode)) { 5013 err = PTR_ERR(inode); 5014 goto out_fail; 5015 } 5016 5017 drop_on_err = 1; 5018 5019 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 5020 if (err) 5021 goto out_fail; 5022 5023 inode->i_op = &btrfs_dir_inode_operations; 5024 inode->i_fop = &btrfs_dir_file_operations; 5025 5026 btrfs_i_size_write(inode, 0); 5027 err = btrfs_update_inode(trans, root, inode); 5028 if (err) 5029 goto out_fail; 5030 5031 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, 5032 
				     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent. Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				PAGE_CACHE_SIZE - pg_offset,
				max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
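 *
 * A typical lookup from the read path looks roughly like this
 * (illustrative sketch only, error handling trimmed):
 *
 *	em = btrfs_get_extent(inode, page, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *
 * after which em->block_start is EXTENT_MAP_HOLE, EXTENT_MAP_INLINE or
 * a disk bytenr, and em must be dropped with free_extent_map.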
5118 */ 5119 5120 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 5121 size_t pg_offset, u64 start, u64 len, 5122 int create) 5123 { 5124 int ret; 5125 int err = 0; 5126 u64 bytenr; 5127 u64 extent_start = 0; 5128 u64 extent_end = 0; 5129 u64 objectid = btrfs_ino(inode); 5130 u32 found_type; 5131 struct btrfs_path *path = NULL; 5132 struct btrfs_root *root = BTRFS_I(inode)->root; 5133 struct btrfs_file_extent_item *item; 5134 struct extent_buffer *leaf; 5135 struct btrfs_key found_key; 5136 struct extent_map *em = NULL; 5137 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5138 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5139 struct btrfs_trans_handle *trans = NULL; 5140 int compress_type; 5141 5142 again: 5143 read_lock(&em_tree->lock); 5144 em = lookup_extent_mapping(em_tree, start, len); 5145 if (em) 5146 em->bdev = root->fs_info->fs_devices->latest_bdev; 5147 read_unlock(&em_tree->lock); 5148 5149 if (em) { 5150 if (em->start > start || em->start + em->len <= start) 5151 free_extent_map(em); 5152 else if (em->block_start == EXTENT_MAP_INLINE && page) 5153 free_extent_map(em); 5154 else 5155 goto out; 5156 } 5157 em = alloc_extent_map(); 5158 if (!em) { 5159 err = -ENOMEM; 5160 goto out; 5161 } 5162 em->bdev = root->fs_info->fs_devices->latest_bdev; 5163 em->start = EXTENT_MAP_HOLE; 5164 em->orig_start = EXTENT_MAP_HOLE; 5165 em->len = (u64)-1; 5166 em->block_len = (u64)-1; 5167 5168 if (!path) { 5169 path = btrfs_alloc_path(); 5170 if (!path) { 5171 err = -ENOMEM; 5172 goto out; 5173 } 5174 /* 5175 * Chances are we'll be called again, so go ahead and do 5176 * readahead 5177 */ 5178 path->reada = 1; 5179 } 5180 5181 ret = btrfs_lookup_file_extent(trans, root, path, 5182 objectid, start, trans != NULL); 5183 if (ret < 0) { 5184 err = ret; 5185 goto out; 5186 } 5187 5188 if (ret != 0) { 5189 if (path->slots[0] == 0) 5190 goto not_found; 5191 path->slots[0]--; 5192 } 5193 5194 leaf = path->nodes[0]; 5195 item = btrfs_item_ptr(leaf, path->slots[0], 5196 struct btrfs_file_extent_item); 5197 /* are we inside the extent that was found? 
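	 * btrfs_lookup_file_extent may have landed us on the item before
	 * the one we want, so re-check the objectid and key type below
	 * before using it.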
*/ 5198 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5199 found_type = btrfs_key_type(&found_key); 5200 if (found_key.objectid != objectid || 5201 found_type != BTRFS_EXTENT_DATA_KEY) { 5202 goto not_found; 5203 } 5204 5205 found_type = btrfs_file_extent_type(leaf, item); 5206 extent_start = found_key.offset; 5207 compress_type = btrfs_file_extent_compression(leaf, item); 5208 if (found_type == BTRFS_FILE_EXTENT_REG || 5209 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5210 extent_end = extent_start + 5211 btrfs_file_extent_num_bytes(leaf, item); 5212 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5213 size_t size; 5214 size = btrfs_file_extent_inline_len(leaf, item); 5215 extent_end = (extent_start + size + root->sectorsize - 1) & 5216 ~((u64)root->sectorsize - 1); 5217 } 5218 5219 if (start >= extent_end) { 5220 path->slots[0]++; 5221 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 5222 ret = btrfs_next_leaf(root, path); 5223 if (ret < 0) { 5224 err = ret; 5225 goto out; 5226 } 5227 if (ret > 0) 5228 goto not_found; 5229 leaf = path->nodes[0]; 5230 } 5231 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5232 if (found_key.objectid != objectid || 5233 found_key.type != BTRFS_EXTENT_DATA_KEY) 5234 goto not_found; 5235 if (start + len <= found_key.offset) 5236 goto not_found; 5237 em->start = start; 5238 em->len = found_key.offset - start; 5239 goto not_found_em; 5240 } 5241 5242 if (found_type == BTRFS_FILE_EXTENT_REG || 5243 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5244 em->start = extent_start; 5245 em->len = extent_end - extent_start; 5246 em->orig_start = extent_start - 5247 btrfs_file_extent_offset(leaf, item); 5248 bytenr = btrfs_file_extent_disk_bytenr(leaf, item); 5249 if (bytenr == 0) { 5250 em->block_start = EXTENT_MAP_HOLE; 5251 goto insert; 5252 } 5253 if (compress_type != BTRFS_COMPRESS_NONE) { 5254 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5255 em->compress_type = compress_type; 5256 em->block_start = bytenr; 5257 em->block_len = btrfs_file_extent_disk_num_bytes(leaf, 5258 item); 5259 } else { 5260 bytenr += btrfs_file_extent_offset(leaf, item); 5261 em->block_start = bytenr; 5262 em->block_len = em->len; 5263 if (found_type == BTRFS_FILE_EXTENT_PREALLOC) 5264 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 5265 } 5266 goto insert; 5267 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5268 unsigned long ptr; 5269 char *map; 5270 size_t size; 5271 size_t extent_offset; 5272 size_t copy_size; 5273 5274 em->block_start = EXTENT_MAP_INLINE; 5275 if (!page || create) { 5276 em->start = extent_start; 5277 em->len = extent_end - extent_start; 5278 goto out; 5279 } 5280 5281 size = btrfs_file_extent_inline_len(leaf, item); 5282 extent_offset = page_offset(page) + pg_offset - extent_start; 5283 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 5284 size - extent_offset); 5285 em->start = extent_start + extent_offset; 5286 em->len = (copy_size + root->sectorsize - 1) & 5287 ~((u64)root->sectorsize - 1); 5288 em->orig_start = EXTENT_MAP_INLINE; 5289 if (compress_type) { 5290 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5291 em->compress_type = compress_type; 5292 } 5293 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 5294 if (create == 0 && !PageUptodate(page)) { 5295 if (btrfs_file_extent_compression(leaf, item) != 5296 BTRFS_COMPRESS_NONE) { 5297 ret = uncompress_inline(path, inode, page, 5298 pg_offset, 5299 extent_offset, item); 5300 BUG_ON(ret); /* -ENOMEM */ 5301 } else { 5302 map = kmap(page); 5303 read_extent_buffer(leaf, map + 
pg_offset, ptr, 5304 copy_size); 5305 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 5306 memset(map + pg_offset + copy_size, 0, 5307 PAGE_CACHE_SIZE - pg_offset - 5308 copy_size); 5309 } 5310 kunmap(page); 5311 } 5312 flush_dcache_page(page); 5313 } else if (create && PageUptodate(page)) { 5314 BUG(); 5315 if (!trans) { 5316 kunmap(page); 5317 free_extent_map(em); 5318 em = NULL; 5319 5320 btrfs_release_path(path); 5321 trans = btrfs_join_transaction(root); 5322 5323 if (IS_ERR(trans)) 5324 return ERR_CAST(trans); 5325 goto again; 5326 } 5327 map = kmap(page); 5328 write_extent_buffer(leaf, map + pg_offset, ptr, 5329 copy_size); 5330 kunmap(page); 5331 btrfs_mark_buffer_dirty(leaf); 5332 } 5333 set_extent_uptodate(io_tree, em->start, 5334 extent_map_end(em) - 1, NULL, GFP_NOFS); 5335 goto insert; 5336 } else { 5337 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); 5338 WARN_ON(1); 5339 } 5340 not_found: 5341 em->start = start; 5342 em->len = len; 5343 not_found_em: 5344 em->block_start = EXTENT_MAP_HOLE; 5345 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 5346 insert: 5347 btrfs_release_path(path); 5348 if (em->start > start || extent_map_end(em) <= start) { 5349 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " 5350 "[%llu %llu]\n", (unsigned long long)em->start, 5351 (unsigned long long)em->len, 5352 (unsigned long long)start, 5353 (unsigned long long)len); 5354 err = -EIO; 5355 goto out; 5356 } 5357 5358 err = 0; 5359 write_lock(&em_tree->lock); 5360 ret = add_extent_mapping(em_tree, em); 5361 /* it is possible that someone inserted the extent into the tree 5362 * while we had the lock dropped. It is also possible that 5363 * an overlapping map exists in the tree 5364 */ 5365 if (ret == -EEXIST) { 5366 struct extent_map *existing; 5367 5368 ret = 0; 5369 5370 existing = lookup_extent_mapping(em_tree, start, len); 5371 if (existing && (existing->start > start || 5372 existing->start + existing->len <= start)) { 5373 free_extent_map(existing); 5374 existing = NULL; 5375 } 5376 if (!existing) { 5377 existing = lookup_extent_mapping(em_tree, em->start, 5378 em->len); 5379 if (existing) { 5380 err = merge_extent_mapping(em_tree, existing, 5381 em, start, 5382 root->sectorsize); 5383 free_extent_map(existing); 5384 if (err) { 5385 free_extent_map(em); 5386 em = NULL; 5387 } 5388 } else { 5389 err = -EIO; 5390 free_extent_map(em); 5391 em = NULL; 5392 } 5393 } else { 5394 free_extent_map(em); 5395 em = existing; 5396 err = 0; 5397 } 5398 } 5399 write_unlock(&em_tree->lock); 5400 out: 5401 5402 trace_btrfs_get_extent(root, em); 5403 5404 if (path) 5405 btrfs_free_path(path); 5406 if (trans) { 5407 ret = btrfs_end_transaction(trans, root); 5408 if (!err) 5409 err = ret; 5410 } 5411 if (err) { 5412 free_extent_map(em); 5413 return ERR_PTR(err); 5414 } 5415 BUG_ON(!em); /* Error is always set */ 5416 return em; 5417 } 5418 5419 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 5420 size_t pg_offset, u64 start, u64 len, 5421 int create) 5422 { 5423 struct extent_map *em; 5424 struct extent_map *hole_em = NULL; 5425 u64 range_start = start; 5426 u64 end; 5427 u64 found; 5428 u64 found_end; 5429 int err = 0; 5430 5431 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 5432 if (IS_ERR(em)) 5433 return em; 5434 if (em) { 5435 /* 5436 * if our em maps to a hole, there might 5437 * actually be delalloc bytes behind it 5438 */ 5439 if (em->block_start != EXTENT_MAP_HOLE) 5440 return em; 5441 else 5442 hole_em = em; 5443 } 5444 5445 /* 
check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, let's look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
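	 * Dropping the cached range here keeps a stale hole em from
	 * shadowing the extent that is about to be allocated.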
5558 */ 5559 if (em->block_start != EXTENT_MAP_HOLE || em->start != start || 5560 em->len != len) { 5561 free_extent_map(em); 5562 em = NULL; 5563 insert = true; 5564 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5565 } 5566 5567 trans = btrfs_join_transaction(root); 5568 if (IS_ERR(trans)) 5569 return ERR_CAST(trans); 5570 5571 if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) 5572 btrfs_add_inode_defrag(trans, inode); 5573 5574 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 5575 5576 alloc_hint = get_extent_allocation_hint(inode, start, len); 5577 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, 5578 alloc_hint, &ins, 1); 5579 if (ret) { 5580 em = ERR_PTR(ret); 5581 goto out; 5582 } 5583 5584 if (!em) { 5585 em = alloc_extent_map(); 5586 if (!em) { 5587 em = ERR_PTR(-ENOMEM); 5588 goto out; 5589 } 5590 } 5591 5592 em->start = start; 5593 em->orig_start = em->start; 5594 em->len = ins.offset; 5595 5596 em->block_start = ins.objectid; 5597 em->block_len = ins.offset; 5598 em->bdev = root->fs_info->fs_devices->latest_bdev; 5599 5600 /* 5601 * We need to do this because if we're using the original em we searched 5602 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. 5603 */ 5604 em->flags = 0; 5605 set_bit(EXTENT_FLAG_PINNED, &em->flags); 5606 5607 while (insert) { 5608 write_lock(&em_tree->lock); 5609 ret = add_extent_mapping(em_tree, em); 5610 write_unlock(&em_tree->lock); 5611 if (ret != -EEXIST) 5612 break; 5613 btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0); 5614 } 5615 5616 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 5617 ins.offset, ins.offset, 0); 5618 if (ret) { 5619 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 5620 em = ERR_PTR(ret); 5621 } 5622 out: 5623 btrfs_end_transaction(trans, root); 5624 return em; 5625 } 5626 5627 /* 5628 * returns 1 when the nocow is safe, < 1 on error, 0 if the 5629 * block must be cow'd 5630 */ 5631 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, 5632 struct inode *inode, u64 offset, u64 len) 5633 { 5634 struct btrfs_path *path; 5635 int ret; 5636 struct extent_buffer *leaf; 5637 struct btrfs_root *root = BTRFS_I(inode)->root; 5638 struct btrfs_file_extent_item *fi; 5639 struct btrfs_key key; 5640 u64 disk_bytenr; 5641 u64 backref_offset; 5642 u64 extent_end; 5643 u64 num_bytes; 5644 int slot; 5645 int found_type; 5646 5647 path = btrfs_alloc_path(); 5648 if (!path) 5649 return -ENOMEM; 5650 5651 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), 5652 offset, 0); 5653 if (ret < 0) 5654 goto out; 5655 5656 slot = path->slots[0]; 5657 if (ret == 1) { 5658 if (slot == 0) { 5659 /* can't find the item, must cow */ 5660 ret = 0; 5661 goto out; 5662 } 5663 slot--; 5664 } 5665 ret = 0; 5666 leaf = path->nodes[0]; 5667 btrfs_item_key_to_cpu(leaf, &key, slot); 5668 if (key.objectid != btrfs_ino(inode) || 5669 key.type != BTRFS_EXTENT_DATA_KEY) { 5670 /* not our file or wrong item type, must cow */ 5671 goto out; 5672 } 5673 5674 if (key.offset > offset) { 5675 /* Wrong offset, must cow */ 5676 goto out; 5677 } 5678 5679 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5680 found_type = btrfs_file_extent_type(leaf, fi); 5681 if (found_type != BTRFS_FILE_EXTENT_REG && 5682 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 5683 /* not a regular extent, must cow */ 5684 goto out; 5685 } 5686 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 5687 backref_offset = btrfs_file_extent_offset(leaf, 
fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety let's just fall back to the
	 * generic buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go
	 * back to buffered IO.  Don't blame me, this is the price we pay for
	 * using the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use
	 *    the existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 *    just use the extent.
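	 *    (in both cases can_nocow_odirect still has to prove the extent
	 *    is a plain REG/PREALLOC one with no csums in range and no other
	 *    references before COW is skipped)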
5777 * 5778 */ 5779 if (!create) { 5780 len = em->len - (start - em->start); 5781 goto map; 5782 } 5783 5784 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 5785 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 5786 em->block_start != EXTENT_MAP_HOLE)) { 5787 int type; 5788 int ret; 5789 u64 block_start; 5790 5791 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5792 type = BTRFS_ORDERED_PREALLOC; 5793 else 5794 type = BTRFS_ORDERED_NOCOW; 5795 len = min(len, em->len - (start - em->start)); 5796 block_start = em->block_start + (start - em->start); 5797 5798 /* 5799 * we're not going to log anything, but we do need 5800 * to make sure the current transaction stays open 5801 * while we look for nocow cross refs 5802 */ 5803 trans = btrfs_join_transaction(root); 5804 if (IS_ERR(trans)) 5805 goto must_cow; 5806 5807 if (can_nocow_odirect(trans, inode, start, len) == 1) { 5808 ret = btrfs_add_ordered_extent_dio(inode, start, 5809 block_start, len, len, type); 5810 btrfs_end_transaction(trans, root); 5811 if (ret) { 5812 free_extent_map(em); 5813 return ret; 5814 } 5815 goto unlock; 5816 } 5817 btrfs_end_transaction(trans, root); 5818 } 5819 must_cow: 5820 /* 5821 * this will cow the extent, reset the len in case we changed 5822 * it above 5823 */ 5824 len = bh_result->b_size; 5825 em = btrfs_new_extent_direct(inode, em, start, len); 5826 if (IS_ERR(em)) 5827 return PTR_ERR(em); 5828 len = min(len, em->len - (start - em->start)); 5829 unlock: 5830 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, 5831 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, 5832 0, NULL, GFP_NOFS); 5833 map: 5834 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 5835 inode->i_blkbits; 5836 bh_result->b_size = len; 5837 bh_result->b_bdev = em->bdev; 5838 set_buffer_mapped(bh_result); 5839 if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5840 set_buffer_new(bh_result); 5841 5842 free_extent_map(em); 5843 5844 return 0; 5845 } 5846 5847 struct btrfs_dio_private { 5848 struct inode *inode; 5849 u64 logical_offset; 5850 u64 disk_bytenr; 5851 u64 bytes; 5852 u32 *csums; 5853 void *private; 5854 5855 /* number of bios pending for this dio */ 5856 atomic_t pending_bios; 5857 5858 /* IO errors */ 5859 int errors; 5860 5861 struct bio *orig_bio; 5862 }; 5863 5864 static void btrfs_endio_direct_read(struct bio *bio, int err) 5865 { 5866 struct btrfs_dio_private *dip = bio->bi_private; 5867 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 5868 struct bio_vec *bvec = bio->bi_io_vec; 5869 struct inode *inode = dip->inode; 5870 struct btrfs_root *root = BTRFS_I(inode)->root; 5871 u64 start; 5872 u32 *private = dip->csums; 5873 5874 start = dip->logical_offset; 5875 do { 5876 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 5877 struct page *page = bvec->bv_page; 5878 char *kaddr; 5879 u32 csum = ~(u32)0; 5880 unsigned long flags; 5881 5882 local_irq_save(flags); 5883 kaddr = kmap_atomic(page); 5884 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, 5885 csum, bvec->bv_len); 5886 btrfs_csum_final(csum, (char *)&csum); 5887 kunmap_atomic(kaddr); 5888 local_irq_restore(flags); 5889 5890 flush_dcache_page(bvec->bv_page); 5891 if (csum != *private) { 5892 printk(KERN_ERR "btrfs csum failed ino %llu off" 5893 " %llu csum %u private %u\n", 5894 (unsigned long long)btrfs_ino(inode), 5895 (unsigned long long)start, 5896 csum, *private); 5897 err = -EIO; 5898 } 5899 } 5900 5901 start += bvec->bv_len; 5902 private++; 5903 bvec++; 5904 } while (bvec <= bvec_end); 5905 5906 
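	/*
	 * All bvecs have been checked (or the inode is NODATASUM); release
	 * the file range this dio holds locked and complete the bio.
	 */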
	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes, !err);
	if (!ret)
		goto out_test;

	ordered->work.func = finish_ordered_fn;
	ordered->work.flags = 0;
	btrfs_queue_worker(&root->fs_info->endio_write_workers,
			   &ordered->work);
out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		ordered = NULL;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
					      struct bio *bio, int mirror_num,
					      unsigned long bio_flags, u64 offset)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
		       "sector %#Lx len %u err no %d\n",
		       (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
		       (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before the atomic variable goes to zero, we must make sure
		 * dip->errors is perceived to be set.
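		 * (pairs with the atomic_dec_and_test below: whichever bio
		 * drops the last pending reference must observe errors != 0)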
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}

static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	int nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}

static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);

	if (!write) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
		if (ret)
			goto err;
	}

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(root->fs_info,
					  inode, rw, bio, 0, 0,
					  file_offset,
					  __btrfs_submit_bio_start_direct_io,
					  __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
						file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}

static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}

	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.
			 * Otherwise, the dip might get freed
			 * before we're done setting it up.
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Writes use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * Before the atomic variable goes to zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}

static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}
	dip->csums = NULL;

	/* Writes use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
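	 * The ordered extent was set up when the blocks were mapped for this
	 * dio, and since the bio was never submitted nothing else is going
	 * to finish it, so it has to be torn down here.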
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		/*
		 * Two puts: one drops the ref our lookup took, the other
		 * drops the ordered extent's own ref, since it will never
		 * be completed now.
		 */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}

static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in
		 * this iovec, if so return EINVAL, otherwise we'll get csum
		 * errors when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}

static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_length(iov, nr_segs);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs)) {
		return 0;
	}

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there are no ordered
		 * extents in this range.
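		 * If we find one, wait for it to finish and take the lock
		 * again, since completing ordered IO changes the extent
		 * state for the range.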
6320 */ 6321 ordered = btrfs_lookup_ordered_range(inode, lockstart, 6322 lockend - lockstart + 1); 6323 if (!ordered) 6324 break; 6325 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6326 &cached_state, GFP_NOFS); 6327 btrfs_start_ordered_extent(inode, ordered, 1); 6328 btrfs_put_ordered_extent(ordered); 6329 cond_resched(); 6330 } 6331 6332 /* 6333 * we don't use btrfs_set_extent_delalloc because we don't want 6334 * the dirty or uptodate bits 6335 */ 6336 if (writing) { 6337 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; 6338 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6339 EXTENT_DELALLOC, NULL, &cached_state, 6340 GFP_NOFS); 6341 if (ret) { 6342 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 6343 lockend, EXTENT_LOCKED | write_bits, 6344 1, 0, &cached_state, GFP_NOFS); 6345 goto out; 6346 } 6347 } 6348 6349 free_extent_state(cached_state); 6350 cached_state = NULL; 6351 6352 ret = __blockdev_direct_IO(rw, iocb, inode, 6353 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 6354 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, 6355 btrfs_submit_direct, 0); 6356 6357 if (ret < 0 && ret != -EIOCBQUEUED) { 6358 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, 6359 offset + iov_length(iov, nr_segs) - 1, 6360 EXTENT_LOCKED | write_bits, 1, 0, 6361 &cached_state, GFP_NOFS); 6362 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { 6363 /* 6364 * We're falling back to buffered, unlock the section we didn't 6365 * do IO on. 6366 */ 6367 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, 6368 offset + iov_length(iov, nr_segs) - 1, 6369 EXTENT_LOCKED | write_bits, 1, 0, 6370 &cached_state, GFP_NOFS); 6371 } 6372 out: 6373 free_extent_state(cached_state); 6374 return ret; 6375 } 6376 6377 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6378 __u64 start, __u64 len) 6379 { 6380 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 6381 } 6382 6383 int btrfs_readpage(struct file *file, struct page *page) 6384 { 6385 struct extent_io_tree *tree; 6386 tree = &BTRFS_I(page->mapping->host)->io_tree; 6387 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 6388 } 6389 6390 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 6391 { 6392 struct extent_io_tree *tree; 6393 6394 6395 if (current->flags & PF_MEMALLOC) { 6396 redirty_page_for_writepage(wbc, page); 6397 unlock_page(page); 6398 return 0; 6399 } 6400 tree = &BTRFS_I(page->mapping->host)->io_tree; 6401 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 6402 } 6403 6404 int btrfs_writepages(struct address_space *mapping, 6405 struct writeback_control *wbc) 6406 { 6407 struct extent_io_tree *tree; 6408 6409 tree = &BTRFS_I(mapping->host)->io_tree; 6410 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 6411 } 6412 6413 static int 6414 btrfs_readpages(struct file *file, struct address_space *mapping, 6415 struct list_head *pages, unsigned nr_pages) 6416 { 6417 struct extent_io_tree *tree; 6418 tree = &BTRFS_I(mapping->host)->io_tree; 6419 return extent_readpages(tree, mapping, pages, nr_pages, 6420 btrfs_get_extent); 6421 } 6422 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6423 { 6424 struct extent_io_tree *tree; 6425 struct extent_map_tree *map; 6426 int ret; 6427 6428 tree = &BTRFS_I(page->mapping->host)->io_tree; 6429 map = &BTRFS_I(page->mapping->host)->extent_tree; 6430 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 6431 
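	/*
	 * A return of 1 means nothing in the io tree or the extent map tree
	 * still needs this page, so its private state can be dropped below.
	 */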
if (ret == 1) { 6432 ClearPagePrivate(page); 6433 set_page_private(page, 0); 6434 page_cache_release(page); 6435 } 6436 return ret; 6437 } 6438 6439 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6440 { 6441 if (PageWriteback(page) || PageDirty(page)) 6442 return 0; 6443 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 6444 } 6445 6446 static void btrfs_invalidatepage(struct page *page, unsigned long offset) 6447 { 6448 struct inode *inode = page->mapping->host; 6449 struct extent_io_tree *tree; 6450 struct btrfs_ordered_extent *ordered; 6451 struct extent_state *cached_state = NULL; 6452 u64 page_start = page_offset(page); 6453 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 6454 6455 /* 6456 * we have the page locked, so new writeback can't start, 6457 * and the dirty bit won't be cleared while we are here. 6458 * 6459 * Wait for IO on this page so that we can safely clear 6460 * the PagePrivate2 bit and do ordered accounting 6461 */ 6462 wait_on_page_writeback(page); 6463 6464 tree = &BTRFS_I(inode)->io_tree; 6465 if (offset) { 6466 btrfs_releasepage(page, GFP_NOFS); 6467 return; 6468 } 6469 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6470 ordered = btrfs_lookup_ordered_extent(inode, 6471 page_offset(page)); 6472 if (ordered) { 6473 /* 6474 * IO on this page will never be started, so we need 6475 * to account for any ordered extents now 6476 */ 6477 clear_extent_bit(tree, page_start, page_end, 6478 EXTENT_DIRTY | EXTENT_DELALLOC | 6479 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, 6480 &cached_state, GFP_NOFS); 6481 /* 6482 * whoever cleared the private bit is responsible 6483 * for the finish_ordered_io 6484 */ 6485 if (TestClearPagePrivate2(page) && 6486 btrfs_dec_test_ordered_pending(inode, &ordered, page_start, 6487 PAGE_CACHE_SIZE, 1)) { 6488 btrfs_finish_ordered_io(ordered); 6489 } 6490 btrfs_put_ordered_extent(ordered); 6491 cached_state = NULL; 6492 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6493 } 6494 clear_extent_bit(tree, page_start, page_end, 6495 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 6496 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); 6497 __btrfs_releasepage(page, GFP_NOFS); 6498 6499 ClearPageChecked(page); 6500 if (PagePrivate(page)) { 6501 ClearPagePrivate(page); 6502 set_page_private(page, 0); 6503 page_cache_release(page); 6504 } 6505 } 6506 6507 /* 6508 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 6509 * called from a page fault handler when a page is first dirtied. Hence we must 6510 * be careful to check for EOF conditions here. We set the page up correctly 6511 * for a written page which means we get ENOSPC checking when writing into 6512 * holes and correct delalloc and unwritten extent mapping on filesystems that 6513 * support these features. 6514 * 6515 * We are not allowed to take the i_mutex here so we have to play games to 6516 * protect against truncate races as the page could now be beyond EOF. Because 6517 * vmtruncate() writes the inode size before removing pages, once we have the 6518 * page lock we can determine safely if the page is beyond EOF. If it is not 6519 * beyond EOF, then the page is guaranteed safe against truncation until we 6520 * unlock the page. 
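 *
 * In outline: reserve delalloc space, lock the page, wait out any ordered
 * extent covering it, mark the range delalloc, and zero the part of the
 * page beyond EOF before dirtying it.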
6521 */ 6522 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 6523 { 6524 struct page *page = vmf->page; 6525 struct inode *inode = fdentry(vma->vm_file)->d_inode; 6526 struct btrfs_root *root = BTRFS_I(inode)->root; 6527 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6528 struct btrfs_ordered_extent *ordered; 6529 struct extent_state *cached_state = NULL; 6530 char *kaddr; 6531 unsigned long zero_start; 6532 loff_t size; 6533 int ret; 6534 int reserved = 0; 6535 u64 page_start; 6536 u64 page_end; 6537 6538 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 6539 if (!ret) { 6540 ret = file_update_time(vma->vm_file); 6541 reserved = 1; 6542 } 6543 if (ret) { 6544 if (ret == -ENOMEM) 6545 ret = VM_FAULT_OOM; 6546 else /* -ENOSPC, -EIO, etc */ 6547 ret = VM_FAULT_SIGBUS; 6548 if (reserved) 6549 goto out; 6550 goto out_noreserve; 6551 } 6552 6553 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 6554 again: 6555 lock_page(page); 6556 size = i_size_read(inode); 6557 page_start = page_offset(page); 6558 page_end = page_start + PAGE_CACHE_SIZE - 1; 6559 6560 if ((page->mapping != inode->i_mapping) || 6561 (page_start >= size)) { 6562 /* page got truncated out from underneath us */ 6563 goto out_unlock; 6564 } 6565 wait_on_page_writeback(page); 6566 6567 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 6568 set_page_extent_mapped(page); 6569 6570 /* 6571 * we can't set the delalloc bits if there are pending ordered 6572 * extents. Drop our locks and wait for them to finish 6573 */ 6574 ordered = btrfs_lookup_ordered_extent(inode, page_start); 6575 if (ordered) { 6576 unlock_extent_cached(io_tree, page_start, page_end, 6577 &cached_state, GFP_NOFS); 6578 unlock_page(page); 6579 btrfs_start_ordered_extent(inode, ordered, 1); 6580 btrfs_put_ordered_extent(ordered); 6581 goto again; 6582 } 6583 6584 /* 6585 * XXX - page_mkwrite gets called every time the page is dirtied, even 6586 * if it was already dirty, so for space accounting reasons we need to 6587 * clear any delalloc bits for the range we are fixing to save. There 6588 * is probably a better way to do this, but for now keep consistent with 6589 * prepare_pages in the normal write path. 
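	 * (Clearing DIRTY/DELALLOC/DO_ACCOUNTING first keeps the
	 * btrfs_set_extent_delalloc() below from accounting the same range
	 * twice.)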
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
out:
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
	return ret;
}

static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item.  Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be freed up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be separate.  The fact is we can use a lot
	 * of space doing the truncate, and we have no earthly idea how much
	 * space we will use, so we need the truncate reservation to be
	 * separate so it doesn't end up using space reserved for updating the
	 * inode or removing the orphan item.  We also need to be able to stop
	 * the transaction and start a new one, which means we need to be able
	 * to update the inode several times, and we have no way of knowing
	 * how many times that will be, so we can't just reserve 1 item for
	 * the entirety of the operation, so that has to be done separately as
	 * well.  Then there is the orphan item, which does indeed need to be
	 * held on to for the whole operation, and we need nobody to touch
	 * this reserved space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
6685 * 2) rsv - for the truncate reservation, which we will steal from the 6686 * transaction reservation. 6687 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for 6688 * updating the inode. 6689 */ 6690 rsv = btrfs_alloc_block_rsv(root); 6691 if (!rsv) 6692 return -ENOMEM; 6693 rsv->size = min_size; 6694 6695 /* 6696 * 1 for the truncate slack space 6697 * 1 for the orphan item we're going to add 6698 * 1 for the orphan item deletion 6699 * 1 for updating the inode. 6700 */ 6701 trans = btrfs_start_transaction(root, 4); 6702 if (IS_ERR(trans)) { 6703 err = PTR_ERR(trans); 6704 goto out; 6705 } 6706 6707 /* Migrate the slack space for the truncate to our reserve */ 6708 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 6709 min_size); 6710 BUG_ON(ret); 6711 6712 ret = btrfs_orphan_add(trans, inode); 6713 if (ret) { 6714 btrfs_end_transaction(trans, root); 6715 goto out; 6716 } 6717 6718 /* 6719 * setattr is responsible for setting the ordered_data_close flag, 6720 * but that is only tested during the last file release. That 6721 * could happen well after the next commit, leaving a great big 6722 * window where new writes may get lost if someone chooses to write 6723 * to this file after truncating to zero 6724 * 6725 * The inode doesn't have any dirty data here, and so if we commit 6726 * this is a noop. If someone immediately starts writing to the inode 6727 * it is very likely we'll catch some of their writes in this 6728 * transaction, and the commit will find this file on the ordered 6729 * data list with good things to send down. 6730 * 6731 * This is a best effort solution, there is still a window where 6732 * using truncate to replace the contents of the file will 6733 * end up with a zero length file after a crash. 6734 */ 6735 if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 6736 &BTRFS_I(inode)->runtime_flags)) 6737 btrfs_add_ordered_operation(trans, root, inode); 6738 6739 while (1) { 6740 ret = btrfs_block_rsv_refill(root, rsv, min_size); 6741 if (ret) { 6742 /* 6743 * This can only happen with the original transaction we 6744 * started above, every other time we shouldn't have a 6745 * transaction started yet. 6746 */ 6747 if (ret == -EAGAIN) 6748 goto end_trans; 6749 err = ret; 6750 break; 6751 } 6752 6753 if (!trans) { 6754 /* Just need the 1 for updating the inode */ 6755 trans = btrfs_start_transaction(root, 1); 6756 if (IS_ERR(trans)) { 6757 ret = err = PTR_ERR(trans); 6758 trans = NULL; 6759 break; 6760 } 6761 } 6762 6763 trans->block_rsv = rsv; 6764 6765 ret = btrfs_truncate_inode_items(trans, root, inode, 6766 inode->i_size, 6767 BTRFS_EXTENT_DATA_KEY); 6768 if (ret != -EAGAIN) { 6769 err = ret; 6770 break; 6771 } 6772 6773 trans->block_rsv = &root->fs_info->trans_block_rsv; 6774 ret = btrfs_update_inode(trans, root, inode); 6775 if (ret) { 6776 err = ret; 6777 break; 6778 } 6779 end_trans: 6780 nr = trans->blocks_used; 6781 btrfs_end_transaction(trans, root); 6782 trans = NULL; 6783 btrfs_btree_balance_dirty(root, nr); 6784 } 6785 6786 if (ret == 0 && inode->i_nlink > 0) { 6787 trans->block_rsv = root->orphan_block_rsv; 6788 ret = btrfs_orphan_del(trans, inode); 6789 if (ret) 6790 err = ret; 6791 } else if (ret && inode->i_nlink > 0) { 6792 /* 6793 * Failed to do the truncate, remove us from the in memory 6794 * orphan list. 
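		 * (With a NULL trans only the in-memory orphan state is
		 * undone here; the on-disk orphan item, if one was inserted,
		 * is left for the normal orphan cleanup to process.)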
6795 */ 6796 ret = btrfs_orphan_del(NULL, inode); 6797 } 6798 6799 if (trans) { 6800 trans->block_rsv = &root->fs_info->trans_block_rsv; 6801 ret = btrfs_update_inode(trans, root, inode); 6802 if (ret && !err) 6803 err = ret; 6804 6805 nr = trans->blocks_used; 6806 ret = btrfs_end_transaction(trans, root); 6807 btrfs_btree_balance_dirty(root, nr); 6808 } 6809 6810 out: 6811 btrfs_free_block_rsv(root, rsv); 6812 6813 if (ret && !err) 6814 err = ret; 6815 6816 return err; 6817 } 6818 6819 /* 6820 * create a new subvolume directory/inode (helper for the ioctl). 6821 */ 6822 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6823 struct btrfs_root *new_root, u64 new_dirid) 6824 { 6825 struct inode *inode; 6826 int err; 6827 u64 index = 0; 6828 6829 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 6830 new_dirid, new_dirid, 6831 S_IFDIR | (~current_umask() & S_IRWXUGO), 6832 &index); 6833 if (IS_ERR(inode)) 6834 return PTR_ERR(inode); 6835 inode->i_op = &btrfs_dir_inode_operations; 6836 inode->i_fop = &btrfs_dir_file_operations; 6837 6838 set_nlink(inode, 1); 6839 btrfs_i_size_write(inode, 0); 6840 6841 err = btrfs_update_inode(trans, new_root, inode); 6842 6843 iput(inode); 6844 return err; 6845 } 6846 6847 struct inode *btrfs_alloc_inode(struct super_block *sb) 6848 { 6849 struct btrfs_inode *ei; 6850 struct inode *inode; 6851 6852 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 6853 if (!ei) 6854 return NULL; 6855 6856 ei->root = NULL; 6857 ei->space_info = NULL; 6858 ei->generation = 0; 6859 ei->last_trans = 0; 6860 ei->last_sub_trans = 0; 6861 ei->logged_trans = 0; 6862 ei->delalloc_bytes = 0; 6863 ei->disk_i_size = 0; 6864 ei->flags = 0; 6865 ei->csum_bytes = 0; 6866 ei->index_cnt = (u64)-1; 6867 ei->last_unlink_trans = 0; 6868 6869 spin_lock_init(&ei->lock); 6870 ei->outstanding_extents = 0; 6871 ei->reserved_extents = 0; 6872 6873 ei->runtime_flags = 0; 6874 ei->force_compress = BTRFS_COMPRESS_NONE; 6875 6876 ei->delayed_node = NULL; 6877 6878 inode = &ei->vfs_inode; 6879 extent_map_tree_init(&ei->extent_tree); 6880 extent_io_tree_init(&ei->io_tree, &inode->i_data); 6881 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 6882 ei->io_tree.track_uptodate = 1; 6883 ei->io_failure_tree.track_uptodate = 1; 6884 mutex_init(&ei->log_mutex); 6885 mutex_init(&ei->delalloc_mutex); 6886 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6887 INIT_LIST_HEAD(&ei->delalloc_inodes); 6888 INIT_LIST_HEAD(&ei->ordered_operations); 6889 RB_CLEAR_NODE(&ei->rb_node); 6890 6891 return inode; 6892 } 6893 6894 static void btrfs_i_callback(struct rcu_head *head) 6895 { 6896 struct inode *inode = container_of(head, struct inode, i_rcu); 6897 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 6898 } 6899 6900 void btrfs_destroy_inode(struct inode *inode) 6901 { 6902 struct btrfs_ordered_extent *ordered; 6903 struct btrfs_root *root = BTRFS_I(inode)->root; 6904 6905 WARN_ON(!list_empty(&inode->i_dentry)); 6906 WARN_ON(inode->i_data.nrpages); 6907 WARN_ON(BTRFS_I(inode)->outstanding_extents); 6908 WARN_ON(BTRFS_I(inode)->reserved_extents); 6909 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 6910 WARN_ON(BTRFS_I(inode)->csum_bytes); 6911 6912 /* 6913 * This can happen where we create an inode, but somebody else also 6914 * created the same inode and we need to destroy the one we already 6915 * created. 6916 */ 6917 if (!root) 6918 goto free; 6919 6920 /* 6921 * Make sure we're properly removed from the ordered operation 6922 * lists. 
6923 */ 6924 smp_mb(); 6925 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { 6926 spin_lock(&root->fs_info->ordered_extent_lock); 6927 list_del_init(&BTRFS_I(inode)->ordered_operations); 6928 spin_unlock(&root->fs_info->ordered_extent_lock); 6929 } 6930 6931 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 6932 &BTRFS_I(inode)->runtime_flags)) { 6933 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6934 (unsigned long long)btrfs_ino(inode)); 6935 atomic_dec(&root->orphan_inodes); 6936 } 6937 6938 while (1) { 6939 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 6940 if (!ordered) 6941 break; 6942 else { 6943 printk(KERN_ERR "btrfs found ordered " 6944 "extent %llu %llu on inode cleanup\n", 6945 (unsigned long long)ordered->file_offset, 6946 (unsigned long long)ordered->len); 6947 btrfs_remove_ordered_extent(inode, ordered); 6948 btrfs_put_ordered_extent(ordered); 6949 btrfs_put_ordered_extent(ordered); 6950 } 6951 } 6952 inode_tree_del(inode); 6953 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 6954 free: 6955 btrfs_remove_delayed_node(inode); 6956 call_rcu(&inode->i_rcu, btrfs_i_callback); 6957 } 6958 6959 int btrfs_drop_inode(struct inode *inode) 6960 { 6961 struct btrfs_root *root = BTRFS_I(inode)->root; 6962 6963 if (btrfs_root_refs(&root->root_item) == 0 && 6964 !btrfs_is_free_space_inode(root, inode)) 6965 return 1; 6966 else 6967 return generic_drop_inode(inode); 6968 } 6969 6970 static void init_once(void *foo) 6971 { 6972 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 6973 6974 inode_init_once(&ei->vfs_inode); 6975 } 6976 6977 void btrfs_destroy_cachep(void) 6978 { 6979 if (btrfs_inode_cachep) 6980 kmem_cache_destroy(btrfs_inode_cachep); 6981 if (btrfs_trans_handle_cachep) 6982 kmem_cache_destroy(btrfs_trans_handle_cachep); 6983 if (btrfs_transaction_cachep) 6984 kmem_cache_destroy(btrfs_transaction_cachep); 6985 if (btrfs_path_cachep) 6986 kmem_cache_destroy(btrfs_path_cachep); 6987 if (btrfs_free_space_cachep) 6988 kmem_cache_destroy(btrfs_free_space_cachep); 6989 } 6990 6991 int btrfs_init_cachep(void) 6992 { 6993 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache", 6994 sizeof(struct btrfs_inode), 0, 6995 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 6996 if (!btrfs_inode_cachep) 6997 goto fail; 6998 6999 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache", 7000 sizeof(struct btrfs_trans_handle), 0, 7001 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7002 if (!btrfs_trans_handle_cachep) 7003 goto fail; 7004 7005 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache", 7006 sizeof(struct btrfs_transaction), 0, 7007 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7008 if (!btrfs_transaction_cachep) 7009 goto fail; 7010 7011 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache", 7012 sizeof(struct btrfs_path), 0, 7013 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7014 if (!btrfs_path_cachep) 7015 goto fail; 7016 7017 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", 7018 sizeof(struct btrfs_free_space), 0, 7019 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7020 if (!btrfs_free_space_cachep) 7021 goto fail; 7022 7023 return 0; 7024 fail: 7025 btrfs_destroy_cachep(); 7026 return -ENOMEM; 7027 } 7028 7029 static int btrfs_getattr(struct vfsmount *mnt, 7030 struct dentry *dentry, struct kstat *stat) 7031 { 7032 struct inode *inode = dentry->d_inode; 7033 u32 blocksize = inode->i_sb->s_blocksize; 7034 7035 generic_fillattr(inode, stat); 7036 stat->dev = 
BTRFS_I(inode)->root->anon_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);

	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved.
*/ 7126 root->fs_info->last_trans_log_full_commit = trans->transid; 7127 } else { 7128 ret = btrfs_insert_inode_ref(trans, dest, 7129 new_dentry->d_name.name, 7130 new_dentry->d_name.len, 7131 old_ino, 7132 btrfs_ino(new_dir), index); 7133 if (ret) 7134 goto out_fail; 7135 /* 7136 * this is an ugly little race, but the rename is required 7137 * to make sure that if we crash, the inode is either at the 7138 * old name or the new one. pinning the log transaction lets 7139 * us make sure we don't allow a log commit to come in after 7140 * we unlink the name but before we add the new name back in. 7141 */ 7142 btrfs_pin_log_trans(root); 7143 } 7144 /* 7145 * make sure the inode gets flushed if it is replacing 7146 * something. 7147 */ 7148 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) 7149 btrfs_add_ordered_operation(trans, root, old_inode); 7150 7151 inode_inc_iversion(old_dir); 7152 inode_inc_iversion(new_dir); 7153 inode_inc_iversion(old_inode); 7154 old_dir->i_ctime = old_dir->i_mtime = ctime; 7155 new_dir->i_ctime = new_dir->i_mtime = ctime; 7156 old_inode->i_ctime = ctime; 7157 7158 if (old_dentry->d_parent != new_dentry->d_parent) 7159 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 7160 7161 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 7162 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 7163 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 7164 old_dentry->d_name.name, 7165 old_dentry->d_name.len); 7166 } else { 7167 ret = __btrfs_unlink_inode(trans, root, old_dir, 7168 old_dentry->d_inode, 7169 old_dentry->d_name.name, 7170 old_dentry->d_name.len); 7171 if (!ret) 7172 ret = btrfs_update_inode(trans, root, old_inode); 7173 } 7174 if (ret) { 7175 btrfs_abort_transaction(trans, root, ret); 7176 goto out_fail; 7177 } 7178 7179 if (new_inode) { 7180 inode_inc_iversion(new_inode); 7181 new_inode->i_ctime = CURRENT_TIME; 7182 if (unlikely(btrfs_ino(new_inode) == 7183 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 7184 root_objectid = BTRFS_I(new_inode)->location.objectid; 7185 ret = btrfs_unlink_subvol(trans, dest, new_dir, 7186 root_objectid, 7187 new_dentry->d_name.name, 7188 new_dentry->d_name.len); 7189 BUG_ON(new_inode->i_nlink == 0); 7190 } else { 7191 ret = btrfs_unlink_inode(trans, dest, new_dir, 7192 new_dentry->d_inode, 7193 new_dentry->d_name.name, 7194 new_dentry->d_name.len); 7195 } 7196 if (!ret && new_inode->i_nlink == 0) { 7197 ret = btrfs_orphan_add(trans, new_dentry->d_inode); 7198 BUG_ON(ret); 7199 } 7200 if (ret) { 7201 btrfs_abort_transaction(trans, root, ret); 7202 goto out_fail; 7203 } 7204 } 7205 7206 fixup_inode_flags(new_dir, old_inode); 7207 7208 ret = btrfs_add_link(trans, new_dir, old_inode, 7209 new_dentry->d_name.name, 7210 new_dentry->d_name.len, 0, index); 7211 if (ret) { 7212 btrfs_abort_transaction(trans, root, ret); 7213 goto out_fail; 7214 } 7215 7216 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 7217 struct dentry *parent = new_dentry->d_parent; 7218 btrfs_log_new_name(trans, old_inode, old_dir, parent); 7219 btrfs_end_log_trans(root); 7220 } 7221 out_fail: 7222 btrfs_end_transaction(trans, root); 7223 out_notrans: 7224 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 7225 up_read(&root->fs_info->subvol_sem); 7226 7227 return ret; 7228 } 7229 7230 /* 7231 * some fairly slow code that needs optimization. This walks the list 7232 * of all the inodes with pending delalloc and forces them to disk. 
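 *
 * filemap_flush() only starts writeback; the wait loop at the bottom makes
 * sure the async submit threads have drained and the ordered extents exist
 * before we return.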
7233 */ 7234 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7235 { 7236 struct list_head *head = &root->fs_info->delalloc_inodes; 7237 struct btrfs_inode *binode; 7238 struct inode *inode; 7239 7240 if (root->fs_info->sb->s_flags & MS_RDONLY) 7241 return -EROFS; 7242 7243 spin_lock(&root->fs_info->delalloc_lock); 7244 while (!list_empty(head)) { 7245 binode = list_entry(head->next, struct btrfs_inode, 7246 delalloc_inodes); 7247 inode = igrab(&binode->vfs_inode); 7248 if (!inode) 7249 list_del_init(&binode->delalloc_inodes); 7250 spin_unlock(&root->fs_info->delalloc_lock); 7251 if (inode) { 7252 filemap_flush(inode->i_mapping); 7253 if (delay_iput) 7254 btrfs_add_delayed_iput(inode); 7255 else 7256 iput(inode); 7257 } 7258 cond_resched(); 7259 spin_lock(&root->fs_info->delalloc_lock); 7260 } 7261 spin_unlock(&root->fs_info->delalloc_lock); 7262 7263 /* the filemap_flush will queue IO into the worker threads, but 7264 * we have to make sure the IO is actually started and that 7265 * ordered extents get created before we return 7266 */ 7267 atomic_inc(&root->fs_info->async_submit_draining); 7268 while (atomic_read(&root->fs_info->nr_async_submits) || 7269 atomic_read(&root->fs_info->async_delalloc_pages)) { 7270 wait_event(root->fs_info->async_submit_wait, 7271 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 7272 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7273 } 7274 atomic_dec(&root->fs_info->async_submit_draining); 7275 return 0; 7276 } 7277 7278 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 7279 const char *symname) 7280 { 7281 struct btrfs_trans_handle *trans; 7282 struct btrfs_root *root = BTRFS_I(dir)->root; 7283 struct btrfs_path *path; 7284 struct btrfs_key key; 7285 struct inode *inode = NULL; 7286 int err; 7287 int drop_inode = 0; 7288 u64 objectid; 7289 u64 index = 0 ; 7290 int name_len; 7291 int datasize; 7292 unsigned long ptr; 7293 struct btrfs_file_extent_item *ei; 7294 struct extent_buffer *leaf; 7295 unsigned long nr = 0; 7296 7297 name_len = strlen(symname) + 1; 7298 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 7299 return -ENAMETOOLONG; 7300 7301 /* 7302 * 2 items for inode item and ref 7303 * 2 items for dir items 7304 * 1 item for xattr if selinux is on 7305 */ 7306 trans = btrfs_start_transaction(root, 5); 7307 if (IS_ERR(trans)) 7308 return PTR_ERR(trans); 7309 7310 err = btrfs_find_free_ino(root, &objectid); 7311 if (err) 7312 goto out_unlock; 7313 7314 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7315 dentry->d_name.len, btrfs_ino(dir), objectid, 7316 S_IFLNK|S_IRWXUGO, &index); 7317 if (IS_ERR(inode)) { 7318 err = PTR_ERR(inode); 7319 goto out_unlock; 7320 } 7321 7322 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 7323 if (err) { 7324 drop_inode = 1; 7325 goto out_unlock; 7326 } 7327 7328 /* 7329 * If the active LSM wants to access the inode during 7330 * d_instantiate it needs these. Smack checks to see 7331 * if the filesystem supports xattrs by looking at the 7332 * ops vector. 
7333 */ 7334 inode->i_fop = &btrfs_file_operations; 7335 inode->i_op = &btrfs_file_inode_operations; 7336 7337 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7338 if (err) 7339 drop_inode = 1; 7340 else { 7341 inode->i_mapping->a_ops = &btrfs_aops; 7342 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7343 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7344 } 7345 if (drop_inode) 7346 goto out_unlock; 7347 7348 path = btrfs_alloc_path(); 7349 if (!path) { 7350 err = -ENOMEM; 7351 drop_inode = 1; 7352 goto out_unlock; 7353 } 7354 key.objectid = btrfs_ino(inode); 7355 key.offset = 0; 7356 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7357 datasize = btrfs_file_extent_calc_inline_size(name_len); 7358 err = btrfs_insert_empty_item(trans, root, path, &key, 7359 datasize); 7360 if (err) { 7361 drop_inode = 1; 7362 btrfs_free_path(path); 7363 goto out_unlock; 7364 } 7365 leaf = path->nodes[0]; 7366 ei = btrfs_item_ptr(leaf, path->slots[0], 7367 struct btrfs_file_extent_item); 7368 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 7369 btrfs_set_file_extent_type(leaf, ei, 7370 BTRFS_FILE_EXTENT_INLINE); 7371 btrfs_set_file_extent_encryption(leaf, ei, 0); 7372 btrfs_set_file_extent_compression(leaf, ei, 0); 7373 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 7374 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 7375 7376 ptr = btrfs_file_extent_inline_start(ei); 7377 write_extent_buffer(leaf, symname, ptr, name_len); 7378 btrfs_mark_buffer_dirty(leaf); 7379 btrfs_free_path(path); 7380 7381 inode->i_op = &btrfs_symlink_inode_operations; 7382 inode->i_mapping->a_ops = &btrfs_symlink_aops; 7383 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7384 inode_set_bytes(inode, name_len); 7385 btrfs_i_size_write(inode, name_len - 1); 7386 err = btrfs_update_inode(trans, root, inode); 7387 if (err) 7388 drop_inode = 1; 7389 7390 out_unlock: 7391 if (!err) 7392 d_instantiate(dentry, inode); 7393 nr = trans->blocks_used; 7394 btrfs_end_transaction(trans, root); 7395 if (drop_inode) { 7396 inode_dec_link_count(inode); 7397 iput(inode); 7398 } 7399 btrfs_btree_balance_dirty(root, nr); 7400 return err; 7401 } 7402 7403 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 7404 u64 start, u64 num_bytes, u64 min_size, 7405 loff_t actual_len, u64 *alloc_hint, 7406 struct btrfs_trans_handle *trans) 7407 { 7408 struct btrfs_root *root = BTRFS_I(inode)->root; 7409 struct btrfs_key ins; 7410 u64 cur_offset = start; 7411 u64 i_size; 7412 int ret = 0; 7413 bool own_trans = true; 7414 7415 if (trans) 7416 own_trans = false; 7417 while (num_bytes > 0) { 7418 if (own_trans) { 7419 trans = btrfs_start_transaction(root, 3); 7420 if (IS_ERR(trans)) { 7421 ret = PTR_ERR(trans); 7422 break; 7423 } 7424 } 7425 7426 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, 7427 0, *alloc_hint, &ins, 1); 7428 if (ret) { 7429 if (own_trans) 7430 btrfs_end_transaction(trans, root); 7431 break; 7432 } 7433 7434 ret = insert_reserved_file_extent(trans, inode, 7435 cur_offset, ins.objectid, 7436 ins.offset, ins.offset, 7437 ins.offset, 0, 0, 0, 7438 BTRFS_FILE_EXTENT_PREALLOC); 7439 if (ret) { 7440 btrfs_abort_transaction(trans, root, ret); 7441 if (own_trans) 7442 btrfs_end_transaction(trans, root); 7443 break; 7444 } 7445 btrfs_drop_extent_cache(inode, cur_offset, 7446 cur_offset + ins.offset -1, 0); 7447 7448 num_bytes -= ins.offset; 7449 cur_offset += ins.offset; 7450 *alloc_hint = ins.objectid + ins.offset; 7451 7452 inode_inc_iversion(inode); 7453 
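		/*
		 * Each allocated chunk bumps ctime and sets the PREALLOC
		 * flag; i_size is only pushed forward while it still trails
		 * the newly preallocated region, and FALLOC_FL_KEEP_SIZE
		 * leaves it untouched.
		 */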
inode->i_ctime = CURRENT_TIME; 7454 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 7455 if (!(mode & FALLOC_FL_KEEP_SIZE) && 7456 (actual_len > inode->i_size) && 7457 (cur_offset > inode->i_size)) { 7458 if (cur_offset > actual_len) 7459 i_size = actual_len; 7460 else 7461 i_size = cur_offset; 7462 i_size_write(inode, i_size); 7463 btrfs_ordered_update_i_size(inode, i_size, NULL); 7464 } 7465 7466 ret = btrfs_update_inode(trans, root, inode); 7467 7468 if (ret) { 7469 btrfs_abort_transaction(trans, root, ret); 7470 if (own_trans) 7471 btrfs_end_transaction(trans, root); 7472 break; 7473 } 7474 7475 if (own_trans) 7476 btrfs_end_transaction(trans, root); 7477 } 7478 return ret; 7479 } 7480 7481 int btrfs_prealloc_file_range(struct inode *inode, int mode, 7482 u64 start, u64 num_bytes, u64 min_size, 7483 loff_t actual_len, u64 *alloc_hint) 7484 { 7485 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 7486 min_size, actual_len, alloc_hint, 7487 NULL); 7488 } 7489 7490 int btrfs_prealloc_file_range_trans(struct inode *inode, 7491 struct btrfs_trans_handle *trans, int mode, 7492 u64 start, u64 num_bytes, u64 min_size, 7493 loff_t actual_len, u64 *alloc_hint) 7494 { 7495 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 7496 min_size, actual_len, alloc_hint, trans); 7497 } 7498 7499 static int btrfs_set_page_dirty(struct page *page) 7500 { 7501 return __set_page_dirty_nobuffers(page); 7502 } 7503 7504 static int btrfs_permission(struct inode *inode, int mask) 7505 { 7506 struct btrfs_root *root = BTRFS_I(inode)->root; 7507 umode_t mode = inode->i_mode; 7508 7509 if (mask & MAY_WRITE && 7510 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 7511 if (btrfs_root_readonly(root)) 7512 return -EROFS; 7513 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 7514 return -EACCES; 7515 } 7516 return generic_permission(inode, mask); 7517 } 7518 7519 static const struct inode_operations btrfs_dir_inode_operations = { 7520 .getattr = btrfs_getattr, 7521 .lookup = btrfs_lookup, 7522 .create = btrfs_create, 7523 .unlink = btrfs_unlink, 7524 .link = btrfs_link, 7525 .mkdir = btrfs_mkdir, 7526 .rmdir = btrfs_rmdir, 7527 .rename = btrfs_rename, 7528 .symlink = btrfs_symlink, 7529 .setattr = btrfs_setattr, 7530 .mknod = btrfs_mknod, 7531 .setxattr = btrfs_setxattr, 7532 .getxattr = btrfs_getxattr, 7533 .listxattr = btrfs_listxattr, 7534 .removexattr = btrfs_removexattr, 7535 .permission = btrfs_permission, 7536 .get_acl = btrfs_get_acl, 7537 }; 7538 static const struct inode_operations btrfs_dir_ro_inode_operations = { 7539 .lookup = btrfs_lookup, 7540 .permission = btrfs_permission, 7541 .get_acl = btrfs_get_acl, 7542 }; 7543 7544 static const struct file_operations btrfs_dir_file_operations = { 7545 .llseek = generic_file_llseek, 7546 .read = generic_read_dir, 7547 .readdir = btrfs_real_readdir, 7548 .unlocked_ioctl = btrfs_ioctl, 7549 #ifdef CONFIG_COMPAT 7550 .compat_ioctl = btrfs_ioctl, 7551 #endif 7552 .release = btrfs_release_file, 7553 .fsync = btrfs_sync_file, 7554 }; 7555 7556 static struct extent_io_ops btrfs_extent_io_ops = { 7557 .fill_delalloc = run_delalloc_range, 7558 .submit_bio_hook = btrfs_submit_bio_hook, 7559 .merge_bio_hook = btrfs_merge_bio_hook, 7560 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 7561 .writepage_end_io_hook = btrfs_writepage_end_io_hook, 7562 .writepage_start_hook = btrfs_writepage_start_hook, 7563 .set_bit_hook = btrfs_set_bit_hook, 7564 .clear_bit_hook = btrfs_clear_bit_hook, 7565 .merge_extent_hook = btrfs_merge_extent_hook, 7566 
.split_extent_hook = btrfs_split_extent_hook, 7567 }; 7568 7569 /* 7570 * btrfs doesn't support the bmap operation because swapfiles 7571 * use bmap to make a mapping of extents in the file. They assume 7572 * these extents won't change over the life of the file and they 7573 * use the bmap result to do IO directly to the drive. 7574 * 7575 * the btrfs bmap call would return logical addresses that aren't 7576 * suitable for IO and they also will change frequently as COW 7577 * operations happen. So, swapfile + btrfs == corruption. 7578 * 7579 * For now we're avoiding this by dropping bmap. 7580 */ 7581 static const struct address_space_operations btrfs_aops = { 7582 .readpage = btrfs_readpage, 7583 .writepage = btrfs_writepage, 7584 .writepages = btrfs_writepages, 7585 .readpages = btrfs_readpages, 7586 .direct_IO = btrfs_direct_IO, 7587 .invalidatepage = btrfs_invalidatepage, 7588 .releasepage = btrfs_releasepage, 7589 .set_page_dirty = btrfs_set_page_dirty, 7590 .error_remove_page = generic_error_remove_page, 7591 }; 7592 7593 static const struct address_space_operations btrfs_symlink_aops = { 7594 .readpage = btrfs_readpage, 7595 .writepage = btrfs_writepage, 7596 .invalidatepage = btrfs_invalidatepage, 7597 .releasepage = btrfs_releasepage, 7598 }; 7599 7600 static const struct inode_operations btrfs_file_inode_operations = { 7601 .getattr = btrfs_getattr, 7602 .setattr = btrfs_setattr, 7603 .setxattr = btrfs_setxattr, 7604 .getxattr = btrfs_getxattr, 7605 .listxattr = btrfs_listxattr, 7606 .removexattr = btrfs_removexattr, 7607 .permission = btrfs_permission, 7608 .fiemap = btrfs_fiemap, 7609 .get_acl = btrfs_get_acl, 7610 .update_time = btrfs_update_time, 7611 }; 7612 static const struct inode_operations btrfs_special_inode_operations = { 7613 .getattr = btrfs_getattr, 7614 .setattr = btrfs_setattr, 7615 .permission = btrfs_permission, 7616 .setxattr = btrfs_setxattr, 7617 .getxattr = btrfs_getxattr, 7618 .listxattr = btrfs_listxattr, 7619 .removexattr = btrfs_removexattr, 7620 .get_acl = btrfs_get_acl, 7621 .update_time = btrfs_update_time, 7622 }; 7623 static const struct inode_operations btrfs_symlink_inode_operations = { 7624 .readlink = generic_readlink, 7625 .follow_link = page_follow_link_light, 7626 .put_link = page_put_link, 7627 .getattr = btrfs_getattr, 7628 .setattr = btrfs_setattr, 7629 .permission = btrfs_permission, 7630 .setxattr = btrfs_setxattr, 7631 .getxattr = btrfs_getxattr, 7632 .listxattr = btrfs_listxattr, 7633 .removexattr = btrfs_removexattr, 7634 .get_acl = btrfs_get_acl, 7635 .update_time = btrfs_update_time, 7636 }; 7637 7638 const struct dentry_operations btrfs_dentry_operations = { 7639 .d_delete = btrfs_dentry_delete, 7640 .d_release = btrfs_dentry_release, 7641 }; 7642