1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/bio.h> 21 #include <linux/buffer_head.h> 22 #include <linux/file.h> 23 #include <linux/fs.h> 24 #include <linux/pagemap.h> 25 #include <linux/highmem.h> 26 #include <linux/time.h> 27 #include <linux/init.h> 28 #include <linux/string.h> 29 #include <linux/backing-dev.h> 30 #include <linux/mpage.h> 31 #include <linux/swap.h> 32 #include <linux/writeback.h> 33 #include <linux/statfs.h> 34 #include <linux/compat.h> 35 #include <linux/bit_spinlock.h> 36 #include <linux/xattr.h> 37 #include <linux/posix_acl.h> 38 #include <linux/falloc.h> 39 #include <linux/slab.h> 40 #include <linux/ratelimit.h> 41 #include <linux/mount.h> 42 #include "compat.h" 43 #include "ctree.h" 44 #include "disk-io.h" 45 #include "transaction.h" 46 #include "btrfs_inode.h" 47 #include "ioctl.h" 48 #include "print-tree.h" 49 #include "ordered-data.h" 50 #include "xattr.h" 51 #include "tree-log.h" 52 #include "volumes.h" 53 #include "compression.h" 54 #include "locking.h" 55 #include "free-space-cache.h" 56 #include "inode-map.h" 57 58 struct btrfs_iget_args { 59 u64 ino; 60 struct btrfs_root *root; 61 }; 62 63 static const struct inode_operations btrfs_dir_inode_operations; 64 static const struct inode_operations btrfs_symlink_inode_operations; 65 static const struct inode_operations btrfs_dir_ro_inode_operations; 66 static const struct inode_operations btrfs_special_inode_operations; 67 static const struct inode_operations btrfs_file_inode_operations; 68 static const struct address_space_operations btrfs_aops; 69 static const struct address_space_operations btrfs_symlink_aops; 70 static const struct file_operations btrfs_dir_file_operations; 71 static struct extent_io_ops btrfs_extent_io_ops; 72 73 static struct kmem_cache *btrfs_inode_cachep; 74 struct kmem_cache *btrfs_trans_handle_cachep; 75 struct kmem_cache *btrfs_transaction_cachep; 76 struct kmem_cache *btrfs_path_cachep; 77 struct kmem_cache *btrfs_free_space_cachep; 78 79 #define S_SHIFT 12 80 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { 81 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE, 82 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR, 83 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV, 84 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV, 85 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO, 86 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK, 87 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 88 }; 89 90 static int btrfs_setsize(struct inode *inode, loff_t newsize); 91 static int btrfs_truncate(struct inode *inode); 92 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); 93 static noinline int cow_file_range(struct inode *inode, 94 struct page *locked_page, 95 u64 start, u64 end, int *page_started, 96 unsigned long *nr_written, int unlock); 97 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle 
*trans, 98 struct btrfs_root *root, struct inode *inode); 99 100 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 101 struct inode *inode, struct inode *dir, 102 const struct qstr *qstr) 103 { 104 int err; 105 106 err = btrfs_init_acl(trans, inode, dir); 107 if (!err) 108 err = btrfs_xattr_security_init(trans, inode, dir, qstr); 109 return err; 110 } 111 112 /* 113 * this does all the hard work for inserting an inline extent into 114 * the btree. The caller should have done a btrfs_drop_extents so that 115 * no overlapping inline items exist in the btree 116 */ 117 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, 118 struct btrfs_root *root, struct inode *inode, 119 u64 start, size_t size, size_t compressed_size, 120 int compress_type, 121 struct page **compressed_pages) 122 { 123 struct btrfs_key key; 124 struct btrfs_path *path; 125 struct extent_buffer *leaf; 126 struct page *page = NULL; 127 char *kaddr; 128 unsigned long ptr; 129 struct btrfs_file_extent_item *ei; 130 int err = 0; 131 int ret; 132 size_t cur_size = size; 133 size_t datasize; 134 unsigned long offset; 135 136 if (compressed_size && compressed_pages) 137 cur_size = compressed_size; 138 139 path = btrfs_alloc_path(); 140 if (!path) 141 return -ENOMEM; 142 143 path->leave_spinning = 1; 144 145 key.objectid = btrfs_ino(inode); 146 key.offset = start; 147 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 148 datasize = btrfs_file_extent_calc_inline_size(cur_size); 149 150 inode_add_bytes(inode, size); 151 ret = btrfs_insert_empty_item(trans, root, path, &key, 152 datasize); 153 if (ret) { 154 err = ret; 155 goto fail; 156 } 157 leaf = path->nodes[0]; 158 ei = btrfs_item_ptr(leaf, path->slots[0], 159 struct btrfs_file_extent_item); 160 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 161 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 162 btrfs_set_file_extent_encryption(leaf, ei, 0); 163 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 164 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 165 ptr = btrfs_file_extent_inline_start(ei); 166 167 if (compress_type != BTRFS_COMPRESS_NONE) { 168 struct page *cpage; 169 int i = 0; 170 while (compressed_size > 0) { 171 cpage = compressed_pages[i]; 172 cur_size = min_t(unsigned long, compressed_size, 173 PAGE_CACHE_SIZE); 174 175 kaddr = kmap_atomic(cpage); 176 write_extent_buffer(leaf, kaddr, ptr, cur_size); 177 kunmap_atomic(kaddr); 178 179 i++; 180 ptr += cur_size; 181 compressed_size -= cur_size; 182 } 183 btrfs_set_file_extent_compression(leaf, ei, 184 compress_type); 185 } else { 186 page = find_get_page(inode->i_mapping, 187 start >> PAGE_CACHE_SHIFT); 188 btrfs_set_file_extent_compression(leaf, ei, 0); 189 kaddr = kmap_atomic(page); 190 offset = start & (PAGE_CACHE_SIZE - 1); 191 write_extent_buffer(leaf, kaddr + offset, ptr, size); 192 kunmap_atomic(kaddr); 193 page_cache_release(page); 194 } 195 btrfs_mark_buffer_dirty(leaf); 196 btrfs_free_path(path); 197 198 /* 199 * we're an inline extent, so nobody can 200 * extend the file past i_size without locking 201 * a page we already have locked. 202 * 203 * We must do any isize and inode updates 204 * before we unlock the pages. Otherwise we 205 * could end up racing with unlink. 206 */ 207 BTRFS_I(inode)->disk_i_size = inode->i_size; 208 ret = btrfs_update_inode(trans, root, inode); 209 210 return ret; 211 fail: 212 btrfs_free_path(path); 213 return err; 214 } 215 216 217 /* 218 * conditionally insert an inline extent into the file. 
This 219 * does the checks required to make sure the data is small enough 220 * to fit as an inline extent. 221 */ 222 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, 223 struct btrfs_root *root, 224 struct inode *inode, u64 start, u64 end, 225 size_t compressed_size, int compress_type, 226 struct page **compressed_pages) 227 { 228 u64 isize = i_size_read(inode); 229 u64 actual_end = min(end + 1, isize); 230 u64 inline_len = actual_end - start; 231 u64 aligned_end = (end + root->sectorsize - 1) & 232 ~((u64)root->sectorsize - 1); 233 u64 hint_byte; 234 u64 data_len = inline_len; 235 int ret; 236 237 if (compressed_size) 238 data_len = compressed_size; 239 240 if (start > 0 || 241 actual_end >= PAGE_CACHE_SIZE || 242 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || 243 (!compressed_size && 244 (actual_end & (root->sectorsize - 1)) == 0) || 245 end + 1 < isize || 246 data_len > root->fs_info->max_inline) { 247 return 1; 248 } 249 250 ret = btrfs_drop_extents(trans, inode, start, aligned_end, 251 &hint_byte, 1); 252 if (ret) 253 return ret; 254 255 if (isize > actual_end) 256 inline_len = min_t(u64, isize, actual_end); 257 ret = insert_inline_extent(trans, root, inode, start, 258 inline_len, compressed_size, 259 compress_type, compressed_pages); 260 if (ret) { 261 btrfs_abort_transaction(trans, root, ret); 262 return ret; 263 } 264 btrfs_delalloc_release_metadata(inode, end + 1 - start); 265 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 266 return 0; 267 } 268 269 struct async_extent { 270 u64 start; 271 u64 ram_size; 272 u64 compressed_size; 273 struct page **pages; 274 unsigned long nr_pages; 275 int compress_type; 276 struct list_head list; 277 }; 278 279 struct async_cow { 280 struct inode *inode; 281 struct btrfs_root *root; 282 struct page *locked_page; 283 u64 start; 284 u64 end; 285 struct list_head extents; 286 struct btrfs_work work; 287 }; 288 289 static noinline int add_async_extent(struct async_cow *cow, 290 u64 start, u64 ram_size, 291 u64 compressed_size, 292 struct page **pages, 293 unsigned long nr_pages, 294 int compress_type) 295 { 296 struct async_extent *async_extent; 297 298 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 299 BUG_ON(!async_extent); /* -ENOMEM */ 300 async_extent->start = start; 301 async_extent->ram_size = ram_size; 302 async_extent->compressed_size = compressed_size; 303 async_extent->pages = pages; 304 async_extent->nr_pages = nr_pages; 305 async_extent->compress_type = compress_type; 306 list_add_tail(&async_extent->list, &cow->extents); 307 return 0; 308 } 309 310 /* 311 * we create compressed extents in two phases. The first 312 * phase compresses a range of pages that have already been 313 * locked (both pages and state bits are locked). 314 * 315 * This is done inside an ordered work queue, and the compression 316 * is spread across many cpus. The actual IO submission is step 317 * two, and the ordered work queue takes care of making sure that 318 * happens in the same order things were put onto the queue by 319 * writepages and friends. 320 * 321 * If this code finds it can't get good compression, it puts an 322 * entry onto the work queue to write the uncompressed bytes. This 323 * makes sure that both compressed inodes and uncompressed inodes 324 * are written in the same order that pdflush sent them down. 
325 */ 326 static noinline int compress_file_range(struct inode *inode, 327 struct page *locked_page, 328 u64 start, u64 end, 329 struct async_cow *async_cow, 330 int *num_added) 331 { 332 struct btrfs_root *root = BTRFS_I(inode)->root; 333 struct btrfs_trans_handle *trans; 334 u64 num_bytes; 335 u64 blocksize = root->sectorsize; 336 u64 actual_end; 337 u64 isize = i_size_read(inode); 338 int ret = 0; 339 struct page **pages = NULL; 340 unsigned long nr_pages; 341 unsigned long nr_pages_ret = 0; 342 unsigned long total_compressed = 0; 343 unsigned long total_in = 0; 344 unsigned long max_compressed = 128 * 1024; 345 unsigned long max_uncompressed = 128 * 1024; 346 int i; 347 int will_compress; 348 int compress_type = root->fs_info->compress_type; 349 350 /* if this is a small write inside eof, kick off a defrag */ 351 if ((end - start + 1) < 16 * 1024 && 352 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 353 btrfs_add_inode_defrag(NULL, inode); 354 355 actual_end = min_t(u64, isize, end + 1); 356 again: 357 will_compress = 0; 358 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 359 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); 360 361 /* 362 * we don't want to send crud past the end of i_size through 363 * compression, that's just a waste of CPU time. So, if the 364 * end of the file is before the start of our current 365 * requested range of bytes, we bail out to the uncompressed 366 * cleanup code that can deal with all of this. 367 * 368 * It isn't really the fastest way to fix things, but this is a 369 * very uncommon corner. 370 */ 371 if (actual_end <= start) 372 goto cleanup_and_bail_uncompressed; 373 374 total_compressed = actual_end - start; 375 376 /* we want to make sure that amount of ram required to uncompress 377 * an extent is reasonable, so we limit the total size in ram 378 * of a compressed extent to 128k. This is a crucial number 379 * because it also controls how easily we can spread reads across 380 * cpus for decompression. 381 * 382 * We also want to make sure the amount of IO required to do 383 * a random read is reasonably small, so we limit the size of 384 * a compressed extent to 128k. 385 */ 386 total_compressed = min(total_compressed, max_uncompressed); 387 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 388 num_bytes = max(blocksize, num_bytes); 389 total_in = 0; 390 ret = 0; 391 392 /* 393 * we do compression for mount -o compress and when the 394 * inode has not been flagged as nocompress. This flag can 395 * change at any time if we discover bad compression ratios. 
396 */ 397 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && 398 (btrfs_test_opt(root, COMPRESS) || 399 (BTRFS_I(inode)->force_compress) || 400 (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { 401 WARN_ON(pages); 402 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); 403 if (!pages) { 404 /* just bail out to the uncompressed code */ 405 goto cont; 406 } 407 408 if (BTRFS_I(inode)->force_compress) 409 compress_type = BTRFS_I(inode)->force_compress; 410 411 ret = btrfs_compress_pages(compress_type, 412 inode->i_mapping, start, 413 total_compressed, pages, 414 nr_pages, &nr_pages_ret, 415 &total_in, 416 &total_compressed, 417 max_compressed); 418 419 if (!ret) { 420 unsigned long offset = total_compressed & 421 (PAGE_CACHE_SIZE - 1); 422 struct page *page = pages[nr_pages_ret - 1]; 423 char *kaddr; 424 425 /* zero the tail end of the last page, we might be 426 * sending it down to disk 427 */ 428 if (offset) { 429 kaddr = kmap_atomic(page); 430 memset(kaddr + offset, 0, 431 PAGE_CACHE_SIZE - offset); 432 kunmap_atomic(kaddr); 433 } 434 will_compress = 1; 435 } 436 } 437 cont: 438 if (start == 0) { 439 trans = btrfs_join_transaction(root); 440 if (IS_ERR(trans)) { 441 ret = PTR_ERR(trans); 442 trans = NULL; 443 goto cleanup_and_out; 444 } 445 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 446 447 /* lets try to make an inline extent */ 448 if (ret || total_in < (actual_end - start)) { 449 /* we didn't compress the entire range, try 450 * to make an uncompressed inline extent. 451 */ 452 ret = cow_file_range_inline(trans, root, inode, 453 start, end, 0, 0, NULL); 454 } else { 455 /* try making a compressed inline extent */ 456 ret = cow_file_range_inline(trans, root, inode, 457 start, end, 458 total_compressed, 459 compress_type, pages); 460 } 461 if (ret <= 0) { 462 /* 463 * inline extent creation worked or returned error, 464 * we don't need to create any more async work items. 465 * Unlock and free up our temp pages. 
466 */ 467 extent_clear_unlock_delalloc(inode, 468 &BTRFS_I(inode)->io_tree, 469 start, end, NULL, 470 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | 471 EXTENT_CLEAR_DELALLOC | 472 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); 473 474 btrfs_end_transaction(trans, root); 475 goto free_pages_out; 476 } 477 btrfs_end_transaction(trans, root); 478 } 479 480 if (will_compress) { 481 /* 482 * we aren't doing an inline extent round the compressed size 483 * up to a block size boundary so the allocator does sane 484 * things 485 */ 486 total_compressed = (total_compressed + blocksize - 1) & 487 ~(blocksize - 1); 488 489 /* 490 * one last check to make sure the compression is really a 491 * win, compare the page count read with the blocks on disk 492 */ 493 total_in = (total_in + PAGE_CACHE_SIZE - 1) & 494 ~(PAGE_CACHE_SIZE - 1); 495 if (total_compressed >= total_in) { 496 will_compress = 0; 497 } else { 498 num_bytes = total_in; 499 } 500 } 501 if (!will_compress && pages) { 502 /* 503 * the compression code ran but failed to make things smaller, 504 * free any pages it allocated and our page pointer array 505 */ 506 for (i = 0; i < nr_pages_ret; i++) { 507 WARN_ON(pages[i]->mapping); 508 page_cache_release(pages[i]); 509 } 510 kfree(pages); 511 pages = NULL; 512 total_compressed = 0; 513 nr_pages_ret = 0; 514 515 /* flag the file so we don't compress in the future */ 516 if (!btrfs_test_opt(root, FORCE_COMPRESS) && 517 !(BTRFS_I(inode)->force_compress)) { 518 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 519 } 520 } 521 if (will_compress) { 522 *num_added += 1; 523 524 /* the async work queues will take care of doing actual 525 * allocation on disk for these compressed pages, 526 * and will submit them to the elevator. 527 */ 528 add_async_extent(async_cow, start, num_bytes, 529 total_compressed, pages, nr_pages_ret, 530 compress_type); 531 532 if (start + num_bytes < end) { 533 start += num_bytes; 534 pages = NULL; 535 cond_resched(); 536 goto again; 537 } 538 } else { 539 cleanup_and_bail_uncompressed: 540 /* 541 * No compression, but we still need to write the pages in 542 * the file we've been given so far. redirty the locked 543 * page if it corresponds to our extent and set things up 544 * for the async work queue to run cow_file_range to do 545 * the normal delalloc dance 546 */ 547 if (page_offset(locked_page) >= start && 548 page_offset(locked_page) <= end) { 549 __set_page_dirty_nobuffers(locked_page); 550 /* unlocked later on in the async handlers */ 551 } 552 add_async_extent(async_cow, start, end - start + 1, 553 0, NULL, 0, BTRFS_COMPRESS_NONE); 554 *num_added += 1; 555 } 556 557 out: 558 return ret; 559 560 free_pages_out: 561 for (i = 0; i < nr_pages_ret; i++) { 562 WARN_ON(pages[i]->mapping); 563 page_cache_release(pages[i]); 564 } 565 kfree(pages); 566 567 goto out; 568 569 cleanup_and_out: 570 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 571 start, end, NULL, 572 EXTENT_CLEAR_UNLOCK_PAGE | 573 EXTENT_CLEAR_DIRTY | 574 EXTENT_CLEAR_DELALLOC | 575 EXTENT_SET_WRITEBACK | 576 EXTENT_END_WRITEBACK); 577 if (!trans || IS_ERR(trans)) 578 btrfs_error(root->fs_info, ret, "Failed to join transaction"); 579 else 580 btrfs_abort_transaction(trans, root, ret); 581 goto free_pages_out; 582 } 583 584 /* 585 * phase two of compressed writeback. This is the ordered portion 586 * of the code, which only gets called in the order the work was 587 * queued. We walk all the async extents created by compress_file_range 588 * and send them down to the disk. 
589 */ 590 static noinline int submit_compressed_extents(struct inode *inode, 591 struct async_cow *async_cow) 592 { 593 struct async_extent *async_extent; 594 u64 alloc_hint = 0; 595 struct btrfs_trans_handle *trans; 596 struct btrfs_key ins; 597 struct extent_map *em; 598 struct btrfs_root *root = BTRFS_I(inode)->root; 599 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 600 struct extent_io_tree *io_tree; 601 int ret = 0; 602 603 if (list_empty(&async_cow->extents)) 604 return 0; 605 606 607 while (!list_empty(&async_cow->extents)) { 608 async_extent = list_entry(async_cow->extents.next, 609 struct async_extent, list); 610 list_del(&async_extent->list); 611 612 io_tree = &BTRFS_I(inode)->io_tree; 613 614 retry: 615 /* did the compression code fall back to uncompressed IO? */ 616 if (!async_extent->pages) { 617 int page_started = 0; 618 unsigned long nr_written = 0; 619 620 lock_extent(io_tree, async_extent->start, 621 async_extent->start + 622 async_extent->ram_size - 1); 623 624 /* allocate blocks */ 625 ret = cow_file_range(inode, async_cow->locked_page, 626 async_extent->start, 627 async_extent->start + 628 async_extent->ram_size - 1, 629 &page_started, &nr_written, 0); 630 631 /* JDM XXX */ 632 633 /* 634 * if page_started, cow_file_range inserted an 635 * inline extent and took care of all the unlocking 636 * and IO for us. Otherwise, we need to submit 637 * all those pages down to the drive. 638 */ 639 if (!page_started && !ret) 640 extent_write_locked_range(io_tree, 641 inode, async_extent->start, 642 async_extent->start + 643 async_extent->ram_size - 1, 644 btrfs_get_extent, 645 WB_SYNC_ALL); 646 kfree(async_extent); 647 cond_resched(); 648 continue; 649 } 650 651 lock_extent(io_tree, async_extent->start, 652 async_extent->start + async_extent->ram_size - 1); 653 654 trans = btrfs_join_transaction(root); 655 if (IS_ERR(trans)) { 656 ret = PTR_ERR(trans); 657 } else { 658 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 659 ret = btrfs_reserve_extent(trans, root, 660 async_extent->compressed_size, 661 async_extent->compressed_size, 662 0, alloc_hint, &ins, 1); 663 if (ret) 664 btrfs_abort_transaction(trans, root, ret); 665 btrfs_end_transaction(trans, root); 666 } 667 668 if (ret) { 669 int i; 670 for (i = 0; i < async_extent->nr_pages; i++) { 671 WARN_ON(async_extent->pages[i]->mapping); 672 page_cache_release(async_extent->pages[i]); 673 } 674 kfree(async_extent->pages); 675 async_extent->nr_pages = 0; 676 async_extent->pages = NULL; 677 unlock_extent(io_tree, async_extent->start, 678 async_extent->start + 679 async_extent->ram_size - 1); 680 if (ret == -ENOSPC) 681 goto retry; 682 goto out_free; /* JDM: Requeue? 
*/ 683 } 684 685 /* 686 * here we're doing allocation and writeback of the 687 * compressed pages 688 */ 689 btrfs_drop_extent_cache(inode, async_extent->start, 690 async_extent->start + 691 async_extent->ram_size - 1, 0); 692 693 em = alloc_extent_map(); 694 BUG_ON(!em); /* -ENOMEM */ 695 em->start = async_extent->start; 696 em->len = async_extent->ram_size; 697 em->orig_start = em->start; 698 699 em->block_start = ins.objectid; 700 em->block_len = ins.offset; 701 em->bdev = root->fs_info->fs_devices->latest_bdev; 702 em->compress_type = async_extent->compress_type; 703 set_bit(EXTENT_FLAG_PINNED, &em->flags); 704 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 705 706 while (1) { 707 write_lock(&em_tree->lock); 708 ret = add_extent_mapping(em_tree, em); 709 write_unlock(&em_tree->lock); 710 if (ret != -EEXIST) { 711 free_extent_map(em); 712 break; 713 } 714 btrfs_drop_extent_cache(inode, async_extent->start, 715 async_extent->start + 716 async_extent->ram_size - 1, 0); 717 } 718 719 ret = btrfs_add_ordered_extent_compress(inode, 720 async_extent->start, 721 ins.objectid, 722 async_extent->ram_size, 723 ins.offset, 724 BTRFS_ORDERED_COMPRESSED, 725 async_extent->compress_type); 726 BUG_ON(ret); /* -ENOMEM */ 727 728 /* 729 * clear dirty, set writeback and unlock the pages. 730 */ 731 extent_clear_unlock_delalloc(inode, 732 &BTRFS_I(inode)->io_tree, 733 async_extent->start, 734 async_extent->start + 735 async_extent->ram_size - 1, 736 NULL, EXTENT_CLEAR_UNLOCK_PAGE | 737 EXTENT_CLEAR_UNLOCK | 738 EXTENT_CLEAR_DELALLOC | 739 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK); 740 741 ret = btrfs_submit_compressed_write(inode, 742 async_extent->start, 743 async_extent->ram_size, 744 ins.objectid, 745 ins.offset, async_extent->pages, 746 async_extent->nr_pages); 747 748 BUG_ON(ret); /* -ENOMEM */ 749 alloc_hint = ins.objectid + ins.offset; 750 kfree(async_extent); 751 cond_resched(); 752 } 753 ret = 0; 754 out: 755 return ret; 756 out_free: 757 kfree(async_extent); 758 goto out; 759 } 760 761 static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 762 u64 num_bytes) 763 { 764 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 765 struct extent_map *em; 766 u64 alloc_hint = 0; 767 768 read_lock(&em_tree->lock); 769 em = search_extent_mapping(em_tree, start, num_bytes); 770 if (em) { 771 /* 772 * if block start isn't an actual block number then find the 773 * first block in this inode and use that as a hint. If that 774 * block is also bogus then just don't worry about it. 775 */ 776 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 777 free_extent_map(em); 778 em = search_extent_mapping(em_tree, 0, 0); 779 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) 780 alloc_hint = em->block_start; 781 if (em) 782 free_extent_map(em); 783 } else { 784 alloc_hint = em->block_start; 785 free_extent_map(em); 786 } 787 } 788 read_unlock(&em_tree->lock); 789 790 return alloc_hint; 791 } 792 793 /* 794 * when extent_io.c finds a delayed allocation range in the file, 795 * the call backs end up in this code. The basic idea is to 796 * allocate extents on disk for the range, and create ordered data structs 797 * in ram to track those extents. 798 * 799 * locked_page is the page that writepage had locked already. We use 800 * it to make sure we don't do extra locks or unlocks. 801 * 802 * *page_started is set to one if we unlock locked_page and do everything 803 * required to start IO on it. It may be clean and already done with 804 * IO when we return. 
805 */ 806 static noinline int cow_file_range(struct inode *inode, 807 struct page *locked_page, 808 u64 start, u64 end, int *page_started, 809 unsigned long *nr_written, 810 int unlock) 811 { 812 struct btrfs_root *root = BTRFS_I(inode)->root; 813 struct btrfs_trans_handle *trans; 814 u64 alloc_hint = 0; 815 u64 num_bytes; 816 unsigned long ram_size; 817 u64 disk_num_bytes; 818 u64 cur_alloc_size; 819 u64 blocksize = root->sectorsize; 820 struct btrfs_key ins; 821 struct extent_map *em; 822 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 823 int ret = 0; 824 825 BUG_ON(btrfs_is_free_space_inode(root, inode)); 826 trans = btrfs_join_transaction(root); 827 if (IS_ERR(trans)) { 828 extent_clear_unlock_delalloc(inode, 829 &BTRFS_I(inode)->io_tree, 830 start, end, NULL, 831 EXTENT_CLEAR_UNLOCK_PAGE | 832 EXTENT_CLEAR_UNLOCK | 833 EXTENT_CLEAR_DELALLOC | 834 EXTENT_CLEAR_DIRTY | 835 EXTENT_SET_WRITEBACK | 836 EXTENT_END_WRITEBACK); 837 return PTR_ERR(trans); 838 } 839 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 840 841 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 842 num_bytes = max(blocksize, num_bytes); 843 disk_num_bytes = num_bytes; 844 ret = 0; 845 846 /* if this is a small write inside eof, kick off defrag */ 847 if (num_bytes < 64 * 1024 && 848 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 849 btrfs_add_inode_defrag(trans, inode); 850 851 if (start == 0) { 852 /* lets try to make an inline extent */ 853 ret = cow_file_range_inline(trans, root, inode, 854 start, end, 0, 0, NULL); 855 if (ret == 0) { 856 extent_clear_unlock_delalloc(inode, 857 &BTRFS_I(inode)->io_tree, 858 start, end, NULL, 859 EXTENT_CLEAR_UNLOCK_PAGE | 860 EXTENT_CLEAR_UNLOCK | 861 EXTENT_CLEAR_DELALLOC | 862 EXTENT_CLEAR_DIRTY | 863 EXTENT_SET_WRITEBACK | 864 EXTENT_END_WRITEBACK); 865 866 *nr_written = *nr_written + 867 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 868 *page_started = 1; 869 goto out; 870 } else if (ret < 0) { 871 btrfs_abort_transaction(trans, root, ret); 872 goto out_unlock; 873 } 874 } 875 876 BUG_ON(disk_num_bytes > 877 btrfs_super_total_bytes(root->fs_info->super_copy)); 878 879 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); 880 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 881 882 while (disk_num_bytes > 0) { 883 unsigned long op; 884 885 cur_alloc_size = disk_num_bytes; 886 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 887 root->sectorsize, 0, alloc_hint, 888 &ins, 1); 889 if (ret < 0) { 890 btrfs_abort_transaction(trans, root, ret); 891 goto out_unlock; 892 } 893 894 em = alloc_extent_map(); 895 BUG_ON(!em); /* -ENOMEM */ 896 em->start = start; 897 em->orig_start = em->start; 898 ram_size = ins.offset; 899 em->len = ins.offset; 900 901 em->block_start = ins.objectid; 902 em->block_len = ins.offset; 903 em->bdev = root->fs_info->fs_devices->latest_bdev; 904 set_bit(EXTENT_FLAG_PINNED, &em->flags); 905 906 while (1) { 907 write_lock(&em_tree->lock); 908 ret = add_extent_mapping(em_tree, em); 909 write_unlock(&em_tree->lock); 910 if (ret != -EEXIST) { 911 free_extent_map(em); 912 break; 913 } 914 btrfs_drop_extent_cache(inode, start, 915 start + ram_size - 1, 0); 916 } 917 918 cur_alloc_size = ins.offset; 919 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 920 ram_size, cur_alloc_size, 0); 921 BUG_ON(ret); /* -ENOMEM */ 922 923 if (root->root_key.objectid == 924 BTRFS_DATA_RELOC_TREE_OBJECTID) { 925 ret = btrfs_reloc_clone_csums(inode, start, 926 cur_alloc_size); 927 if (ret) { 928 
btrfs_abort_transaction(trans, root, ret); 929 goto out_unlock; 930 } 931 } 932 933 if (disk_num_bytes < cur_alloc_size) 934 break; 935 936 /* we're not doing compressed IO, don't unlock the first 937 * page (which the caller expects to stay locked), don't 938 * clear any dirty bits and don't set any writeback bits 939 * 940 * Do set the Private2 bit so we know this page was properly 941 * setup for writepage 942 */ 943 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0; 944 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | 945 EXTENT_SET_PRIVATE2; 946 947 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 948 start, start + ram_size - 1, 949 locked_page, op); 950 disk_num_bytes -= cur_alloc_size; 951 num_bytes -= cur_alloc_size; 952 alloc_hint = ins.objectid + ins.offset; 953 start += cur_alloc_size; 954 } 955 ret = 0; 956 out: 957 btrfs_end_transaction(trans, root); 958 959 return ret; 960 out_unlock: 961 extent_clear_unlock_delalloc(inode, 962 &BTRFS_I(inode)->io_tree, 963 start, end, NULL, 964 EXTENT_CLEAR_UNLOCK_PAGE | 965 EXTENT_CLEAR_UNLOCK | 966 EXTENT_CLEAR_DELALLOC | 967 EXTENT_CLEAR_DIRTY | 968 EXTENT_SET_WRITEBACK | 969 EXTENT_END_WRITEBACK); 970 971 goto out; 972 } 973 974 /* 975 * work queue call back to started compression on a file and pages 976 */ 977 static noinline void async_cow_start(struct btrfs_work *work) 978 { 979 struct async_cow *async_cow; 980 int num_added = 0; 981 async_cow = container_of(work, struct async_cow, work); 982 983 compress_file_range(async_cow->inode, async_cow->locked_page, 984 async_cow->start, async_cow->end, async_cow, 985 &num_added); 986 if (num_added == 0) 987 async_cow->inode = NULL; 988 } 989 990 /* 991 * work queue call back to submit previously compressed pages 992 */ 993 static noinline void async_cow_submit(struct btrfs_work *work) 994 { 995 struct async_cow *async_cow; 996 struct btrfs_root *root; 997 unsigned long nr_pages; 998 999 async_cow = container_of(work, struct async_cow, work); 1000 1001 root = async_cow->root; 1002 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1003 PAGE_CACHE_SHIFT; 1004 1005 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); 1006 1007 if (atomic_read(&root->fs_info->async_delalloc_pages) < 1008 5 * 1042 * 1024 && 1009 waitqueue_active(&root->fs_info->async_submit_wait)) 1010 wake_up(&root->fs_info->async_submit_wait); 1011 1012 if (async_cow->inode) 1013 submit_compressed_extents(async_cow->inode, async_cow); 1014 } 1015 1016 static noinline void async_cow_free(struct btrfs_work *work) 1017 { 1018 struct async_cow *async_cow; 1019 async_cow = container_of(work, struct async_cow, work); 1020 kfree(async_cow); 1021 } 1022 1023 static int cow_file_range_async(struct inode *inode, struct page *locked_page, 1024 u64 start, u64 end, int *page_started, 1025 unsigned long *nr_written) 1026 { 1027 struct async_cow *async_cow; 1028 struct btrfs_root *root = BTRFS_I(inode)->root; 1029 unsigned long nr_pages; 1030 u64 cur_end; 1031 int limit = 10 * 1024 * 1042; 1032 1033 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, 1034 1, 0, NULL, GFP_NOFS); 1035 while (start < end) { 1036 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1037 BUG_ON(!async_cow); /* -ENOMEM */ 1038 async_cow->inode = inode; 1039 async_cow->root = root; 1040 async_cow->locked_page = locked_page; 1041 async_cow->start = start; 1042 1043 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) 1044 cur_end = end; 1045 else 1046 cur_end = min(end, start + 512 * 1024 - 1); 1047 1048 async_cow->end 
= cur_end; 1049 INIT_LIST_HEAD(&async_cow->extents); 1050 1051 async_cow->work.func = async_cow_start; 1052 async_cow->work.ordered_func = async_cow_submit; 1053 async_cow->work.ordered_free = async_cow_free; 1054 async_cow->work.flags = 0; 1055 1056 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1057 PAGE_CACHE_SHIFT; 1058 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); 1059 1060 btrfs_queue_worker(&root->fs_info->delalloc_workers, 1061 &async_cow->work); 1062 1063 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { 1064 wait_event(root->fs_info->async_submit_wait, 1065 (atomic_read(&root->fs_info->async_delalloc_pages) < 1066 limit)); 1067 } 1068 1069 while (atomic_read(&root->fs_info->async_submit_draining) && 1070 atomic_read(&root->fs_info->async_delalloc_pages)) { 1071 wait_event(root->fs_info->async_submit_wait, 1072 (atomic_read(&root->fs_info->async_delalloc_pages) == 1073 0)); 1074 } 1075 1076 *nr_written += nr_pages; 1077 start = cur_end + 1; 1078 } 1079 *page_started = 1; 1080 return 0; 1081 } 1082 1083 static noinline int csum_exist_in_range(struct btrfs_root *root, 1084 u64 bytenr, u64 num_bytes) 1085 { 1086 int ret; 1087 struct btrfs_ordered_sum *sums; 1088 LIST_HEAD(list); 1089 1090 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, 1091 bytenr + num_bytes - 1, &list, 0); 1092 if (ret == 0 && list_empty(&list)) 1093 return 0; 1094 1095 while (!list_empty(&list)) { 1096 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 1097 list_del(&sums->list); 1098 kfree(sums); 1099 } 1100 return 1; 1101 } 1102 1103 /* 1104 * when nowcow writeback call back. This checks for snapshots or COW copies 1105 * of the extents that exist in the file, and COWs the file as required. 1106 * 1107 * If no cow copies or snapshots exist, we write directly to the existing 1108 * blocks on disk 1109 */ 1110 static noinline int run_delalloc_nocow(struct inode *inode, 1111 struct page *locked_page, 1112 u64 start, u64 end, int *page_started, int force, 1113 unsigned long *nr_written) 1114 { 1115 struct btrfs_root *root = BTRFS_I(inode)->root; 1116 struct btrfs_trans_handle *trans; 1117 struct extent_buffer *leaf; 1118 struct btrfs_path *path; 1119 struct btrfs_file_extent_item *fi; 1120 struct btrfs_key found_key; 1121 u64 cow_start; 1122 u64 cur_offset; 1123 u64 extent_end; 1124 u64 extent_offset; 1125 u64 disk_bytenr; 1126 u64 num_bytes; 1127 int extent_type; 1128 int ret, err; 1129 int type; 1130 int nocow; 1131 int check_prev = 1; 1132 bool nolock; 1133 u64 ino = btrfs_ino(inode); 1134 1135 path = btrfs_alloc_path(); 1136 if (!path) 1137 return -ENOMEM; 1138 1139 nolock = btrfs_is_free_space_inode(root, inode); 1140 1141 if (nolock) 1142 trans = btrfs_join_transaction_nolock(root); 1143 else 1144 trans = btrfs_join_transaction(root); 1145 1146 if (IS_ERR(trans)) { 1147 btrfs_free_path(path); 1148 return PTR_ERR(trans); 1149 } 1150 1151 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1152 1153 cow_start = (u64)-1; 1154 cur_offset = start; 1155 while (1) { 1156 ret = btrfs_lookup_file_extent(trans, root, path, ino, 1157 cur_offset, 0); 1158 if (ret < 0) { 1159 btrfs_abort_transaction(trans, root, ret); 1160 goto error; 1161 } 1162 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1163 leaf = path->nodes[0]; 1164 btrfs_item_key_to_cpu(leaf, &found_key, 1165 path->slots[0] - 1); 1166 if (found_key.objectid == ino && 1167 found_key.type == BTRFS_EXTENT_DATA_KEY) 1168 path->slots[0]--; 1169 } 1170 check_prev = 0; 1171 next_slot: 1172 leaf = 
path->nodes[0]; 1173 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1174 ret = btrfs_next_leaf(root, path); 1175 if (ret < 0) { 1176 btrfs_abort_transaction(trans, root, ret); 1177 goto error; 1178 } 1179 if (ret > 0) 1180 break; 1181 leaf = path->nodes[0]; 1182 } 1183 1184 nocow = 0; 1185 disk_bytenr = 0; 1186 num_bytes = 0; 1187 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1188 1189 if (found_key.objectid > ino || 1190 found_key.type > BTRFS_EXTENT_DATA_KEY || 1191 found_key.offset > end) 1192 break; 1193 1194 if (found_key.offset > cur_offset) { 1195 extent_end = found_key.offset; 1196 extent_type = 0; 1197 goto out_check; 1198 } 1199 1200 fi = btrfs_item_ptr(leaf, path->slots[0], 1201 struct btrfs_file_extent_item); 1202 extent_type = btrfs_file_extent_type(leaf, fi); 1203 1204 if (extent_type == BTRFS_FILE_EXTENT_REG || 1205 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1206 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1207 extent_offset = btrfs_file_extent_offset(leaf, fi); 1208 extent_end = found_key.offset + 1209 btrfs_file_extent_num_bytes(leaf, fi); 1210 if (extent_end <= start) { 1211 path->slots[0]++; 1212 goto next_slot; 1213 } 1214 if (disk_bytenr == 0) 1215 goto out_check; 1216 if (btrfs_file_extent_compression(leaf, fi) || 1217 btrfs_file_extent_encryption(leaf, fi) || 1218 btrfs_file_extent_other_encoding(leaf, fi)) 1219 goto out_check; 1220 if (extent_type == BTRFS_FILE_EXTENT_REG && !force) 1221 goto out_check; 1222 if (btrfs_extent_readonly(root, disk_bytenr)) 1223 goto out_check; 1224 if (btrfs_cross_ref_exist(trans, root, ino, 1225 found_key.offset - 1226 extent_offset, disk_bytenr)) 1227 goto out_check; 1228 disk_bytenr += extent_offset; 1229 disk_bytenr += cur_offset - found_key.offset; 1230 num_bytes = min(end + 1, extent_end) - cur_offset; 1231 /* 1232 * force cow if csum exists in the range. 1233 * this ensure that csum for a given extent are 1234 * either valid or do not exist. 
1235 */ 1236 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 1237 goto out_check; 1238 nocow = 1; 1239 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1240 extent_end = found_key.offset + 1241 btrfs_file_extent_inline_len(leaf, fi); 1242 extent_end = ALIGN(extent_end, root->sectorsize); 1243 } else { 1244 BUG_ON(1); 1245 } 1246 out_check: 1247 if (extent_end <= start) { 1248 path->slots[0]++; 1249 goto next_slot; 1250 } 1251 if (!nocow) { 1252 if (cow_start == (u64)-1) 1253 cow_start = cur_offset; 1254 cur_offset = extent_end; 1255 if (cur_offset > end) 1256 break; 1257 path->slots[0]++; 1258 goto next_slot; 1259 } 1260 1261 btrfs_release_path(path); 1262 if (cow_start != (u64)-1) { 1263 ret = cow_file_range(inode, locked_page, cow_start, 1264 found_key.offset - 1, page_started, 1265 nr_written, 1); 1266 if (ret) { 1267 btrfs_abort_transaction(trans, root, ret); 1268 goto error; 1269 } 1270 cow_start = (u64)-1; 1271 } 1272 1273 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1274 struct extent_map *em; 1275 struct extent_map_tree *em_tree; 1276 em_tree = &BTRFS_I(inode)->extent_tree; 1277 em = alloc_extent_map(); 1278 BUG_ON(!em); /* -ENOMEM */ 1279 em->start = cur_offset; 1280 em->orig_start = em->start; 1281 em->len = num_bytes; 1282 em->block_len = num_bytes; 1283 em->block_start = disk_bytenr; 1284 em->bdev = root->fs_info->fs_devices->latest_bdev; 1285 set_bit(EXTENT_FLAG_PINNED, &em->flags); 1286 while (1) { 1287 write_lock(&em_tree->lock); 1288 ret = add_extent_mapping(em_tree, em); 1289 write_unlock(&em_tree->lock); 1290 if (ret != -EEXIST) { 1291 free_extent_map(em); 1292 break; 1293 } 1294 btrfs_drop_extent_cache(inode, em->start, 1295 em->start + em->len - 1, 0); 1296 } 1297 type = BTRFS_ORDERED_PREALLOC; 1298 } else { 1299 type = BTRFS_ORDERED_NOCOW; 1300 } 1301 1302 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, 1303 num_bytes, num_bytes, type); 1304 BUG_ON(ret); /* -ENOMEM */ 1305 1306 if (root->root_key.objectid == 1307 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1308 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1309 num_bytes); 1310 if (ret) { 1311 btrfs_abort_transaction(trans, root, ret); 1312 goto error; 1313 } 1314 } 1315 1316 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1317 cur_offset, cur_offset + num_bytes - 1, 1318 locked_page, EXTENT_CLEAR_UNLOCK_PAGE | 1319 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | 1320 EXTENT_SET_PRIVATE2); 1321 cur_offset = extent_end; 1322 if (cur_offset > end) 1323 break; 1324 } 1325 btrfs_release_path(path); 1326 1327 if (cur_offset <= end && cow_start == (u64)-1) 1328 cow_start = cur_offset; 1329 if (cow_start != (u64)-1) { 1330 ret = cow_file_range(inode, locked_page, cow_start, end, 1331 page_started, nr_written, 1); 1332 if (ret) { 1333 btrfs_abort_transaction(trans, root, ret); 1334 goto error; 1335 } 1336 } 1337 1338 error: 1339 if (nolock) { 1340 err = btrfs_end_transaction_nolock(trans, root); 1341 } else { 1342 err = btrfs_end_transaction(trans, root); 1343 } 1344 if (!ret) 1345 ret = err; 1346 1347 btrfs_free_path(path); 1348 return ret; 1349 } 1350 1351 /* 1352 * extent_io.c call back to do delayed allocation processing 1353 */ 1354 static int run_delalloc_range(struct inode *inode, struct page *locked_page, 1355 u64 start, u64 end, int *page_started, 1356 unsigned long *nr_written) 1357 { 1358 int ret; 1359 struct btrfs_root *root = BTRFS_I(inode)->root; 1360 1361 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) 1362 ret = run_delalloc_nocow(inode, locked_page, start, end, 1363 
page_started, 1, nr_written); 1364 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) 1365 ret = run_delalloc_nocow(inode, locked_page, start, end, 1366 page_started, 0, nr_written); 1367 else if (!btrfs_test_opt(root, COMPRESS) && 1368 !(BTRFS_I(inode)->force_compress) && 1369 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) 1370 ret = cow_file_range(inode, locked_page, start, end, 1371 page_started, nr_written, 1); 1372 else 1373 ret = cow_file_range_async(inode, locked_page, start, end, 1374 page_started, nr_written); 1375 return ret; 1376 } 1377 1378 static void btrfs_split_extent_hook(struct inode *inode, 1379 struct extent_state *orig, u64 split) 1380 { 1381 /* not delalloc, ignore it */ 1382 if (!(orig->state & EXTENT_DELALLOC)) 1383 return; 1384 1385 spin_lock(&BTRFS_I(inode)->lock); 1386 BTRFS_I(inode)->outstanding_extents++; 1387 spin_unlock(&BTRFS_I(inode)->lock); 1388 } 1389 1390 /* 1391 * extent_io.c merge_extent_hook, used to track merged delayed allocation 1392 * extents so we can keep track of new extents that are just merged onto old 1393 * extents, such as when we are doing sequential writes, so we can properly 1394 * account for the metadata space we'll need. 1395 */ 1396 static void btrfs_merge_extent_hook(struct inode *inode, 1397 struct extent_state *new, 1398 struct extent_state *other) 1399 { 1400 /* not delalloc, ignore it */ 1401 if (!(other->state & EXTENT_DELALLOC)) 1402 return; 1403 1404 spin_lock(&BTRFS_I(inode)->lock); 1405 BTRFS_I(inode)->outstanding_extents--; 1406 spin_unlock(&BTRFS_I(inode)->lock); 1407 } 1408 1409 /* 1410 * extent_io.c set_bit_hook, used to track delayed allocation 1411 * bytes in this file, and to maintain the list of inodes that 1412 * have pending delalloc work to be done. 1413 */ 1414 static void btrfs_set_bit_hook(struct inode *inode, 1415 struct extent_state *state, int *bits) 1416 { 1417 1418 /* 1419 * set_bit and clear bit hooks normally require _irqsave/restore 1420 * but in this case, we are only testing for the DELALLOC 1421 * bit, which is only set or cleared with irqs on 1422 */ 1423 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1424 struct btrfs_root *root = BTRFS_I(inode)->root; 1425 u64 len = state->end + 1 - state->start; 1426 bool do_list = !btrfs_is_free_space_inode(root, inode); 1427 1428 if (*bits & EXTENT_FIRST_DELALLOC) { 1429 *bits &= ~EXTENT_FIRST_DELALLOC; 1430 } else { 1431 spin_lock(&BTRFS_I(inode)->lock); 1432 BTRFS_I(inode)->outstanding_extents++; 1433 spin_unlock(&BTRFS_I(inode)->lock); 1434 } 1435 1436 spin_lock(&root->fs_info->delalloc_lock); 1437 BTRFS_I(inode)->delalloc_bytes += len; 1438 root->fs_info->delalloc_bytes += len; 1439 if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1440 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1441 &root->fs_info->delalloc_inodes); 1442 } 1443 spin_unlock(&root->fs_info->delalloc_lock); 1444 } 1445 } 1446 1447 /* 1448 * extent_io.c clear_bit_hook, see set_bit_hook for why 1449 */ 1450 static void btrfs_clear_bit_hook(struct inode *inode, 1451 struct extent_state *state, int *bits) 1452 { 1453 /* 1454 * set_bit and clear bit hooks normally require _irqsave/restore 1455 * but in this case, we are only testing for the DELALLOC 1456 * bit, which is only set or cleared with irqs on 1457 */ 1458 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1459 struct btrfs_root *root = BTRFS_I(inode)->root; 1460 u64 len = state->end + 1 - state->start; 1461 bool do_list = !btrfs_is_free_space_inode(root, inode); 1462 1463 
if (*bits & EXTENT_FIRST_DELALLOC) { 1464 *bits &= ~EXTENT_FIRST_DELALLOC; 1465 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { 1466 spin_lock(&BTRFS_I(inode)->lock); 1467 BTRFS_I(inode)->outstanding_extents--; 1468 spin_unlock(&BTRFS_I(inode)->lock); 1469 } 1470 1471 if (*bits & EXTENT_DO_ACCOUNTING) 1472 btrfs_delalloc_release_metadata(inode, len); 1473 1474 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 1475 && do_list) 1476 btrfs_free_reserved_data_space(inode, len); 1477 1478 spin_lock(&root->fs_info->delalloc_lock); 1479 root->fs_info->delalloc_bytes -= len; 1480 BTRFS_I(inode)->delalloc_bytes -= len; 1481 1482 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && 1483 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1484 list_del_init(&BTRFS_I(inode)->delalloc_inodes); 1485 } 1486 spin_unlock(&root->fs_info->delalloc_lock); 1487 } 1488 } 1489 1490 /* 1491 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure 1492 * we don't create bios that span stripes or chunks 1493 */ 1494 int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 1495 size_t size, struct bio *bio, 1496 unsigned long bio_flags) 1497 { 1498 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 1499 struct btrfs_mapping_tree *map_tree; 1500 u64 logical = (u64)bio->bi_sector << 9; 1501 u64 length = 0; 1502 u64 map_length; 1503 int ret; 1504 1505 if (bio_flags & EXTENT_BIO_COMPRESSED) 1506 return 0; 1507 1508 length = bio->bi_size; 1509 map_tree = &root->fs_info->mapping_tree; 1510 map_length = length; 1511 ret = btrfs_map_block(map_tree, READ, logical, 1512 &map_length, NULL, 0); 1513 /* Will always return 0 or 1 with map_multi == NULL */ 1514 BUG_ON(ret < 0); 1515 if (map_length < length + size) 1516 return 1; 1517 return 0; 1518 } 1519 1520 /* 1521 * in order to insert checksums into the metadata in large chunks, 1522 * we wait until bio submission time. All the pages in the bio are 1523 * checksummed and sums are attached onto the ordered extent record. 1524 * 1525 * At IO completion time the cums attached on the ordered extent record 1526 * are inserted into the btree 1527 */ 1528 static int __btrfs_submit_bio_start(struct inode *inode, int rw, 1529 struct bio *bio, int mirror_num, 1530 unsigned long bio_flags, 1531 u64 bio_offset) 1532 { 1533 struct btrfs_root *root = BTRFS_I(inode)->root; 1534 int ret = 0; 1535 1536 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1537 BUG_ON(ret); /* -ENOMEM */ 1538 return 0; 1539 } 1540 1541 /* 1542 * in order to insert checksums into the metadata in large chunks, 1543 * we wait until bio submission time. All the pages in the bio are 1544 * checksummed and sums are attached onto the ordered extent record. 1545 * 1546 * At IO completion time the cums attached on the ordered extent record 1547 * are inserted into the btree 1548 */ 1549 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 1550 int mirror_num, unsigned long bio_flags, 1551 u64 bio_offset) 1552 { 1553 struct btrfs_root *root = BTRFS_I(inode)->root; 1554 return btrfs_map_bio(root, rw, bio, mirror_num, 1); 1555 } 1556 1557 /* 1558 * extent_io.c submission hook. 
This does the right thing for csum calculation 1559 * on write, or reading the csums from the tree before a read 1560 */ 1561 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 1562 int mirror_num, unsigned long bio_flags, 1563 u64 bio_offset) 1564 { 1565 struct btrfs_root *root = BTRFS_I(inode)->root; 1566 int ret = 0; 1567 int skip_sum; 1568 int metadata = 0; 1569 1570 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1571 1572 if (btrfs_is_free_space_inode(root, inode)) 1573 metadata = 2; 1574 1575 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); 1576 if (ret) 1577 return ret; 1578 1579 if (!(rw & REQ_WRITE)) { 1580 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1581 return btrfs_submit_compressed_read(inode, bio, 1582 mirror_num, bio_flags); 1583 } else if (!skip_sum) { 1584 ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); 1585 if (ret) 1586 return ret; 1587 } 1588 goto mapit; 1589 } else if (!skip_sum) { 1590 /* csum items have already been cloned */ 1591 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 1592 goto mapit; 1593 /* we're doing a write, do the async checksumming */ 1594 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, 1595 inode, rw, bio, mirror_num, 1596 bio_flags, bio_offset, 1597 __btrfs_submit_bio_start, 1598 __btrfs_submit_bio_done); 1599 } 1600 1601 mapit: 1602 return btrfs_map_bio(root, rw, bio, mirror_num, 0); 1603 } 1604 1605 /* 1606 * given a list of ordered sums record them in the inode. This happens 1607 * at IO completion time based on sums calculated at bio submission time. 1608 */ 1609 static noinline int add_pending_csums(struct btrfs_trans_handle *trans, 1610 struct inode *inode, u64 file_offset, 1611 struct list_head *list) 1612 { 1613 struct btrfs_ordered_sum *sum; 1614 1615 list_for_each_entry(sum, list, list) { 1616 btrfs_csum_file_blocks(trans, 1617 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1618 } 1619 return 0; 1620 } 1621 1622 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1623 struct extent_state **cached_state) 1624 { 1625 if ((end & (PAGE_CACHE_SIZE - 1)) == 0) 1626 WARN_ON(1); 1627 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1628 cached_state, GFP_NOFS); 1629 } 1630 1631 /* see btrfs_writepage_start_hook for details on why this is required */ 1632 struct btrfs_writepage_fixup { 1633 struct page *page; 1634 struct btrfs_work work; 1635 }; 1636 1637 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 1638 { 1639 struct btrfs_writepage_fixup *fixup; 1640 struct btrfs_ordered_extent *ordered; 1641 struct extent_state *cached_state = NULL; 1642 struct page *page; 1643 struct inode *inode; 1644 u64 page_start; 1645 u64 page_end; 1646 int ret; 1647 1648 fixup = container_of(work, struct btrfs_writepage_fixup, work); 1649 page = fixup->page; 1650 again: 1651 lock_page(page); 1652 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 1653 ClearPageChecked(page); 1654 goto out_page; 1655 } 1656 1657 inode = page->mapping->host; 1658 page_start = page_offset(page); 1659 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1660 1661 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, 1662 &cached_state); 1663 1664 /* already ordered? 
We're done */ 1665 if (PagePrivate2(page)) 1666 goto out; 1667 1668 ordered = btrfs_lookup_ordered_extent(inode, page_start); 1669 if (ordered) { 1670 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 1671 page_end, &cached_state, GFP_NOFS); 1672 unlock_page(page); 1673 btrfs_start_ordered_extent(inode, ordered, 1); 1674 btrfs_put_ordered_extent(ordered); 1675 goto again; 1676 } 1677 1678 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 1679 if (ret) { 1680 mapping_set_error(page->mapping, ret); 1681 end_extent_writepage(page, ret, page_start, page_end); 1682 ClearPageChecked(page); 1683 goto out; 1684 } 1685 1686 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 1687 ClearPageChecked(page); 1688 set_page_dirty(page); 1689 out: 1690 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, 1691 &cached_state, GFP_NOFS); 1692 out_page: 1693 unlock_page(page); 1694 page_cache_release(page); 1695 kfree(fixup); 1696 } 1697 1698 /* 1699 * There are a few paths in the higher layers of the kernel that directly 1700 * set the page dirty bit without asking the filesystem if it is a 1701 * good idea. This causes problems because we want to make sure COW 1702 * properly happens and the data=ordered rules are followed. 1703 * 1704 * In our case any range that doesn't have the ORDERED bit set 1705 * hasn't been properly setup for IO. We kick off an async process 1706 * to fix it up. The async helper will wait for ordered extents, set 1707 * the delalloc bit and make it safe to write the page. 1708 */ 1709 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) 1710 { 1711 struct inode *inode = page->mapping->host; 1712 struct btrfs_writepage_fixup *fixup; 1713 struct btrfs_root *root = BTRFS_I(inode)->root; 1714 1715 /* this page is properly in the ordered list */ 1716 if (TestClearPagePrivate2(page)) 1717 return 0; 1718 1719 if (PageChecked(page)) 1720 return -EAGAIN; 1721 1722 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 1723 if (!fixup) 1724 return -EAGAIN; 1725 1726 SetPageChecked(page); 1727 page_cache_get(page); 1728 fixup->work.func = btrfs_writepage_fixup_worker; 1729 fixup->page = page; 1730 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); 1731 return -EBUSY; 1732 } 1733 1734 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 1735 struct inode *inode, u64 file_pos, 1736 u64 disk_bytenr, u64 disk_num_bytes, 1737 u64 num_bytes, u64 ram_bytes, 1738 u8 compression, u8 encryption, 1739 u16 other_encoding, int extent_type) 1740 { 1741 struct btrfs_root *root = BTRFS_I(inode)->root; 1742 struct btrfs_file_extent_item *fi; 1743 struct btrfs_path *path; 1744 struct extent_buffer *leaf; 1745 struct btrfs_key ins; 1746 u64 hint; 1747 int ret; 1748 1749 path = btrfs_alloc_path(); 1750 if (!path) 1751 return -ENOMEM; 1752 1753 path->leave_spinning = 1; 1754 1755 /* 1756 * we may be replacing one extent in the tree with another. 1757 * The new extent is pinned in the extent map, and we don't want 1758 * to drop it from the cache until it is completely in the btree. 1759 * 1760 * So, tell btrfs_drop_extents to leave this extent in the cache. 1761 * the caller is expected to unpin it and allow it to be merged 1762 * with the others. 
1763 */ 1764 ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, 1765 &hint, 0); 1766 if (ret) 1767 goto out; 1768 1769 ins.objectid = btrfs_ino(inode); 1770 ins.offset = file_pos; 1771 ins.type = BTRFS_EXTENT_DATA_KEY; 1772 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); 1773 if (ret) 1774 goto out; 1775 leaf = path->nodes[0]; 1776 fi = btrfs_item_ptr(leaf, path->slots[0], 1777 struct btrfs_file_extent_item); 1778 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 1779 btrfs_set_file_extent_type(leaf, fi, extent_type); 1780 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 1781 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); 1782 btrfs_set_file_extent_offset(leaf, fi, 0); 1783 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 1784 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); 1785 btrfs_set_file_extent_compression(leaf, fi, compression); 1786 btrfs_set_file_extent_encryption(leaf, fi, encryption); 1787 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); 1788 1789 btrfs_unlock_up_safe(path, 1); 1790 btrfs_set_lock_blocking(leaf); 1791 1792 btrfs_mark_buffer_dirty(leaf); 1793 1794 inode_add_bytes(inode, num_bytes); 1795 1796 ins.objectid = disk_bytenr; 1797 ins.offset = disk_num_bytes; 1798 ins.type = BTRFS_EXTENT_ITEM_KEY; 1799 ret = btrfs_alloc_reserved_file_extent(trans, root, 1800 root->root_key.objectid, 1801 btrfs_ino(inode), file_pos, &ins); 1802 out: 1803 btrfs_free_path(path); 1804 1805 return ret; 1806 } 1807 1808 /* 1809 * helper function for btrfs_finish_ordered_io, this 1810 * just reads in some of the csum leaves to prime them into ram 1811 * before we start the transaction. It limits the amount of btree 1812 * reads required while inside the transaction. 1813 */ 1814 /* as ordered data IO finishes, this gets called so we can finish 1815 * an ordered extent if the range of bytes in the file it covers are 1816 * fully written. 
1817 */ 1818 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 1819 { 1820 struct btrfs_root *root = BTRFS_I(inode)->root; 1821 struct btrfs_trans_handle *trans = NULL; 1822 struct btrfs_ordered_extent *ordered_extent = NULL; 1823 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1824 struct extent_state *cached_state = NULL; 1825 int compress_type = 0; 1826 int ret; 1827 bool nolock; 1828 1829 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 1830 end - start + 1); 1831 if (!ret) 1832 return 0; 1833 BUG_ON(!ordered_extent); /* Logic error */ 1834 1835 nolock = btrfs_is_free_space_inode(root, inode); 1836 1837 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1838 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 1839 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1840 if (!ret) { 1841 if (nolock) 1842 trans = btrfs_join_transaction_nolock(root); 1843 else 1844 trans = btrfs_join_transaction(root); 1845 if (IS_ERR(trans)) 1846 return PTR_ERR(trans); 1847 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1848 ret = btrfs_update_inode_fallback(trans, root, inode); 1849 if (ret) /* -ENOMEM or corruption */ 1850 btrfs_abort_transaction(trans, root, ret); 1851 } 1852 goto out; 1853 } 1854 1855 lock_extent_bits(io_tree, ordered_extent->file_offset, 1856 ordered_extent->file_offset + ordered_extent->len - 1, 1857 0, &cached_state); 1858 1859 if (nolock) 1860 trans = btrfs_join_transaction_nolock(root); 1861 else 1862 trans = btrfs_join_transaction(root); 1863 if (IS_ERR(trans)) { 1864 ret = PTR_ERR(trans); 1865 trans = NULL; 1866 goto out_unlock; 1867 } 1868 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1869 1870 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1871 compress_type = ordered_extent->compress_type; 1872 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1873 BUG_ON(compress_type); 1874 ret = btrfs_mark_extent_written(trans, inode, 1875 ordered_extent->file_offset, 1876 ordered_extent->file_offset + 1877 ordered_extent->len); 1878 } else { 1879 BUG_ON(root == root->fs_info->tree_root); 1880 ret = insert_reserved_file_extent(trans, inode, 1881 ordered_extent->file_offset, 1882 ordered_extent->start, 1883 ordered_extent->disk_len, 1884 ordered_extent->len, 1885 ordered_extent->len, 1886 compress_type, 0, 0, 1887 BTRFS_FILE_EXTENT_REG); 1888 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 1889 ordered_extent->file_offset, 1890 ordered_extent->len); 1891 } 1892 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1893 ordered_extent->file_offset + 1894 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1895 if (ret < 0) { 1896 btrfs_abort_transaction(trans, root, ret); 1897 goto out; 1898 } 1899 1900 add_pending_csums(trans, inode, ordered_extent->file_offset, 1901 &ordered_extent->list); 1902 1903 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1904 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1905 ret = btrfs_update_inode_fallback(trans, root, inode); 1906 if (ret) { /* -ENOMEM or corruption */ 1907 btrfs_abort_transaction(trans, root, ret); 1908 goto out; 1909 } 1910 } 1911 ret = 0; 1912 out: 1913 if (root != root->fs_info->tree_root) 1914 btrfs_delalloc_release_metadata(inode, ordered_extent->len); 1915 if (trans) { 1916 if (nolock) 1917 btrfs_end_transaction_nolock(trans, root); 1918 else 1919 btrfs_end_transaction(trans, root); 1920 } 1921 1922 /* once for us */ 1923 
btrfs_put_ordered_extent(ordered_extent); 1924 /* once for the tree */ 1925 btrfs_put_ordered_extent(ordered_extent); 1926 1927 return 0; 1928 out_unlock: 1929 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1930 ordered_extent->file_offset + 1931 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1932 goto out; 1933 } 1934 1935 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1936 struct extent_state *state, int uptodate) 1937 { 1938 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 1939 1940 ClearPagePrivate2(page); 1941 return btrfs_finish_ordered_io(page->mapping->host, start, end); 1942 } 1943 1944 /* 1945 * when reads are done, we need to check csums to verify the data is correct 1946 * if there's a match, we allow the bio to finish. If not, the code in 1947 * extent_io.c will try to find good copies for us. 1948 */ 1949 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, 1950 struct extent_state *state) 1951 { 1952 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); 1953 struct inode *inode = page->mapping->host; 1954 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1955 char *kaddr; 1956 u64 private = ~(u32)0; 1957 int ret; 1958 struct btrfs_root *root = BTRFS_I(inode)->root; 1959 u32 csum = ~(u32)0; 1960 1961 if (PageChecked(page)) { 1962 ClearPageChecked(page); 1963 goto good; 1964 } 1965 1966 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 1967 goto good; 1968 1969 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 1970 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 1971 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 1972 GFP_NOFS); 1973 return 0; 1974 } 1975 1976 if (state && state->start == start) { 1977 private = state->private; 1978 ret = 0; 1979 } else { 1980 ret = get_state_private(io_tree, start, &private); 1981 } 1982 kaddr = kmap_atomic(page); 1983 if (ret) 1984 goto zeroit; 1985 1986 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); 1987 btrfs_csum_final(csum, (char *)&csum); 1988 if (csum != private) 1989 goto zeroit; 1990 1991 kunmap_atomic(kaddr); 1992 good: 1993 return 0; 1994 1995 zeroit: 1996 printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " 1997 "private %llu\n", 1998 (unsigned long long)btrfs_ino(page->mapping->host), 1999 (unsigned long long)start, csum, 2000 (unsigned long long)private); 2001 memset(kaddr + offset, 1, end - start + 1); 2002 flush_dcache_page(page); 2003 kunmap_atomic(kaddr); 2004 if (private == 0) 2005 return 0; 2006 return -EIO; 2007 } 2008 2009 struct delayed_iput { 2010 struct list_head list; 2011 struct inode *inode; 2012 }; 2013 2014 /* JDM: If this is fs-wide, why can't we add a pointer to 2015 * btrfs_inode instead and avoid the allocation? 
*/ 2016 void btrfs_add_delayed_iput(struct inode *inode) 2017 { 2018 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2019 struct delayed_iput *delayed; 2020 2021 if (atomic_add_unless(&inode->i_count, -1, 1)) 2022 return; 2023 2024 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); 2025 delayed->inode = inode; 2026 2027 spin_lock(&fs_info->delayed_iput_lock); 2028 list_add_tail(&delayed->list, &fs_info->delayed_iputs); 2029 spin_unlock(&fs_info->delayed_iput_lock); 2030 } 2031 2032 void btrfs_run_delayed_iputs(struct btrfs_root *root) 2033 { 2034 LIST_HEAD(list); 2035 struct btrfs_fs_info *fs_info = root->fs_info; 2036 struct delayed_iput *delayed; 2037 int empty; 2038 2039 spin_lock(&fs_info->delayed_iput_lock); 2040 empty = list_empty(&fs_info->delayed_iputs); 2041 spin_unlock(&fs_info->delayed_iput_lock); 2042 if (empty) 2043 return; 2044 2045 down_read(&root->fs_info->cleanup_work_sem); 2046 spin_lock(&fs_info->delayed_iput_lock); 2047 list_splice_init(&fs_info->delayed_iputs, &list); 2048 spin_unlock(&fs_info->delayed_iput_lock); 2049 2050 while (!list_empty(&list)) { 2051 delayed = list_entry(list.next, struct delayed_iput, list); 2052 list_del(&delayed->list); 2053 iput(delayed->inode); 2054 kfree(delayed); 2055 } 2056 up_read(&root->fs_info->cleanup_work_sem); 2057 } 2058 2059 enum btrfs_orphan_cleanup_state { 2060 ORPHAN_CLEANUP_STARTED = 1, 2061 ORPHAN_CLEANUP_DONE = 2, 2062 }; 2063 2064 /* 2065 * This is called in transaction commit time. If there are no orphan 2066 * files in the subvolume, it removes orphan item and frees block_rsv 2067 * structure. 2068 */ 2069 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2070 struct btrfs_root *root) 2071 { 2072 struct btrfs_block_rsv *block_rsv; 2073 int ret; 2074 2075 if (!list_empty(&root->orphan_list) || 2076 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 2077 return; 2078 2079 spin_lock(&root->orphan_lock); 2080 if (!list_empty(&root->orphan_list)) { 2081 spin_unlock(&root->orphan_lock); 2082 return; 2083 } 2084 2085 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 2086 spin_unlock(&root->orphan_lock); 2087 return; 2088 } 2089 2090 block_rsv = root->orphan_block_rsv; 2091 root->orphan_block_rsv = NULL; 2092 spin_unlock(&root->orphan_lock); 2093 2094 if (root->orphan_item_inserted && 2095 btrfs_root_refs(&root->root_item) > 0) { 2096 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 2097 root->root_key.objectid); 2098 BUG_ON(ret); 2099 root->orphan_item_inserted = 0; 2100 } 2101 2102 if (block_rsv) { 2103 WARN_ON(block_rsv->size > 0); 2104 btrfs_free_block_rsv(root, block_rsv); 2105 } 2106 } 2107 2108 /* 2109 * This creates an orphan entry for the given inode in case something goes 2110 * wrong in the middle of an unlink/truncate. 2111 * 2112 * NOTE: caller of this function should reserve 5 units of metadata for 2113 * this function. 
2114 */ 2115 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 2116 { 2117 struct btrfs_root *root = BTRFS_I(inode)->root; 2118 struct btrfs_block_rsv *block_rsv = NULL; 2119 int reserve = 0; 2120 int insert = 0; 2121 int ret; 2122 2123 if (!root->orphan_block_rsv) { 2124 block_rsv = btrfs_alloc_block_rsv(root); 2125 if (!block_rsv) 2126 return -ENOMEM; 2127 } 2128 2129 spin_lock(&root->orphan_lock); 2130 if (!root->orphan_block_rsv) { 2131 root->orphan_block_rsv = block_rsv; 2132 } else if (block_rsv) { 2133 btrfs_free_block_rsv(root, block_rsv); 2134 block_rsv = NULL; 2135 } 2136 2137 if (list_empty(&BTRFS_I(inode)->i_orphan)) { 2138 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2139 #if 0 2140 /* 2141 * For proper ENOSPC handling, we should do orphan 2142 * cleanup when mounting. But this introduces backward 2143 * compatibility issue. 2144 */ 2145 if (!xchg(&root->orphan_item_inserted, 1)) 2146 insert = 2; 2147 else 2148 insert = 1; 2149 #endif 2150 insert = 1; 2151 } 2152 2153 if (!BTRFS_I(inode)->orphan_meta_reserved) { 2154 BTRFS_I(inode)->orphan_meta_reserved = 1; 2155 reserve = 1; 2156 } 2157 spin_unlock(&root->orphan_lock); 2158 2159 /* grab metadata reservation from transaction handle */ 2160 if (reserve) { 2161 ret = btrfs_orphan_reserve_metadata(trans, inode); 2162 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ 2163 } 2164 2165 /* insert an orphan item to track this unlinked/truncated file */ 2166 if (insert >= 1) { 2167 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2168 if (ret && ret != -EEXIST) { 2169 btrfs_abort_transaction(trans, root, ret); 2170 return ret; 2171 } 2172 ret = 0; 2173 } 2174 2175 /* insert an orphan item to track subvolume contains orphan files */ 2176 if (insert >= 2) { 2177 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 2178 root->root_key.objectid); 2179 if (ret && ret != -EEXIST) { 2180 btrfs_abort_transaction(trans, root, ret); 2181 return ret; 2182 } 2183 } 2184 return 0; 2185 } 2186 2187 /* 2188 * We have done the truncate/delete so we can go ahead and remove the orphan 2189 * item for this particular inode. 2190 */ 2191 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) 2192 { 2193 struct btrfs_root *root = BTRFS_I(inode)->root; 2194 int delete_item = 0; 2195 int release_rsv = 0; 2196 int ret = 0; 2197 2198 spin_lock(&root->orphan_lock); 2199 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 2200 list_del_init(&BTRFS_I(inode)->i_orphan); 2201 delete_item = 1; 2202 } 2203 2204 if (BTRFS_I(inode)->orphan_meta_reserved) { 2205 BTRFS_I(inode)->orphan_meta_reserved = 0; 2206 release_rsv = 1; 2207 } 2208 spin_unlock(&root->orphan_lock); 2209 2210 if (trans && delete_item) { 2211 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); 2212 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2213 } 2214 2215 if (release_rsv) 2216 btrfs_orphan_release_metadata(inode); 2217 2218 return 0; 2219 } 2220 2221 /* 2222 * this cleans up any orphans that may be left on the list from the last use 2223 * of this root. 
2224 */ 2225 int btrfs_orphan_cleanup(struct btrfs_root *root) 2226 { 2227 struct btrfs_path *path; 2228 struct extent_buffer *leaf; 2229 struct btrfs_key key, found_key; 2230 struct btrfs_trans_handle *trans; 2231 struct inode *inode; 2232 u64 last_objectid = 0; 2233 int ret = 0, nr_unlink = 0, nr_truncate = 0; 2234 2235 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 2236 return 0; 2237 2238 path = btrfs_alloc_path(); 2239 if (!path) { 2240 ret = -ENOMEM; 2241 goto out; 2242 } 2243 path->reada = -1; 2244 2245 key.objectid = BTRFS_ORPHAN_OBJECTID; 2246 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 2247 key.offset = (u64)-1; 2248 2249 while (1) { 2250 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2251 if (ret < 0) 2252 goto out; 2253 2254 /* 2255 * if ret == 0 means we found what we were searching for, which 2256 * is weird, but possible, so only screw with path if we didn't 2257 * find the key and see if we have stuff that matches 2258 */ 2259 if (ret > 0) { 2260 ret = 0; 2261 if (path->slots[0] == 0) 2262 break; 2263 path->slots[0]--; 2264 } 2265 2266 /* pull out the item */ 2267 leaf = path->nodes[0]; 2268 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2269 2270 /* make sure the item matches what we want */ 2271 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 2272 break; 2273 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) 2274 break; 2275 2276 /* release the path since we're done with it */ 2277 btrfs_release_path(path); 2278 2279 /* 2280 * this is where we are basically btrfs_lookup, without the 2281 * crossing root thing. we store the inode number in the 2282 * offset of the orphan item. 2283 */ 2284 2285 if (found_key.offset == last_objectid) { 2286 printk(KERN_ERR "btrfs: Error removing orphan entry, " 2287 "stopping orphan cleanup\n"); 2288 ret = -EINVAL; 2289 goto out; 2290 } 2291 2292 last_objectid = found_key.offset; 2293 2294 found_key.objectid = found_key.offset; 2295 found_key.type = BTRFS_INODE_ITEM_KEY; 2296 found_key.offset = 0; 2297 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 2298 ret = PTR_RET(inode); 2299 if (ret && ret != -ESTALE) 2300 goto out; 2301 2302 if (ret == -ESTALE && root == root->fs_info->tree_root) { 2303 struct btrfs_root *dead_root; 2304 struct btrfs_fs_info *fs_info = root->fs_info; 2305 int is_dead_root = 0; 2306 2307 /* 2308 * this is an orphan in the tree root. Currently these 2309 * could come from 2 sources: 2310 * a) a snapshot deletion in progress 2311 * b) a free space cache inode 2312 * We need to distinguish those two, as the snapshot 2313 * orphan must not get deleted. 2314 * find_dead_roots already ran before us, so if this 2315 * is a snapshot deletion, we should find the root 2316 * in the dead_roots list 2317 */ 2318 spin_lock(&fs_info->trans_lock); 2319 list_for_each_entry(dead_root, &fs_info->dead_roots, 2320 root_list) { 2321 if (dead_root->root_key.objectid == 2322 found_key.objectid) { 2323 is_dead_root = 1; 2324 break; 2325 } 2326 } 2327 spin_unlock(&fs_info->trans_lock); 2328 if (is_dead_root) { 2329 /* prevent this orphan from being found again */ 2330 key.offset = found_key.objectid - 1; 2331 continue; 2332 } 2333 } 2334 /* 2335 * Inode is already gone but the orphan item is still there, 2336 * kill the orphan item. 
2337 */ 2338 if (ret == -ESTALE) { 2339 trans = btrfs_start_transaction(root, 1); 2340 if (IS_ERR(trans)) { 2341 ret = PTR_ERR(trans); 2342 goto out; 2343 } 2344 ret = btrfs_del_orphan_item(trans, root, 2345 found_key.objectid); 2346 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2347 btrfs_end_transaction(trans, root); 2348 continue; 2349 } 2350 2351 /* 2352 * add this inode to the orphan list so btrfs_orphan_del does 2353 * the proper thing when we hit it 2354 */ 2355 spin_lock(&root->orphan_lock); 2356 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2357 spin_unlock(&root->orphan_lock); 2358 2359 /* if we have links, this was a truncate, lets do that */ 2360 if (inode->i_nlink) { 2361 if (!S_ISREG(inode->i_mode)) { 2362 WARN_ON(1); 2363 iput(inode); 2364 continue; 2365 } 2366 nr_truncate++; 2367 ret = btrfs_truncate(inode); 2368 } else { 2369 nr_unlink++; 2370 } 2371 2372 /* this will do delete_inode and everything for us */ 2373 iput(inode); 2374 if (ret) 2375 goto out; 2376 } 2377 /* release the path since we're done with it */ 2378 btrfs_release_path(path); 2379 2380 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2381 2382 if (root->orphan_block_rsv) 2383 btrfs_block_rsv_release(root, root->orphan_block_rsv, 2384 (u64)-1); 2385 2386 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2387 trans = btrfs_join_transaction(root); 2388 if (!IS_ERR(trans)) 2389 btrfs_end_transaction(trans, root); 2390 } 2391 2392 if (nr_unlink) 2393 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); 2394 if (nr_truncate) 2395 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); 2396 2397 out: 2398 if (ret) 2399 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); 2400 btrfs_free_path(path); 2401 return ret; 2402 } 2403 2404 /* 2405 * very simple check to peek ahead in the leaf looking for xattrs. If we 2406 * don't find any xattrs, we know there can't be any acls. 2407 * 2408 * slot is the slot the inode is in, objectid is the objectid of the inode 2409 */ 2410 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 2411 int slot, u64 objectid) 2412 { 2413 u32 nritems = btrfs_header_nritems(leaf); 2414 struct btrfs_key found_key; 2415 int scanned = 0; 2416 2417 slot++; 2418 while (slot < nritems) { 2419 btrfs_item_key_to_cpu(leaf, &found_key, slot); 2420 2421 /* we found a different objectid, there must not be acls */ 2422 if (found_key.objectid != objectid) 2423 return 0; 2424 2425 /* we found an xattr, assume we've got an acl */ 2426 if (found_key.type == BTRFS_XATTR_ITEM_KEY) 2427 return 1; 2428 2429 /* 2430 * we found a key greater than an xattr key, there can't 2431 * be any acls later on 2432 */ 2433 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 2434 return 0; 2435 2436 slot++; 2437 scanned++; 2438 2439 /* 2440 * it goes inode, inode backrefs, xattrs, extents, 2441 * so if there are a ton of hard links to an inode there can 2442 * be a lot of backrefs. Don't waste time searching too hard, 2443 * this is just an optimization 2444 */ 2445 if (scanned >= 8) 2446 break; 2447 } 2448 /* we hit the end of the leaf before we found an xattr or 2449 * something larger than an xattr. 
We have to assume the inode 2450 * has acls 2451 */ 2452 return 1; 2453 } 2454 2455 /* 2456 * read an inode from the btree into the in-memory inode 2457 */ 2458 static void btrfs_read_locked_inode(struct inode *inode) 2459 { 2460 struct btrfs_path *path; 2461 struct extent_buffer *leaf; 2462 struct btrfs_inode_item *inode_item; 2463 struct btrfs_timespec *tspec; 2464 struct btrfs_root *root = BTRFS_I(inode)->root; 2465 struct btrfs_key location; 2466 int maybe_acls; 2467 u32 rdev; 2468 int ret; 2469 bool filled = false; 2470 2471 ret = btrfs_fill_inode(inode, &rdev); 2472 if (!ret) 2473 filled = true; 2474 2475 path = btrfs_alloc_path(); 2476 if (!path) 2477 goto make_bad; 2478 2479 path->leave_spinning = 1; 2480 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2481 2482 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2483 if (ret) 2484 goto make_bad; 2485 2486 leaf = path->nodes[0]; 2487 2488 if (filled) 2489 goto cache_acl; 2490 2491 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2492 struct btrfs_inode_item); 2493 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2494 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 2495 inode->i_uid = btrfs_inode_uid(leaf, inode_item); 2496 inode->i_gid = btrfs_inode_gid(leaf, inode_item); 2497 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 2498 2499 tspec = btrfs_inode_atime(inode_item); 2500 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2501 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2502 2503 tspec = btrfs_inode_mtime(inode_item); 2504 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2505 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2506 2507 tspec = btrfs_inode_ctime(inode_item); 2508 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2509 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2510 2511 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 2512 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 2513 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item); 2514 inode->i_generation = BTRFS_I(inode)->generation; 2515 inode->i_rdev = 0; 2516 rdev = btrfs_inode_rdev(leaf, inode_item); 2517 2518 BTRFS_I(inode)->index_cnt = (u64)-1; 2519 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2520 cache_acl: 2521 /* 2522 * try to precache a NULL acl entry for files that don't have 2523 * any xattrs or acls 2524 */ 2525 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 2526 btrfs_ino(inode)); 2527 if (!maybe_acls) 2528 cache_no_acl(inode); 2529 2530 btrfs_free_path(path); 2531 2532 switch (inode->i_mode & S_IFMT) { 2533 case S_IFREG: 2534 inode->i_mapping->a_ops = &btrfs_aops; 2535 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2536 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 2537 inode->i_fop = &btrfs_file_operations; 2538 inode->i_op = &btrfs_file_inode_operations; 2539 break; 2540 case S_IFDIR: 2541 inode->i_fop = &btrfs_dir_file_operations; 2542 if (root == root->fs_info->tree_root) 2543 inode->i_op = &btrfs_dir_ro_inode_operations; 2544 else 2545 inode->i_op = &btrfs_dir_inode_operations; 2546 break; 2547 case S_IFLNK: 2548 inode->i_op = &btrfs_symlink_inode_operations; 2549 inode->i_mapping->a_ops = &btrfs_symlink_aops; 2550 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2551 break; 2552 default: 2553 inode->i_op = &btrfs_special_inode_operations; 2554 init_special_inode(inode, inode->i_mode, rdev); 2555 break; 2556 } 2557 2558 
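	/*
	 * push the btrfs specific inode flags we just read (immutable,
	 * append-only, sync, noatime, dirsync) into the generic
	 * inode->i_flags so the VFS enforces them
	 */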
btrfs_update_iflags(inode); 2559 return; 2560 2561 make_bad: 2562 btrfs_free_path(path); 2563 make_bad_inode(inode); 2564 } 2565 2566 /* 2567 * given a leaf and an inode, copy the inode fields into the leaf 2568 */ 2569 static void fill_inode_item(struct btrfs_trans_handle *trans, 2570 struct extent_buffer *leaf, 2571 struct btrfs_inode_item *item, 2572 struct inode *inode) 2573 { 2574 btrfs_set_inode_uid(leaf, item, inode->i_uid); 2575 btrfs_set_inode_gid(leaf, item, inode->i_gid); 2576 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2577 btrfs_set_inode_mode(leaf, item, inode->i_mode); 2578 btrfs_set_inode_nlink(leaf, item, inode->i_nlink); 2579 2580 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), 2581 inode->i_atime.tv_sec); 2582 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), 2583 inode->i_atime.tv_nsec); 2584 2585 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), 2586 inode->i_mtime.tv_sec); 2587 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), 2588 inode->i_mtime.tv_nsec); 2589 2590 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), 2591 inode->i_ctime.tv_sec); 2592 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), 2593 inode->i_ctime.tv_nsec); 2594 2595 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); 2596 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); 2597 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence); 2598 btrfs_set_inode_transid(leaf, item, trans->transid); 2599 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2600 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2601 btrfs_set_inode_block_group(leaf, item, 0); 2602 } 2603 2604 /* 2605 * copy everything in the in-memory inode into the btree. 2606 */ 2607 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 2608 struct btrfs_root *root, struct inode *inode) 2609 { 2610 struct btrfs_inode_item *inode_item; 2611 struct btrfs_path *path; 2612 struct extent_buffer *leaf; 2613 int ret; 2614 2615 path = btrfs_alloc_path(); 2616 if (!path) 2617 return -ENOMEM; 2618 2619 path->leave_spinning = 1; 2620 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 2621 1); 2622 if (ret) { 2623 if (ret > 0) 2624 ret = -ENOENT; 2625 goto failed; 2626 } 2627 2628 btrfs_unlock_up_safe(path, 1); 2629 leaf = path->nodes[0]; 2630 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2631 struct btrfs_inode_item); 2632 2633 fill_inode_item(trans, leaf, inode_item, inode); 2634 btrfs_mark_buffer_dirty(leaf); 2635 btrfs_set_inode_last_trans(trans, inode); 2636 ret = 0; 2637 failed: 2638 btrfs_free_path(path); 2639 return ret; 2640 } 2641 2642 /* 2643 * copy everything in the in-memory inode into the btree. 2644 */ 2645 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2646 struct btrfs_root *root, struct inode *inode) 2647 { 2648 int ret; 2649 2650 /* 2651 * If the inode is a free space inode, we can deadlock during commit 2652 * if we put it into the delayed code. 
2653 * 2654 * The data relocation inode should also be directly updated 2655 * without delay 2656 */ 2657 if (!btrfs_is_free_space_inode(root, inode) 2658 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { 2659 ret = btrfs_delayed_update_inode(trans, root, inode); 2660 if (!ret) 2661 btrfs_set_inode_last_trans(trans, inode); 2662 return ret; 2663 } 2664 2665 return btrfs_update_inode_item(trans, root, inode); 2666 } 2667 2668 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 2669 struct btrfs_root *root, struct inode *inode) 2670 { 2671 int ret; 2672 2673 ret = btrfs_update_inode(trans, root, inode); 2674 if (ret == -ENOSPC) 2675 return btrfs_update_inode_item(trans, root, inode); 2676 return ret; 2677 } 2678 2679 /* 2680 * unlink helper that gets used here in inode.c and in the tree logging 2681 * recovery code. It remove a link in a directory with a given name, and 2682 * also drops the back refs in the inode to the directory 2683 */ 2684 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2685 struct btrfs_root *root, 2686 struct inode *dir, struct inode *inode, 2687 const char *name, int name_len) 2688 { 2689 struct btrfs_path *path; 2690 int ret = 0; 2691 struct extent_buffer *leaf; 2692 struct btrfs_dir_item *di; 2693 struct btrfs_key key; 2694 u64 index; 2695 u64 ino = btrfs_ino(inode); 2696 u64 dir_ino = btrfs_ino(dir); 2697 2698 path = btrfs_alloc_path(); 2699 if (!path) { 2700 ret = -ENOMEM; 2701 goto out; 2702 } 2703 2704 path->leave_spinning = 1; 2705 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2706 name, name_len, -1); 2707 if (IS_ERR(di)) { 2708 ret = PTR_ERR(di); 2709 goto err; 2710 } 2711 if (!di) { 2712 ret = -ENOENT; 2713 goto err; 2714 } 2715 leaf = path->nodes[0]; 2716 btrfs_dir_item_key_to_cpu(leaf, di, &key); 2717 ret = btrfs_delete_one_dir_name(trans, root, path, di); 2718 if (ret) 2719 goto err; 2720 btrfs_release_path(path); 2721 2722 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 2723 dir_ino, &index); 2724 if (ret) { 2725 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2726 "inode %llu parent %llu\n", name_len, name, 2727 (unsigned long long)ino, (unsigned long long)dir_ino); 2728 btrfs_abort_transaction(trans, root, ret); 2729 goto err; 2730 } 2731 2732 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 2733 if (ret) { 2734 btrfs_abort_transaction(trans, root, ret); 2735 goto err; 2736 } 2737 2738 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2739 inode, dir_ino); 2740 if (ret != 0 && ret != -ENOENT) { 2741 btrfs_abort_transaction(trans, root, ret); 2742 goto err; 2743 } 2744 2745 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2746 dir, index); 2747 if (ret == -ENOENT) 2748 ret = 0; 2749 err: 2750 btrfs_free_path(path); 2751 if (ret) 2752 goto out; 2753 2754 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 2755 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 2756 btrfs_update_inode(trans, root, dir); 2757 out: 2758 return ret; 2759 } 2760 2761 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2762 struct btrfs_root *root, 2763 struct inode *dir, struct inode *inode, 2764 const char *name, int name_len) 2765 { 2766 int ret; 2767 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 2768 if (!ret) { 2769 btrfs_drop_nlink(inode); 2770 ret = btrfs_update_inode(trans, root, inode); 2771 } 2772 return ret; 2773 } 2774 2775 2776 /* helper to check if there is any shared block in the path */ 
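/*
 * a block whose extent ref count is above one is shared with a snapshot,
 * so modifying it means COW and an allocation before anything is freed.
 * __unlink_start_trans() below only lets an unlink that hit ENOSPC borrow
 * from the global metadata reservation when every block it would touch is
 * exclusive to this root.
 */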
2777 static int check_path_shared(struct btrfs_root *root, 2778 struct btrfs_path *path) 2779 { 2780 struct extent_buffer *eb; 2781 int level; 2782 u64 refs = 1; 2783 2784 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2785 int ret; 2786 2787 if (!path->nodes[level]) 2788 break; 2789 eb = path->nodes[level]; 2790 if (!btrfs_block_can_be_shared(root, eb)) 2791 continue; 2792 ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, 2793 &refs, NULL); 2794 if (refs > 1) 2795 return 1; 2796 } 2797 return 0; 2798 } 2799 2800 /* 2801 * helper to start transaction for unlink and rmdir. 2802 * 2803 * unlink and rmdir are special in btrfs, they do not always free space. 2804 * so in enospc case, we should make sure they will free space before 2805 * allowing them to use the global metadata reservation. 2806 */ 2807 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, 2808 struct dentry *dentry) 2809 { 2810 struct btrfs_trans_handle *trans; 2811 struct btrfs_root *root = BTRFS_I(dir)->root; 2812 struct btrfs_path *path; 2813 struct btrfs_inode_ref *ref; 2814 struct btrfs_dir_item *di; 2815 struct inode *inode = dentry->d_inode; 2816 u64 index; 2817 int check_link = 1; 2818 int err = -ENOSPC; 2819 int ret; 2820 u64 ino = btrfs_ino(inode); 2821 u64 dir_ino = btrfs_ino(dir); 2822 2823 /* 2824 * 1 for the possible orphan item 2825 * 1 for the dir item 2826 * 1 for the dir index 2827 * 1 for the inode ref 2828 * 1 for the inode ref in the tree log 2829 * 2 for the dir entries in the log 2830 * 1 for the inode 2831 */ 2832 trans = btrfs_start_transaction(root, 8); 2833 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 2834 return trans; 2835 2836 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 2837 return ERR_PTR(-ENOSPC); 2838 2839 /* check if there is someone else holds reference */ 2840 if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) 2841 return ERR_PTR(-ENOSPC); 2842 2843 if (atomic_read(&inode->i_count) > 2) 2844 return ERR_PTR(-ENOSPC); 2845 2846 if (xchg(&root->fs_info->enospc_unlink, 1)) 2847 return ERR_PTR(-ENOSPC); 2848 2849 path = btrfs_alloc_path(); 2850 if (!path) { 2851 root->fs_info->enospc_unlink = 0; 2852 return ERR_PTR(-ENOMEM); 2853 } 2854 2855 /* 1 for the orphan item */ 2856 trans = btrfs_start_transaction(root, 1); 2857 if (IS_ERR(trans)) { 2858 btrfs_free_path(path); 2859 root->fs_info->enospc_unlink = 0; 2860 return trans; 2861 } 2862 2863 path->skip_locking = 1; 2864 path->search_commit_root = 1; 2865 2866 ret = btrfs_lookup_inode(trans, root, path, 2867 &BTRFS_I(dir)->location, 0); 2868 if (ret < 0) { 2869 err = ret; 2870 goto out; 2871 } 2872 if (ret == 0) { 2873 if (check_path_shared(root, path)) 2874 goto out; 2875 } else { 2876 check_link = 0; 2877 } 2878 btrfs_release_path(path); 2879 2880 ret = btrfs_lookup_inode(trans, root, path, 2881 &BTRFS_I(inode)->location, 0); 2882 if (ret < 0) { 2883 err = ret; 2884 goto out; 2885 } 2886 if (ret == 0) { 2887 if (check_path_shared(root, path)) 2888 goto out; 2889 } else { 2890 check_link = 0; 2891 } 2892 btrfs_release_path(path); 2893 2894 if (ret == 0 && S_ISREG(inode->i_mode)) { 2895 ret = btrfs_lookup_file_extent(trans, root, path, 2896 ino, (u64)-1, 0); 2897 if (ret < 0) { 2898 err = ret; 2899 goto out; 2900 } 2901 BUG_ON(ret == 0); /* Corruption */ 2902 if (check_path_shared(root, path)) 2903 goto out; 2904 btrfs_release_path(path); 2905 } 2906 2907 if (!check_link) { 2908 err = 0; 2909 goto out; 2910 } 2911 2912 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2913 
dentry->d_name.name, dentry->d_name.len, 0); 2914 if (IS_ERR(di)) { 2915 err = PTR_ERR(di); 2916 goto out; 2917 } 2918 if (di) { 2919 if (check_path_shared(root, path)) 2920 goto out; 2921 } else { 2922 err = 0; 2923 goto out; 2924 } 2925 btrfs_release_path(path); 2926 2927 ref = btrfs_lookup_inode_ref(trans, root, path, 2928 dentry->d_name.name, dentry->d_name.len, 2929 ino, dir_ino, 0); 2930 if (IS_ERR(ref)) { 2931 err = PTR_ERR(ref); 2932 goto out; 2933 } 2934 BUG_ON(!ref); /* Logic error */ 2935 if (check_path_shared(root, path)) 2936 goto out; 2937 index = btrfs_inode_ref_index(path->nodes[0], ref); 2938 btrfs_release_path(path); 2939 2940 /* 2941 * This is a commit root search, if we can lookup inode item and other 2942 * relative items in the commit root, it means the transaction of 2943 * dir/file creation has been committed, and the dir index item that we 2944 * delay to insert has also been inserted into the commit root. So 2945 * we needn't worry about the delayed insertion of the dir index item 2946 * here. 2947 */ 2948 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, 2949 dentry->d_name.name, dentry->d_name.len, 0); 2950 if (IS_ERR(di)) { 2951 err = PTR_ERR(di); 2952 goto out; 2953 } 2954 BUG_ON(ret == -ENOENT); 2955 if (check_path_shared(root, path)) 2956 goto out; 2957 2958 err = 0; 2959 out: 2960 btrfs_free_path(path); 2961 /* Migrate the orphan reservation over */ 2962 if (!err) 2963 err = btrfs_block_rsv_migrate(trans->block_rsv, 2964 &root->fs_info->global_block_rsv, 2965 trans->bytes_reserved); 2966 2967 if (err) { 2968 btrfs_end_transaction(trans, root); 2969 root->fs_info->enospc_unlink = 0; 2970 return ERR_PTR(err); 2971 } 2972 2973 trans->block_rsv = &root->fs_info->global_block_rsv; 2974 return trans; 2975 } 2976 2977 static void __unlink_end_trans(struct btrfs_trans_handle *trans, 2978 struct btrfs_root *root) 2979 { 2980 if (trans->block_rsv == &root->fs_info->global_block_rsv) { 2981 btrfs_block_rsv_release(root, trans->block_rsv, 2982 trans->bytes_reserved); 2983 trans->block_rsv = &root->fs_info->trans_block_rsv; 2984 BUG_ON(!root->fs_info->enospc_unlink); 2985 root->fs_info->enospc_unlink = 0; 2986 } 2987 btrfs_end_transaction(trans, root); 2988 } 2989 2990 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 2991 { 2992 struct btrfs_root *root = BTRFS_I(dir)->root; 2993 struct btrfs_trans_handle *trans; 2994 struct inode *inode = dentry->d_inode; 2995 int ret; 2996 unsigned long nr = 0; 2997 2998 trans = __unlink_start_trans(dir, dentry); 2999 if (IS_ERR(trans)) 3000 return PTR_ERR(trans); 3001 3002 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3003 3004 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3005 dentry->d_name.name, dentry->d_name.len); 3006 if (ret) 3007 goto out; 3008 3009 if (inode->i_nlink == 0) { 3010 ret = btrfs_orphan_add(trans, inode); 3011 if (ret) 3012 goto out; 3013 } 3014 3015 out: 3016 nr = trans->blocks_used; 3017 __unlink_end_trans(trans, root); 3018 btrfs_btree_balance_dirty(root, nr); 3019 return ret; 3020 } 3021 3022 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3023 struct btrfs_root *root, 3024 struct inode *dir, u64 objectid, 3025 const char *name, int name_len) 3026 { 3027 struct btrfs_path *path; 3028 struct extent_buffer *leaf; 3029 struct btrfs_dir_item *di; 3030 struct btrfs_key key; 3031 u64 index; 3032 int ret; 3033 u64 dir_ino = btrfs_ino(dir); 3034 3035 path = btrfs_alloc_path(); 3036 if (!path) 3037 return -ENOMEM; 3038 3039 di = 
btrfs_lookup_dir_item(trans, root, path, dir_ino, 3040 name, name_len, -1); 3041 if (IS_ERR_OR_NULL(di)) { 3042 if (!di) 3043 ret = -ENOENT; 3044 else 3045 ret = PTR_ERR(di); 3046 goto out; 3047 } 3048 3049 leaf = path->nodes[0]; 3050 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3051 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3052 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3053 if (ret) { 3054 btrfs_abort_transaction(trans, root, ret); 3055 goto out; 3056 } 3057 btrfs_release_path(path); 3058 3059 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3060 objectid, root->root_key.objectid, 3061 dir_ino, &index, name, name_len); 3062 if (ret < 0) { 3063 if (ret != -ENOENT) { 3064 btrfs_abort_transaction(trans, root, ret); 3065 goto out; 3066 } 3067 di = btrfs_search_dir_index_item(root, path, dir_ino, 3068 name, name_len); 3069 if (IS_ERR_OR_NULL(di)) { 3070 if (!di) 3071 ret = -ENOENT; 3072 else 3073 ret = PTR_ERR(di); 3074 btrfs_abort_transaction(trans, root, ret); 3075 goto out; 3076 } 3077 3078 leaf = path->nodes[0]; 3079 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3080 btrfs_release_path(path); 3081 index = key.offset; 3082 } 3083 btrfs_release_path(path); 3084 3085 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3086 if (ret) { 3087 btrfs_abort_transaction(trans, root, ret); 3088 goto out; 3089 } 3090 3091 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3092 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3093 ret = btrfs_update_inode(trans, root, dir); 3094 if (ret) 3095 btrfs_abort_transaction(trans, root, ret); 3096 out: 3097 btrfs_free_path(path); 3098 return ret; 3099 } 3100 3101 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 3102 { 3103 struct inode *inode = dentry->d_inode; 3104 int err = 0; 3105 struct btrfs_root *root = BTRFS_I(dir)->root; 3106 struct btrfs_trans_handle *trans; 3107 unsigned long nr = 0; 3108 3109 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || 3110 btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) 3111 return -ENOTEMPTY; 3112 3113 trans = __unlink_start_trans(dir, dentry); 3114 if (IS_ERR(trans)) 3115 return PTR_ERR(trans); 3116 3117 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3118 err = btrfs_unlink_subvol(trans, root, dir, 3119 BTRFS_I(inode)->location.objectid, 3120 dentry->d_name.name, 3121 dentry->d_name.len); 3122 goto out; 3123 } 3124 3125 err = btrfs_orphan_add(trans, inode); 3126 if (err) 3127 goto out; 3128 3129 /* now the directory is empty */ 3130 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3131 dentry->d_name.name, dentry->d_name.len); 3132 if (!err) 3133 btrfs_i_size_write(inode, 0); 3134 out: 3135 nr = trans->blocks_used; 3136 __unlink_end_trans(trans, root); 3137 btrfs_btree_balance_dirty(root, nr); 3138 3139 return err; 3140 } 3141 3142 /* 3143 * this can truncate away extent items, csum items and directory items. 3144 * It starts at a high offset and removes keys until it can't find 3145 * any higher than new_size 3146 * 3147 * csum items that cross the new i_size are truncated to the new size 3148 * as well. 3149 * 3150 * min_type is the minimum key type to truncate down to. If set to 0, this 3151 * will kill all the items on this inode, including the INODE_ITEM_KEY. 
3152 */ 3153 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3154 struct btrfs_root *root, 3155 struct inode *inode, 3156 u64 new_size, u32 min_type) 3157 { 3158 struct btrfs_path *path; 3159 struct extent_buffer *leaf; 3160 struct btrfs_file_extent_item *fi; 3161 struct btrfs_key key; 3162 struct btrfs_key found_key; 3163 u64 extent_start = 0; 3164 u64 extent_num_bytes = 0; 3165 u64 extent_offset = 0; 3166 u64 item_end = 0; 3167 u64 mask = root->sectorsize - 1; 3168 u32 found_type = (u8)-1; 3169 int found_extent; 3170 int del_item; 3171 int pending_del_nr = 0; 3172 int pending_del_slot = 0; 3173 int extent_type = -1; 3174 int ret; 3175 int err = 0; 3176 u64 ino = btrfs_ino(inode); 3177 3178 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3179 3180 path = btrfs_alloc_path(); 3181 if (!path) 3182 return -ENOMEM; 3183 path->reada = -1; 3184 3185 if (root->ref_cows || root == root->fs_info->tree_root) 3186 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 3187 3188 /* 3189 * This function is also used to drop the items in the log tree before 3190 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 3191 * it is used to drop the loged items. So we shouldn't kill the delayed 3192 * items. 3193 */ 3194 if (min_type == 0 && root == BTRFS_I(inode)->root) 3195 btrfs_kill_delayed_inode_items(inode); 3196 3197 key.objectid = ino; 3198 key.offset = (u64)-1; 3199 key.type = (u8)-1; 3200 3201 search_again: 3202 path->leave_spinning = 1; 3203 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3204 if (ret < 0) { 3205 err = ret; 3206 goto out; 3207 } 3208 3209 if (ret > 0) { 3210 /* there are no items in the tree for us to truncate, we're 3211 * done 3212 */ 3213 if (path->slots[0] == 0) 3214 goto out; 3215 path->slots[0]--; 3216 } 3217 3218 while (1) { 3219 fi = NULL; 3220 leaf = path->nodes[0]; 3221 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3222 found_type = btrfs_key_type(&found_key); 3223 3224 if (found_key.objectid != ino) 3225 break; 3226 3227 if (found_type < min_type) 3228 break; 3229 3230 item_end = found_key.offset; 3231 if (found_type == BTRFS_EXTENT_DATA_KEY) { 3232 fi = btrfs_item_ptr(leaf, path->slots[0], 3233 struct btrfs_file_extent_item); 3234 extent_type = btrfs_file_extent_type(leaf, fi); 3235 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3236 item_end += 3237 btrfs_file_extent_num_bytes(leaf, fi); 3238 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3239 item_end += btrfs_file_extent_inline_len(leaf, 3240 fi); 3241 } 3242 item_end--; 3243 } 3244 if (found_type > min_type) { 3245 del_item = 1; 3246 } else { 3247 if (item_end < new_size) 3248 break; 3249 if (found_key.offset >= new_size) 3250 del_item = 1; 3251 else 3252 del_item = 0; 3253 } 3254 found_extent = 0; 3255 /* FIXME, shrink the extent if the ref count is only 1 */ 3256 if (found_type != BTRFS_EXTENT_DATA_KEY) 3257 goto delete; 3258 3259 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3260 u64 num_dec; 3261 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 3262 if (!del_item) { 3263 u64 orig_num_bytes = 3264 btrfs_file_extent_num_bytes(leaf, fi); 3265 extent_num_bytes = new_size - 3266 found_key.offset + root->sectorsize - 1; 3267 extent_num_bytes = extent_num_bytes & 3268 ~((u64)root->sectorsize - 1); 3269 btrfs_set_file_extent_num_bytes(leaf, fi, 3270 extent_num_bytes); 3271 num_dec = (orig_num_bytes - 3272 extent_num_bytes); 3273 if (root->ref_cows && extent_start != 0) 3274 inode_sub_bytes(inode, num_dec); 3275 
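			/*
			 * the extent item was only shrunk in place, so the
			 * leaf has to be dirtied here; items deleted outright
			 * are batched up through pending_del_slot and
			 * pending_del_nr further down
			 */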
btrfs_mark_buffer_dirty(leaf); 3276 } else { 3277 extent_num_bytes = 3278 btrfs_file_extent_disk_num_bytes(leaf, 3279 fi); 3280 extent_offset = found_key.offset - 3281 btrfs_file_extent_offset(leaf, fi); 3282 3283 /* FIXME blocksize != 4096 */ 3284 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 3285 if (extent_start != 0) { 3286 found_extent = 1; 3287 if (root->ref_cows) 3288 inode_sub_bytes(inode, num_dec); 3289 } 3290 } 3291 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3292 /* 3293 * we can't truncate inline items that have had 3294 * special encodings 3295 */ 3296 if (!del_item && 3297 btrfs_file_extent_compression(leaf, fi) == 0 && 3298 btrfs_file_extent_encryption(leaf, fi) == 0 && 3299 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 3300 u32 size = new_size - found_key.offset; 3301 3302 if (root->ref_cows) { 3303 inode_sub_bytes(inode, item_end + 1 - 3304 new_size); 3305 } 3306 size = 3307 btrfs_file_extent_calc_inline_size(size); 3308 btrfs_truncate_item(trans, root, path, 3309 size, 1); 3310 } else if (root->ref_cows) { 3311 inode_sub_bytes(inode, item_end + 1 - 3312 found_key.offset); 3313 } 3314 } 3315 delete: 3316 if (del_item) { 3317 if (!pending_del_nr) { 3318 /* no pending yet, add ourselves */ 3319 pending_del_slot = path->slots[0]; 3320 pending_del_nr = 1; 3321 } else if (pending_del_nr && 3322 path->slots[0] + 1 == pending_del_slot) { 3323 /* hop on the pending chunk */ 3324 pending_del_nr++; 3325 pending_del_slot = path->slots[0]; 3326 } else { 3327 BUG(); 3328 } 3329 } else { 3330 break; 3331 } 3332 if (found_extent && (root->ref_cows || 3333 root == root->fs_info->tree_root)) { 3334 btrfs_set_path_blocking(path); 3335 ret = btrfs_free_extent(trans, root, extent_start, 3336 extent_num_bytes, 0, 3337 btrfs_header_owner(leaf), 3338 ino, extent_offset, 0); 3339 BUG_ON(ret); 3340 } 3341 3342 if (found_type == BTRFS_INODE_ITEM_KEY) 3343 break; 3344 3345 if (path->slots[0] == 0 || 3346 path->slots[0] != pending_del_slot) { 3347 if (root->ref_cows && 3348 BTRFS_I(inode)->location.objectid != 3349 BTRFS_FREE_INO_OBJECTID) { 3350 err = -EAGAIN; 3351 goto out; 3352 } 3353 if (pending_del_nr) { 3354 ret = btrfs_del_items(trans, root, path, 3355 pending_del_slot, 3356 pending_del_nr); 3357 if (ret) { 3358 btrfs_abort_transaction(trans, 3359 root, ret); 3360 goto error; 3361 } 3362 pending_del_nr = 0; 3363 } 3364 btrfs_release_path(path); 3365 goto search_again; 3366 } else { 3367 path->slots[0]--; 3368 } 3369 } 3370 out: 3371 if (pending_del_nr) { 3372 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3373 pending_del_nr); 3374 if (ret) 3375 btrfs_abort_transaction(trans, root, ret); 3376 } 3377 error: 3378 btrfs_free_path(path); 3379 return err; 3380 } 3381 3382 /* 3383 * taken from block_truncate_page, but does cow as it zeros out 3384 * any bytes left in the last page in the file. 
3385 */ 3386 static int btrfs_truncate_page(struct address_space *mapping, loff_t from) 3387 { 3388 struct inode *inode = mapping->host; 3389 struct btrfs_root *root = BTRFS_I(inode)->root; 3390 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3391 struct btrfs_ordered_extent *ordered; 3392 struct extent_state *cached_state = NULL; 3393 char *kaddr; 3394 u32 blocksize = root->sectorsize; 3395 pgoff_t index = from >> PAGE_CACHE_SHIFT; 3396 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3397 struct page *page; 3398 gfp_t mask = btrfs_alloc_write_mask(mapping); 3399 int ret = 0; 3400 u64 page_start; 3401 u64 page_end; 3402 3403 if ((offset & (blocksize - 1)) == 0) 3404 goto out; 3405 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 3406 if (ret) 3407 goto out; 3408 3409 ret = -ENOMEM; 3410 again: 3411 page = find_or_create_page(mapping, index, mask); 3412 if (!page) { 3413 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3414 goto out; 3415 } 3416 3417 page_start = page_offset(page); 3418 page_end = page_start + PAGE_CACHE_SIZE - 1; 3419 3420 if (!PageUptodate(page)) { 3421 ret = btrfs_readpage(NULL, page); 3422 lock_page(page); 3423 if (page->mapping != mapping) { 3424 unlock_page(page); 3425 page_cache_release(page); 3426 goto again; 3427 } 3428 if (!PageUptodate(page)) { 3429 ret = -EIO; 3430 goto out_unlock; 3431 } 3432 } 3433 wait_on_page_writeback(page); 3434 3435 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 3436 set_page_extent_mapped(page); 3437 3438 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3439 if (ordered) { 3440 unlock_extent_cached(io_tree, page_start, page_end, 3441 &cached_state, GFP_NOFS); 3442 unlock_page(page); 3443 page_cache_release(page); 3444 btrfs_start_ordered_extent(inode, ordered, 1); 3445 btrfs_put_ordered_extent(ordered); 3446 goto again; 3447 } 3448 3449 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 3450 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 3451 0, 0, &cached_state, GFP_NOFS); 3452 3453 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 3454 &cached_state); 3455 if (ret) { 3456 unlock_extent_cached(io_tree, page_start, page_end, 3457 &cached_state, GFP_NOFS); 3458 goto out_unlock; 3459 } 3460 3461 ret = 0; 3462 if (offset != PAGE_CACHE_SIZE) { 3463 kaddr = kmap(page); 3464 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); 3465 flush_dcache_page(page); 3466 kunmap(page); 3467 } 3468 ClearPageChecked(page); 3469 set_page_dirty(page); 3470 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, 3471 GFP_NOFS); 3472 3473 out_unlock: 3474 if (ret) 3475 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3476 unlock_page(page); 3477 page_cache_release(page); 3478 out: 3479 return ret; 3480 } 3481 3482 /* 3483 * This function puts in dummy file extents for the area we're creating a hole 3484 * for. 
So if we are truncating this file to a larger size we need to insert 3485 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 3486 * the range between oldsize and size 3487 */ 3488 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 3489 { 3490 struct btrfs_trans_handle *trans; 3491 struct btrfs_root *root = BTRFS_I(inode)->root; 3492 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3493 struct extent_map *em = NULL; 3494 struct extent_state *cached_state = NULL; 3495 u64 mask = root->sectorsize - 1; 3496 u64 hole_start = (oldsize + mask) & ~mask; 3497 u64 block_end = (size + mask) & ~mask; 3498 u64 last_byte; 3499 u64 cur_offset; 3500 u64 hole_size; 3501 int err = 0; 3502 3503 if (size <= hole_start) 3504 return 0; 3505 3506 while (1) { 3507 struct btrfs_ordered_extent *ordered; 3508 btrfs_wait_ordered_range(inode, hole_start, 3509 block_end - hole_start); 3510 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 3511 &cached_state); 3512 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3513 if (!ordered) 3514 break; 3515 unlock_extent_cached(io_tree, hole_start, block_end - 1, 3516 &cached_state, GFP_NOFS); 3517 btrfs_put_ordered_extent(ordered); 3518 } 3519 3520 cur_offset = hole_start; 3521 while (1) { 3522 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3523 block_end - cur_offset, 0); 3524 if (IS_ERR(em)) { 3525 err = PTR_ERR(em); 3526 break; 3527 } 3528 last_byte = min(extent_map_end(em), block_end); 3529 last_byte = (last_byte + mask) & ~mask; 3530 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3531 u64 hint_byte = 0; 3532 hole_size = last_byte - cur_offset; 3533 3534 trans = btrfs_start_transaction(root, 3); 3535 if (IS_ERR(trans)) { 3536 err = PTR_ERR(trans); 3537 break; 3538 } 3539 3540 err = btrfs_drop_extents(trans, inode, cur_offset, 3541 cur_offset + hole_size, 3542 &hint_byte, 1); 3543 if (err) { 3544 btrfs_abort_transaction(trans, root, err); 3545 btrfs_end_transaction(trans, root); 3546 break; 3547 } 3548 3549 err = btrfs_insert_file_extent(trans, root, 3550 btrfs_ino(inode), cur_offset, 0, 3551 0, hole_size, 0, hole_size, 3552 0, 0, 0); 3553 if (err) { 3554 btrfs_abort_transaction(trans, root, err); 3555 btrfs_end_transaction(trans, root); 3556 break; 3557 } 3558 3559 btrfs_drop_extent_cache(inode, hole_start, 3560 last_byte - 1, 0); 3561 3562 btrfs_update_inode(trans, root, inode); 3563 btrfs_end_transaction(trans, root); 3564 } 3565 free_extent_map(em); 3566 em = NULL; 3567 cur_offset = last_byte; 3568 if (cur_offset >= block_end) 3569 break; 3570 } 3571 3572 free_extent_map(em); 3573 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 3574 GFP_NOFS); 3575 return err; 3576 } 3577 3578 static int btrfs_setsize(struct inode *inode, loff_t newsize) 3579 { 3580 struct btrfs_root *root = BTRFS_I(inode)->root; 3581 struct btrfs_trans_handle *trans; 3582 loff_t oldsize = i_size_read(inode); 3583 int ret; 3584 3585 if (newsize == oldsize) 3586 return 0; 3587 3588 if (newsize > oldsize) { 3589 truncate_pagecache(inode, oldsize, newsize); 3590 ret = btrfs_cont_expand(inode, oldsize, newsize); 3591 if (ret) 3592 return ret; 3593 3594 trans = btrfs_start_transaction(root, 1); 3595 if (IS_ERR(trans)) 3596 return PTR_ERR(trans); 3597 3598 i_size_write(inode, newsize); 3599 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 3600 ret = btrfs_update_inode(trans, root, inode); 3601 btrfs_end_transaction(trans, root); 3602 } else { 3603 3604 /* 3605 * We're truncating a file that 
used to have good data down to 3606 * zero. Make sure it gets into the ordered flush list so that 3607 * any new writes get down to disk quickly. 3608 */ 3609 if (newsize == 0) 3610 BTRFS_I(inode)->ordered_data_close = 1; 3611 3612 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3613 truncate_setsize(inode, newsize); 3614 ret = btrfs_truncate(inode); 3615 } 3616 3617 return ret; 3618 } 3619 3620 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 3621 { 3622 struct inode *inode = dentry->d_inode; 3623 struct btrfs_root *root = BTRFS_I(inode)->root; 3624 int err; 3625 3626 if (btrfs_root_readonly(root)) 3627 return -EROFS; 3628 3629 err = inode_change_ok(inode, attr); 3630 if (err) 3631 return err; 3632 3633 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3634 err = btrfs_setsize(inode, attr->ia_size); 3635 if (err) 3636 return err; 3637 } 3638 3639 if (attr->ia_valid) { 3640 setattr_copy(inode, attr); 3641 err = btrfs_dirty_inode(inode); 3642 3643 if (!err && attr->ia_valid & ATTR_MODE) 3644 err = btrfs_acl_chmod(inode); 3645 } 3646 3647 return err; 3648 } 3649 3650 void btrfs_evict_inode(struct inode *inode) 3651 { 3652 struct btrfs_trans_handle *trans; 3653 struct btrfs_root *root = BTRFS_I(inode)->root; 3654 struct btrfs_block_rsv *rsv, *global_rsv; 3655 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 3656 unsigned long nr; 3657 int ret; 3658 3659 trace_btrfs_inode_evict(inode); 3660 3661 truncate_inode_pages(&inode->i_data, 0); 3662 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3663 btrfs_is_free_space_inode(root, inode))) 3664 goto no_delete; 3665 3666 if (is_bad_inode(inode)) { 3667 btrfs_orphan_del(NULL, inode); 3668 goto no_delete; 3669 } 3670 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ 3671 btrfs_wait_ordered_range(inode, 0, (u64)-1); 3672 3673 if (root->fs_info->log_root_recovering) { 3674 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan)); 3675 goto no_delete; 3676 } 3677 3678 if (inode->i_nlink > 0) { 3679 BUG_ON(btrfs_root_refs(&root->root_item) != 0); 3680 goto no_delete; 3681 } 3682 3683 rsv = btrfs_alloc_block_rsv(root); 3684 if (!rsv) { 3685 btrfs_orphan_del(NULL, inode); 3686 goto no_delete; 3687 } 3688 rsv->size = min_size; 3689 global_rsv = &root->fs_info->global_block_rsv; 3690 3691 btrfs_i_size_write(inode, 0); 3692 3693 /* 3694 * This is a bit simpler than btrfs_truncate since 3695 * 3696 * 1) We've already reserved our space for our orphan item in the 3697 * unlink. 3698 * 2) We're going to delete the inode item, so we don't need to update 3699 * it at all. 3700 * 3701 * So we just need to reserve some slack space in case we add bytes when 3702 * doing the truncate. 3703 */ 3704 while (1) { 3705 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); 3706 3707 /* 3708 * Try and steal from the global reserve since we will 3709 * likely not use this space anyway, we want to try as 3710 * hard as possible to get this to work. 
3711 */ 3712 if (ret) 3713 ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size); 3714 3715 if (ret) { 3716 printk(KERN_WARNING "Could not get space for a " 3717 "delete, will truncate on mount %d\n", ret); 3718 btrfs_orphan_del(NULL, inode); 3719 btrfs_free_block_rsv(root, rsv); 3720 goto no_delete; 3721 } 3722 3723 trans = btrfs_start_transaction(root, 0); 3724 if (IS_ERR(trans)) { 3725 btrfs_orphan_del(NULL, inode); 3726 btrfs_free_block_rsv(root, rsv); 3727 goto no_delete; 3728 } 3729 3730 trans->block_rsv = rsv; 3731 3732 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 3733 if (ret != -EAGAIN) 3734 break; 3735 3736 nr = trans->blocks_used; 3737 btrfs_end_transaction(trans, root); 3738 trans = NULL; 3739 btrfs_btree_balance_dirty(root, nr); 3740 } 3741 3742 btrfs_free_block_rsv(root, rsv); 3743 3744 if (ret == 0) { 3745 trans->block_rsv = root->orphan_block_rsv; 3746 ret = btrfs_orphan_del(trans, inode); 3747 BUG_ON(ret); 3748 } 3749 3750 trans->block_rsv = &root->fs_info->trans_block_rsv; 3751 if (!(root == root->fs_info->tree_root || 3752 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 3753 btrfs_return_ino(root, btrfs_ino(inode)); 3754 3755 nr = trans->blocks_used; 3756 btrfs_end_transaction(trans, root); 3757 btrfs_btree_balance_dirty(root, nr); 3758 no_delete: 3759 end_writeback(inode); 3760 return; 3761 } 3762 3763 /* 3764 * this returns the key found in the dir entry in the location pointer. 3765 * If no dir entries were found, location->objectid is 0. 3766 */ 3767 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 3768 struct btrfs_key *location) 3769 { 3770 const char *name = dentry->d_name.name; 3771 int namelen = dentry->d_name.len; 3772 struct btrfs_dir_item *di; 3773 struct btrfs_path *path; 3774 struct btrfs_root *root = BTRFS_I(dir)->root; 3775 int ret = 0; 3776 3777 path = btrfs_alloc_path(); 3778 if (!path) 3779 return -ENOMEM; 3780 3781 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 3782 namelen, 0); 3783 if (IS_ERR(di)) 3784 ret = PTR_ERR(di); 3785 3786 if (IS_ERR_OR_NULL(di)) 3787 goto out_err; 3788 3789 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 3790 out: 3791 btrfs_free_path(path); 3792 return ret; 3793 out_err: 3794 location->objectid = 0; 3795 goto out; 3796 } 3797 3798 /* 3799 * when we hit a tree root in a directory, the btrfs part of the inode 3800 * needs to be changed to reflect the root directory of the tree root. This 3801 * is kind of like crossing a mount point. 
3802 */ 3803 static int fixup_tree_root_location(struct btrfs_root *root, 3804 struct inode *dir, 3805 struct dentry *dentry, 3806 struct btrfs_key *location, 3807 struct btrfs_root **sub_root) 3808 { 3809 struct btrfs_path *path; 3810 struct btrfs_root *new_root; 3811 struct btrfs_root_ref *ref; 3812 struct extent_buffer *leaf; 3813 int ret; 3814 int err = 0; 3815 3816 path = btrfs_alloc_path(); 3817 if (!path) { 3818 err = -ENOMEM; 3819 goto out; 3820 } 3821 3822 err = -ENOENT; 3823 ret = btrfs_find_root_ref(root->fs_info->tree_root, path, 3824 BTRFS_I(dir)->root->root_key.objectid, 3825 location->objectid); 3826 if (ret) { 3827 if (ret < 0) 3828 err = ret; 3829 goto out; 3830 } 3831 3832 leaf = path->nodes[0]; 3833 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 3834 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 3835 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 3836 goto out; 3837 3838 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 3839 (unsigned long)(ref + 1), 3840 dentry->d_name.len); 3841 if (ret) 3842 goto out; 3843 3844 btrfs_release_path(path); 3845 3846 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 3847 if (IS_ERR(new_root)) { 3848 err = PTR_ERR(new_root); 3849 goto out; 3850 } 3851 3852 if (btrfs_root_refs(&new_root->root_item) == 0) { 3853 err = -ENOENT; 3854 goto out; 3855 } 3856 3857 *sub_root = new_root; 3858 location->objectid = btrfs_root_dirid(&new_root->root_item); 3859 location->type = BTRFS_INODE_ITEM_KEY; 3860 location->offset = 0; 3861 err = 0; 3862 out: 3863 btrfs_free_path(path); 3864 return err; 3865 } 3866 3867 static void inode_tree_add(struct inode *inode) 3868 { 3869 struct btrfs_root *root = BTRFS_I(inode)->root; 3870 struct btrfs_inode *entry; 3871 struct rb_node **p; 3872 struct rb_node *parent; 3873 u64 ino = btrfs_ino(inode); 3874 again: 3875 p = &root->inode_tree.rb_node; 3876 parent = NULL; 3877 3878 if (inode_unhashed(inode)) 3879 return; 3880 3881 spin_lock(&root->inode_lock); 3882 while (*p) { 3883 parent = *p; 3884 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3885 3886 if (ino < btrfs_ino(&entry->vfs_inode)) 3887 p = &parent->rb_left; 3888 else if (ino > btrfs_ino(&entry->vfs_inode)) 3889 p = &parent->rb_right; 3890 else { 3891 WARN_ON(!(entry->vfs_inode.i_state & 3892 (I_WILL_FREE | I_FREEING))); 3893 rb_erase(parent, &root->inode_tree); 3894 RB_CLEAR_NODE(parent); 3895 spin_unlock(&root->inode_lock); 3896 goto again; 3897 } 3898 } 3899 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); 3900 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3901 spin_unlock(&root->inode_lock); 3902 } 3903 3904 static void inode_tree_del(struct inode *inode) 3905 { 3906 struct btrfs_root *root = BTRFS_I(inode)->root; 3907 int empty = 0; 3908 3909 spin_lock(&root->inode_lock); 3910 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 3911 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3912 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 3913 empty = RB_EMPTY_ROOT(&root->inode_tree); 3914 } 3915 spin_unlock(&root->inode_lock); 3916 3917 /* 3918 * Free space cache has inodes in the tree root, but the tree root has a 3919 * root_refs of 0, so this could end up dropping the tree root as a 3920 * snapshot, so we need the extra !root->fs_info->tree_root check to 3921 * make sure we don't drop it. 
3922 */ 3923 if (empty && btrfs_root_refs(&root->root_item) == 0 && 3924 root != root->fs_info->tree_root) { 3925 synchronize_srcu(&root->fs_info->subvol_srcu); 3926 spin_lock(&root->inode_lock); 3927 empty = RB_EMPTY_ROOT(&root->inode_tree); 3928 spin_unlock(&root->inode_lock); 3929 if (empty) 3930 btrfs_add_dead_root(root); 3931 } 3932 } 3933 3934 void btrfs_invalidate_inodes(struct btrfs_root *root) 3935 { 3936 struct rb_node *node; 3937 struct rb_node *prev; 3938 struct btrfs_inode *entry; 3939 struct inode *inode; 3940 u64 objectid = 0; 3941 3942 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 3943 3944 spin_lock(&root->inode_lock); 3945 again: 3946 node = root->inode_tree.rb_node; 3947 prev = NULL; 3948 while (node) { 3949 prev = node; 3950 entry = rb_entry(node, struct btrfs_inode, rb_node); 3951 3952 if (objectid < btrfs_ino(&entry->vfs_inode)) 3953 node = node->rb_left; 3954 else if (objectid > btrfs_ino(&entry->vfs_inode)) 3955 node = node->rb_right; 3956 else 3957 break; 3958 } 3959 if (!node) { 3960 while (prev) { 3961 entry = rb_entry(prev, struct btrfs_inode, rb_node); 3962 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 3963 node = prev; 3964 break; 3965 } 3966 prev = rb_next(prev); 3967 } 3968 } 3969 while (node) { 3970 entry = rb_entry(node, struct btrfs_inode, rb_node); 3971 objectid = btrfs_ino(&entry->vfs_inode) + 1; 3972 inode = igrab(&entry->vfs_inode); 3973 if (inode) { 3974 spin_unlock(&root->inode_lock); 3975 if (atomic_read(&inode->i_count) > 1) 3976 d_prune_aliases(inode); 3977 /* 3978 * btrfs_drop_inode will have it removed from 3979 * the inode cache when its usage count 3980 * hits zero. 3981 */ 3982 iput(inode); 3983 cond_resched(); 3984 spin_lock(&root->inode_lock); 3985 goto again; 3986 } 3987 3988 if (cond_resched_lock(&root->inode_lock)) 3989 goto again; 3990 3991 node = rb_next(node); 3992 } 3993 spin_unlock(&root->inode_lock); 3994 } 3995 3996 static int btrfs_init_locked_inode(struct inode *inode, void *p) 3997 { 3998 struct btrfs_iget_args *args = p; 3999 inode->i_ino = args->ino; 4000 BTRFS_I(inode)->root = args->root; 4001 btrfs_set_inode_space_info(args->root, inode); 4002 return 0; 4003 } 4004 4005 static int btrfs_find_actor(struct inode *inode, void *opaque) 4006 { 4007 struct btrfs_iget_args *args = opaque; 4008 return args->ino == btrfs_ino(inode) && 4009 args->root == BTRFS_I(inode)->root; 4010 } 4011 4012 static struct inode *btrfs_iget_locked(struct super_block *s, 4013 u64 objectid, 4014 struct btrfs_root *root) 4015 { 4016 struct inode *inode; 4017 struct btrfs_iget_args args; 4018 args.ino = objectid; 4019 args.root = root; 4020 4021 inode = iget5_locked(s, objectid, btrfs_find_actor, 4022 btrfs_init_locked_inode, 4023 (void *)&args); 4024 return inode; 4025 } 4026 4027 /* Get an inode object given its location and corresponding root. 
4028 * Returns in *is_new if the inode was read from disk 4029 */ 4030 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 4031 struct btrfs_root *root, int *new) 4032 { 4033 struct inode *inode; 4034 4035 inode = btrfs_iget_locked(s, location->objectid, root); 4036 if (!inode) 4037 return ERR_PTR(-ENOMEM); 4038 4039 if (inode->i_state & I_NEW) { 4040 BTRFS_I(inode)->root = root; 4041 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); 4042 btrfs_read_locked_inode(inode); 4043 if (!is_bad_inode(inode)) { 4044 inode_tree_add(inode); 4045 unlock_new_inode(inode); 4046 if (new) 4047 *new = 1; 4048 } else { 4049 unlock_new_inode(inode); 4050 iput(inode); 4051 inode = ERR_PTR(-ESTALE); 4052 } 4053 } 4054 4055 return inode; 4056 } 4057 4058 static struct inode *new_simple_dir(struct super_block *s, 4059 struct btrfs_key *key, 4060 struct btrfs_root *root) 4061 { 4062 struct inode *inode = new_inode(s); 4063 4064 if (!inode) 4065 return ERR_PTR(-ENOMEM); 4066 4067 BTRFS_I(inode)->root = root; 4068 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 4069 BTRFS_I(inode)->dummy_inode = 1; 4070 4071 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 4072 inode->i_op = &simple_dir_inode_operations; 4073 inode->i_fop = &simple_dir_operations; 4074 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 4075 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4076 4077 return inode; 4078 } 4079 4080 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 4081 { 4082 struct inode *inode; 4083 struct btrfs_root *root = BTRFS_I(dir)->root; 4084 struct btrfs_root *sub_root = root; 4085 struct btrfs_key location; 4086 int index; 4087 int ret = 0; 4088 4089 if (dentry->d_name.len > BTRFS_NAME_LEN) 4090 return ERR_PTR(-ENAMETOOLONG); 4091 4092 if (unlikely(d_need_lookup(dentry))) { 4093 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); 4094 kfree(dentry->d_fsdata); 4095 dentry->d_fsdata = NULL; 4096 /* This thing is hashed, drop it for now */ 4097 d_drop(dentry); 4098 } else { 4099 ret = btrfs_inode_by_name(dir, dentry, &location); 4100 } 4101 4102 if (ret < 0) 4103 return ERR_PTR(ret); 4104 4105 if (location.objectid == 0) 4106 return NULL; 4107 4108 if (location.type == BTRFS_INODE_ITEM_KEY) { 4109 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 4110 return inode; 4111 } 4112 4113 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); 4114 4115 index = srcu_read_lock(&root->fs_info->subvol_srcu); 4116 ret = fixup_tree_root_location(root, dir, dentry, 4117 &location, &sub_root); 4118 if (ret < 0) { 4119 if (ret != -ENOENT) 4120 inode = ERR_PTR(ret); 4121 else 4122 inode = new_simple_dir(dir->i_sb, &location, sub_root); 4123 } else { 4124 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 4125 } 4126 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 4127 4128 if (!IS_ERR(inode) && root != sub_root) { 4129 down_read(&root->fs_info->cleanup_work_sem); 4130 if (!(inode->i_sb->s_flags & MS_RDONLY)) 4131 ret = btrfs_orphan_cleanup(sub_root); 4132 up_read(&root->fs_info->cleanup_work_sem); 4133 if (ret) 4134 inode = ERR_PTR(ret); 4135 } 4136 4137 return inode; 4138 } 4139 4140 static int btrfs_dentry_delete(const struct dentry *dentry) 4141 { 4142 struct btrfs_root *root; 4143 4144 if (!dentry->d_inode && !IS_ROOT(dentry)) 4145 dentry = dentry->d_parent; 4146 4147 if (dentry->d_inode) { 4148 root = BTRFS_I(dentry->d_inode)->root; 4149 if (btrfs_root_refs(&root->root_item) == 0) 4150 return 1; 4151 } 4152 return 0; 4153 } 4154 
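/*
 * readdir (below) can pre-populate dentries with the btrfs_key stashed in
 * d_fsdata and DCACHE_NEED_LOOKUP set; btrfs_lookup_dentry() consumes the
 * key and btrfs_lookup() clears the flag, so the key only needs freeing
 * here if the dentry was never looked up.
 */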
4155 static void btrfs_dentry_release(struct dentry *dentry) 4156 { 4157 if (dentry->d_fsdata) 4158 kfree(dentry->d_fsdata); 4159 } 4160 4161 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4162 struct nameidata *nd) 4163 { 4164 struct dentry *ret; 4165 4166 ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4167 if (unlikely(d_need_lookup(dentry))) { 4168 spin_lock(&dentry->d_lock); 4169 dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 4170 spin_unlock(&dentry->d_lock); 4171 } 4172 return ret; 4173 } 4174 4175 unsigned char btrfs_filetype_table[] = { 4176 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 4177 }; 4178 4179 static int btrfs_real_readdir(struct file *filp, void *dirent, 4180 filldir_t filldir) 4181 { 4182 struct inode *inode = filp->f_dentry->d_inode; 4183 struct btrfs_root *root = BTRFS_I(inode)->root; 4184 struct btrfs_item *item; 4185 struct btrfs_dir_item *di; 4186 struct btrfs_key key; 4187 struct btrfs_key found_key; 4188 struct btrfs_path *path; 4189 struct list_head ins_list; 4190 struct list_head del_list; 4191 struct qstr q; 4192 int ret; 4193 struct extent_buffer *leaf; 4194 int slot; 4195 unsigned char d_type; 4196 int over = 0; 4197 u32 di_cur; 4198 u32 di_total; 4199 u32 di_len; 4200 int key_type = BTRFS_DIR_INDEX_KEY; 4201 char tmp_name[32]; 4202 char *name_ptr; 4203 int name_len; 4204 int is_curr = 0; /* filp->f_pos points to the current index? */ 4205 4206 /* FIXME, use a real flag for deciding about the key type */ 4207 if (root->fs_info->tree_root == root) 4208 key_type = BTRFS_DIR_ITEM_KEY; 4209 4210 /* special case for "." */ 4211 if (filp->f_pos == 0) { 4212 over = filldir(dirent, ".", 1, 4213 filp->f_pos, btrfs_ino(inode), DT_DIR); 4214 if (over) 4215 return 0; 4216 filp->f_pos = 1; 4217 } 4218 /* special case for .., just use the back ref */ 4219 if (filp->f_pos == 1) { 4220 u64 pino = parent_ino(filp->f_path.dentry); 4221 over = filldir(dirent, "..", 2, 4222 filp->f_pos, pino, DT_DIR); 4223 if (over) 4224 return 0; 4225 filp->f_pos = 2; 4226 } 4227 path = btrfs_alloc_path(); 4228 if (!path) 4229 return -ENOMEM; 4230 4231 path->reada = 1; 4232 4233 if (key_type == BTRFS_DIR_INDEX_KEY) { 4234 INIT_LIST_HEAD(&ins_list); 4235 INIT_LIST_HEAD(&del_list); 4236 btrfs_get_delayed_items(inode, &ins_list, &del_list); 4237 } 4238 4239 btrfs_set_key_type(&key, key_type); 4240 key.offset = filp->f_pos; 4241 key.objectid = btrfs_ino(inode); 4242 4243 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4244 if (ret < 0) 4245 goto err; 4246 4247 while (1) { 4248 leaf = path->nodes[0]; 4249 slot = path->slots[0]; 4250 if (slot >= btrfs_header_nritems(leaf)) { 4251 ret = btrfs_next_leaf(root, path); 4252 if (ret < 0) 4253 goto err; 4254 else if (ret > 0) 4255 break; 4256 continue; 4257 } 4258 4259 item = btrfs_item_nr(leaf, slot); 4260 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4261 4262 if (found_key.objectid != key.objectid) 4263 break; 4264 if (btrfs_key_type(&found_key) != key_type) 4265 break; 4266 if (found_key.offset < filp->f_pos) 4267 goto next; 4268 if (key_type == BTRFS_DIR_INDEX_KEY && 4269 btrfs_should_delete_dir_index(&del_list, 4270 found_key.offset)) 4271 goto next; 4272 4273 filp->f_pos = found_key.offset; 4274 is_curr = 1; 4275 4276 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 4277 di_cur = 0; 4278 di_total = btrfs_item_size(leaf, item); 4279 4280 while (di_cur < di_total) { 4281 struct btrfs_key location; 4282 struct dentry *tmp; 4283 4284 if (verify_dir_item(root, leaf, di)) 4285 
break; 4286 4287 name_len = btrfs_dir_name_len(leaf, di); 4288 if (name_len <= sizeof(tmp_name)) { 4289 name_ptr = tmp_name; 4290 } else { 4291 name_ptr = kmalloc(name_len, GFP_NOFS); 4292 if (!name_ptr) { 4293 ret = -ENOMEM; 4294 goto err; 4295 } 4296 } 4297 read_extent_buffer(leaf, name_ptr, 4298 (unsigned long)(di + 1), name_len); 4299 4300 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 4301 btrfs_dir_item_key_to_cpu(leaf, di, &location); 4302 4303 q.name = name_ptr; 4304 q.len = name_len; 4305 q.hash = full_name_hash(q.name, q.len); 4306 tmp = d_lookup(filp->f_dentry, &q); 4307 if (!tmp) { 4308 struct btrfs_key *newkey; 4309 4310 newkey = kzalloc(sizeof(struct btrfs_key), 4311 GFP_NOFS); 4312 if (!newkey) 4313 goto no_dentry; 4314 tmp = d_alloc(filp->f_dentry, &q); 4315 if (!tmp) { 4316 kfree(newkey); 4317 dput(tmp); 4318 goto no_dentry; 4319 } 4320 memcpy(newkey, &location, 4321 sizeof(struct btrfs_key)); 4322 tmp->d_fsdata = newkey; 4323 tmp->d_flags |= DCACHE_NEED_LOOKUP; 4324 d_rehash(tmp); 4325 dput(tmp); 4326 } else { 4327 dput(tmp); 4328 } 4329 no_dentry: 4330 /* is this a reference to our own snapshot? If so 4331 * skip it 4332 */ 4333 if (location.type == BTRFS_ROOT_ITEM_KEY && 4334 location.objectid == root->root_key.objectid) { 4335 over = 0; 4336 goto skip; 4337 } 4338 over = filldir(dirent, name_ptr, name_len, 4339 found_key.offset, location.objectid, 4340 d_type); 4341 4342 skip: 4343 if (name_ptr != tmp_name) 4344 kfree(name_ptr); 4345 4346 if (over) 4347 goto nopos; 4348 di_len = btrfs_dir_name_len(leaf, di) + 4349 btrfs_dir_data_len(leaf, di) + sizeof(*di); 4350 di_cur += di_len; 4351 di = (struct btrfs_dir_item *)((char *)di + di_len); 4352 } 4353 next: 4354 path->slots[0]++; 4355 } 4356 4357 if (key_type == BTRFS_DIR_INDEX_KEY) { 4358 if (is_curr) 4359 filp->f_pos++; 4360 ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, 4361 &ins_list); 4362 if (ret) 4363 goto nopos; 4364 } 4365 4366 /* Reached end of directory/root. Bump pos past the last item. */ 4367 if (key_type == BTRFS_DIR_INDEX_KEY) 4368 /* 4369 * 32-bit glibc will use getdents64, but then strtol - 4370 * so the last number we can serve is this. 4371 */ 4372 filp->f_pos = 0x7fffffff; 4373 else 4374 filp->f_pos++; 4375 nopos: 4376 ret = 0; 4377 err: 4378 if (key_type == BTRFS_DIR_INDEX_KEY) 4379 btrfs_put_delayed_items(&ins_list, &del_list); 4380 btrfs_free_path(path); 4381 return ret; 4382 } 4383 4384 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 4385 { 4386 struct btrfs_root *root = BTRFS_I(inode)->root; 4387 struct btrfs_trans_handle *trans; 4388 int ret = 0; 4389 bool nolock = false; 4390 4391 if (BTRFS_I(inode)->dummy_inode) 4392 return 0; 4393 4394 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode)) 4395 nolock = true; 4396 4397 if (wbc->sync_mode == WB_SYNC_ALL) { 4398 if (nolock) 4399 trans = btrfs_join_transaction_nolock(root); 4400 else 4401 trans = btrfs_join_transaction(root); 4402 if (IS_ERR(trans)) 4403 return PTR_ERR(trans); 4404 if (nolock) 4405 ret = btrfs_end_transaction_nolock(trans, root); 4406 else 4407 ret = btrfs_commit_transaction(trans, root); 4408 } 4409 return ret; 4410 } 4411 4412 /* 4413 * This is somewhat expensive, updating the tree every time the 4414 * inode changes. But, it is most likely to find the inode in cache. 4415 * FIXME, needs more benchmarking...there are no reasons other than performance 4416 * to keep or drop this code. 
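 * Roughly: try btrfs_update_inode() inside a joined transaction first, and
 * if that fails with -ENOSPC retry under btrfs_start_transaction(root, 1)
 * so a proper reservation is made before giving up.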
4417 */ 4418 int btrfs_dirty_inode(struct inode *inode) 4419 { 4420 struct btrfs_root *root = BTRFS_I(inode)->root; 4421 struct btrfs_trans_handle *trans; 4422 int ret; 4423 4424 if (BTRFS_I(inode)->dummy_inode) 4425 return 0; 4426 4427 trans = btrfs_join_transaction(root); 4428 if (IS_ERR(trans)) 4429 return PTR_ERR(trans); 4430 4431 ret = btrfs_update_inode(trans, root, inode); 4432 if (ret && ret == -ENOSPC) { 4433 /* whoops, lets try again with the full transaction */ 4434 btrfs_end_transaction(trans, root); 4435 trans = btrfs_start_transaction(root, 1); 4436 if (IS_ERR(trans)) 4437 return PTR_ERR(trans); 4438 4439 ret = btrfs_update_inode(trans, root, inode); 4440 } 4441 btrfs_end_transaction(trans, root); 4442 if (BTRFS_I(inode)->delayed_node) 4443 btrfs_balance_delayed_items(root); 4444 4445 return ret; 4446 } 4447 4448 /* 4449 * This is a copy of file_update_time. We need this so we can return error on 4450 * ENOSPC for updating the inode in the case of file write and mmap writes. 4451 */ 4452 int btrfs_update_time(struct file *file) 4453 { 4454 struct inode *inode = file->f_path.dentry->d_inode; 4455 struct timespec now; 4456 int ret; 4457 enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; 4458 4459 /* First try to exhaust all avenues to not sync */ 4460 if (IS_NOCMTIME(inode)) 4461 return 0; 4462 4463 now = current_fs_time(inode->i_sb); 4464 if (!timespec_equal(&inode->i_mtime, &now)) 4465 sync_it = S_MTIME; 4466 4467 if (!timespec_equal(&inode->i_ctime, &now)) 4468 sync_it |= S_CTIME; 4469 4470 if (IS_I_VERSION(inode)) 4471 sync_it |= S_VERSION; 4472 4473 if (!sync_it) 4474 return 0; 4475 4476 /* Finally allowed to write? Takes lock. */ 4477 if (mnt_want_write_file(file)) 4478 return 0; 4479 4480 /* Only change inode inside the lock region */ 4481 if (sync_it & S_VERSION) 4482 inode_inc_iversion(inode); 4483 if (sync_it & S_CTIME) 4484 inode->i_ctime = now; 4485 if (sync_it & S_MTIME) 4486 inode->i_mtime = now; 4487 ret = btrfs_dirty_inode(inode); 4488 if (!ret) 4489 mark_inode_dirty_sync(inode); 4490 mnt_drop_write(file->f_path.mnt); 4491 return ret; 4492 } 4493 4494 /* 4495 * find the highest existing sequence number in a directory 4496 * and then set the in-memory index_cnt variable to reflect 4497 * free sequence numbers 4498 */ 4499 static int btrfs_set_inode_index_count(struct inode *inode) 4500 { 4501 struct btrfs_root *root = BTRFS_I(inode)->root; 4502 struct btrfs_key key, found_key; 4503 struct btrfs_path *path; 4504 struct extent_buffer *leaf; 4505 int ret; 4506 4507 key.objectid = btrfs_ino(inode); 4508 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); 4509 key.offset = (u64)-1; 4510 4511 path = btrfs_alloc_path(); 4512 if (!path) 4513 return -ENOMEM; 4514 4515 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4516 if (ret < 0) 4517 goto out; 4518 /* FIXME: we should be able to handle this */ 4519 if (ret == 0) 4520 goto out; 4521 ret = 0; 4522 4523 /* 4524 * MAGIC NUMBER EXPLANATION: 4525 * since we search a directory based on f_pos we have to start at 2 4526 * since '.' and '..' 
have f_pos of 0 and 1 respectively, so everybody 4527 * else has to start at 2 4528 */ 4529 if (path->slots[0] == 0) { 4530 BTRFS_I(inode)->index_cnt = 2; 4531 goto out; 4532 } 4533 4534 path->slots[0]--; 4535 4536 leaf = path->nodes[0]; 4537 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4538 4539 if (found_key.objectid != btrfs_ino(inode) || 4540 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { 4541 BTRFS_I(inode)->index_cnt = 2; 4542 goto out; 4543 } 4544 4545 BTRFS_I(inode)->index_cnt = found_key.offset + 1; 4546 out: 4547 btrfs_free_path(path); 4548 return ret; 4549 } 4550 4551 /* 4552 * helper to find a free sequence number in a given directory. This current 4553 * code is very simple, later versions will do smarter things in the btree 4554 */ 4555 int btrfs_set_inode_index(struct inode *dir, u64 *index) 4556 { 4557 int ret = 0; 4558 4559 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 4560 ret = btrfs_inode_delayed_dir_index_count(dir); 4561 if (ret) { 4562 ret = btrfs_set_inode_index_count(dir); 4563 if (ret) 4564 return ret; 4565 } 4566 } 4567 4568 *index = BTRFS_I(dir)->index_cnt; 4569 BTRFS_I(dir)->index_cnt++; 4570 4571 return ret; 4572 } 4573 4574 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 4575 struct btrfs_root *root, 4576 struct inode *dir, 4577 const char *name, int name_len, 4578 u64 ref_objectid, u64 objectid, 4579 umode_t mode, u64 *index) 4580 { 4581 struct inode *inode; 4582 struct btrfs_inode_item *inode_item; 4583 struct btrfs_key *location; 4584 struct btrfs_path *path; 4585 struct btrfs_inode_ref *ref; 4586 struct btrfs_key key[2]; 4587 u32 sizes[2]; 4588 unsigned long ptr; 4589 int ret; 4590 int owner; 4591 4592 path = btrfs_alloc_path(); 4593 if (!path) 4594 return ERR_PTR(-ENOMEM); 4595 4596 inode = new_inode(root->fs_info->sb); 4597 if (!inode) { 4598 btrfs_free_path(path); 4599 return ERR_PTR(-ENOMEM); 4600 } 4601 4602 /* 4603 * we have to initialize this early, so we can reclaim the inode 4604 * number if we fail afterwards in this function. 
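 * (The fail: path at the end of this function also hands back the
 * directory index it took, by decrementing BTRFS_I(dir)->index_cnt.)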
4605 */ 4606 inode->i_ino = objectid; 4607 4608 if (dir) { 4609 trace_btrfs_inode_request(dir); 4610 4611 ret = btrfs_set_inode_index(dir, index); 4612 if (ret) { 4613 btrfs_free_path(path); 4614 iput(inode); 4615 return ERR_PTR(ret); 4616 } 4617 } 4618 /* 4619 * index_cnt is ignored for everything but a dir, 4620 * btrfs_get_inode_index_count has an explanation for the magic 4621 * number 4622 */ 4623 BTRFS_I(inode)->index_cnt = 2; 4624 BTRFS_I(inode)->root = root; 4625 BTRFS_I(inode)->generation = trans->transid; 4626 inode->i_generation = BTRFS_I(inode)->generation; 4627 btrfs_set_inode_space_info(root, inode); 4628 4629 if (S_ISDIR(mode)) 4630 owner = 0; 4631 else 4632 owner = 1; 4633 4634 key[0].objectid = objectid; 4635 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); 4636 key[0].offset = 0; 4637 4638 key[1].objectid = objectid; 4639 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); 4640 key[1].offset = ref_objectid; 4641 4642 sizes[0] = sizeof(struct btrfs_inode_item); 4643 sizes[1] = name_len + sizeof(*ref); 4644 4645 path->leave_spinning = 1; 4646 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2); 4647 if (ret != 0) 4648 goto fail; 4649 4650 inode_init_owner(inode, dir, mode); 4651 inode_set_bytes(inode, 0); 4652 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4653 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4654 struct btrfs_inode_item); 4655 fill_inode_item(trans, path->nodes[0], inode_item, inode); 4656 4657 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 4658 struct btrfs_inode_ref); 4659 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 4660 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 4661 ptr = (unsigned long)(ref + 1); 4662 write_extent_buffer(path->nodes[0], name, ptr, name_len); 4663 4664 btrfs_mark_buffer_dirty(path->nodes[0]); 4665 btrfs_free_path(path); 4666 4667 location = &BTRFS_I(inode)->location; 4668 location->objectid = objectid; 4669 location->offset = 0; 4670 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); 4671 4672 btrfs_inherit_iflags(inode, dir); 4673 4674 if (S_ISREG(mode)) { 4675 if (btrfs_test_opt(root, NODATASUM)) 4676 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 4677 if (btrfs_test_opt(root, NODATACOW) || 4678 (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW)) 4679 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; 4680 } 4681 4682 insert_inode_hash(inode); 4683 inode_tree_add(inode); 4684 4685 trace_btrfs_inode_new(inode); 4686 btrfs_set_inode_last_trans(trans, inode); 4687 4688 return inode; 4689 fail: 4690 if (dir) 4691 BTRFS_I(dir)->index_cnt--; 4692 btrfs_free_path(path); 4693 iput(inode); 4694 return ERR_PTR(ret); 4695 } 4696 4697 static inline u8 btrfs_inode_type(struct inode *inode) 4698 { 4699 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; 4700 } 4701 4702 /* 4703 * utility function to add 'inode' into 'parent_inode' with 4704 * a give name and a given sequence number. 4705 * if 'add_backref' is true, also insert a backref from the 4706 * inode to the parent directory. 
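 * Subvolume roots (ino == BTRFS_FIRST_FREE_OBJECTID) are linked with a
 * root ref in the tree of tree roots rather than an inode ref, and the
 * parent directory's i_size grows by name_len * 2 because the name lives
 * in both the dir item and the dir index item.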
4707 */ 4708 int btrfs_add_link(struct btrfs_trans_handle *trans, 4709 struct inode *parent_inode, struct inode *inode, 4710 const char *name, int name_len, int add_backref, u64 index) 4711 { 4712 int ret = 0; 4713 struct btrfs_key key; 4714 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 4715 u64 ino = btrfs_ino(inode); 4716 u64 parent_ino = btrfs_ino(parent_inode); 4717 4718 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4719 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 4720 } else { 4721 key.objectid = ino; 4722 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 4723 key.offset = 0; 4724 } 4725 4726 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4727 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 4728 key.objectid, root->root_key.objectid, 4729 parent_ino, index, name, name_len); 4730 } else if (add_backref) { 4731 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 4732 parent_ino, index); 4733 } 4734 4735 /* Nothing to clean up yet */ 4736 if (ret) 4737 return ret; 4738 4739 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4740 parent_inode, &key, 4741 btrfs_inode_type(inode), index); 4742 if (ret == -EEXIST) 4743 goto fail_dir_item; 4744 else if (ret) { 4745 btrfs_abort_transaction(trans, root, ret); 4746 return ret; 4747 } 4748 4749 btrfs_i_size_write(parent_inode, parent_inode->i_size + 4750 name_len * 2); 4751 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 4752 ret = btrfs_update_inode(trans, root, parent_inode); 4753 if (ret) 4754 btrfs_abort_transaction(trans, root, ret); 4755 return ret; 4756 4757 fail_dir_item: 4758 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4759 u64 local_index; 4760 int err; 4761 err = btrfs_del_root_ref(trans, root->fs_info->tree_root, 4762 key.objectid, root->root_key.objectid, 4763 parent_ino, &local_index, name, name_len); 4764 4765 } else if (add_backref) { 4766 u64 local_index; 4767 int err; 4768 4769 err = btrfs_del_inode_ref(trans, root, name, name_len, 4770 ino, parent_ino, &local_index); 4771 } 4772 return ret; 4773 } 4774 4775 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 4776 struct inode *dir, struct dentry *dentry, 4777 struct inode *inode, int backref, u64 index) 4778 { 4779 int err = btrfs_add_link(trans, dir, inode, 4780 dentry->d_name.name, dentry->d_name.len, 4781 backref, index); 4782 if (err > 0) 4783 err = -EEXIST; 4784 return err; 4785 } 4786 4787 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 4788 umode_t mode, dev_t rdev) 4789 { 4790 struct btrfs_trans_handle *trans; 4791 struct btrfs_root *root = BTRFS_I(dir)->root; 4792 struct inode *inode = NULL; 4793 int err; 4794 int drop_inode = 0; 4795 u64 objectid; 4796 unsigned long nr = 0; 4797 u64 index = 0; 4798 4799 if (!new_valid_dev(rdev)) 4800 return -EINVAL; 4801 4802 /* 4803 * 2 for inode item and ref 4804 * 2 for dir items 4805 * 1 for xattr if selinux is on 4806 */ 4807 trans = btrfs_start_transaction(root, 5); 4808 if (IS_ERR(trans)) 4809 return PTR_ERR(trans); 4810 4811 err = btrfs_find_free_ino(root, &objectid); 4812 if (err) 4813 goto out_unlock; 4814 4815 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4816 dentry->d_name.len, btrfs_ino(dir), objectid, 4817 mode, &index); 4818 if (IS_ERR(inode)) { 4819 err = PTR_ERR(inode); 4820 goto out_unlock; 4821 } 4822 4823 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4824 if (err) { 4825 drop_inode = 1; 4826 goto out_unlock; 4827 } 4828 4829 /* 4830 * If the active LSM wants to access the 
inode during 4831 * d_instantiate it needs these. Smack checks to see 4832 * if the filesystem supports xattrs by looking at the 4833 * ops vector. 4834 */ 4835 4836 inode->i_op = &btrfs_special_inode_operations; 4837 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4838 if (err) 4839 drop_inode = 1; 4840 else { 4841 init_special_inode(inode, inode->i_mode, rdev); 4842 btrfs_update_inode(trans, root, inode); 4843 d_instantiate(dentry, inode); 4844 } 4845 out_unlock: 4846 nr = trans->blocks_used; 4847 btrfs_end_transaction(trans, root); 4848 btrfs_btree_balance_dirty(root, nr); 4849 if (drop_inode) { 4850 inode_dec_link_count(inode); 4851 iput(inode); 4852 } 4853 return err; 4854 } 4855 4856 static int btrfs_create(struct inode *dir, struct dentry *dentry, 4857 umode_t mode, struct nameidata *nd) 4858 { 4859 struct btrfs_trans_handle *trans; 4860 struct btrfs_root *root = BTRFS_I(dir)->root; 4861 struct inode *inode = NULL; 4862 int drop_inode = 0; 4863 int err; 4864 unsigned long nr = 0; 4865 u64 objectid; 4866 u64 index = 0; 4867 4868 /* 4869 * 2 for inode item and ref 4870 * 2 for dir items 4871 * 1 for xattr if selinux is on 4872 */ 4873 trans = btrfs_start_transaction(root, 5); 4874 if (IS_ERR(trans)) 4875 return PTR_ERR(trans); 4876 4877 err = btrfs_find_free_ino(root, &objectid); 4878 if (err) 4879 goto out_unlock; 4880 4881 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4882 dentry->d_name.len, btrfs_ino(dir), objectid, 4883 mode, &index); 4884 if (IS_ERR(inode)) { 4885 err = PTR_ERR(inode); 4886 goto out_unlock; 4887 } 4888 4889 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4890 if (err) { 4891 drop_inode = 1; 4892 goto out_unlock; 4893 } 4894 4895 /* 4896 * If the active LSM wants to access the inode during 4897 * d_instantiate it needs these. Smack checks to see 4898 * if the filesystem supports xattrs by looking at the 4899 * ops vector. 
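 * That is why i_op and i_fop are filled in before btrfs_add_nondir() and
 * d_instantiate() below.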
4900 */ 4901 inode->i_fop = &btrfs_file_operations; 4902 inode->i_op = &btrfs_file_inode_operations; 4903 4904 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4905 if (err) 4906 drop_inode = 1; 4907 else { 4908 inode->i_mapping->a_ops = &btrfs_aops; 4909 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 4910 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4911 d_instantiate(dentry, inode); 4912 } 4913 out_unlock: 4914 nr = trans->blocks_used; 4915 btrfs_end_transaction(trans, root); 4916 if (drop_inode) { 4917 inode_dec_link_count(inode); 4918 iput(inode); 4919 } 4920 btrfs_btree_balance_dirty(root, nr); 4921 return err; 4922 } 4923 4924 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 4925 struct dentry *dentry) 4926 { 4927 struct btrfs_trans_handle *trans; 4928 struct btrfs_root *root = BTRFS_I(dir)->root; 4929 struct inode *inode = old_dentry->d_inode; 4930 u64 index; 4931 unsigned long nr = 0; 4932 int err; 4933 int drop_inode = 0; 4934 4935 /* do not allow sys_link's with other subvols of the same device */ 4936 if (root->objectid != BTRFS_I(inode)->root->objectid) 4937 return -EXDEV; 4938 4939 if (inode->i_nlink == ~0U) 4940 return -EMLINK; 4941 4942 err = btrfs_set_inode_index(dir, &index); 4943 if (err) 4944 goto fail; 4945 4946 /* 4947 * 2 items for inode and inode ref 4948 * 2 items for dir items 4949 * 1 item for parent inode 4950 */ 4951 trans = btrfs_start_transaction(root, 5); 4952 if (IS_ERR(trans)) { 4953 err = PTR_ERR(trans); 4954 goto fail; 4955 } 4956 4957 btrfs_inc_nlink(inode); 4958 inode->i_ctime = CURRENT_TIME; 4959 ihold(inode); 4960 4961 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4962 4963 if (err) { 4964 drop_inode = 1; 4965 } else { 4966 struct dentry *parent = dentry->d_parent; 4967 err = btrfs_update_inode(trans, root, inode); 4968 if (err) 4969 goto fail; 4970 d_instantiate(dentry, inode); 4971 btrfs_log_new_name(trans, inode, NULL, parent); 4972 } 4973 4974 nr = trans->blocks_used; 4975 btrfs_end_transaction(trans, root); 4976 fail: 4977 if (drop_inode) { 4978 inode_dec_link_count(inode); 4979 iput(inode); 4980 } 4981 btrfs_btree_balance_dirty(root, nr); 4982 return err; 4983 } 4984 4985 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 4986 { 4987 struct inode *inode = NULL; 4988 struct btrfs_trans_handle *trans; 4989 struct btrfs_root *root = BTRFS_I(dir)->root; 4990 int err = 0; 4991 int drop_on_err = 0; 4992 u64 objectid = 0; 4993 u64 index = 0; 4994 unsigned long nr = 1; 4995 4996 /* 4997 * 2 items for inode and ref 4998 * 2 items for dir items 4999 * 1 for xattr if selinux is on 5000 */ 5001 trans = btrfs_start_transaction(root, 5); 5002 if (IS_ERR(trans)) 5003 return PTR_ERR(trans); 5004 5005 err = btrfs_find_free_ino(root, &objectid); 5006 if (err) 5007 goto out_fail; 5008 5009 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 5010 dentry->d_name.len, btrfs_ino(dir), objectid, 5011 S_IFDIR | mode, &index); 5012 if (IS_ERR(inode)) { 5013 err = PTR_ERR(inode); 5014 goto out_fail; 5015 } 5016 5017 drop_on_err = 1; 5018 5019 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 5020 if (err) 5021 goto out_fail; 5022 5023 inode->i_op = &btrfs_dir_inode_operations; 5024 inode->i_fop = &btrfs_dir_file_operations; 5025 5026 btrfs_i_size_write(inode, 0); 5027 err = btrfs_update_inode(trans, root, inode); 5028 if (err) 5029 goto out_fail; 5030 5031 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, 5032 dentry->d_name.len, 0, index); 
5033 if (err) 5034 goto out_fail; 5035 5036 d_instantiate(dentry, inode); 5037 drop_on_err = 0; 5038 5039 out_fail: 5040 nr = trans->blocks_used; 5041 btrfs_end_transaction(trans, root); 5042 if (drop_on_err) 5043 iput(inode); 5044 btrfs_btree_balance_dirty(root, nr); 5045 return err; 5046 } 5047 5048 /* helper for btfs_get_extent. Given an existing extent in the tree, 5049 * and an extent that you want to insert, deal with overlap and insert 5050 * the new extent into the tree. 5051 */ 5052 static int merge_extent_mapping(struct extent_map_tree *em_tree, 5053 struct extent_map *existing, 5054 struct extent_map *em, 5055 u64 map_start, u64 map_len) 5056 { 5057 u64 start_diff; 5058 5059 BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); 5060 start_diff = map_start - em->start; 5061 em->start = map_start; 5062 em->len = map_len; 5063 if (em->block_start < EXTENT_MAP_LAST_BYTE && 5064 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 5065 em->block_start += start_diff; 5066 em->block_len -= start_diff; 5067 } 5068 return add_extent_mapping(em_tree, em); 5069 } 5070 5071 static noinline int uncompress_inline(struct btrfs_path *path, 5072 struct inode *inode, struct page *page, 5073 size_t pg_offset, u64 extent_offset, 5074 struct btrfs_file_extent_item *item) 5075 { 5076 int ret; 5077 struct extent_buffer *leaf = path->nodes[0]; 5078 char *tmp; 5079 size_t max_size; 5080 unsigned long inline_size; 5081 unsigned long ptr; 5082 int compress_type; 5083 5084 WARN_ON(pg_offset != 0); 5085 compress_type = btrfs_file_extent_compression(leaf, item); 5086 max_size = btrfs_file_extent_ram_bytes(leaf, item); 5087 inline_size = btrfs_file_extent_inline_item_len(leaf, 5088 btrfs_item_nr(leaf, path->slots[0])); 5089 tmp = kmalloc(inline_size, GFP_NOFS); 5090 if (!tmp) 5091 return -ENOMEM; 5092 ptr = btrfs_file_extent_inline_start(item); 5093 5094 read_extent_buffer(leaf, tmp, ptr, inline_size); 5095 5096 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 5097 ret = btrfs_decompress(compress_type, tmp, page, 5098 extent_offset, inline_size, max_size); 5099 if (ret) { 5100 char *kaddr = kmap_atomic(page); 5101 unsigned long copy_size = min_t(u64, 5102 PAGE_CACHE_SIZE - pg_offset, 5103 max_size - extent_offset); 5104 memset(kaddr + pg_offset, 0, copy_size); 5105 kunmap_atomic(kaddr); 5106 } 5107 kfree(tmp); 5108 return 0; 5109 } 5110 5111 /* 5112 * a bit scary, this does extent mapping from logical file offset to the disk. 5113 * the ugly parts come from merging extents from the disk with the in-ram 5114 * representation. This gets more complex because of the data=ordered code, 5115 * where the in-ram extents might be locked pending data=ordered completion. 5116 * 5117 * This also copies inline extents directly into the page. 
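 * In outline: check the extent_map cache first; otherwise walk the file
 * extent items, build an extent_map for a REG/PREALLOC/INLINE extent (or a
 * hole), and insert it into the em tree, merging with whatever raced in if
 * add_extent_mapping() returns -EEXIST.
 *
 * A caller that only wants the mapping can pass a NULL page, as
 * btrfs_get_blocks_direct() further down does:
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	... use em->block_start, em->len ...
 *	free_extent_map(em);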
5118 */ 5119 5120 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 5121 size_t pg_offset, u64 start, u64 len, 5122 int create) 5123 { 5124 int ret; 5125 int err = 0; 5126 u64 bytenr; 5127 u64 extent_start = 0; 5128 u64 extent_end = 0; 5129 u64 objectid = btrfs_ino(inode); 5130 u32 found_type; 5131 struct btrfs_path *path = NULL; 5132 struct btrfs_root *root = BTRFS_I(inode)->root; 5133 struct btrfs_file_extent_item *item; 5134 struct extent_buffer *leaf; 5135 struct btrfs_key found_key; 5136 struct extent_map *em = NULL; 5137 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5138 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5139 struct btrfs_trans_handle *trans = NULL; 5140 int compress_type; 5141 5142 again: 5143 read_lock(&em_tree->lock); 5144 em = lookup_extent_mapping(em_tree, start, len); 5145 if (em) 5146 em->bdev = root->fs_info->fs_devices->latest_bdev; 5147 read_unlock(&em_tree->lock); 5148 5149 if (em) { 5150 if (em->start > start || em->start + em->len <= start) 5151 free_extent_map(em); 5152 else if (em->block_start == EXTENT_MAP_INLINE && page) 5153 free_extent_map(em); 5154 else 5155 goto out; 5156 } 5157 em = alloc_extent_map(); 5158 if (!em) { 5159 err = -ENOMEM; 5160 goto out; 5161 } 5162 em->bdev = root->fs_info->fs_devices->latest_bdev; 5163 em->start = EXTENT_MAP_HOLE; 5164 em->orig_start = EXTENT_MAP_HOLE; 5165 em->len = (u64)-1; 5166 em->block_len = (u64)-1; 5167 5168 if (!path) { 5169 path = btrfs_alloc_path(); 5170 if (!path) { 5171 err = -ENOMEM; 5172 goto out; 5173 } 5174 /* 5175 * Chances are we'll be called again, so go ahead and do 5176 * readahead 5177 */ 5178 path->reada = 1; 5179 } 5180 5181 ret = btrfs_lookup_file_extent(trans, root, path, 5182 objectid, start, trans != NULL); 5183 if (ret < 0) { 5184 err = ret; 5185 goto out; 5186 } 5187 5188 if (ret != 0) { 5189 if (path->slots[0] == 0) 5190 goto not_found; 5191 path->slots[0]--; 5192 } 5193 5194 leaf = path->nodes[0]; 5195 item = btrfs_item_ptr(leaf, path->slots[0], 5196 struct btrfs_file_extent_item); 5197 /* are we inside the extent that was found? 
*/ 5198 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5199 found_type = btrfs_key_type(&found_key); 5200 if (found_key.objectid != objectid || 5201 found_type != BTRFS_EXTENT_DATA_KEY) { 5202 goto not_found; 5203 } 5204 5205 found_type = btrfs_file_extent_type(leaf, item); 5206 extent_start = found_key.offset; 5207 compress_type = btrfs_file_extent_compression(leaf, item); 5208 if (found_type == BTRFS_FILE_EXTENT_REG || 5209 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5210 extent_end = extent_start + 5211 btrfs_file_extent_num_bytes(leaf, item); 5212 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5213 size_t size; 5214 size = btrfs_file_extent_inline_len(leaf, item); 5215 extent_end = (extent_start + size + root->sectorsize - 1) & 5216 ~((u64)root->sectorsize - 1); 5217 } 5218 5219 if (start >= extent_end) { 5220 path->slots[0]++; 5221 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 5222 ret = btrfs_next_leaf(root, path); 5223 if (ret < 0) { 5224 err = ret; 5225 goto out; 5226 } 5227 if (ret > 0) 5228 goto not_found; 5229 leaf = path->nodes[0]; 5230 } 5231 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5232 if (found_key.objectid != objectid || 5233 found_key.type != BTRFS_EXTENT_DATA_KEY) 5234 goto not_found; 5235 if (start + len <= found_key.offset) 5236 goto not_found; 5237 em->start = start; 5238 em->len = found_key.offset - start; 5239 goto not_found_em; 5240 } 5241 5242 if (found_type == BTRFS_FILE_EXTENT_REG || 5243 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5244 em->start = extent_start; 5245 em->len = extent_end - extent_start; 5246 em->orig_start = extent_start - 5247 btrfs_file_extent_offset(leaf, item); 5248 bytenr = btrfs_file_extent_disk_bytenr(leaf, item); 5249 if (bytenr == 0) { 5250 em->block_start = EXTENT_MAP_HOLE; 5251 goto insert; 5252 } 5253 if (compress_type != BTRFS_COMPRESS_NONE) { 5254 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5255 em->compress_type = compress_type; 5256 em->block_start = bytenr; 5257 em->block_len = btrfs_file_extent_disk_num_bytes(leaf, 5258 item); 5259 } else { 5260 bytenr += btrfs_file_extent_offset(leaf, item); 5261 em->block_start = bytenr; 5262 em->block_len = em->len; 5263 if (found_type == BTRFS_FILE_EXTENT_PREALLOC) 5264 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 5265 } 5266 goto insert; 5267 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5268 unsigned long ptr; 5269 char *map; 5270 size_t size; 5271 size_t extent_offset; 5272 size_t copy_size; 5273 5274 em->block_start = EXTENT_MAP_INLINE; 5275 if (!page || create) { 5276 em->start = extent_start; 5277 em->len = extent_end - extent_start; 5278 goto out; 5279 } 5280 5281 size = btrfs_file_extent_inline_len(leaf, item); 5282 extent_offset = page_offset(page) + pg_offset - extent_start; 5283 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 5284 size - extent_offset); 5285 em->start = extent_start + extent_offset; 5286 em->len = (copy_size + root->sectorsize - 1) & 5287 ~((u64)root->sectorsize - 1); 5288 em->orig_start = EXTENT_MAP_INLINE; 5289 if (compress_type) { 5290 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5291 em->compress_type = compress_type; 5292 } 5293 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 5294 if (create == 0 && !PageUptodate(page)) { 5295 if (btrfs_file_extent_compression(leaf, item) != 5296 BTRFS_COMPRESS_NONE) { 5297 ret = uncompress_inline(path, inode, page, 5298 pg_offset, 5299 extent_offset, item); 5300 BUG_ON(ret); /* -ENOMEM */ 5301 } else { 5302 map = kmap(page); 5303 read_extent_buffer(leaf, map + 
pg_offset, ptr, 5304 copy_size); 5305 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 5306 memset(map + pg_offset + copy_size, 0, 5307 PAGE_CACHE_SIZE - pg_offset - 5308 copy_size); 5309 } 5310 kunmap(page); 5311 } 5312 flush_dcache_page(page); 5313 } else if (create && PageUptodate(page)) { 5314 BUG(); 5315 if (!trans) { 5316 kunmap(page); 5317 free_extent_map(em); 5318 em = NULL; 5319 5320 btrfs_release_path(path); 5321 trans = btrfs_join_transaction(root); 5322 5323 if (IS_ERR(trans)) 5324 return ERR_CAST(trans); 5325 goto again; 5326 } 5327 map = kmap(page); 5328 write_extent_buffer(leaf, map + pg_offset, ptr, 5329 copy_size); 5330 kunmap(page); 5331 btrfs_mark_buffer_dirty(leaf); 5332 } 5333 set_extent_uptodate(io_tree, em->start, 5334 extent_map_end(em) - 1, NULL, GFP_NOFS); 5335 goto insert; 5336 } else { 5337 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); 5338 WARN_ON(1); 5339 } 5340 not_found: 5341 em->start = start; 5342 em->len = len; 5343 not_found_em: 5344 em->block_start = EXTENT_MAP_HOLE; 5345 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 5346 insert: 5347 btrfs_release_path(path); 5348 if (em->start > start || extent_map_end(em) <= start) { 5349 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " 5350 "[%llu %llu]\n", (unsigned long long)em->start, 5351 (unsigned long long)em->len, 5352 (unsigned long long)start, 5353 (unsigned long long)len); 5354 err = -EIO; 5355 goto out; 5356 } 5357 5358 err = 0; 5359 write_lock(&em_tree->lock); 5360 ret = add_extent_mapping(em_tree, em); 5361 /* it is possible that someone inserted the extent into the tree 5362 * while we had the lock dropped. It is also possible that 5363 * an overlapping map exists in the tree 5364 */ 5365 if (ret == -EEXIST) { 5366 struct extent_map *existing; 5367 5368 ret = 0; 5369 5370 existing = lookup_extent_mapping(em_tree, start, len); 5371 if (existing && (existing->start > start || 5372 existing->start + existing->len <= start)) { 5373 free_extent_map(existing); 5374 existing = NULL; 5375 } 5376 if (!existing) { 5377 existing = lookup_extent_mapping(em_tree, em->start, 5378 em->len); 5379 if (existing) { 5380 err = merge_extent_mapping(em_tree, existing, 5381 em, start, 5382 root->sectorsize); 5383 free_extent_map(existing); 5384 if (err) { 5385 free_extent_map(em); 5386 em = NULL; 5387 } 5388 } else { 5389 err = -EIO; 5390 free_extent_map(em); 5391 em = NULL; 5392 } 5393 } else { 5394 free_extent_map(em); 5395 em = existing; 5396 err = 0; 5397 } 5398 } 5399 write_unlock(&em_tree->lock); 5400 out: 5401 5402 trace_btrfs_get_extent(root, em); 5403 5404 if (path) 5405 btrfs_free_path(path); 5406 if (trans) { 5407 ret = btrfs_end_transaction(trans, root); 5408 if (!err) 5409 err = ret; 5410 } 5411 if (err) { 5412 free_extent_map(em); 5413 return ERR_PTR(err); 5414 } 5415 BUG_ON(!em); /* Error is always set */ 5416 return em; 5417 } 5418 5419 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 5420 size_t pg_offset, u64 start, u64 len, 5421 int create) 5422 { 5423 struct extent_map *em; 5424 struct extent_map *hole_em = NULL; 5425 u64 range_start = start; 5426 u64 end; 5427 u64 found; 5428 u64 found_end; 5429 int err = 0; 5430 5431 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 5432 if (IS_ERR(em)) 5433 return em; 5434 if (em) { 5435 /* 5436 * if our em maps to a hole, there might 5437 * actually be delalloc bytes behind it 5438 */ 5439 if (em->block_start != EXTENT_MAP_HOLE) 5440 return em; 5441 else 5442 hole_em = em; 5443 } 5444 5445 /* 
check to see if we've wrapped (len == -1 or similar) */ 5446 end = start + len; 5447 if (end < start) 5448 end = (u64)-1; 5449 else 5450 end -= 1; 5451 5452 em = NULL; 5453 5454 /* ok, we didn't find anything, lets look for delalloc */ 5455 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, 5456 end, len, EXTENT_DELALLOC, 1); 5457 found_end = range_start + found; 5458 if (found_end < range_start) 5459 found_end = (u64)-1; 5460 5461 /* 5462 * we didn't find anything useful, return 5463 * the original results from get_extent() 5464 */ 5465 if (range_start > end || found_end <= start) { 5466 em = hole_em; 5467 hole_em = NULL; 5468 goto out; 5469 } 5470 5471 /* adjust the range_start to make sure it doesn't 5472 * go backwards from the start they passed in 5473 */ 5474 range_start = max(start,range_start); 5475 found = found_end - range_start; 5476 5477 if (found > 0) { 5478 u64 hole_start = start; 5479 u64 hole_len = len; 5480 5481 em = alloc_extent_map(); 5482 if (!em) { 5483 err = -ENOMEM; 5484 goto out; 5485 } 5486 /* 5487 * when btrfs_get_extent can't find anything it 5488 * returns one huge hole 5489 * 5490 * make sure what it found really fits our range, and 5491 * adjust to make sure it is based on the start from 5492 * the caller 5493 */ 5494 if (hole_em) { 5495 u64 calc_end = extent_map_end(hole_em); 5496 5497 if (calc_end <= start || (hole_em->start > end)) { 5498 free_extent_map(hole_em); 5499 hole_em = NULL; 5500 } else { 5501 hole_start = max(hole_em->start, start); 5502 hole_len = calc_end - hole_start; 5503 } 5504 } 5505 em->bdev = NULL; 5506 if (hole_em && range_start > hole_start) { 5507 /* our hole starts before our delalloc, so we 5508 * have to return just the parts of the hole 5509 * that go until the delalloc starts 5510 */ 5511 em->len = min(hole_len, 5512 range_start - hole_start); 5513 em->start = hole_start; 5514 em->orig_start = hole_start; 5515 /* 5516 * don't adjust block start at all, 5517 * it is fixed at EXTENT_MAP_HOLE 5518 */ 5519 em->block_start = hole_em->block_start; 5520 em->block_len = hole_len; 5521 } else { 5522 em->start = range_start; 5523 em->len = found; 5524 em->orig_start = range_start; 5525 em->block_start = EXTENT_MAP_DELALLOC; 5526 em->block_len = found; 5527 } 5528 } else if (hole_em) { 5529 return hole_em; 5530 } 5531 out: 5532 5533 free_extent_map(hole_em); 5534 if (err) { 5535 free_extent_map(em); 5536 return ERR_PTR(err); 5537 } 5538 return em; 5539 } 5540 5541 static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5542 struct extent_map *em, 5543 u64 start, u64 len) 5544 { 5545 struct btrfs_root *root = BTRFS_I(inode)->root; 5546 struct btrfs_trans_handle *trans; 5547 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5548 struct btrfs_key ins; 5549 u64 alloc_hint; 5550 int ret; 5551 bool insert = false; 5552 5553 /* 5554 * Ok if the extent map we looked up is a hole and is for the exact 5555 * range we want, there is no reason to allocate a new one, however if 5556 * it is not right then we need to free this one and drop the cache for 5557 * our range. 
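 * Roughly, the rest of this helper: reserve a new data extent, build a
 * pinned extent_map for it (dropping cached overlaps until it inserts
 * cleanly), then queue a DIO ordered extent; the reserved extent is freed
 * again if that last step fails.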
5558 */ 5559 if (em->block_start != EXTENT_MAP_HOLE || em->start != start || 5560 em->len != len) { 5561 free_extent_map(em); 5562 em = NULL; 5563 insert = true; 5564 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5565 } 5566 5567 trans = btrfs_join_transaction(root); 5568 if (IS_ERR(trans)) 5569 return ERR_CAST(trans); 5570 5571 if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) 5572 btrfs_add_inode_defrag(trans, inode); 5573 5574 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 5575 5576 alloc_hint = get_extent_allocation_hint(inode, start, len); 5577 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, 5578 alloc_hint, &ins, 1); 5579 if (ret) { 5580 em = ERR_PTR(ret); 5581 goto out; 5582 } 5583 5584 if (!em) { 5585 em = alloc_extent_map(); 5586 if (!em) { 5587 em = ERR_PTR(-ENOMEM); 5588 goto out; 5589 } 5590 } 5591 5592 em->start = start; 5593 em->orig_start = em->start; 5594 em->len = ins.offset; 5595 5596 em->block_start = ins.objectid; 5597 em->block_len = ins.offset; 5598 em->bdev = root->fs_info->fs_devices->latest_bdev; 5599 5600 /* 5601 * We need to do this because if we're using the original em we searched 5602 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. 5603 */ 5604 em->flags = 0; 5605 set_bit(EXTENT_FLAG_PINNED, &em->flags); 5606 5607 while (insert) { 5608 write_lock(&em_tree->lock); 5609 ret = add_extent_mapping(em_tree, em); 5610 write_unlock(&em_tree->lock); 5611 if (ret != -EEXIST) 5612 break; 5613 btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0); 5614 } 5615 5616 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 5617 ins.offset, ins.offset, 0); 5618 if (ret) { 5619 btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 5620 em = ERR_PTR(ret); 5621 } 5622 out: 5623 btrfs_end_transaction(trans, root); 5624 return em; 5625 } 5626 5627 /* 5628 * returns 1 when the nocow is safe, < 1 on error, 0 if the 5629 * block must be cow'd 5630 */ 5631 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, 5632 struct inode *inode, u64 offset, u64 len) 5633 { 5634 struct btrfs_path *path; 5635 int ret; 5636 struct extent_buffer *leaf; 5637 struct btrfs_root *root = BTRFS_I(inode)->root; 5638 struct btrfs_file_extent_item *fi; 5639 struct btrfs_key key; 5640 u64 disk_bytenr; 5641 u64 backref_offset; 5642 u64 extent_end; 5643 u64 num_bytes; 5644 int slot; 5645 int found_type; 5646 5647 path = btrfs_alloc_path(); 5648 if (!path) 5649 return -ENOMEM; 5650 5651 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), 5652 offset, 0); 5653 if (ret < 0) 5654 goto out; 5655 5656 slot = path->slots[0]; 5657 if (ret == 1) { 5658 if (slot == 0) { 5659 /* can't find the item, must cow */ 5660 ret = 0; 5661 goto out; 5662 } 5663 slot--; 5664 } 5665 ret = 0; 5666 leaf = path->nodes[0]; 5667 btrfs_item_key_to_cpu(leaf, &key, slot); 5668 if (key.objectid != btrfs_ino(inode) || 5669 key.type != BTRFS_EXTENT_DATA_KEY) { 5670 /* not our file or wrong item type, must cow */ 5671 goto out; 5672 } 5673 5674 if (key.offset > offset) { 5675 /* Wrong offset, must cow */ 5676 goto out; 5677 } 5678 5679 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5680 found_type = btrfs_file_extent_type(leaf, fi); 5681 if (found_type != BTRFS_FILE_EXTENT_REG && 5682 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 5683 /* not a regular extent, must cow */ 5684 goto out; 5685 } 5686 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 5687 backref_offset = btrfs_file_extent_offset(leaf, 
fi); 5688 5689 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 5690 if (extent_end < offset + len) { 5691 /* extent doesn't include our full range, must cow */ 5692 goto out; 5693 } 5694 5695 if (btrfs_extent_readonly(root, disk_bytenr)) 5696 goto out; 5697 5698 /* 5699 * look for other files referencing this extent, if we 5700 * find any we must cow 5701 */ 5702 if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), 5703 key.offset - backref_offset, disk_bytenr)) 5704 goto out; 5705 5706 /* 5707 * adjust disk_bytenr and num_bytes to cover just the bytes 5708 * in this extent we are about to write. If there 5709 * are any csums in that range we have to cow in order 5710 * to keep the csums correct 5711 */ 5712 disk_bytenr += backref_offset; 5713 disk_bytenr += offset - key.offset; 5714 num_bytes = min(offset + len, extent_end) - offset; 5715 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 5716 goto out; 5717 /* 5718 * all of the above have passed, it is safe to overwrite this extent 5719 * without cow 5720 */ 5721 ret = 1; 5722 out: 5723 btrfs_free_path(path); 5724 return ret; 5725 } 5726 5727 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 5728 struct buffer_head *bh_result, int create) 5729 { 5730 struct extent_map *em; 5731 struct btrfs_root *root = BTRFS_I(inode)->root; 5732 u64 start = iblock << inode->i_blkbits; 5733 u64 len = bh_result->b_size; 5734 struct btrfs_trans_handle *trans; 5735 5736 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 5737 if (IS_ERR(em)) 5738 return PTR_ERR(em); 5739 5740 /* 5741 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 5742 * io. INLINE is special, and we could probably kludge it in here, but 5743 * it's still buffered so for safety lets just fall back to the generic 5744 * buffered path. 5745 * 5746 * For COMPRESSED we _have_ to read the entire extent in so we can 5747 * decompress it, so there will be buffering required no matter what we 5748 * do, so go ahead and fallback to buffered. 5749 * 5750 * We return -ENOTBLK because thats what makes DIO go ahead and go back 5751 * to buffered IO. Don't blame me, this is the price we pay for using 5752 * the generic code. 5753 */ 5754 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 5755 em->block_start == EXTENT_MAP_INLINE) { 5756 free_extent_map(em); 5757 return -ENOTBLK; 5758 } 5759 5760 /* Just a good old fashioned hole, return */ 5761 if (!create && (em->block_start == EXTENT_MAP_HOLE || 5762 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 5763 free_extent_map(em); 5764 /* DIO will do one hole at a time, so just unlock a sector */ 5765 unlock_extent(&BTRFS_I(inode)->io_tree, start, 5766 start + root->sectorsize - 1); 5767 return 0; 5768 } 5769 5770 /* 5771 * We don't allocate a new extent in the following cases 5772 * 5773 * 1) The inode is marked as NODATACOW. In this case we'll just use the 5774 * existing extent. 5775 * 2) The extent is marked as PREALLOC. We're good to go here and can 5776 * just use the extent. 
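 * In either case can_nocow_odirect() below still has to confirm there are
 * no other references or checksums on the range before COW is skipped;
 * otherwise we fall through to must_cow and allocate a new extent.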
5777 * 5778 */ 5779 if (!create) { 5780 len = em->len - (start - em->start); 5781 goto map; 5782 } 5783 5784 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 5785 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 5786 em->block_start != EXTENT_MAP_HOLE)) { 5787 int type; 5788 int ret; 5789 u64 block_start; 5790 5791 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5792 type = BTRFS_ORDERED_PREALLOC; 5793 else 5794 type = BTRFS_ORDERED_NOCOW; 5795 len = min(len, em->len - (start - em->start)); 5796 block_start = em->block_start + (start - em->start); 5797 5798 /* 5799 * we're not going to log anything, but we do need 5800 * to make sure the current transaction stays open 5801 * while we look for nocow cross refs 5802 */ 5803 trans = btrfs_join_transaction(root); 5804 if (IS_ERR(trans)) 5805 goto must_cow; 5806 5807 if (can_nocow_odirect(trans, inode, start, len) == 1) { 5808 ret = btrfs_add_ordered_extent_dio(inode, start, 5809 block_start, len, len, type); 5810 btrfs_end_transaction(trans, root); 5811 if (ret) { 5812 free_extent_map(em); 5813 return ret; 5814 } 5815 goto unlock; 5816 } 5817 btrfs_end_transaction(trans, root); 5818 } 5819 must_cow: 5820 /* 5821 * this will cow the extent, reset the len in case we changed 5822 * it above 5823 */ 5824 len = bh_result->b_size; 5825 em = btrfs_new_extent_direct(inode, em, start, len); 5826 if (IS_ERR(em)) 5827 return PTR_ERR(em); 5828 len = min(len, em->len - (start - em->start)); 5829 unlock: 5830 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, 5831 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, 5832 0, NULL, GFP_NOFS); 5833 map: 5834 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 5835 inode->i_blkbits; 5836 bh_result->b_size = len; 5837 bh_result->b_bdev = em->bdev; 5838 set_buffer_mapped(bh_result); 5839 if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5840 set_buffer_new(bh_result); 5841 5842 free_extent_map(em); 5843 5844 return 0; 5845 } 5846 5847 struct btrfs_dio_private { 5848 struct inode *inode; 5849 u64 logical_offset; 5850 u64 disk_bytenr; 5851 u64 bytes; 5852 u32 *csums; 5853 void *private; 5854 5855 /* number of bios pending for this dio */ 5856 atomic_t pending_bios; 5857 5858 /* IO errors */ 5859 int errors; 5860 5861 struct bio *orig_bio; 5862 }; 5863 5864 static void btrfs_endio_direct_read(struct bio *bio, int err) 5865 { 5866 struct btrfs_dio_private *dip = bio->bi_private; 5867 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 5868 struct bio_vec *bvec = bio->bi_io_vec; 5869 struct inode *inode = dip->inode; 5870 struct btrfs_root *root = BTRFS_I(inode)->root; 5871 u64 start; 5872 u32 *private = dip->csums; 5873 5874 start = dip->logical_offset; 5875 do { 5876 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 5877 struct page *page = bvec->bv_page; 5878 char *kaddr; 5879 u32 csum = ~(u32)0; 5880 unsigned long flags; 5881 5882 local_irq_save(flags); 5883 kaddr = kmap_atomic(page); 5884 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, 5885 csum, bvec->bv_len); 5886 btrfs_csum_final(csum, (char *)&csum); 5887 kunmap_atomic(kaddr); 5888 local_irq_restore(flags); 5889 5890 flush_dcache_page(bvec->bv_page); 5891 if (csum != *private) { 5892 printk(KERN_ERR "btrfs csum failed ino %llu off" 5893 " %llu csum %u private %u\n", 5894 (unsigned long long)btrfs_ino(inode), 5895 (unsigned long long)start, 5896 csum, *private); 5897 err = -EIO; 5898 } 5899 } 5900 5901 start += bvec->bv_len; 5902 private++; 5903 bvec++; 5904 } while (bvec <= bvec_end); 5905 5906 
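	/*
	 * csums verified (or skipped): unlock the dio range and hand the
	 * bio back to the dio code.
	 */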
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 5907 dip->logical_offset + dip->bytes - 1); 5908 bio->bi_private = dip->private; 5909 5910 kfree(dip->csums); 5911 kfree(dip); 5912 5913 /* If we had a csum failure make sure to clear the uptodate flag */ 5914 if (err) 5915 clear_bit(BIO_UPTODATE, &bio->bi_flags); 5916 dio_end_io(bio, err); 5917 } 5918 5919 static void btrfs_endio_direct_write(struct bio *bio, int err) 5920 { 5921 struct btrfs_dio_private *dip = bio->bi_private; 5922 struct inode *inode = dip->inode; 5923 struct btrfs_root *root = BTRFS_I(inode)->root; 5924 struct btrfs_trans_handle *trans; 5925 struct btrfs_ordered_extent *ordered = NULL; 5926 struct extent_state *cached_state = NULL; 5927 u64 ordered_offset = dip->logical_offset; 5928 u64 ordered_bytes = dip->bytes; 5929 int ret; 5930 5931 if (err) 5932 goto out_done; 5933 again: 5934 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 5935 &ordered_offset, 5936 ordered_bytes); 5937 if (!ret) 5938 goto out_test; 5939 5940 BUG_ON(!ordered); 5941 5942 trans = btrfs_join_transaction(root); 5943 if (IS_ERR(trans)) { 5944 err = -ENOMEM; 5945 goto out; 5946 } 5947 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 5948 5949 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 5950 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5951 if (!ret) 5952 err = btrfs_update_inode_fallback(trans, root, inode); 5953 goto out; 5954 } 5955 5956 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5957 ordered->file_offset + ordered->len - 1, 0, 5958 &cached_state); 5959 5960 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { 5961 ret = btrfs_mark_extent_written(trans, inode, 5962 ordered->file_offset, 5963 ordered->file_offset + 5964 ordered->len); 5965 if (ret) { 5966 err = ret; 5967 goto out_unlock; 5968 } 5969 } else { 5970 ret = insert_reserved_file_extent(trans, inode, 5971 ordered->file_offset, 5972 ordered->start, 5973 ordered->disk_len, 5974 ordered->len, 5975 ordered->len, 5976 0, 0, 0, 5977 BTRFS_FILE_EXTENT_REG); 5978 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 5979 ordered->file_offset, ordered->len); 5980 if (ret) { 5981 err = ret; 5982 WARN_ON(1); 5983 goto out_unlock; 5984 } 5985 } 5986 5987 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5988 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5989 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5990 btrfs_update_inode_fallback(trans, root, inode); 5991 ret = 0; 5992 out_unlock: 5993 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5994 ordered->file_offset + ordered->len - 1, 5995 &cached_state, GFP_NOFS); 5996 out: 5997 btrfs_delalloc_release_metadata(inode, ordered->len); 5998 btrfs_end_transaction(trans, root); 5999 ordered_offset = ordered->file_offset + ordered->len; 6000 btrfs_put_ordered_extent(ordered); 6001 btrfs_put_ordered_extent(ordered); 6002 6003 out_test: 6004 /* 6005 * our bio might span multiple ordered extents. 
If we haven't 6006 * completed the accounting for the whole dio, go back and try again 6007 */ 6008 if (ordered_offset < dip->logical_offset + dip->bytes) { 6009 ordered_bytes = dip->logical_offset + dip->bytes - 6010 ordered_offset; 6011 goto again; 6012 } 6013 out_done: 6014 bio->bi_private = dip->private; 6015 6016 kfree(dip->csums); 6017 kfree(dip); 6018 6019 /* If we had an error make sure to clear the uptodate flag */ 6020 if (err) 6021 clear_bit(BIO_UPTODATE, &bio->bi_flags); 6022 dio_end_io(bio, err); 6023 } 6024 6025 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, 6026 struct bio *bio, int mirror_num, 6027 unsigned long bio_flags, u64 offset) 6028 { 6029 int ret; 6030 struct btrfs_root *root = BTRFS_I(inode)->root; 6031 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 6032 BUG_ON(ret); /* -ENOMEM */ 6033 return 0; 6034 } 6035 6036 static void btrfs_end_dio_bio(struct bio *bio, int err) 6037 { 6038 struct btrfs_dio_private *dip = bio->bi_private; 6039 6040 if (err) { 6041 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " 6042 "sector %#Lx len %u err no %d\n", 6043 (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, 6044 (unsigned long long)bio->bi_sector, bio->bi_size, err); 6045 dip->errors = 1; 6046 6047 /* 6048 * before atomic variable goto zero, we must make sure 6049 * dip->errors is perceived to be set. 6050 */ 6051 smp_mb__before_atomic_dec(); 6052 } 6053 6054 /* if there are more bios still pending for this dio, just exit */ 6055 if (!atomic_dec_and_test(&dip->pending_bios)) 6056 goto out; 6057 6058 if (dip->errors) 6059 bio_io_error(dip->orig_bio); 6060 else { 6061 set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); 6062 bio_endio(dip->orig_bio, 0); 6063 } 6064 out: 6065 bio_put(bio); 6066 } 6067 6068 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, 6069 u64 first_sector, gfp_t gfp_flags) 6070 { 6071 int nr_vecs = bio_get_nr_vecs(bdev); 6072 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); 6073 } 6074 6075 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 6076 int rw, u64 file_offset, int skip_sum, 6077 u32 *csums, int async_submit) 6078 { 6079 int write = rw & REQ_WRITE; 6080 struct btrfs_root *root = BTRFS_I(inode)->root; 6081 int ret; 6082 6083 bio_get(bio); 6084 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 6085 if (ret) 6086 goto err; 6087 6088 if (skip_sum) 6089 goto map; 6090 6091 if (write && async_submit) { 6092 ret = btrfs_wq_submit_bio(root->fs_info, 6093 inode, rw, bio, 0, 0, 6094 file_offset, 6095 __btrfs_submit_bio_start_direct_io, 6096 __btrfs_submit_bio_done); 6097 goto err; 6098 } else if (write) { 6099 /* 6100 * If we aren't doing async submit, calculate the csum of the 6101 * bio now. 
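		 * (in the async path above, the csum is generated by the
		 * __btrfs_submit_bio_start_direct_io hook instead)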
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else if (!skip_sum) {
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}

static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}

	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Writes use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before the atomic variable goes to zero, we must
	 * make sure dip->errors is perceived to be set.
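	 * (the barrier below orders the dip->errors store against the
	 * atomic_dec_and_test() that follows)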
6222 */ 6223 smp_mb__before_atomic_dec(); 6224 if (atomic_dec_and_test(&dip->pending_bios)) 6225 bio_io_error(dip->orig_bio); 6226 6227 /* bio_end_io() will handle error, so we needn't return it */ 6228 return 0; 6229 } 6230 6231 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, 6232 loff_t file_offset) 6233 { 6234 struct btrfs_root *root = BTRFS_I(inode)->root; 6235 struct btrfs_dio_private *dip; 6236 struct bio_vec *bvec = bio->bi_io_vec; 6237 int skip_sum; 6238 int write = rw & REQ_WRITE; 6239 int ret = 0; 6240 6241 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 6242 6243 dip = kmalloc(sizeof(*dip), GFP_NOFS); 6244 if (!dip) { 6245 ret = -ENOMEM; 6246 goto free_ordered; 6247 } 6248 dip->csums = NULL; 6249 6250 /* Write's use the ordered csum stuff, so we don't need dip->csums */ 6251 if (!write && !skip_sum) { 6252 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); 6253 if (!dip->csums) { 6254 kfree(dip); 6255 ret = -ENOMEM; 6256 goto free_ordered; 6257 } 6258 } 6259 6260 dip->private = bio->bi_private; 6261 dip->inode = inode; 6262 dip->logical_offset = file_offset; 6263 6264 dip->bytes = 0; 6265 do { 6266 dip->bytes += bvec->bv_len; 6267 bvec++; 6268 } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); 6269 6270 dip->disk_bytenr = (u64)bio->bi_sector << 9; 6271 bio->bi_private = dip; 6272 dip->errors = 0; 6273 dip->orig_bio = bio; 6274 atomic_set(&dip->pending_bios, 0); 6275 6276 if (write) 6277 bio->bi_end_io = btrfs_endio_direct_write; 6278 else 6279 bio->bi_end_io = btrfs_endio_direct_read; 6280 6281 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 6282 if (!ret) 6283 return; 6284 free_ordered: 6285 /* 6286 * If this is a write, we need to clean up the reserved space and kill 6287 * the ordered extent. 6288 */ 6289 if (write) { 6290 struct btrfs_ordered_extent *ordered; 6291 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 6292 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 6293 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 6294 btrfs_free_reserved_extent(root, ordered->start, 6295 ordered->disk_len); 6296 btrfs_put_ordered_extent(ordered); 6297 btrfs_put_ordered_extent(ordered); 6298 } 6299 bio_endio(bio, ret); 6300 } 6301 6302 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, 6303 const struct iovec *iov, loff_t offset, 6304 unsigned long nr_segs) 6305 { 6306 int seg; 6307 int i; 6308 size_t size; 6309 unsigned long addr; 6310 unsigned blocksize_mask = root->sectorsize - 1; 6311 ssize_t retval = -EINVAL; 6312 loff_t end = offset; 6313 6314 if (offset & blocksize_mask) 6315 goto out; 6316 6317 /* Check the memory alignment. Blocks cannot straddle pages */ 6318 for (seg = 0; seg < nr_segs; seg++) { 6319 addr = (unsigned long)iov[seg].iov_base; 6320 size = iov[seg].iov_len; 6321 end += size; 6322 if ((addr & blocksize_mask) || (size & blocksize_mask)) 6323 goto out; 6324 6325 /* If this is a write we don't need to check anymore */ 6326 if (rw & WRITE) 6327 continue; 6328 6329 /* 6330 * Check to make sure we don't have duplicate iov_base's in this 6331 * iovec, if so return EINVAL, otherwise we'll get csum errors 6332 * when reading back. 
6333 */ 6334 for (i = seg + 1; i < nr_segs; i++) { 6335 if (iov[seg].iov_base == iov[i].iov_base) 6336 goto out; 6337 } 6338 } 6339 retval = 0; 6340 out: 6341 return retval; 6342 } 6343 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, 6344 const struct iovec *iov, loff_t offset, 6345 unsigned long nr_segs) 6346 { 6347 struct file *file = iocb->ki_filp; 6348 struct inode *inode = file->f_mapping->host; 6349 struct btrfs_ordered_extent *ordered; 6350 struct extent_state *cached_state = NULL; 6351 u64 lockstart, lockend; 6352 ssize_t ret; 6353 int writing = rw & WRITE; 6354 int write_bits = 0; 6355 size_t count = iov_length(iov, nr_segs); 6356 6357 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, 6358 offset, nr_segs)) { 6359 return 0; 6360 } 6361 6362 lockstart = offset; 6363 lockend = offset + count - 1; 6364 6365 if (writing) { 6366 ret = btrfs_delalloc_reserve_space(inode, count); 6367 if (ret) 6368 goto out; 6369 } 6370 6371 while (1) { 6372 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6373 0, &cached_state); 6374 /* 6375 * We're concerned with the entire range that we're going to be 6376 * doing DIO to, so we need to make sure theres no ordered 6377 * extents in this range. 6378 */ 6379 ordered = btrfs_lookup_ordered_range(inode, lockstart, 6380 lockend - lockstart + 1); 6381 if (!ordered) 6382 break; 6383 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6384 &cached_state, GFP_NOFS); 6385 btrfs_start_ordered_extent(inode, ordered, 1); 6386 btrfs_put_ordered_extent(ordered); 6387 cond_resched(); 6388 } 6389 6390 /* 6391 * we don't use btrfs_set_extent_delalloc because we don't want 6392 * the dirty or uptodate bits 6393 */ 6394 if (writing) { 6395 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; 6396 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6397 EXTENT_DELALLOC, NULL, &cached_state, 6398 GFP_NOFS); 6399 if (ret) { 6400 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 6401 lockend, EXTENT_LOCKED | write_bits, 6402 1, 0, &cached_state, GFP_NOFS); 6403 goto out; 6404 } 6405 } 6406 6407 free_extent_state(cached_state); 6408 cached_state = NULL; 6409 6410 ret = __blockdev_direct_IO(rw, iocb, inode, 6411 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 6412 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, 6413 btrfs_submit_direct, 0); 6414 6415 if (ret < 0 && ret != -EIOCBQUEUED) { 6416 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, 6417 offset + iov_length(iov, nr_segs) - 1, 6418 EXTENT_LOCKED | write_bits, 1, 0, 6419 &cached_state, GFP_NOFS); 6420 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { 6421 /* 6422 * We're falling back to buffered, unlock the section we didn't 6423 * do IO on. 
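		 * (ret is the number of bytes actually submitted, so only the
		 * untouched tail of the range is released here)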
6424 */ 6425 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, 6426 offset + iov_length(iov, nr_segs) - 1, 6427 EXTENT_LOCKED | write_bits, 1, 0, 6428 &cached_state, GFP_NOFS); 6429 } 6430 out: 6431 free_extent_state(cached_state); 6432 return ret; 6433 } 6434 6435 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6436 __u64 start, __u64 len) 6437 { 6438 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 6439 } 6440 6441 int btrfs_readpage(struct file *file, struct page *page) 6442 { 6443 struct extent_io_tree *tree; 6444 tree = &BTRFS_I(page->mapping->host)->io_tree; 6445 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 6446 } 6447 6448 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 6449 { 6450 struct extent_io_tree *tree; 6451 6452 6453 if (current->flags & PF_MEMALLOC) { 6454 redirty_page_for_writepage(wbc, page); 6455 unlock_page(page); 6456 return 0; 6457 } 6458 tree = &BTRFS_I(page->mapping->host)->io_tree; 6459 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 6460 } 6461 6462 int btrfs_writepages(struct address_space *mapping, 6463 struct writeback_control *wbc) 6464 { 6465 struct extent_io_tree *tree; 6466 6467 tree = &BTRFS_I(mapping->host)->io_tree; 6468 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 6469 } 6470 6471 static int 6472 btrfs_readpages(struct file *file, struct address_space *mapping, 6473 struct list_head *pages, unsigned nr_pages) 6474 { 6475 struct extent_io_tree *tree; 6476 tree = &BTRFS_I(mapping->host)->io_tree; 6477 return extent_readpages(tree, mapping, pages, nr_pages, 6478 btrfs_get_extent); 6479 } 6480 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6481 { 6482 struct extent_io_tree *tree; 6483 struct extent_map_tree *map; 6484 int ret; 6485 6486 tree = &BTRFS_I(page->mapping->host)->io_tree; 6487 map = &BTRFS_I(page->mapping->host)->extent_tree; 6488 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 6489 if (ret == 1) { 6490 ClearPagePrivate(page); 6491 set_page_private(page, 0); 6492 page_cache_release(page); 6493 } 6494 return ret; 6495 } 6496 6497 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6498 { 6499 if (PageWriteback(page) || PageDirty(page)) 6500 return 0; 6501 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 6502 } 6503 6504 static void btrfs_invalidatepage(struct page *page, unsigned long offset) 6505 { 6506 struct extent_io_tree *tree; 6507 struct btrfs_ordered_extent *ordered; 6508 struct extent_state *cached_state = NULL; 6509 u64 page_start = page_offset(page); 6510 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 6511 6512 6513 /* 6514 * we have the page locked, so new writeback can't start, 6515 * and the dirty bit won't be cleared while we are here. 
6516 * 6517 * Wait for IO on this page so that we can safely clear 6518 * the PagePrivate2 bit and do ordered accounting 6519 */ 6520 wait_on_page_writeback(page); 6521 6522 tree = &BTRFS_I(page->mapping->host)->io_tree; 6523 if (offset) { 6524 btrfs_releasepage(page, GFP_NOFS); 6525 return; 6526 } 6527 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6528 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 6529 page_offset(page)); 6530 if (ordered) { 6531 /* 6532 * IO on this page will never be started, so we need 6533 * to account for any ordered extents now 6534 */ 6535 clear_extent_bit(tree, page_start, page_end, 6536 EXTENT_DIRTY | EXTENT_DELALLOC | 6537 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, 6538 &cached_state, GFP_NOFS); 6539 /* 6540 * whoever cleared the private bit is responsible 6541 * for the finish_ordered_io 6542 */ 6543 if (TestClearPagePrivate2(page)) { 6544 btrfs_finish_ordered_io(page->mapping->host, 6545 page_start, page_end); 6546 } 6547 btrfs_put_ordered_extent(ordered); 6548 cached_state = NULL; 6549 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6550 } 6551 clear_extent_bit(tree, page_start, page_end, 6552 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 6553 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); 6554 __btrfs_releasepage(page, GFP_NOFS); 6555 6556 ClearPageChecked(page); 6557 if (PagePrivate(page)) { 6558 ClearPagePrivate(page); 6559 set_page_private(page, 0); 6560 page_cache_release(page); 6561 } 6562 } 6563 6564 /* 6565 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 6566 * called from a page fault handler when a page is first dirtied. Hence we must 6567 * be careful to check for EOF conditions here. We set the page up correctly 6568 * for a written page which means we get ENOSPC checking when writing into 6569 * holes and correct delalloc and unwritten extent mapping on filesystems that 6570 * support these features. 6571 * 6572 * We are not allowed to take the i_mutex here so we have to play games to 6573 * protect against truncate races as the page could now be beyond EOF. Because 6574 * vmtruncate() writes the inode size before removing pages, once we have the 6575 * page lock we can determine safely if the page is beyond EOF. If it is not 6576 * beyond EOF, then the page is guaranteed safe against truncation until we 6577 * unlock the page. 
6578 */ 6579 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 6580 { 6581 struct page *page = vmf->page; 6582 struct inode *inode = fdentry(vma->vm_file)->d_inode; 6583 struct btrfs_root *root = BTRFS_I(inode)->root; 6584 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6585 struct btrfs_ordered_extent *ordered; 6586 struct extent_state *cached_state = NULL; 6587 char *kaddr; 6588 unsigned long zero_start; 6589 loff_t size; 6590 int ret; 6591 int reserved = 0; 6592 u64 page_start; 6593 u64 page_end; 6594 6595 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 6596 if (!ret) { 6597 ret = btrfs_update_time(vma->vm_file); 6598 reserved = 1; 6599 } 6600 if (ret) { 6601 if (ret == -ENOMEM) 6602 ret = VM_FAULT_OOM; 6603 else /* -ENOSPC, -EIO, etc */ 6604 ret = VM_FAULT_SIGBUS; 6605 if (reserved) 6606 goto out; 6607 goto out_noreserve; 6608 } 6609 6610 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 6611 again: 6612 lock_page(page); 6613 size = i_size_read(inode); 6614 page_start = page_offset(page); 6615 page_end = page_start + PAGE_CACHE_SIZE - 1; 6616 6617 if ((page->mapping != inode->i_mapping) || 6618 (page_start >= size)) { 6619 /* page got truncated out from underneath us */ 6620 goto out_unlock; 6621 } 6622 wait_on_page_writeback(page); 6623 6624 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 6625 set_page_extent_mapped(page); 6626 6627 /* 6628 * we can't set the delalloc bits if there are pending ordered 6629 * extents. Drop our locks and wait for them to finish 6630 */ 6631 ordered = btrfs_lookup_ordered_extent(inode, page_start); 6632 if (ordered) { 6633 unlock_extent_cached(io_tree, page_start, page_end, 6634 &cached_state, GFP_NOFS); 6635 unlock_page(page); 6636 btrfs_start_ordered_extent(inode, ordered, 1); 6637 btrfs_put_ordered_extent(ordered); 6638 goto again; 6639 } 6640 6641 /* 6642 * XXX - page_mkwrite gets called every time the page is dirtied, even 6643 * if it was already dirty, so for space accounting reasons we need to 6644 * clear any delalloc bits for the range we are fixing to save. There 6645 * is probably a better way to do this, but for now keep consistent with 6646 * prepare_pages in the normal write path. 
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
out:
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
	return ret;
}

static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item.  Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be freed up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be separate.  The fact is we can use a lot
	 * of space doing the truncate, and we have no earthly idea how much
	 * space we will use, so we need the truncate reservation to be
	 * separate so it doesn't end up using space reserved for updating the
	 * inode or removing the orphan item.  We also need to be able to stop
	 * the transaction and start a new one, which means we need to be able
	 * to update the inode several times, and we have no way of knowing
	 * how many times that will be, so we can't just reserve 1 item for the
	 * entirety of the operation, so that has to be done separately as
	 * well.  Then there is the orphan item, which does indeed need to be
	 * held on to for the whole operation, and we need nobody to touch this
	 * reserved space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
	 * 2) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;

	/*
	 * 1 for the truncate slack space
	 * 1 for the orphan item we're going to add
	 * 1 for the orphan item deletion
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 4);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero.
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size);
		if (ret) {
			/*
			 * This can only happen with the original transaction we
			 * started above, every other time we shouldn't have a
			 * transaction started yet.
			 */
			if (ret == -EAGAIN)
				goto end_trans;
			err = ret;
			break;
		}

		if (!trans) {
			/* Just need the 1 for updating the inode */
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = err = PTR_ERR(trans);
				trans = NULL;
				break;
			}
		}

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}
end_trans:
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in-memory
		 * orphan list.
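		 * (with a NULL trans only the in-memory accounting is
		 * dropped here)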
6851 */ 6852 ret = btrfs_orphan_del(NULL, inode); 6853 } 6854 6855 if (trans) { 6856 trans->block_rsv = &root->fs_info->trans_block_rsv; 6857 ret = btrfs_update_inode(trans, root, inode); 6858 if (ret && !err) 6859 err = ret; 6860 6861 nr = trans->blocks_used; 6862 ret = btrfs_end_transaction(trans, root); 6863 btrfs_btree_balance_dirty(root, nr); 6864 } 6865 6866 out: 6867 btrfs_free_block_rsv(root, rsv); 6868 6869 if (ret && !err) 6870 err = ret; 6871 6872 return err; 6873 } 6874 6875 /* 6876 * create a new subvolume directory/inode (helper for the ioctl). 6877 */ 6878 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6879 struct btrfs_root *new_root, u64 new_dirid) 6880 { 6881 struct inode *inode; 6882 int err; 6883 u64 index = 0; 6884 6885 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 6886 new_dirid, new_dirid, 6887 S_IFDIR | (~current_umask() & S_IRWXUGO), 6888 &index); 6889 if (IS_ERR(inode)) 6890 return PTR_ERR(inode); 6891 inode->i_op = &btrfs_dir_inode_operations; 6892 inode->i_fop = &btrfs_dir_file_operations; 6893 6894 set_nlink(inode, 1); 6895 btrfs_i_size_write(inode, 0); 6896 6897 err = btrfs_update_inode(trans, new_root, inode); 6898 6899 iput(inode); 6900 return err; 6901 } 6902 6903 struct inode *btrfs_alloc_inode(struct super_block *sb) 6904 { 6905 struct btrfs_inode *ei; 6906 struct inode *inode; 6907 6908 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 6909 if (!ei) 6910 return NULL; 6911 6912 ei->root = NULL; 6913 ei->space_info = NULL; 6914 ei->generation = 0; 6915 ei->sequence = 0; 6916 ei->last_trans = 0; 6917 ei->last_sub_trans = 0; 6918 ei->logged_trans = 0; 6919 ei->delalloc_bytes = 0; 6920 ei->disk_i_size = 0; 6921 ei->flags = 0; 6922 ei->csum_bytes = 0; 6923 ei->index_cnt = (u64)-1; 6924 ei->last_unlink_trans = 0; 6925 6926 spin_lock_init(&ei->lock); 6927 ei->outstanding_extents = 0; 6928 ei->reserved_extents = 0; 6929 6930 ei->ordered_data_close = 0; 6931 ei->orphan_meta_reserved = 0; 6932 ei->dummy_inode = 0; 6933 ei->in_defrag = 0; 6934 ei->delalloc_meta_reserved = 0; 6935 ei->force_compress = BTRFS_COMPRESS_NONE; 6936 6937 ei->delayed_node = NULL; 6938 6939 inode = &ei->vfs_inode; 6940 extent_map_tree_init(&ei->extent_tree); 6941 extent_io_tree_init(&ei->io_tree, &inode->i_data); 6942 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 6943 ei->io_tree.track_uptodate = 1; 6944 ei->io_failure_tree.track_uptodate = 1; 6945 mutex_init(&ei->log_mutex); 6946 mutex_init(&ei->delalloc_mutex); 6947 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6948 INIT_LIST_HEAD(&ei->i_orphan); 6949 INIT_LIST_HEAD(&ei->delalloc_inodes); 6950 INIT_LIST_HEAD(&ei->ordered_operations); 6951 RB_CLEAR_NODE(&ei->rb_node); 6952 6953 return inode; 6954 } 6955 6956 static void btrfs_i_callback(struct rcu_head *head) 6957 { 6958 struct inode *inode = container_of(head, struct inode, i_rcu); 6959 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 6960 } 6961 6962 void btrfs_destroy_inode(struct inode *inode) 6963 { 6964 struct btrfs_ordered_extent *ordered; 6965 struct btrfs_root *root = BTRFS_I(inode)->root; 6966 6967 WARN_ON(!list_empty(&inode->i_dentry)); 6968 WARN_ON(inode->i_data.nrpages); 6969 WARN_ON(BTRFS_I(inode)->outstanding_extents); 6970 WARN_ON(BTRFS_I(inode)->reserved_extents); 6971 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 6972 WARN_ON(BTRFS_I(inode)->csum_bytes); 6973 6974 /* 6975 * This can happen where we create an inode, but somebody else also 6976 * created the same inode and we need to destroy the one we already 6977 * created. 
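	 * Such an inode never had a root attached, so skip the per-root
	 * cleanup below.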
6978 */ 6979 if (!root) 6980 goto free; 6981 6982 /* 6983 * Make sure we're properly removed from the ordered operation 6984 * lists. 6985 */ 6986 smp_mb(); 6987 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { 6988 spin_lock(&root->fs_info->ordered_extent_lock); 6989 list_del_init(&BTRFS_I(inode)->ordered_operations); 6990 spin_unlock(&root->fs_info->ordered_extent_lock); 6991 } 6992 6993 spin_lock(&root->orphan_lock); 6994 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6995 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6996 (unsigned long long)btrfs_ino(inode)); 6997 list_del_init(&BTRFS_I(inode)->i_orphan); 6998 } 6999 spin_unlock(&root->orphan_lock); 7000 7001 while (1) { 7002 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 7003 if (!ordered) 7004 break; 7005 else { 7006 printk(KERN_ERR "btrfs found ordered " 7007 "extent %llu %llu on inode cleanup\n", 7008 (unsigned long long)ordered->file_offset, 7009 (unsigned long long)ordered->len); 7010 btrfs_remove_ordered_extent(inode, ordered); 7011 btrfs_put_ordered_extent(ordered); 7012 btrfs_put_ordered_extent(ordered); 7013 } 7014 } 7015 inode_tree_del(inode); 7016 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 7017 free: 7018 btrfs_remove_delayed_node(inode); 7019 call_rcu(&inode->i_rcu, btrfs_i_callback); 7020 } 7021 7022 int btrfs_drop_inode(struct inode *inode) 7023 { 7024 struct btrfs_root *root = BTRFS_I(inode)->root; 7025 7026 if (btrfs_root_refs(&root->root_item) == 0 && 7027 !btrfs_is_free_space_inode(root, inode)) 7028 return 1; 7029 else 7030 return generic_drop_inode(inode); 7031 } 7032 7033 static void init_once(void *foo) 7034 { 7035 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 7036 7037 inode_init_once(&ei->vfs_inode); 7038 } 7039 7040 void btrfs_destroy_cachep(void) 7041 { 7042 if (btrfs_inode_cachep) 7043 kmem_cache_destroy(btrfs_inode_cachep); 7044 if (btrfs_trans_handle_cachep) 7045 kmem_cache_destroy(btrfs_trans_handle_cachep); 7046 if (btrfs_transaction_cachep) 7047 kmem_cache_destroy(btrfs_transaction_cachep); 7048 if (btrfs_path_cachep) 7049 kmem_cache_destroy(btrfs_path_cachep); 7050 if (btrfs_free_space_cachep) 7051 kmem_cache_destroy(btrfs_free_space_cachep); 7052 } 7053 7054 int btrfs_init_cachep(void) 7055 { 7056 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache", 7057 sizeof(struct btrfs_inode), 0, 7058 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 7059 if (!btrfs_inode_cachep) 7060 goto fail; 7061 7062 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache", 7063 sizeof(struct btrfs_trans_handle), 0, 7064 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7065 if (!btrfs_trans_handle_cachep) 7066 goto fail; 7067 7068 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache", 7069 sizeof(struct btrfs_transaction), 0, 7070 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7071 if (!btrfs_transaction_cachep) 7072 goto fail; 7073 7074 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache", 7075 sizeof(struct btrfs_path), 0, 7076 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7077 if (!btrfs_path_cachep) 7078 goto fail; 7079 7080 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", 7081 sizeof(struct btrfs_free_space), 0, 7082 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7083 if (!btrfs_free_space_cachep) 7084 goto fail; 7085 7086 return 0; 7087 fail: 7088 btrfs_destroy_cachep(); 7089 return -ENOMEM; 7090 } 7091 7092 static int btrfs_getattr(struct vfsmount *mnt, 7093 struct dentry *dentry, struct kstat *stat) 
{
	struct inode *inode = dentry->d_inode;
	u32 blocksize = inode->i_sb->s_blocksize;

	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
	return 0;
}

/*
 * If a file is moved, it will inherit the cow and compression flags of the new
 * directory.
 */
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	if (b_dir->flags & BTRFS_INODE_NODATACOW)
		b_inode->flags |= BTRFS_INODE_NODATACOW;
	else
		b_inode->flags &= ~BTRFS_INODE_NODATACOW;

	if (b_dir->flags & BTRFS_INODE_COMPRESS)
		b_inode->flags |= BTRFS_INODE_COMPRESS;
	else
		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
}

static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved.
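		 * Bumping last_trans_log_full_commit makes a later fsync in
		 * this transaction fall back to a full transaction commit.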
*/ 7189 root->fs_info->last_trans_log_full_commit = trans->transid; 7190 } else { 7191 ret = btrfs_insert_inode_ref(trans, dest, 7192 new_dentry->d_name.name, 7193 new_dentry->d_name.len, 7194 old_ino, 7195 btrfs_ino(new_dir), index); 7196 if (ret) 7197 goto out_fail; 7198 /* 7199 * this is an ugly little race, but the rename is required 7200 * to make sure that if we crash, the inode is either at the 7201 * old name or the new one. pinning the log transaction lets 7202 * us make sure we don't allow a log commit to come in after 7203 * we unlink the name but before we add the new name back in. 7204 */ 7205 btrfs_pin_log_trans(root); 7206 } 7207 /* 7208 * make sure the inode gets flushed if it is replacing 7209 * something. 7210 */ 7211 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) 7212 btrfs_add_ordered_operation(trans, root, old_inode); 7213 7214 old_dir->i_ctime = old_dir->i_mtime = ctime; 7215 new_dir->i_ctime = new_dir->i_mtime = ctime; 7216 old_inode->i_ctime = ctime; 7217 7218 if (old_dentry->d_parent != new_dentry->d_parent) 7219 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 7220 7221 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 7222 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 7223 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 7224 old_dentry->d_name.name, 7225 old_dentry->d_name.len); 7226 } else { 7227 ret = __btrfs_unlink_inode(trans, root, old_dir, 7228 old_dentry->d_inode, 7229 old_dentry->d_name.name, 7230 old_dentry->d_name.len); 7231 if (!ret) 7232 ret = btrfs_update_inode(trans, root, old_inode); 7233 } 7234 if (ret) { 7235 btrfs_abort_transaction(trans, root, ret); 7236 goto out_fail; 7237 } 7238 7239 if (new_inode) { 7240 new_inode->i_ctime = CURRENT_TIME; 7241 if (unlikely(btrfs_ino(new_inode) == 7242 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 7243 root_objectid = BTRFS_I(new_inode)->location.objectid; 7244 ret = btrfs_unlink_subvol(trans, dest, new_dir, 7245 root_objectid, 7246 new_dentry->d_name.name, 7247 new_dentry->d_name.len); 7248 BUG_ON(new_inode->i_nlink == 0); 7249 } else { 7250 ret = btrfs_unlink_inode(trans, dest, new_dir, 7251 new_dentry->d_inode, 7252 new_dentry->d_name.name, 7253 new_dentry->d_name.len); 7254 } 7255 if (!ret && new_inode->i_nlink == 0) { 7256 ret = btrfs_orphan_add(trans, new_dentry->d_inode); 7257 BUG_ON(ret); 7258 } 7259 if (ret) { 7260 btrfs_abort_transaction(trans, root, ret); 7261 goto out_fail; 7262 } 7263 } 7264 7265 fixup_inode_flags(new_dir, old_inode); 7266 7267 ret = btrfs_add_link(trans, new_dir, old_inode, 7268 new_dentry->d_name.name, 7269 new_dentry->d_name.len, 0, index); 7270 if (ret) { 7271 btrfs_abort_transaction(trans, root, ret); 7272 goto out_fail; 7273 } 7274 7275 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 7276 struct dentry *parent = new_dentry->d_parent; 7277 btrfs_log_new_name(trans, old_inode, old_dir, parent); 7278 btrfs_end_log_trans(root); 7279 } 7280 out_fail: 7281 btrfs_end_transaction(trans, root); 7282 out_notrans: 7283 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 7284 up_read(&root->fs_info->subvol_sem); 7285 7286 return ret; 7287 } 7288 7289 /* 7290 * some fairly slow code that needs optimization. This walks the list 7291 * of all the inodes with pending delalloc and forces them to disk. 
7292 */ 7293 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7294 { 7295 struct list_head *head = &root->fs_info->delalloc_inodes; 7296 struct btrfs_inode *binode; 7297 struct inode *inode; 7298 7299 if (root->fs_info->sb->s_flags & MS_RDONLY) 7300 return -EROFS; 7301 7302 spin_lock(&root->fs_info->delalloc_lock); 7303 while (!list_empty(head)) { 7304 binode = list_entry(head->next, struct btrfs_inode, 7305 delalloc_inodes); 7306 inode = igrab(&binode->vfs_inode); 7307 if (!inode) 7308 list_del_init(&binode->delalloc_inodes); 7309 spin_unlock(&root->fs_info->delalloc_lock); 7310 if (inode) { 7311 filemap_flush(inode->i_mapping); 7312 if (delay_iput) 7313 btrfs_add_delayed_iput(inode); 7314 else 7315 iput(inode); 7316 } 7317 cond_resched(); 7318 spin_lock(&root->fs_info->delalloc_lock); 7319 } 7320 spin_unlock(&root->fs_info->delalloc_lock); 7321 7322 /* the filemap_flush will queue IO into the worker threads, but 7323 * we have to make sure the IO is actually started and that 7324 * ordered extents get created before we return 7325 */ 7326 atomic_inc(&root->fs_info->async_submit_draining); 7327 while (atomic_read(&root->fs_info->nr_async_submits) || 7328 atomic_read(&root->fs_info->async_delalloc_pages)) { 7329 wait_event(root->fs_info->async_submit_wait, 7330 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 7331 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7332 } 7333 atomic_dec(&root->fs_info->async_submit_draining); 7334 return 0; 7335 } 7336 7337 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 7338 const char *symname) 7339 { 7340 struct btrfs_trans_handle *trans; 7341 struct btrfs_root *root = BTRFS_I(dir)->root; 7342 struct btrfs_path *path; 7343 struct btrfs_key key; 7344 struct inode *inode = NULL; 7345 int err; 7346 int drop_inode = 0; 7347 u64 objectid; 7348 u64 index = 0 ; 7349 int name_len; 7350 int datasize; 7351 unsigned long ptr; 7352 struct btrfs_file_extent_item *ei; 7353 struct extent_buffer *leaf; 7354 unsigned long nr = 0; 7355 7356 name_len = strlen(symname) + 1; 7357 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 7358 return -ENAMETOOLONG; 7359 7360 /* 7361 * 2 items for inode item and ref 7362 * 2 items for dir items 7363 * 1 item for xattr if selinux is on 7364 */ 7365 trans = btrfs_start_transaction(root, 5); 7366 if (IS_ERR(trans)) 7367 return PTR_ERR(trans); 7368 7369 err = btrfs_find_free_ino(root, &objectid); 7370 if (err) 7371 goto out_unlock; 7372 7373 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7374 dentry->d_name.len, btrfs_ino(dir), objectid, 7375 S_IFLNK|S_IRWXUGO, &index); 7376 if (IS_ERR(inode)) { 7377 err = PTR_ERR(inode); 7378 goto out_unlock; 7379 } 7380 7381 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 7382 if (err) { 7383 drop_inode = 1; 7384 goto out_unlock; 7385 } 7386 7387 /* 7388 * If the active LSM wants to access the inode during 7389 * d_instantiate it needs these. Smack checks to see 7390 * if the filesystem supports xattrs by looking at the 7391 * ops vector. 
7392 */ 7393 inode->i_fop = &btrfs_file_operations; 7394 inode->i_op = &btrfs_file_inode_operations; 7395 7396 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7397 if (err) 7398 drop_inode = 1; 7399 else { 7400 inode->i_mapping->a_ops = &btrfs_aops; 7401 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7402 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7403 } 7404 if (drop_inode) 7405 goto out_unlock; 7406 7407 path = btrfs_alloc_path(); 7408 if (!path) { 7409 err = -ENOMEM; 7410 drop_inode = 1; 7411 goto out_unlock; 7412 } 7413 key.objectid = btrfs_ino(inode); 7414 key.offset = 0; 7415 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7416 datasize = btrfs_file_extent_calc_inline_size(name_len); 7417 err = btrfs_insert_empty_item(trans, root, path, &key, 7418 datasize); 7419 if (err) { 7420 drop_inode = 1; 7421 btrfs_free_path(path); 7422 goto out_unlock; 7423 } 7424 leaf = path->nodes[0]; 7425 ei = btrfs_item_ptr(leaf, path->slots[0], 7426 struct btrfs_file_extent_item); 7427 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 7428 btrfs_set_file_extent_type(leaf, ei, 7429 BTRFS_FILE_EXTENT_INLINE); 7430 btrfs_set_file_extent_encryption(leaf, ei, 0); 7431 btrfs_set_file_extent_compression(leaf, ei, 0); 7432 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 7433 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 7434 7435 ptr = btrfs_file_extent_inline_start(ei); 7436 write_extent_buffer(leaf, symname, ptr, name_len); 7437 btrfs_mark_buffer_dirty(leaf); 7438 btrfs_free_path(path); 7439 7440 inode->i_op = &btrfs_symlink_inode_operations; 7441 inode->i_mapping->a_ops = &btrfs_symlink_aops; 7442 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7443 inode_set_bytes(inode, name_len); 7444 btrfs_i_size_write(inode, name_len - 1); 7445 err = btrfs_update_inode(trans, root, inode); 7446 if (err) 7447 drop_inode = 1; 7448 7449 out_unlock: 7450 if (!err) 7451 d_instantiate(dentry, inode); 7452 nr = trans->blocks_used; 7453 btrfs_end_transaction(trans, root); 7454 if (drop_inode) { 7455 inode_dec_link_count(inode); 7456 iput(inode); 7457 } 7458 btrfs_btree_balance_dirty(root, nr); 7459 return err; 7460 } 7461 7462 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 7463 u64 start, u64 num_bytes, u64 min_size, 7464 loff_t actual_len, u64 *alloc_hint, 7465 struct btrfs_trans_handle *trans) 7466 { 7467 struct btrfs_root *root = BTRFS_I(inode)->root; 7468 struct btrfs_key ins; 7469 u64 cur_offset = start; 7470 u64 i_size; 7471 int ret = 0; 7472 bool own_trans = true; 7473 7474 if (trans) 7475 own_trans = false; 7476 while (num_bytes > 0) { 7477 if (own_trans) { 7478 trans = btrfs_start_transaction(root, 3); 7479 if (IS_ERR(trans)) { 7480 ret = PTR_ERR(trans); 7481 break; 7482 } 7483 } 7484 7485 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, 7486 0, *alloc_hint, &ins, 1); 7487 if (ret) { 7488 if (own_trans) 7489 btrfs_end_transaction(trans, root); 7490 break; 7491 } 7492 7493 ret = insert_reserved_file_extent(trans, inode, 7494 cur_offset, ins.objectid, 7495 ins.offset, ins.offset, 7496 ins.offset, 0, 0, 0, 7497 BTRFS_FILE_EXTENT_PREALLOC); 7498 if (ret) { 7499 btrfs_abort_transaction(trans, root, ret); 7500 if (own_trans) 7501 btrfs_end_transaction(trans, root); 7502 break; 7503 } 7504 btrfs_drop_extent_cache(inode, cur_offset, 7505 cur_offset + ins.offset -1, 0); 7506 7507 num_bytes -= ins.offset; 7508 cur_offset += ins.offset; 7509 *alloc_hint = ins.objectid + ins.offset; 7510 7511 inode->i_ctime = CURRENT_TIME; 7512 
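		/*
		 * mark the inode as containing preallocated extents and, if
		 * this fallocate extends the file, push i_size forward
		 */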
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 7513 if (!(mode & FALLOC_FL_KEEP_SIZE) && 7514 (actual_len > inode->i_size) && 7515 (cur_offset > inode->i_size)) { 7516 if (cur_offset > actual_len) 7517 i_size = actual_len; 7518 else 7519 i_size = cur_offset; 7520 i_size_write(inode, i_size); 7521 btrfs_ordered_update_i_size(inode, i_size, NULL); 7522 } 7523 7524 ret = btrfs_update_inode(trans, root, inode); 7525 7526 if (ret) { 7527 btrfs_abort_transaction(trans, root, ret); 7528 if (own_trans) 7529 btrfs_end_transaction(trans, root); 7530 break; 7531 } 7532 7533 if (own_trans) 7534 btrfs_end_transaction(trans, root); 7535 } 7536 return ret; 7537 } 7538 7539 int btrfs_prealloc_file_range(struct inode *inode, int mode, 7540 u64 start, u64 num_bytes, u64 min_size, 7541 loff_t actual_len, u64 *alloc_hint) 7542 { 7543 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 7544 min_size, actual_len, alloc_hint, 7545 NULL); 7546 } 7547 7548 int btrfs_prealloc_file_range_trans(struct inode *inode, 7549 struct btrfs_trans_handle *trans, int mode, 7550 u64 start, u64 num_bytes, u64 min_size, 7551 loff_t actual_len, u64 *alloc_hint) 7552 { 7553 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 7554 min_size, actual_len, alloc_hint, trans); 7555 } 7556 7557 static int btrfs_set_page_dirty(struct page *page) 7558 { 7559 return __set_page_dirty_nobuffers(page); 7560 } 7561 7562 static int btrfs_permission(struct inode *inode, int mask) 7563 { 7564 struct btrfs_root *root = BTRFS_I(inode)->root; 7565 umode_t mode = inode->i_mode; 7566 7567 if (mask & MAY_WRITE && 7568 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 7569 if (btrfs_root_readonly(root)) 7570 return -EROFS; 7571 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 7572 return -EACCES; 7573 } 7574 return generic_permission(inode, mask); 7575 } 7576 7577 static const struct inode_operations btrfs_dir_inode_operations = { 7578 .getattr = btrfs_getattr, 7579 .lookup = btrfs_lookup, 7580 .create = btrfs_create, 7581 .unlink = btrfs_unlink, 7582 .link = btrfs_link, 7583 .mkdir = btrfs_mkdir, 7584 .rmdir = btrfs_rmdir, 7585 .rename = btrfs_rename, 7586 .symlink = btrfs_symlink, 7587 .setattr = btrfs_setattr, 7588 .mknod = btrfs_mknod, 7589 .setxattr = btrfs_setxattr, 7590 .getxattr = btrfs_getxattr, 7591 .listxattr = btrfs_listxattr, 7592 .removexattr = btrfs_removexattr, 7593 .permission = btrfs_permission, 7594 .get_acl = btrfs_get_acl, 7595 }; 7596 static const struct inode_operations btrfs_dir_ro_inode_operations = { 7597 .lookup = btrfs_lookup, 7598 .permission = btrfs_permission, 7599 .get_acl = btrfs_get_acl, 7600 }; 7601 7602 static const struct file_operations btrfs_dir_file_operations = { 7603 .llseek = generic_file_llseek, 7604 .read = generic_read_dir, 7605 .readdir = btrfs_real_readdir, 7606 .unlocked_ioctl = btrfs_ioctl, 7607 #ifdef CONFIG_COMPAT 7608 .compat_ioctl = btrfs_ioctl, 7609 #endif 7610 .release = btrfs_release_file, 7611 .fsync = btrfs_sync_file, 7612 }; 7613 7614 static struct extent_io_ops btrfs_extent_io_ops = { 7615 .fill_delalloc = run_delalloc_range, 7616 .submit_bio_hook = btrfs_submit_bio_hook, 7617 .merge_bio_hook = btrfs_merge_bio_hook, 7618 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 7619 .writepage_end_io_hook = btrfs_writepage_end_io_hook, 7620 .writepage_start_hook = btrfs_writepage_start_hook, 7621 .set_bit_hook = btrfs_set_bit_hook, 7622 .clear_bit_hook = btrfs_clear_bit_hook, 7623 .merge_extent_hook = btrfs_merge_extent_hook, 7624 .split_extent_hook = 
btrfs_split_extent_hook, 7625 }; 7626 7627 /* 7628 * btrfs doesn't support the bmap operation because swapfiles 7629 * use bmap to make a mapping of extents in the file. They assume 7630 * these extents won't change over the life of the file and they 7631 * use the bmap result to do IO directly to the drive. 7632 * 7633 * the btrfs bmap call would return logical addresses that aren't 7634 * suitable for IO and they also will change frequently as COW 7635 * operations happen. So, swapfile + btrfs == corruption. 7636 * 7637 * For now we're avoiding this by dropping bmap. 7638 */ 7639 static const struct address_space_operations btrfs_aops = { 7640 .readpage = btrfs_readpage, 7641 .writepage = btrfs_writepage, 7642 .writepages = btrfs_writepages, 7643 .readpages = btrfs_readpages, 7644 .direct_IO = btrfs_direct_IO, 7645 .invalidatepage = btrfs_invalidatepage, 7646 .releasepage = btrfs_releasepage, 7647 .set_page_dirty = btrfs_set_page_dirty, 7648 .error_remove_page = generic_error_remove_page, 7649 }; 7650 7651 static const struct address_space_operations btrfs_symlink_aops = { 7652 .readpage = btrfs_readpage, 7653 .writepage = btrfs_writepage, 7654 .invalidatepage = btrfs_invalidatepage, 7655 .releasepage = btrfs_releasepage, 7656 }; 7657 7658 static const struct inode_operations btrfs_file_inode_operations = { 7659 .getattr = btrfs_getattr, 7660 .setattr = btrfs_setattr, 7661 .setxattr = btrfs_setxattr, 7662 .getxattr = btrfs_getxattr, 7663 .listxattr = btrfs_listxattr, 7664 .removexattr = btrfs_removexattr, 7665 .permission = btrfs_permission, 7666 .fiemap = btrfs_fiemap, 7667 .get_acl = btrfs_get_acl, 7668 }; 7669 static const struct inode_operations btrfs_special_inode_operations = { 7670 .getattr = btrfs_getattr, 7671 .setattr = btrfs_setattr, 7672 .permission = btrfs_permission, 7673 .setxattr = btrfs_setxattr, 7674 .getxattr = btrfs_getxattr, 7675 .listxattr = btrfs_listxattr, 7676 .removexattr = btrfs_removexattr, 7677 .get_acl = btrfs_get_acl, 7678 }; 7679 static const struct inode_operations btrfs_symlink_inode_operations = { 7680 .readlink = generic_readlink, 7681 .follow_link = page_follow_link_light, 7682 .put_link = page_put_link, 7683 .getattr = btrfs_getattr, 7684 .setattr = btrfs_setattr, 7685 .permission = btrfs_permission, 7686 .setxattr = btrfs_setxattr, 7687 .getxattr = btrfs_getxattr, 7688 .listxattr = btrfs_listxattr, 7689 .removexattr = btrfs_removexattr, 7690 .get_acl = btrfs_get_acl, 7691 }; 7692 7693 const struct dentry_operations btrfs_dentry_operations = { 7694 .d_delete = btrfs_dentry_delete, 7695 .d_release = btrfs_dentry_release, 7696 }; 7697