/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"

struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
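/*
 * Illustrative note (not in the original source): for a regular file
 * with i_mode S_IFREG | 0644, (i_mode & S_IFMT) >> S_SHIFT equals
 * S_IFREG >> 12, so a lookup in btrfs_type_by_mode[] above yields
 * BTRFS_FT_REG_FILE, the directory entry type recorded on disk for
 * that inode.
 */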
100 static noinline int cow_file_range(struct inode *inode, 101 struct page *locked_page, 102 u64 start, u64 end, int *page_started, 103 unsigned long *nr_written, int unlock); 104 static struct extent_map *create_pinned_em(struct inode *inode, u64 start, 105 u64 len, u64 orig_start, 106 u64 block_start, u64 block_len, 107 u64 orig_block_len, u64 ram_bytes, 108 int type); 109 110 static int btrfs_dirty_inode(struct inode *inode); 111 112 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 113 void btrfs_test_inode_set_ops(struct inode *inode) 114 { 115 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 116 } 117 #endif 118 119 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 120 struct inode *inode, struct inode *dir, 121 const struct qstr *qstr) 122 { 123 int err; 124 125 err = btrfs_init_acl(trans, inode, dir); 126 if (!err) 127 err = btrfs_xattr_security_init(trans, inode, dir, qstr); 128 return err; 129 } 130 131 /* 132 * this does all the hard work for inserting an inline extent into 133 * the btree. The caller should have done a btrfs_drop_extents so that 134 * no overlapping inline items exist in the btree 135 */ 136 static int insert_inline_extent(struct btrfs_trans_handle *trans, 137 struct btrfs_path *path, int extent_inserted, 138 struct btrfs_root *root, struct inode *inode, 139 u64 start, size_t size, size_t compressed_size, 140 int compress_type, 141 struct page **compressed_pages) 142 { 143 struct extent_buffer *leaf; 144 struct page *page = NULL; 145 char *kaddr; 146 unsigned long ptr; 147 struct btrfs_file_extent_item *ei; 148 int err = 0; 149 int ret; 150 size_t cur_size = size; 151 unsigned long offset; 152 153 if (compressed_size && compressed_pages) 154 cur_size = compressed_size; 155 156 inode_add_bytes(inode, size); 157 158 if (!extent_inserted) { 159 struct btrfs_key key; 160 size_t datasize; 161 162 key.objectid = btrfs_ino(inode); 163 key.offset = start; 164 key.type = BTRFS_EXTENT_DATA_KEY; 165 166 datasize = btrfs_file_extent_calc_inline_size(cur_size); 167 path->leave_spinning = 1; 168 ret = btrfs_insert_empty_item(trans, root, path, &key, 169 datasize); 170 if (ret) { 171 err = ret; 172 goto fail; 173 } 174 } 175 leaf = path->nodes[0]; 176 ei = btrfs_item_ptr(leaf, path->slots[0], 177 struct btrfs_file_extent_item); 178 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 179 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 180 btrfs_set_file_extent_encryption(leaf, ei, 0); 181 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 182 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 183 ptr = btrfs_file_extent_inline_start(ei); 184 185 if (compress_type != BTRFS_COMPRESS_NONE) { 186 struct page *cpage; 187 int i = 0; 188 while (compressed_size > 0) { 189 cpage = compressed_pages[i]; 190 cur_size = min_t(unsigned long, compressed_size, 191 PAGE_CACHE_SIZE); 192 193 kaddr = kmap_atomic(cpage); 194 write_extent_buffer(leaf, kaddr, ptr, cur_size); 195 kunmap_atomic(kaddr); 196 197 i++; 198 ptr += cur_size; 199 compressed_size -= cur_size; 200 } 201 btrfs_set_file_extent_compression(leaf, ei, 202 compress_type); 203 } else { 204 page = find_get_page(inode->i_mapping, 205 start >> PAGE_CACHE_SHIFT); 206 btrfs_set_file_extent_compression(leaf, ei, 0); 207 kaddr = kmap_atomic(page); 208 offset = start & (PAGE_CACHE_SIZE - 1); 209 write_extent_buffer(leaf, kaddr + offset, ptr, size); 210 kunmap_atomic(kaddr); 211 page_cache_release(page); 212 } 213 btrfs_mark_buffer_dirty(leaf); 214 btrfs_release_path(path); 215 216 /* 217 * we're an 
inline extent, so nobody can 218 * extend the file past i_size without locking 219 * a page we already have locked. 220 * 221 * We must do any isize and inode updates 222 * before we unlock the pages. Otherwise we 223 * could end up racing with unlink. 224 */ 225 BTRFS_I(inode)->disk_i_size = inode->i_size; 226 ret = btrfs_update_inode(trans, root, inode); 227 228 return ret; 229 fail: 230 return err; 231 } 232 233 234 /* 235 * conditionally insert an inline extent into the file. This 236 * does the checks required to make sure the data is small enough 237 * to fit as an inline extent. 238 */ 239 static noinline int cow_file_range_inline(struct btrfs_root *root, 240 struct inode *inode, u64 start, 241 u64 end, size_t compressed_size, 242 int compress_type, 243 struct page **compressed_pages) 244 { 245 struct btrfs_trans_handle *trans; 246 u64 isize = i_size_read(inode); 247 u64 actual_end = min(end + 1, isize); 248 u64 inline_len = actual_end - start; 249 u64 aligned_end = ALIGN(end, root->sectorsize); 250 u64 data_len = inline_len; 251 int ret; 252 struct btrfs_path *path; 253 int extent_inserted = 0; 254 u32 extent_item_size; 255 256 if (compressed_size) 257 data_len = compressed_size; 258 259 if (start > 0 || 260 actual_end > PAGE_CACHE_SIZE || 261 data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) || 262 (!compressed_size && 263 (actual_end & (root->sectorsize - 1)) == 0) || 264 end + 1 < isize || 265 data_len > root->fs_info->max_inline) { 266 return 1; 267 } 268 269 path = btrfs_alloc_path(); 270 if (!path) 271 return -ENOMEM; 272 273 trans = btrfs_join_transaction(root); 274 if (IS_ERR(trans)) { 275 btrfs_free_path(path); 276 return PTR_ERR(trans); 277 } 278 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 279 280 if (compressed_size && compressed_pages) 281 extent_item_size = btrfs_file_extent_calc_inline_size( 282 compressed_size); 283 else 284 extent_item_size = btrfs_file_extent_calc_inline_size( 285 inline_len); 286 287 ret = __btrfs_drop_extents(trans, root, inode, path, 288 start, aligned_end, NULL, 289 1, 1, extent_item_size, &extent_inserted); 290 if (ret) { 291 btrfs_abort_transaction(trans, root, ret); 292 goto out; 293 } 294 295 if (isize > actual_end) 296 inline_len = min_t(u64, isize, actual_end); 297 ret = insert_inline_extent(trans, path, extent_inserted, 298 root, inode, start, 299 inline_len, compressed_size, 300 compress_type, compressed_pages); 301 if (ret && ret != -ENOSPC) { 302 btrfs_abort_transaction(trans, root, ret); 303 goto out; 304 } else if (ret == -ENOSPC) { 305 ret = 1; 306 goto out; 307 } 308 309 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 310 btrfs_delalloc_release_metadata(inode, end + 1 - start); 311 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 312 out: 313 /* 314 * Don't forget to free the reserved space, as for inlined extent 315 * it won't count as data extent, free them directly here. 316 * And at reserve time, it's always aligned to page size, so 317 * just free one page here. 
318 */ 319 btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); 320 btrfs_free_path(path); 321 btrfs_end_transaction(trans, root); 322 return ret; 323 } 324 325 struct async_extent { 326 u64 start; 327 u64 ram_size; 328 u64 compressed_size; 329 struct page **pages; 330 unsigned long nr_pages; 331 int compress_type; 332 struct list_head list; 333 }; 334 335 struct async_cow { 336 struct inode *inode; 337 struct btrfs_root *root; 338 struct page *locked_page; 339 u64 start; 340 u64 end; 341 struct list_head extents; 342 struct btrfs_work work; 343 }; 344 345 static noinline int add_async_extent(struct async_cow *cow, 346 u64 start, u64 ram_size, 347 u64 compressed_size, 348 struct page **pages, 349 unsigned long nr_pages, 350 int compress_type) 351 { 352 struct async_extent *async_extent; 353 354 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 355 BUG_ON(!async_extent); /* -ENOMEM */ 356 async_extent->start = start; 357 async_extent->ram_size = ram_size; 358 async_extent->compressed_size = compressed_size; 359 async_extent->pages = pages; 360 async_extent->nr_pages = nr_pages; 361 async_extent->compress_type = compress_type; 362 list_add_tail(&async_extent->list, &cow->extents); 363 return 0; 364 } 365 366 static inline int inode_need_compress(struct inode *inode) 367 { 368 struct btrfs_root *root = BTRFS_I(inode)->root; 369 370 /* force compress */ 371 if (btrfs_test_opt(root, FORCE_COMPRESS)) 372 return 1; 373 /* bad compression ratios */ 374 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) 375 return 0; 376 if (btrfs_test_opt(root, COMPRESS) || 377 BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || 378 BTRFS_I(inode)->force_compress) 379 return 1; 380 return 0; 381 } 382 383 /* 384 * we create compressed extents in two phases. The first 385 * phase compresses a range of pages that have already been 386 * locked (both pages and state bits are locked). 387 * 388 * This is done inside an ordered work queue, and the compression 389 * is spread across many cpus. The actual IO submission is step 390 * two, and the ordered work queue takes care of making sure that 391 * happens in the same order things were put onto the queue by 392 * writepages and friends. 393 * 394 * If this code finds it can't get good compression, it puts an 395 * entry onto the work queue to write the uncompressed bytes. This 396 * makes sure that both compressed inodes and uncompressed inodes 397 * are written in the same order that the flusher thread sent them 398 * down. 
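 *
 * (Illustrative roadmap, not part of the original comment: phase one is
 * compress_file_range() below, which cow_file_range_async() queues in
 * chunks of up to 512K; phase two is submit_compressed_extents(),
 * driven from async_cow_submit() in the order the chunks were queued.)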
 */
static noinline void compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k. This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress. This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range. Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
498 */ 499 extent_range_clear_dirty_for_io(inode, start, end); 500 redirty = 1; 501 ret = btrfs_compress_pages(compress_type, 502 inode->i_mapping, start, 503 total_compressed, pages, 504 nr_pages, &nr_pages_ret, 505 &total_in, 506 &total_compressed, 507 max_compressed); 508 509 if (!ret) { 510 unsigned long offset = total_compressed & 511 (PAGE_CACHE_SIZE - 1); 512 struct page *page = pages[nr_pages_ret - 1]; 513 char *kaddr; 514 515 /* zero the tail end of the last page, we might be 516 * sending it down to disk 517 */ 518 if (offset) { 519 kaddr = kmap_atomic(page); 520 memset(kaddr + offset, 0, 521 PAGE_CACHE_SIZE - offset); 522 kunmap_atomic(kaddr); 523 } 524 will_compress = 1; 525 } 526 } 527 cont: 528 if (start == 0) { 529 /* lets try to make an inline extent */ 530 if (ret || total_in < (actual_end - start)) { 531 /* we didn't compress the entire range, try 532 * to make an uncompressed inline extent. 533 */ 534 ret = cow_file_range_inline(root, inode, start, end, 535 0, 0, NULL); 536 } else { 537 /* try making a compressed inline extent */ 538 ret = cow_file_range_inline(root, inode, start, end, 539 total_compressed, 540 compress_type, pages); 541 } 542 if (ret <= 0) { 543 unsigned long clear_flags = EXTENT_DELALLOC | 544 EXTENT_DEFRAG; 545 unsigned long page_error_op; 546 547 clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0; 548 page_error_op = ret < 0 ? PAGE_SET_ERROR : 0; 549 550 /* 551 * inline extent creation worked or returned error, 552 * we don't need to create any more async work items. 553 * Unlock and free up our temp pages. 554 */ 555 extent_clear_unlock_delalloc(inode, start, end, NULL, 556 clear_flags, PAGE_UNLOCK | 557 PAGE_CLEAR_DIRTY | 558 PAGE_SET_WRITEBACK | 559 page_error_op | 560 PAGE_END_WRITEBACK); 561 goto free_pages_out; 562 } 563 } 564 565 if (will_compress) { 566 /* 567 * we aren't doing an inline extent round the compressed size 568 * up to a block size boundary so the allocator does sane 569 * things 570 */ 571 total_compressed = ALIGN(total_compressed, blocksize); 572 573 /* 574 * one last check to make sure the compression is really a 575 * win, compare the page count read with the blocks on disk 576 */ 577 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); 578 if (total_compressed >= total_in) { 579 will_compress = 0; 580 } else { 581 num_bytes = total_in; 582 } 583 } 584 if (!will_compress && pages) { 585 /* 586 * the compression code ran but failed to make things smaller, 587 * free any pages it allocated and our page pointer array 588 */ 589 for (i = 0; i < nr_pages_ret; i++) { 590 WARN_ON(pages[i]->mapping); 591 page_cache_release(pages[i]); 592 } 593 kfree(pages); 594 pages = NULL; 595 total_compressed = 0; 596 nr_pages_ret = 0; 597 598 /* flag the file so we don't compress in the future */ 599 if (!btrfs_test_opt(root, FORCE_COMPRESS) && 600 !(BTRFS_I(inode)->force_compress)) { 601 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 602 } 603 } 604 if (will_compress) { 605 *num_added += 1; 606 607 /* the async work queues will take care of doing actual 608 * allocation on disk for these compressed pages, 609 * and will submit them to the elevator. 610 */ 611 add_async_extent(async_cow, start, num_bytes, 612 total_compressed, pages, nr_pages_ret, 613 compress_type); 614 615 if (start + num_bytes < end) { 616 start += num_bytes; 617 pages = NULL; 618 cond_resched(); 619 goto again; 620 } 621 } else { 622 cleanup_and_bail_uncompressed: 623 /* 624 * No compression, but we still need to write the pages in 625 * the file we've been given so far. 
redirty the locked 626 * page if it corresponds to our extent and set things up 627 * for the async work queue to run cow_file_range to do 628 * the normal delalloc dance 629 */ 630 if (page_offset(locked_page) >= start && 631 page_offset(locked_page) <= end) { 632 __set_page_dirty_nobuffers(locked_page); 633 /* unlocked later on in the async handlers */ 634 } 635 if (redirty) 636 extent_range_redirty_for_io(inode, start, end); 637 add_async_extent(async_cow, start, end - start + 1, 638 0, NULL, 0, BTRFS_COMPRESS_NONE); 639 *num_added += 1; 640 } 641 642 return; 643 644 free_pages_out: 645 for (i = 0; i < nr_pages_ret; i++) { 646 WARN_ON(pages[i]->mapping); 647 page_cache_release(pages[i]); 648 } 649 kfree(pages); 650 } 651 652 static void free_async_extent_pages(struct async_extent *async_extent) 653 { 654 int i; 655 656 if (!async_extent->pages) 657 return; 658 659 for (i = 0; i < async_extent->nr_pages; i++) { 660 WARN_ON(async_extent->pages[i]->mapping); 661 page_cache_release(async_extent->pages[i]); 662 } 663 kfree(async_extent->pages); 664 async_extent->nr_pages = 0; 665 async_extent->pages = NULL; 666 } 667 668 /* 669 * phase two of compressed writeback. This is the ordered portion 670 * of the code, which only gets called in the order the work was 671 * queued. We walk all the async extents created by compress_file_range 672 * and send them down to the disk. 673 */ 674 static noinline void submit_compressed_extents(struct inode *inode, 675 struct async_cow *async_cow) 676 { 677 struct async_extent *async_extent; 678 u64 alloc_hint = 0; 679 struct btrfs_key ins; 680 struct extent_map *em; 681 struct btrfs_root *root = BTRFS_I(inode)->root; 682 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 683 struct extent_io_tree *io_tree; 684 int ret = 0; 685 686 again: 687 while (!list_empty(&async_cow->extents)) { 688 async_extent = list_entry(async_cow->extents.next, 689 struct async_extent, list); 690 list_del(&async_extent->list); 691 692 io_tree = &BTRFS_I(inode)->io_tree; 693 694 retry: 695 /* did the compression code fall back to uncompressed IO? */ 696 if (!async_extent->pages) { 697 int page_started = 0; 698 unsigned long nr_written = 0; 699 700 lock_extent(io_tree, async_extent->start, 701 async_extent->start + 702 async_extent->ram_size - 1); 703 704 /* allocate blocks */ 705 ret = cow_file_range(inode, async_cow->locked_page, 706 async_extent->start, 707 async_extent->start + 708 async_extent->ram_size - 1, 709 &page_started, &nr_written, 0); 710 711 /* JDM XXX */ 712 713 /* 714 * if page_started, cow_file_range inserted an 715 * inline extent and took care of all the unlocking 716 * and IO for us. Otherwise, we need to submit 717 * all those pages down to the drive. 
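 *
 * (Illustrative note, not part of the original comment: in that case
 * extent_write_locked_range() below pushes the still-locked pages
 * through the regular writepage path with WB_SYNC_ALL, since they
 * never made it into a compressed bio.)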
718 */ 719 if (!page_started && !ret) 720 extent_write_locked_range(io_tree, 721 inode, async_extent->start, 722 async_extent->start + 723 async_extent->ram_size - 1, 724 btrfs_get_extent, 725 WB_SYNC_ALL); 726 else if (ret) 727 unlock_page(async_cow->locked_page); 728 kfree(async_extent); 729 cond_resched(); 730 continue; 731 } 732 733 lock_extent(io_tree, async_extent->start, 734 async_extent->start + async_extent->ram_size - 1); 735 736 ret = btrfs_reserve_extent(root, 737 async_extent->compressed_size, 738 async_extent->compressed_size, 739 0, alloc_hint, &ins, 1, 1); 740 if (ret) { 741 free_async_extent_pages(async_extent); 742 743 if (ret == -ENOSPC) { 744 unlock_extent(io_tree, async_extent->start, 745 async_extent->start + 746 async_extent->ram_size - 1); 747 748 /* 749 * we need to redirty the pages if we decide to 750 * fallback to uncompressed IO, otherwise we 751 * will not submit these pages down to lower 752 * layers. 753 */ 754 extent_range_redirty_for_io(inode, 755 async_extent->start, 756 async_extent->start + 757 async_extent->ram_size - 1); 758 759 goto retry; 760 } 761 goto out_free; 762 } 763 /* 764 * here we're doing allocation and writeback of the 765 * compressed pages 766 */ 767 btrfs_drop_extent_cache(inode, async_extent->start, 768 async_extent->start + 769 async_extent->ram_size - 1, 0); 770 771 em = alloc_extent_map(); 772 if (!em) { 773 ret = -ENOMEM; 774 goto out_free_reserve; 775 } 776 em->start = async_extent->start; 777 em->len = async_extent->ram_size; 778 em->orig_start = em->start; 779 em->mod_start = em->start; 780 em->mod_len = em->len; 781 782 em->block_start = ins.objectid; 783 em->block_len = ins.offset; 784 em->orig_block_len = ins.offset; 785 em->ram_bytes = async_extent->ram_size; 786 em->bdev = root->fs_info->fs_devices->latest_bdev; 787 em->compress_type = async_extent->compress_type; 788 set_bit(EXTENT_FLAG_PINNED, &em->flags); 789 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 790 em->generation = -1; 791 792 while (1) { 793 write_lock(&em_tree->lock); 794 ret = add_extent_mapping(em_tree, em, 1); 795 write_unlock(&em_tree->lock); 796 if (ret != -EEXIST) { 797 free_extent_map(em); 798 break; 799 } 800 btrfs_drop_extent_cache(inode, async_extent->start, 801 async_extent->start + 802 async_extent->ram_size - 1, 0); 803 } 804 805 if (ret) 806 goto out_free_reserve; 807 808 ret = btrfs_add_ordered_extent_compress(inode, 809 async_extent->start, 810 ins.objectid, 811 async_extent->ram_size, 812 ins.offset, 813 BTRFS_ORDERED_COMPRESSED, 814 async_extent->compress_type); 815 if (ret) { 816 btrfs_drop_extent_cache(inode, async_extent->start, 817 async_extent->start + 818 async_extent->ram_size - 1, 0); 819 goto out_free_reserve; 820 } 821 822 /* 823 * clear dirty, set writeback and unlock the pages. 
824 */ 825 extent_clear_unlock_delalloc(inode, async_extent->start, 826 async_extent->start + 827 async_extent->ram_size - 1, 828 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 829 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 830 PAGE_SET_WRITEBACK); 831 ret = btrfs_submit_compressed_write(inode, 832 async_extent->start, 833 async_extent->ram_size, 834 ins.objectid, 835 ins.offset, async_extent->pages, 836 async_extent->nr_pages); 837 if (ret) { 838 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 839 struct page *p = async_extent->pages[0]; 840 const u64 start = async_extent->start; 841 const u64 end = start + async_extent->ram_size - 1; 842 843 p->mapping = inode->i_mapping; 844 tree->ops->writepage_end_io_hook(p, start, end, 845 NULL, 0); 846 p->mapping = NULL; 847 extent_clear_unlock_delalloc(inode, start, end, NULL, 0, 848 PAGE_END_WRITEBACK | 849 PAGE_SET_ERROR); 850 free_async_extent_pages(async_extent); 851 } 852 alloc_hint = ins.objectid + ins.offset; 853 kfree(async_extent); 854 cond_resched(); 855 } 856 return; 857 out_free_reserve: 858 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 859 out_free: 860 extent_clear_unlock_delalloc(inode, async_extent->start, 861 async_extent->start + 862 async_extent->ram_size - 1, 863 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 864 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 865 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 866 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | 867 PAGE_SET_ERROR); 868 free_async_extent_pages(async_extent); 869 kfree(async_extent); 870 goto again; 871 } 872 873 static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 874 u64 num_bytes) 875 { 876 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 877 struct extent_map *em; 878 u64 alloc_hint = 0; 879 880 read_lock(&em_tree->lock); 881 em = search_extent_mapping(em_tree, start, num_bytes); 882 if (em) { 883 /* 884 * if block start isn't an actual block number then find the 885 * first block in this inode and use that as a hint. If that 886 * block is also bogus then just don't worry about it. 887 */ 888 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 889 free_extent_map(em); 890 em = search_extent_mapping(em_tree, 0, 0); 891 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) 892 alloc_hint = em->block_start; 893 if (em) 894 free_extent_map(em); 895 } else { 896 alloc_hint = em->block_start; 897 free_extent_map(em); 898 } 899 } 900 read_unlock(&em_tree->lock); 901 902 return alloc_hint; 903 } 904 905 /* 906 * when extent_io.c finds a delayed allocation range in the file, 907 * the call backs end up in this code. The basic idea is to 908 * allocate extents on disk for the range, and create ordered data structs 909 * in ram to track those extents. 910 * 911 * locked_page is the page that writepage had locked already. We use 912 * it to make sure we don't do extra locks or unlocks. 913 * 914 * *page_started is set to one if we unlock locked_page and do everything 915 * required to start IO on it. It may be clean and already done with 916 * IO when we return. 
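 *
 * (Illustrative example, not part of the original comment: for a 1M
 * dirty range the loop below typically reserves a single 1M extent,
 * inserts a pinned extent_map and an ordered extent for it, and then
 * clears the delalloc bits on the range as each chunk is allocated.)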
917 */ 918 static noinline int cow_file_range(struct inode *inode, 919 struct page *locked_page, 920 u64 start, u64 end, int *page_started, 921 unsigned long *nr_written, 922 int unlock) 923 { 924 struct btrfs_root *root = BTRFS_I(inode)->root; 925 u64 alloc_hint = 0; 926 u64 num_bytes; 927 unsigned long ram_size; 928 u64 disk_num_bytes; 929 u64 cur_alloc_size; 930 u64 blocksize = root->sectorsize; 931 struct btrfs_key ins; 932 struct extent_map *em; 933 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 934 int ret = 0; 935 936 if (btrfs_is_free_space_inode(inode)) { 937 WARN_ON_ONCE(1); 938 ret = -EINVAL; 939 goto out_unlock; 940 } 941 942 num_bytes = ALIGN(end - start + 1, blocksize); 943 num_bytes = max(blocksize, num_bytes); 944 disk_num_bytes = num_bytes; 945 946 /* if this is a small write inside eof, kick off defrag */ 947 if (num_bytes < 64 * 1024 && 948 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 949 btrfs_add_inode_defrag(NULL, inode); 950 951 if (start == 0) { 952 /* lets try to make an inline extent */ 953 ret = cow_file_range_inline(root, inode, start, end, 0, 0, 954 NULL); 955 if (ret == 0) { 956 extent_clear_unlock_delalloc(inode, start, end, NULL, 957 EXTENT_LOCKED | EXTENT_DELALLOC | 958 EXTENT_DEFRAG, PAGE_UNLOCK | 959 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 960 PAGE_END_WRITEBACK); 961 962 *nr_written = *nr_written + 963 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 964 *page_started = 1; 965 goto out; 966 } else if (ret < 0) { 967 goto out_unlock; 968 } 969 } 970 971 BUG_ON(disk_num_bytes > 972 btrfs_super_total_bytes(root->fs_info->super_copy)); 973 974 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); 975 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 976 977 while (disk_num_bytes > 0) { 978 unsigned long op; 979 980 cur_alloc_size = disk_num_bytes; 981 ret = btrfs_reserve_extent(root, cur_alloc_size, 982 root->sectorsize, 0, alloc_hint, 983 &ins, 1, 1); 984 if (ret < 0) 985 goto out_unlock; 986 987 em = alloc_extent_map(); 988 if (!em) { 989 ret = -ENOMEM; 990 goto out_reserve; 991 } 992 em->start = start; 993 em->orig_start = em->start; 994 ram_size = ins.offset; 995 em->len = ins.offset; 996 em->mod_start = em->start; 997 em->mod_len = em->len; 998 999 em->block_start = ins.objectid; 1000 em->block_len = ins.offset; 1001 em->orig_block_len = ins.offset; 1002 em->ram_bytes = ram_size; 1003 em->bdev = root->fs_info->fs_devices->latest_bdev; 1004 set_bit(EXTENT_FLAG_PINNED, &em->flags); 1005 em->generation = -1; 1006 1007 while (1) { 1008 write_lock(&em_tree->lock); 1009 ret = add_extent_mapping(em_tree, em, 1); 1010 write_unlock(&em_tree->lock); 1011 if (ret != -EEXIST) { 1012 free_extent_map(em); 1013 break; 1014 } 1015 btrfs_drop_extent_cache(inode, start, 1016 start + ram_size - 1, 0); 1017 } 1018 if (ret) 1019 goto out_reserve; 1020 1021 cur_alloc_size = ins.offset; 1022 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 1023 ram_size, cur_alloc_size, 0); 1024 if (ret) 1025 goto out_drop_extent_cache; 1026 1027 if (root->root_key.objectid == 1028 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1029 ret = btrfs_reloc_clone_csums(inode, start, 1030 cur_alloc_size); 1031 if (ret) 1032 goto out_drop_extent_cache; 1033 } 1034 1035 if (disk_num_bytes < cur_alloc_size) 1036 break; 1037 1038 /* we're not doing compressed IO, don't unlock the first 1039 * page (which the caller expects to stay locked), don't 1040 * clear any dirty bits and don't set any writeback bits 1041 * 1042 * Do set the Private2 bit so we know 
this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(root, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

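		/*
		 * Illustrative note (not in the original source): the work
		 * item queued below runs in three phases on the delalloc
		 * workqueue: async_cow_start() compresses the range,
		 * async_cow_submit() sends the resulting extents to disk in
		 * queue order, and async_cow_free() drops the inode
		 * reference and frees the struct async_cow.
		 */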
		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_work(root->fs_info->delalloc_workers,
				 &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * when the nocow writeback path calls back. This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file as
 * required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
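	/*
	 * Illustrative summary (not in the original source): the loop below
	 * walks the file extent items covering [start, end]; for each one it
	 * decides whether we can write into the existing extent in place
	 * (nocow) or must fall back to cow_file_range() for that part of
	 * the range.
	 */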
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall into common COW way.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshoting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
1371 */ 1372 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 1373 goto out_check; 1374 nocow = 1; 1375 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1376 extent_end = found_key.offset + 1377 btrfs_file_extent_inline_len(leaf, 1378 path->slots[0], fi); 1379 extent_end = ALIGN(extent_end, root->sectorsize); 1380 } else { 1381 BUG_ON(1); 1382 } 1383 out_check: 1384 if (extent_end <= start) { 1385 path->slots[0]++; 1386 if (!nolock && nocow) 1387 btrfs_end_write_no_snapshoting(root); 1388 goto next_slot; 1389 } 1390 if (!nocow) { 1391 if (cow_start == (u64)-1) 1392 cow_start = cur_offset; 1393 cur_offset = extent_end; 1394 if (cur_offset > end) 1395 break; 1396 path->slots[0]++; 1397 goto next_slot; 1398 } 1399 1400 btrfs_release_path(path); 1401 if (cow_start != (u64)-1) { 1402 ret = cow_file_range(inode, locked_page, 1403 cow_start, found_key.offset - 1, 1404 page_started, nr_written, 1); 1405 if (ret) { 1406 if (!nolock && nocow) 1407 btrfs_end_write_no_snapshoting(root); 1408 goto error; 1409 } 1410 cow_start = (u64)-1; 1411 } 1412 1413 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1414 struct extent_map *em; 1415 struct extent_map_tree *em_tree; 1416 em_tree = &BTRFS_I(inode)->extent_tree; 1417 em = alloc_extent_map(); 1418 BUG_ON(!em); /* -ENOMEM */ 1419 em->start = cur_offset; 1420 em->orig_start = found_key.offset - extent_offset; 1421 em->len = num_bytes; 1422 em->block_len = num_bytes; 1423 em->block_start = disk_bytenr; 1424 em->orig_block_len = disk_num_bytes; 1425 em->ram_bytes = ram_bytes; 1426 em->bdev = root->fs_info->fs_devices->latest_bdev; 1427 em->mod_start = em->start; 1428 em->mod_len = em->len; 1429 set_bit(EXTENT_FLAG_PINNED, &em->flags); 1430 set_bit(EXTENT_FLAG_FILLING, &em->flags); 1431 em->generation = -1; 1432 while (1) { 1433 write_lock(&em_tree->lock); 1434 ret = add_extent_mapping(em_tree, em, 1); 1435 write_unlock(&em_tree->lock); 1436 if (ret != -EEXIST) { 1437 free_extent_map(em); 1438 break; 1439 } 1440 btrfs_drop_extent_cache(inode, em->start, 1441 em->start + em->len - 1, 0); 1442 } 1443 type = BTRFS_ORDERED_PREALLOC; 1444 } else { 1445 type = BTRFS_ORDERED_NOCOW; 1446 } 1447 1448 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, 1449 num_bytes, num_bytes, type); 1450 BUG_ON(ret); /* -ENOMEM */ 1451 1452 if (root->root_key.objectid == 1453 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1454 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1455 num_bytes); 1456 if (ret) { 1457 if (!nolock && nocow) 1458 btrfs_end_write_no_snapshoting(root); 1459 goto error; 1460 } 1461 } 1462 1463 extent_clear_unlock_delalloc(inode, cur_offset, 1464 cur_offset + num_bytes - 1, 1465 locked_page, EXTENT_LOCKED | 1466 EXTENT_DELALLOC, PAGE_UNLOCK | 1467 PAGE_SET_PRIVATE2); 1468 if (!nolock && nocow) 1469 btrfs_end_write_no_snapshoting(root); 1470 cur_offset = extent_end; 1471 if (cur_offset > end) 1472 break; 1473 } 1474 btrfs_release_path(path); 1475 1476 if (cur_offset <= end && cow_start == (u64)-1) { 1477 cow_start = cur_offset; 1478 cur_offset = end; 1479 } 1480 1481 if (cow_start != (u64)-1) { 1482 ret = cow_file_range(inode, locked_page, cow_start, end, 1483 page_started, nr_written, 1); 1484 if (ret) 1485 goto error; 1486 } 1487 1488 error: 1489 err = btrfs_end_transaction(trans, root); 1490 if (!ret) 1491 ret = err; 1492 1493 if (ret && cur_offset < end) 1494 extent_clear_unlock_delalloc(inode, cur_offset, end, 1495 locked_page, EXTENT_LOCKED | 1496 EXTENT_DELALLOC | EXTENT_DEFRAG | 1497 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 1498 PAGE_CLEAR_DIRTY | 
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
		return 0;

	/*
	 * @defrag_bytes is a hint value, no spinlock held here,
	 * if it is not zero, it means the file is defragging.
	 * Force cow if given extent needs to be defragged.
	 */
	if (BTRFS_I(inode)->defrag_bytes &&
	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			   EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	int force_cow = need_force_cow(inode, start, end);

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode)) {
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u64 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_extent_hook, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					BTRFS_MAX_EXTENT_SIZE);
		new_size = split - orig->start;
		num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					 BTRFS_MAX_EXTENT_SIZE);
		if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
			      BTRFS_MAX_EXTENT_SIZE) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
1592 */ 1593 static void btrfs_merge_extent_hook(struct inode *inode, 1594 struct extent_state *new, 1595 struct extent_state *other) 1596 { 1597 u64 new_size, old_size; 1598 u64 num_extents; 1599 1600 /* not delalloc, ignore it */ 1601 if (!(other->state & EXTENT_DELALLOC)) 1602 return; 1603 1604 if (new->start > other->start) 1605 new_size = new->end - other->start + 1; 1606 else 1607 new_size = other->end - new->start + 1; 1608 1609 /* we're not bigger than the max, unreserve the space and go */ 1610 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1611 spin_lock(&BTRFS_I(inode)->lock); 1612 BTRFS_I(inode)->outstanding_extents--; 1613 spin_unlock(&BTRFS_I(inode)->lock); 1614 return; 1615 } 1616 1617 /* 1618 * We have to add up either side to figure out how many extents were 1619 * accounted for before we merged into one big extent. If the number of 1620 * extents we accounted for is <= the amount we need for the new range 1621 * then we can return, otherwise drop. Think of it like this 1622 * 1623 * [ 4k][MAX_SIZE] 1624 * 1625 * So we've grown the extent by a MAX_SIZE extent, this would mean we 1626 * need 2 outstanding extents, on one side we have 1 and the other side 1627 * we have 1 so they are == and we can return. But in this case 1628 * 1629 * [MAX_SIZE+4k][MAX_SIZE+4k] 1630 * 1631 * Each range on their own accounts for 2 extents, but merged together 1632 * they are only 3 extents worth of accounting, so we need to drop in 1633 * this case. 1634 */ 1635 old_size = other->end - other->start + 1; 1636 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1637 BTRFS_MAX_EXTENT_SIZE); 1638 old_size = new->end - new->start + 1; 1639 num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1640 BTRFS_MAX_EXTENT_SIZE); 1641 1642 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1643 BTRFS_MAX_EXTENT_SIZE) >= num_extents) 1644 return; 1645 1646 spin_lock(&BTRFS_I(inode)->lock); 1647 BTRFS_I(inode)->outstanding_extents--; 1648 spin_unlock(&BTRFS_I(inode)->lock); 1649 } 1650 1651 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 1652 struct inode *inode) 1653 { 1654 spin_lock(&root->delalloc_lock); 1655 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1656 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1657 &root->delalloc_inodes); 1658 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1659 &BTRFS_I(inode)->runtime_flags); 1660 root->nr_delalloc_inodes++; 1661 if (root->nr_delalloc_inodes == 1) { 1662 spin_lock(&root->fs_info->delalloc_root_lock); 1663 BUG_ON(!list_empty(&root->delalloc_root)); 1664 list_add_tail(&root->delalloc_root, 1665 &root->fs_info->delalloc_roots); 1666 spin_unlock(&root->fs_info->delalloc_root_lock); 1667 } 1668 } 1669 spin_unlock(&root->delalloc_lock); 1670 } 1671 1672 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 1673 struct inode *inode) 1674 { 1675 spin_lock(&root->delalloc_lock); 1676 if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1677 list_del_init(&BTRFS_I(inode)->delalloc_inodes); 1678 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1679 &BTRFS_I(inode)->runtime_flags); 1680 root->nr_delalloc_inodes--; 1681 if (!root->nr_delalloc_inodes) { 1682 spin_lock(&root->fs_info->delalloc_root_lock); 1683 BUG_ON(list_empty(&root->delalloc_root)); 1684 list_del_init(&root->delalloc_root); 1685 spin_unlock(&root->fs_info->delalloc_root_lock); 1686 } 1687 } 1688 spin_unlock(&root->delalloc_lock); 1689 } 1690 1691 /* 1692 * extent_io.c set_bit_hook, used to track delayed allocation 1693 * bytes in this file, and to maintain the list of inodes 
that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned *bits)
{

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* For sanity tests */
		if (btrfs_test_is_dummy_root(root))
			return;

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state,
				 unsigned *bits)
{
	u64 len = state->end + 1 - state->start;
	u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
				    BTRFS_MAX_EXTENT_SIZE);
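	/*
	 * Illustrative example (not in the original source, and assuming
	 * BTRFS_MAX_EXTENT_SIZE is 128M): clearing delalloc on a 384M range
	 * gives num_extents = 3, so three outstanding extents worth of
	 * metadata reservation are dropped below.
	 */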

	spin_lock(&BTRFS_I(inode)->lock);
	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
		BTRFS_I(inode)->defrag_bytes -= len;
	spin_unlock(&BTRFS_I(inode)->lock);

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents -= num_extents;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_DO_ACCOUNTING &&
		    root != root->fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len);

		/* For sanity tests. */
		if (btrfs_test_is_dummy_root(root))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list && !(state->state & EXTENT_NORESERVE))
			btrfs_free_reserved_data_space_noquota(inode,
					state->start, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}

/*
 * extent_io.c submission hook.
This does the right thing for csum calculation 1874 * on write, or reading the csums from the tree before a read 1875 */ 1876 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 1877 int mirror_num, unsigned long bio_flags, 1878 u64 bio_offset) 1879 { 1880 struct btrfs_root *root = BTRFS_I(inode)->root; 1881 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; 1882 int ret = 0; 1883 int skip_sum; 1884 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 1885 1886 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1887 1888 if (btrfs_is_free_space_inode(inode)) 1889 metadata = BTRFS_WQ_ENDIO_FREE_SPACE; 1890 1891 if (!(rw & REQ_WRITE)) { 1892 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); 1893 if (ret) 1894 goto out; 1895 1896 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1897 ret = btrfs_submit_compressed_read(inode, bio, 1898 mirror_num, 1899 bio_flags); 1900 goto out; 1901 } else if (!skip_sum) { 1902 ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); 1903 if (ret) 1904 goto out; 1905 } 1906 goto mapit; 1907 } else if (async && !skip_sum) { 1908 /* csum items have already been cloned */ 1909 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 1910 goto mapit; 1911 /* we're doing a write, do the async checksumming */ 1912 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, 1913 inode, rw, bio, mirror_num, 1914 bio_flags, bio_offset, 1915 __btrfs_submit_bio_start, 1916 __btrfs_submit_bio_done); 1917 goto out; 1918 } else if (!skip_sum) { 1919 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1920 if (ret) 1921 goto out; 1922 } 1923 1924 mapit: 1925 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 1926 1927 out: 1928 if (ret < 0) { 1929 bio->bi_error = ret; 1930 bio_endio(bio); 1931 } 1932 return ret; 1933 } 1934 1935 /* 1936 * given a list of ordered sums record them in the inode. This happens 1937 * at IO completion time based on sums calculated at bio submission time. 
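 * The sums end up as checksum items in the fs-wide csum_root (keyed by
 * disk bytenr), not in the file's own subvolume tree.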
1938 */ 1939 static noinline int add_pending_csums(struct btrfs_trans_handle *trans, 1940 struct inode *inode, u64 file_offset, 1941 struct list_head *list) 1942 { 1943 struct btrfs_ordered_sum *sum; 1944 1945 list_for_each_entry(sum, list, list) { 1946 trans->adding_csums = 1; 1947 btrfs_csum_file_blocks(trans, 1948 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1949 trans->adding_csums = 0; 1950 } 1951 return 0; 1952 } 1953 1954 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1955 struct extent_state **cached_state) 1956 { 1957 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); 1958 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1959 cached_state, GFP_NOFS); 1960 } 1961 1962 /* see btrfs_writepage_start_hook for details on why this is required */ 1963 struct btrfs_writepage_fixup { 1964 struct page *page; 1965 struct btrfs_work work; 1966 }; 1967 1968 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 1969 { 1970 struct btrfs_writepage_fixup *fixup; 1971 struct btrfs_ordered_extent *ordered; 1972 struct extent_state *cached_state = NULL; 1973 struct page *page; 1974 struct inode *inode; 1975 u64 page_start; 1976 u64 page_end; 1977 int ret; 1978 1979 fixup = container_of(work, struct btrfs_writepage_fixup, work); 1980 page = fixup->page; 1981 again: 1982 lock_page(page); 1983 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 1984 ClearPageChecked(page); 1985 goto out_page; 1986 } 1987 1988 inode = page->mapping->host; 1989 page_start = page_offset(page); 1990 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1991 1992 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, 1993 &cached_state); 1994 1995 /* already ordered? We're done */ 1996 if (PagePrivate2(page)) 1997 goto out; 1998 1999 ordered = btrfs_lookup_ordered_extent(inode, page_start); 2000 if (ordered) { 2001 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 2002 page_end, &cached_state, GFP_NOFS); 2003 unlock_page(page); 2004 btrfs_start_ordered_extent(inode, ordered, 1); 2005 btrfs_put_ordered_extent(ordered); 2006 goto again; 2007 } 2008 2009 ret = btrfs_delalloc_reserve_space(inode, page_start, 2010 PAGE_CACHE_SIZE); 2011 if (ret) { 2012 mapping_set_error(page->mapping, ret); 2013 end_extent_writepage(page, ret, page_start, page_end); 2014 ClearPageChecked(page); 2015 goto out; 2016 } 2017 2018 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 2019 ClearPageChecked(page); 2020 set_page_dirty(page); 2021 out: 2022 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, 2023 &cached_state, GFP_NOFS); 2024 out_page: 2025 unlock_page(page); 2026 page_cache_release(page); 2027 kfree(fixup); 2028 } 2029 2030 /* 2031 * There are a few paths in the higher layers of the kernel that directly 2032 * set the page dirty bit without asking the filesystem if it is a 2033 * good idea. This causes problems because we want to make sure COW 2034 * properly happens and the data=ordered rules are followed. 2035 * 2036 * In our case any range that doesn't have the ORDERED bit set 2037 * hasn't been properly setup for IO. We kick off an async process 2038 * to fix it up. The async helper will wait for ordered extents, set 2039 * the delalloc bit and make it safe to write the page. 
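 * The helper is btrfs_writepage_fixup_worker() above: it re-locks the
 * page, reserves delalloc space for it and re-dirties it so a later
 * writeback pass can process it normally.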
2040 */ 2041 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) 2042 { 2043 struct inode *inode = page->mapping->host; 2044 struct btrfs_writepage_fixup *fixup; 2045 struct btrfs_root *root = BTRFS_I(inode)->root; 2046 2047 /* this page is properly in the ordered list */ 2048 if (TestClearPagePrivate2(page)) 2049 return 0; 2050 2051 if (PageChecked(page)) 2052 return -EAGAIN; 2053 2054 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2055 if (!fixup) 2056 return -EAGAIN; 2057 2058 SetPageChecked(page); 2059 page_cache_get(page); 2060 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2061 btrfs_writepage_fixup_worker, NULL, NULL); 2062 fixup->page = page; 2063 btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); 2064 return -EBUSY; 2065 } 2066 2067 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2068 struct inode *inode, u64 file_pos, 2069 u64 disk_bytenr, u64 disk_num_bytes, 2070 u64 num_bytes, u64 ram_bytes, 2071 u8 compression, u8 encryption, 2072 u16 other_encoding, int extent_type) 2073 { 2074 struct btrfs_root *root = BTRFS_I(inode)->root; 2075 struct btrfs_file_extent_item *fi; 2076 struct btrfs_path *path; 2077 struct extent_buffer *leaf; 2078 struct btrfs_key ins; 2079 int extent_inserted = 0; 2080 int ret; 2081 2082 path = btrfs_alloc_path(); 2083 if (!path) 2084 return -ENOMEM; 2085 2086 /* 2087 * we may be replacing one extent in the tree with another. 2088 * The new extent is pinned in the extent map, and we don't want 2089 * to drop it from the cache until it is completely in the btree. 2090 * 2091 * So, tell btrfs_drop_extents to leave this extent in the cache. 2092 * the caller is expected to unpin it and allow it to be merged 2093 * with the others. 2094 */ 2095 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, 2096 file_pos + num_bytes, NULL, 0, 2097 1, sizeof(*fi), &extent_inserted); 2098 if (ret) 2099 goto out; 2100 2101 if (!extent_inserted) { 2102 ins.objectid = btrfs_ino(inode); 2103 ins.offset = file_pos; 2104 ins.type = BTRFS_EXTENT_DATA_KEY; 2105 2106 path->leave_spinning = 1; 2107 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2108 sizeof(*fi)); 2109 if (ret) 2110 goto out; 2111 } 2112 leaf = path->nodes[0]; 2113 fi = btrfs_item_ptr(leaf, path->slots[0], 2114 struct btrfs_file_extent_item); 2115 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2116 btrfs_set_file_extent_type(leaf, fi, extent_type); 2117 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 2118 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); 2119 btrfs_set_file_extent_offset(leaf, fi, 0); 2120 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2121 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); 2122 btrfs_set_file_extent_compression(leaf, fi, compression); 2123 btrfs_set_file_extent_encryption(leaf, fi, encryption); 2124 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); 2125 2126 btrfs_mark_buffer_dirty(leaf); 2127 btrfs_release_path(path); 2128 2129 inode_add_bytes(inode, num_bytes); 2130 2131 ins.objectid = disk_bytenr; 2132 ins.offset = disk_num_bytes; 2133 ins.type = BTRFS_EXTENT_ITEM_KEY; 2134 ret = btrfs_alloc_reserved_file_extent(trans, root, 2135 root->root_key.objectid, 2136 btrfs_ino(inode), file_pos, 2137 ram_bytes, &ins); 2138 /* 2139 * Release the reserved range from inode dirty range map, as it is 2140 * already moved into delayed_ref_head 2141 */ 2142 btrfs_qgroup_release_data(inode, file_pos, ram_bytes); 2143 out: 2144 btrfs_free_path(path); 2145 2146 
return ret; 2147 } 2148 2149 /* snapshot-aware defrag */ 2150 struct sa_defrag_extent_backref { 2151 struct rb_node node; 2152 struct old_sa_defrag_extent *old; 2153 u64 root_id; 2154 u64 inum; 2155 u64 file_pos; 2156 u64 extent_offset; 2157 u64 num_bytes; 2158 u64 generation; 2159 }; 2160 2161 struct old_sa_defrag_extent { 2162 struct list_head list; 2163 struct new_sa_defrag_extent *new; 2164 2165 u64 extent_offset; 2166 u64 bytenr; 2167 u64 offset; 2168 u64 len; 2169 int count; 2170 }; 2171 2172 struct new_sa_defrag_extent { 2173 struct rb_root root; 2174 struct list_head head; 2175 struct btrfs_path *path; 2176 struct inode *inode; 2177 u64 file_pos; 2178 u64 len; 2179 u64 bytenr; 2180 u64 disk_len; 2181 u8 compress_type; 2182 }; 2183 2184 static int backref_comp(struct sa_defrag_extent_backref *b1, 2185 struct sa_defrag_extent_backref *b2) 2186 { 2187 if (b1->root_id < b2->root_id) 2188 return -1; 2189 else if (b1->root_id > b2->root_id) 2190 return 1; 2191 2192 if (b1->inum < b2->inum) 2193 return -1; 2194 else if (b1->inum > b2->inum) 2195 return 1; 2196 2197 if (b1->file_pos < b2->file_pos) 2198 return -1; 2199 else if (b1->file_pos > b2->file_pos) 2200 return 1; 2201 2202 /* 2203 * [------------------------------] ===> (a range of space) 2204 * |<--->| |<---->| =============> (fs/file tree A) 2205 * |<---------------------------->| ===> (fs/file tree B) 2206 * 2207 * A range of space can refer to two file extents in one tree while 2208 * refer to only one file extent in another tree. 2209 * 2210 * So we may process a disk offset more than one time(two extents in A) 2211 * and locate at the same extent(one extent in B), then insert two same 2212 * backrefs(both refer to the extent in B). 2213 */ 2214 return 0; 2215 } 2216 2217 static void backref_insert(struct rb_root *root, 2218 struct sa_defrag_extent_backref *backref) 2219 { 2220 struct rb_node **p = &root->rb_node; 2221 struct rb_node *parent = NULL; 2222 struct sa_defrag_extent_backref *entry; 2223 int ret; 2224 2225 while (*p) { 2226 parent = *p; 2227 entry = rb_entry(parent, struct sa_defrag_extent_backref, node); 2228 2229 ret = backref_comp(backref, entry); 2230 if (ret < 0) 2231 p = &(*p)->rb_left; 2232 else 2233 p = &(*p)->rb_right; 2234 } 2235 2236 rb_link_node(&backref->node, parent, p); 2237 rb_insert_color(&backref->node, root); 2238 } 2239 2240 /* 2241 * Note the backref might has changed, and in this case we just return 0. 
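 * record_one_backref() is the iterate_inodes_from_logical() callback used
 * by snapshot-aware defrag: for each (root, inode, offset) that still
 * references old->bytenr it queues a sa_defrag_extent_backref on new->root.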
2242 */ 2243 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, 2244 void *ctx) 2245 { 2246 struct btrfs_file_extent_item *extent; 2247 struct btrfs_fs_info *fs_info; 2248 struct old_sa_defrag_extent *old = ctx; 2249 struct new_sa_defrag_extent *new = old->new; 2250 struct btrfs_path *path = new->path; 2251 struct btrfs_key key; 2252 struct btrfs_root *root; 2253 struct sa_defrag_extent_backref *backref; 2254 struct extent_buffer *leaf; 2255 struct inode *inode = new->inode; 2256 int slot; 2257 int ret; 2258 u64 extent_offset; 2259 u64 num_bytes; 2260 2261 if (BTRFS_I(inode)->root->root_key.objectid == root_id && 2262 inum == btrfs_ino(inode)) 2263 return 0; 2264 2265 key.objectid = root_id; 2266 key.type = BTRFS_ROOT_ITEM_KEY; 2267 key.offset = (u64)-1; 2268 2269 fs_info = BTRFS_I(inode)->root->fs_info; 2270 root = btrfs_read_fs_root_no_name(fs_info, &key); 2271 if (IS_ERR(root)) { 2272 if (PTR_ERR(root) == -ENOENT) 2273 return 0; 2274 WARN_ON(1); 2275 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", 2276 inum, offset, root_id); 2277 return PTR_ERR(root); 2278 } 2279 2280 key.objectid = inum; 2281 key.type = BTRFS_EXTENT_DATA_KEY; 2282 if (offset > (u64)-1 << 32) 2283 key.offset = 0; 2284 else 2285 key.offset = offset; 2286 2287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2288 if (WARN_ON(ret < 0)) 2289 return ret; 2290 ret = 0; 2291 2292 while (1) { 2293 cond_resched(); 2294 2295 leaf = path->nodes[0]; 2296 slot = path->slots[0]; 2297 2298 if (slot >= btrfs_header_nritems(leaf)) { 2299 ret = btrfs_next_leaf(root, path); 2300 if (ret < 0) { 2301 goto out; 2302 } else if (ret > 0) { 2303 ret = 0; 2304 goto out; 2305 } 2306 continue; 2307 } 2308 2309 path->slots[0]++; 2310 2311 btrfs_item_key_to_cpu(leaf, &key, slot); 2312 2313 if (key.objectid > inum) 2314 goto out; 2315 2316 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) 2317 continue; 2318 2319 extent = btrfs_item_ptr(leaf, slot, 2320 struct btrfs_file_extent_item); 2321 2322 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) 2323 continue; 2324 2325 /* 2326 * 'offset' refers to the exact key.offset, 2327 * NOT the 'offset' field in btrfs_extent_data_ref, ie. 2328 * (key.offset - extent_offset). 
2329 */ 2330 if (key.offset != offset) 2331 continue; 2332 2333 extent_offset = btrfs_file_extent_offset(leaf, extent); 2334 num_bytes = btrfs_file_extent_num_bytes(leaf, extent); 2335 2336 if (extent_offset >= old->extent_offset + old->offset + 2337 old->len || extent_offset + num_bytes <= 2338 old->extent_offset + old->offset) 2339 continue; 2340 break; 2341 } 2342 2343 backref = kmalloc(sizeof(*backref), GFP_NOFS); 2344 if (!backref) { 2345 ret = -ENOENT; 2346 goto out; 2347 } 2348 2349 backref->root_id = root_id; 2350 backref->inum = inum; 2351 backref->file_pos = offset; 2352 backref->num_bytes = num_bytes; 2353 backref->extent_offset = extent_offset; 2354 backref->generation = btrfs_file_extent_generation(leaf, extent); 2355 backref->old = old; 2356 backref_insert(&new->root, backref); 2357 old->count++; 2358 out: 2359 btrfs_release_path(path); 2360 WARN_ON(ret); 2361 return ret; 2362 } 2363 2364 static noinline bool record_extent_backrefs(struct btrfs_path *path, 2365 struct new_sa_defrag_extent *new) 2366 { 2367 struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info; 2368 struct old_sa_defrag_extent *old, *tmp; 2369 int ret; 2370 2371 new->path = path; 2372 2373 list_for_each_entry_safe(old, tmp, &new->head, list) { 2374 ret = iterate_inodes_from_logical(old->bytenr + 2375 old->extent_offset, fs_info, 2376 path, record_one_backref, 2377 old); 2378 if (ret < 0 && ret != -ENOENT) 2379 return false; 2380 2381 /* no backref to be processed for this extent */ 2382 if (!old->count) { 2383 list_del(&old->list); 2384 kfree(old); 2385 } 2386 } 2387 2388 if (list_empty(&new->head)) 2389 return false; 2390 2391 return true; 2392 } 2393 2394 static int relink_is_mergable(struct extent_buffer *leaf, 2395 struct btrfs_file_extent_item *fi, 2396 struct new_sa_defrag_extent *new) 2397 { 2398 if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) 2399 return 0; 2400 2401 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2402 return 0; 2403 2404 if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) 2405 return 0; 2406 2407 if (btrfs_file_extent_encryption(leaf, fi) || 2408 btrfs_file_extent_other_encoding(leaf, fi)) 2409 return 0; 2410 2411 return 1; 2412 } 2413 2414 /* 2415 * Note the backref might has changed, and in this case we just return 0. 
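 * relink_extent_backref() re-points one recorded file extent item at the
 * new (defragged) extent, merging with the previously relinked item when
 * the two are contiguous in the same file.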
2416 */ 2417 static noinline int relink_extent_backref(struct btrfs_path *path, 2418 struct sa_defrag_extent_backref *prev, 2419 struct sa_defrag_extent_backref *backref) 2420 { 2421 struct btrfs_file_extent_item *extent; 2422 struct btrfs_file_extent_item *item; 2423 struct btrfs_ordered_extent *ordered; 2424 struct btrfs_trans_handle *trans; 2425 struct btrfs_fs_info *fs_info; 2426 struct btrfs_root *root; 2427 struct btrfs_key key; 2428 struct extent_buffer *leaf; 2429 struct old_sa_defrag_extent *old = backref->old; 2430 struct new_sa_defrag_extent *new = old->new; 2431 struct inode *src_inode = new->inode; 2432 struct inode *inode; 2433 struct extent_state *cached = NULL; 2434 int ret = 0; 2435 u64 start; 2436 u64 len; 2437 u64 lock_start; 2438 u64 lock_end; 2439 bool merge = false; 2440 int index; 2441 2442 if (prev && prev->root_id == backref->root_id && 2443 prev->inum == backref->inum && 2444 prev->file_pos + prev->num_bytes == backref->file_pos) 2445 merge = true; 2446 2447 /* step 1: get root */ 2448 key.objectid = backref->root_id; 2449 key.type = BTRFS_ROOT_ITEM_KEY; 2450 key.offset = (u64)-1; 2451 2452 fs_info = BTRFS_I(src_inode)->root->fs_info; 2453 index = srcu_read_lock(&fs_info->subvol_srcu); 2454 2455 root = btrfs_read_fs_root_no_name(fs_info, &key); 2456 if (IS_ERR(root)) { 2457 srcu_read_unlock(&fs_info->subvol_srcu, index); 2458 if (PTR_ERR(root) == -ENOENT) 2459 return 0; 2460 return PTR_ERR(root); 2461 } 2462 2463 if (btrfs_root_readonly(root)) { 2464 srcu_read_unlock(&fs_info->subvol_srcu, index); 2465 return 0; 2466 } 2467 2468 /* step 2: get inode */ 2469 key.objectid = backref->inum; 2470 key.type = BTRFS_INODE_ITEM_KEY; 2471 key.offset = 0; 2472 2473 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 2474 if (IS_ERR(inode)) { 2475 srcu_read_unlock(&fs_info->subvol_srcu, index); 2476 return 0; 2477 } 2478 2479 srcu_read_unlock(&fs_info->subvol_srcu, index); 2480 2481 /* step 3: relink backref */ 2482 lock_start = backref->file_pos; 2483 lock_end = backref->file_pos + backref->num_bytes - 1; 2484 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2485 0, &cached); 2486 2487 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); 2488 if (ordered) { 2489 btrfs_put_ordered_extent(ordered); 2490 goto out_unlock; 2491 } 2492 2493 trans = btrfs_join_transaction(root); 2494 if (IS_ERR(trans)) { 2495 ret = PTR_ERR(trans); 2496 goto out_unlock; 2497 } 2498 2499 key.objectid = backref->inum; 2500 key.type = BTRFS_EXTENT_DATA_KEY; 2501 key.offset = backref->file_pos; 2502 2503 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2504 if (ret < 0) { 2505 goto out_free_path; 2506 } else if (ret > 0) { 2507 ret = 0; 2508 goto out_free_path; 2509 } 2510 2511 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], 2512 struct btrfs_file_extent_item); 2513 2514 if (btrfs_file_extent_generation(path->nodes[0], extent) != 2515 backref->generation) 2516 goto out_free_path; 2517 2518 btrfs_release_path(path); 2519 2520 start = backref->file_pos; 2521 if (backref->extent_offset < old->extent_offset + old->offset) 2522 start += old->extent_offset + old->offset - 2523 backref->extent_offset; 2524 2525 len = min(backref->extent_offset + backref->num_bytes, 2526 old->extent_offset + old->offset + old->len); 2527 len -= max(backref->extent_offset, old->extent_offset + old->offset); 2528 2529 ret = btrfs_drop_extents(trans, root, inode, start, 2530 start + len, 1); 2531 if (ret) 2532 goto out_free_path; 2533 again: 2534 key.objectid = btrfs_ino(inode); 2535 key.type 
= BTRFS_EXTENT_DATA_KEY; 2536 key.offset = start; 2537 2538 path->leave_spinning = 1; 2539 if (merge) { 2540 struct btrfs_file_extent_item *fi; 2541 u64 extent_len; 2542 struct btrfs_key found_key; 2543 2544 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2545 if (ret < 0) 2546 goto out_free_path; 2547 2548 path->slots[0]--; 2549 leaf = path->nodes[0]; 2550 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2551 2552 fi = btrfs_item_ptr(leaf, path->slots[0], 2553 struct btrfs_file_extent_item); 2554 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 2555 2556 if (extent_len + found_key.offset == start && 2557 relink_is_mergable(leaf, fi, new)) { 2558 btrfs_set_file_extent_num_bytes(leaf, fi, 2559 extent_len + len); 2560 btrfs_mark_buffer_dirty(leaf); 2561 inode_add_bytes(inode, len); 2562 2563 ret = 1; 2564 goto out_free_path; 2565 } else { 2566 merge = false; 2567 btrfs_release_path(path); 2568 goto again; 2569 } 2570 } 2571 2572 ret = btrfs_insert_empty_item(trans, root, path, &key, 2573 sizeof(*extent)); 2574 if (ret) { 2575 btrfs_abort_transaction(trans, root, ret); 2576 goto out_free_path; 2577 } 2578 2579 leaf = path->nodes[0]; 2580 item = btrfs_item_ptr(leaf, path->slots[0], 2581 struct btrfs_file_extent_item); 2582 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); 2583 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); 2584 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); 2585 btrfs_set_file_extent_num_bytes(leaf, item, len); 2586 btrfs_set_file_extent_ram_bytes(leaf, item, new->len); 2587 btrfs_set_file_extent_generation(leaf, item, trans->transid); 2588 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 2589 btrfs_set_file_extent_compression(leaf, item, new->compress_type); 2590 btrfs_set_file_extent_encryption(leaf, item, 0); 2591 btrfs_set_file_extent_other_encoding(leaf, item, 0); 2592 2593 btrfs_mark_buffer_dirty(leaf); 2594 inode_add_bytes(inode, len); 2595 btrfs_release_path(path); 2596 2597 ret = btrfs_inc_extent_ref(trans, root, new->bytenr, 2598 new->disk_len, 0, 2599 backref->root_id, backref->inum, 2600 new->file_pos); /* start - extent_offset */ 2601 if (ret) { 2602 btrfs_abort_transaction(trans, root, ret); 2603 goto out_free_path; 2604 } 2605 2606 ret = 1; 2607 out_free_path: 2608 btrfs_release_path(path); 2609 path->leave_spinning = 0; 2610 btrfs_end_transaction(trans, root); 2611 out_unlock: 2612 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2613 &cached, GFP_NOFS); 2614 iput(inode); 2615 return ret; 2616 } 2617 2618 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) 2619 { 2620 struct old_sa_defrag_extent *old, *tmp; 2621 2622 if (!new) 2623 return; 2624 2625 list_for_each_entry_safe(old, tmp, &new->head, list) { 2626 kfree(old); 2627 } 2628 kfree(new); 2629 } 2630 2631 static void relink_file_extents(struct new_sa_defrag_extent *new) 2632 { 2633 struct btrfs_path *path; 2634 struct sa_defrag_extent_backref *backref; 2635 struct sa_defrag_extent_backref *prev = NULL; 2636 struct inode *inode; 2637 struct btrfs_root *root; 2638 struct rb_node *node; 2639 int ret; 2640 2641 inode = new->inode; 2642 root = BTRFS_I(inode)->root; 2643 2644 path = btrfs_alloc_path(); 2645 if (!path) 2646 return; 2647 2648 if (!record_extent_backrefs(path, new)) { 2649 btrfs_free_path(path); 2650 goto out; 2651 } 2652 btrfs_release_path(path); 2653 2654 while (1) { 2655 node = rb_first(&new->root); 2656 if (!node) 2657 break; 2658 rb_erase(node, &new->root); 2659 2660 backref = 
rb_entry(node, struct sa_defrag_extent_backref, node); 2661 2662 ret = relink_extent_backref(path, prev, backref); 2663 WARN_ON(ret < 0); 2664 2665 kfree(prev); 2666 2667 if (ret == 1) 2668 prev = backref; 2669 else 2670 prev = NULL; 2671 cond_resched(); 2672 } 2673 kfree(prev); 2674 2675 btrfs_free_path(path); 2676 out: 2677 free_sa_defrag_extent(new); 2678 2679 atomic_dec(&root->fs_info->defrag_running); 2680 wake_up(&root->fs_info->transaction_wait); 2681 } 2682 2683 static struct new_sa_defrag_extent * 2684 record_old_file_extents(struct inode *inode, 2685 struct btrfs_ordered_extent *ordered) 2686 { 2687 struct btrfs_root *root = BTRFS_I(inode)->root; 2688 struct btrfs_path *path; 2689 struct btrfs_key key; 2690 struct old_sa_defrag_extent *old; 2691 struct new_sa_defrag_extent *new; 2692 int ret; 2693 2694 new = kmalloc(sizeof(*new), GFP_NOFS); 2695 if (!new) 2696 return NULL; 2697 2698 new->inode = inode; 2699 new->file_pos = ordered->file_offset; 2700 new->len = ordered->len; 2701 new->bytenr = ordered->start; 2702 new->disk_len = ordered->disk_len; 2703 new->compress_type = ordered->compress_type; 2704 new->root = RB_ROOT; 2705 INIT_LIST_HEAD(&new->head); 2706 2707 path = btrfs_alloc_path(); 2708 if (!path) 2709 goto out_kfree; 2710 2711 key.objectid = btrfs_ino(inode); 2712 key.type = BTRFS_EXTENT_DATA_KEY; 2713 key.offset = new->file_pos; 2714 2715 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2716 if (ret < 0) 2717 goto out_free_path; 2718 if (ret > 0 && path->slots[0] > 0) 2719 path->slots[0]--; 2720 2721 /* find out all the old extents for the file range */ 2722 while (1) { 2723 struct btrfs_file_extent_item *extent; 2724 struct extent_buffer *l; 2725 int slot; 2726 u64 num_bytes; 2727 u64 offset; 2728 u64 end; 2729 u64 disk_bytenr; 2730 u64 extent_offset; 2731 2732 l = path->nodes[0]; 2733 slot = path->slots[0]; 2734 2735 if (slot >= btrfs_header_nritems(l)) { 2736 ret = btrfs_next_leaf(root, path); 2737 if (ret < 0) 2738 goto out_free_path; 2739 else if (ret > 0) 2740 break; 2741 continue; 2742 } 2743 2744 btrfs_item_key_to_cpu(l, &key, slot); 2745 2746 if (key.objectid != btrfs_ino(inode)) 2747 break; 2748 if (key.type != BTRFS_EXTENT_DATA_KEY) 2749 break; 2750 if (key.offset >= new->file_pos + new->len) 2751 break; 2752 2753 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); 2754 2755 num_bytes = btrfs_file_extent_num_bytes(l, extent); 2756 if (key.offset + num_bytes < new->file_pos) 2757 goto next; 2758 2759 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); 2760 if (!disk_bytenr) 2761 goto next; 2762 2763 extent_offset = btrfs_file_extent_offset(l, extent); 2764 2765 old = kmalloc(sizeof(*old), GFP_NOFS); 2766 if (!old) 2767 goto out_free_path; 2768 2769 offset = max(new->file_pos, key.offset); 2770 end = min(new->file_pos + new->len, key.offset + num_bytes); 2771 2772 old->bytenr = disk_bytenr; 2773 old->extent_offset = extent_offset; 2774 old->offset = offset - key.offset; 2775 old->len = end - offset; 2776 old->new = new; 2777 old->count = 0; 2778 list_add_tail(&old->list, &new->head); 2779 next: 2780 path->slots[0]++; 2781 cond_resched(); 2782 } 2783 2784 btrfs_free_path(path); 2785 atomic_inc(&root->fs_info->defrag_running); 2786 2787 return new; 2788 2789 out_free_path: 2790 btrfs_free_path(path); 2791 out_kfree: 2792 free_sa_defrag_extent(new); 2793 return NULL; 2794 } 2795 2796 static void btrfs_release_delalloc_bytes(struct btrfs_root *root, 2797 u64 start, u64 len) 2798 { 2799 struct btrfs_block_group_cache *cache; 2800 2801 cache = 
btrfs_lookup_block_group(root->fs_info, start); 2802 ASSERT(cache); 2803 2804 spin_lock(&cache->lock); 2805 cache->delalloc_bytes -= len; 2806 spin_unlock(&cache->lock); 2807 2808 btrfs_put_block_group(cache); 2809 } 2810 2811 /* as ordered data IO finishes, this gets called so we can finish 2812 * an ordered extent if the range of bytes in the file it covers are 2813 * fully written. 2814 */ 2815 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 2816 { 2817 struct inode *inode = ordered_extent->inode; 2818 struct btrfs_root *root = BTRFS_I(inode)->root; 2819 struct btrfs_trans_handle *trans = NULL; 2820 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2821 struct extent_state *cached_state = NULL; 2822 struct new_sa_defrag_extent *new = NULL; 2823 int compress_type = 0; 2824 int ret = 0; 2825 u64 logical_len = ordered_extent->len; 2826 bool nolock; 2827 bool truncated = false; 2828 2829 nolock = btrfs_is_free_space_inode(inode); 2830 2831 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 2832 ret = -EIO; 2833 goto out; 2834 } 2835 2836 btrfs_free_io_failure_record(inode, ordered_extent->file_offset, 2837 ordered_extent->file_offset + 2838 ordered_extent->len - 1); 2839 2840 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 2841 truncated = true; 2842 logical_len = ordered_extent->truncated_len; 2843 /* Truncated the entire extent, don't bother adding */ 2844 if (!logical_len) 2845 goto out; 2846 } 2847 2848 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 2849 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 2850 2851 /* 2852 * For mwrite(mmap + memset to write) case, we still reserve 2853 * space for NOCOW range. 2854 * As NOCOW won't cause a new delayed ref, just free the space 2855 */ 2856 btrfs_qgroup_free_data(inode, ordered_extent->file_offset, 2857 ordered_extent->len); 2858 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2859 if (nolock) 2860 trans = btrfs_join_transaction_nolock(root); 2861 else 2862 trans = btrfs_join_transaction(root); 2863 if (IS_ERR(trans)) { 2864 ret = PTR_ERR(trans); 2865 trans = NULL; 2866 goto out; 2867 } 2868 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2869 ret = btrfs_update_inode_fallback(trans, root, inode); 2870 if (ret) /* -ENOMEM or corruption */ 2871 btrfs_abort_transaction(trans, root, ret); 2872 goto out; 2873 } 2874 2875 lock_extent_bits(io_tree, ordered_extent->file_offset, 2876 ordered_extent->file_offset + ordered_extent->len - 1, 2877 0, &cached_state); 2878 2879 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2880 ordered_extent->file_offset + ordered_extent->len - 1, 2881 EXTENT_DEFRAG, 1, cached_state); 2882 if (ret) { 2883 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2884 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2885 /* the inode is shared */ 2886 new = record_old_file_extents(inode, ordered_extent); 2887 2888 clear_extent_bit(io_tree, ordered_extent->file_offset, 2889 ordered_extent->file_offset + ordered_extent->len - 1, 2890 EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); 2891 } 2892 2893 if (nolock) 2894 trans = btrfs_join_transaction_nolock(root); 2895 else 2896 trans = btrfs_join_transaction(root); 2897 if (IS_ERR(trans)) { 2898 ret = PTR_ERR(trans); 2899 trans = NULL; 2900 goto out_unlock; 2901 } 2902 2903 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2904 2905 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 2906 compress_type = 
ordered_extent->compress_type; 2907 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 2908 BUG_ON(compress_type); 2909 ret = btrfs_mark_extent_written(trans, inode, 2910 ordered_extent->file_offset, 2911 ordered_extent->file_offset + 2912 logical_len); 2913 } else { 2914 BUG_ON(root == root->fs_info->tree_root); 2915 ret = insert_reserved_file_extent(trans, inode, 2916 ordered_extent->file_offset, 2917 ordered_extent->start, 2918 ordered_extent->disk_len, 2919 logical_len, logical_len, 2920 compress_type, 0, 0, 2921 BTRFS_FILE_EXTENT_REG); 2922 if (!ret) 2923 btrfs_release_delalloc_bytes(root, 2924 ordered_extent->start, 2925 ordered_extent->disk_len); 2926 } 2927 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2928 ordered_extent->file_offset, ordered_extent->len, 2929 trans->transid); 2930 if (ret < 0) { 2931 btrfs_abort_transaction(trans, root, ret); 2932 goto out_unlock; 2933 } 2934 2935 add_pending_csums(trans, inode, ordered_extent->file_offset, 2936 &ordered_extent->list); 2937 2938 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2939 ret = btrfs_update_inode_fallback(trans, root, inode); 2940 if (ret) { /* -ENOMEM or corruption */ 2941 btrfs_abort_transaction(trans, root, ret); 2942 goto out_unlock; 2943 } 2944 ret = 0; 2945 out_unlock: 2946 unlock_extent_cached(io_tree, ordered_extent->file_offset, 2947 ordered_extent->file_offset + 2948 ordered_extent->len - 1, &cached_state, GFP_NOFS); 2949 out: 2950 if (root != root->fs_info->tree_root) 2951 btrfs_delalloc_release_metadata(inode, ordered_extent->len); 2952 if (trans) 2953 btrfs_end_transaction(trans, root); 2954 2955 if (ret || truncated) { 2956 u64 start, end; 2957 2958 if (truncated) 2959 start = ordered_extent->file_offset + logical_len; 2960 else 2961 start = ordered_extent->file_offset; 2962 end = ordered_extent->file_offset + ordered_extent->len - 1; 2963 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); 2964 2965 /* Drop the cache for the part of the extent we didn't write. */ 2966 btrfs_drop_extent_cache(inode, start, end, 0); 2967 2968 /* 2969 * If the ordered extent had an IOERR or something else went 2970 * wrong we need to return the space for this ordered extent 2971 * back to the allocator. We only free the extent in the 2972 * truncated case if we didn't write out the extent at all. 2973 */ 2974 if ((ret || !logical_len) && 2975 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2976 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 2977 btrfs_free_reserved_extent(root, ordered_extent->start, 2978 ordered_extent->disk_len, 1); 2979 } 2980 2981 2982 /* 2983 * This needs to be done to make sure anybody waiting knows we are done 2984 * updating everything for this ordered extent. 
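 * btrfs_remove_ordered_extent() takes it out of the inode's ordered tree
 * so waiters can see the range is finished.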
2985 */ 2986 btrfs_remove_ordered_extent(inode, ordered_extent); 2987 2988 /* for snapshot-aware defrag */ 2989 if (new) { 2990 if (ret) { 2991 free_sa_defrag_extent(new); 2992 atomic_dec(&root->fs_info->defrag_running); 2993 } else { 2994 relink_file_extents(new); 2995 } 2996 } 2997 2998 /* once for us */ 2999 btrfs_put_ordered_extent(ordered_extent); 3000 /* once for the tree */ 3001 btrfs_put_ordered_extent(ordered_extent); 3002 3003 return ret; 3004 } 3005 3006 static void finish_ordered_fn(struct btrfs_work *work) 3007 { 3008 struct btrfs_ordered_extent *ordered_extent; 3009 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 3010 btrfs_finish_ordered_io(ordered_extent); 3011 } 3012 3013 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 3014 struct extent_state *state, int uptodate) 3015 { 3016 struct inode *inode = page->mapping->host; 3017 struct btrfs_root *root = BTRFS_I(inode)->root; 3018 struct btrfs_ordered_extent *ordered_extent = NULL; 3019 struct btrfs_workqueue *wq; 3020 btrfs_work_func_t func; 3021 3022 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 3023 3024 ClearPagePrivate2(page); 3025 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 3026 end - start + 1, uptodate)) 3027 return 0; 3028 3029 if (btrfs_is_free_space_inode(inode)) { 3030 wq = root->fs_info->endio_freespace_worker; 3031 func = btrfs_freespace_write_helper; 3032 } else { 3033 wq = root->fs_info->endio_write_workers; 3034 func = btrfs_endio_write_helper; 3035 } 3036 3037 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, 3038 NULL); 3039 btrfs_queue_work(wq, &ordered_extent->work); 3040 3041 return 0; 3042 } 3043 3044 static int __readpage_endio_check(struct inode *inode, 3045 struct btrfs_io_bio *io_bio, 3046 int icsum, struct page *page, 3047 int pgoff, u64 start, size_t len) 3048 { 3049 char *kaddr; 3050 u32 csum_expected; 3051 u32 csum = ~(u32)0; 3052 3053 csum_expected = *(((u32 *)io_bio->csum) + icsum); 3054 3055 kaddr = kmap_atomic(page); 3056 csum = btrfs_csum_data(kaddr + pgoff, csum, len); 3057 btrfs_csum_final(csum, (char *)&csum); 3058 if (csum != csum_expected) 3059 goto zeroit; 3060 3061 kunmap_atomic(kaddr); 3062 return 0; 3063 zeroit: 3064 btrfs_warn_rl(BTRFS_I(inode)->root->fs_info, 3065 "csum failed ino %llu off %llu csum %u expected csum %u", 3066 btrfs_ino(inode), start, csum, csum_expected); 3067 memset(kaddr + pgoff, 1, len); 3068 flush_dcache_page(page); 3069 kunmap_atomic(kaddr); 3070 if (csum_expected == 0) 3071 return 0; 3072 return -EIO; 3073 } 3074 3075 /* 3076 * when reads are done, we need to check csums to verify the data is correct 3077 * if there's a match, we allow the bio to finish. If not, the code in 3078 * extent_io.c will try to find good copies for us. 
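 * Pages already verified (PageChecked), NODATASUM inodes and relocation
 * tree ranges flagged EXTENT_NODATASUM skip the check and are accepted
 * as-is.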
3079 */ 3080 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 3081 u64 phy_offset, struct page *page, 3082 u64 start, u64 end, int mirror) 3083 { 3084 size_t offset = start - page_offset(page); 3085 struct inode *inode = page->mapping->host; 3086 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3087 struct btrfs_root *root = BTRFS_I(inode)->root; 3088 3089 if (PageChecked(page)) { 3090 ClearPageChecked(page); 3091 return 0; 3092 } 3093 3094 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 3095 return 0; 3096 3097 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 3098 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 3099 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 3100 GFP_NOFS); 3101 return 0; 3102 } 3103 3104 phy_offset >>= inode->i_sb->s_blocksize_bits; 3105 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, 3106 start, (size_t)(end - start + 1)); 3107 } 3108 3109 struct delayed_iput { 3110 struct list_head list; 3111 struct inode *inode; 3112 }; 3113 3114 /* JDM: If this is fs-wide, why can't we add a pointer to 3115 * btrfs_inode instead and avoid the allocation? */ 3116 void btrfs_add_delayed_iput(struct inode *inode) 3117 { 3118 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 3119 struct delayed_iput *delayed; 3120 3121 if (atomic_add_unless(&inode->i_count, -1, 1)) 3122 return; 3123 3124 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); 3125 delayed->inode = inode; 3126 3127 spin_lock(&fs_info->delayed_iput_lock); 3128 list_add_tail(&delayed->list, &fs_info->delayed_iputs); 3129 spin_unlock(&fs_info->delayed_iput_lock); 3130 } 3131 3132 void btrfs_run_delayed_iputs(struct btrfs_root *root) 3133 { 3134 LIST_HEAD(list); 3135 struct btrfs_fs_info *fs_info = root->fs_info; 3136 struct delayed_iput *delayed; 3137 int empty; 3138 3139 spin_lock(&fs_info->delayed_iput_lock); 3140 empty = list_empty(&fs_info->delayed_iputs); 3141 spin_unlock(&fs_info->delayed_iput_lock); 3142 if (empty) 3143 return; 3144 3145 down_read(&fs_info->delayed_iput_sem); 3146 3147 spin_lock(&fs_info->delayed_iput_lock); 3148 list_splice_init(&fs_info->delayed_iputs, &list); 3149 spin_unlock(&fs_info->delayed_iput_lock); 3150 3151 while (!list_empty(&list)) { 3152 delayed = list_entry(list.next, struct delayed_iput, list); 3153 list_del(&delayed->list); 3154 iput(delayed->inode); 3155 kfree(delayed); 3156 } 3157 3158 up_read(&root->fs_info->delayed_iput_sem); 3159 } 3160 3161 /* 3162 * This is called in transaction commit time. If there are no orphan 3163 * files in the subvolume, it removes orphan item and frees block_rsv 3164 * structure. 
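 * The orphan item removed here is the per-subvolume one stored in the
 * tree root (offset == this root's objectid); per-inode orphan items are
 * handled by btrfs_orphan_add()/btrfs_orphan_del() below.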
3165 */ 3166 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3167 struct btrfs_root *root) 3168 { 3169 struct btrfs_block_rsv *block_rsv; 3170 int ret; 3171 3172 if (atomic_read(&root->orphan_inodes) || 3173 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 3174 return; 3175 3176 spin_lock(&root->orphan_lock); 3177 if (atomic_read(&root->orphan_inodes)) { 3178 spin_unlock(&root->orphan_lock); 3179 return; 3180 } 3181 3182 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 3183 spin_unlock(&root->orphan_lock); 3184 return; 3185 } 3186 3187 block_rsv = root->orphan_block_rsv; 3188 root->orphan_block_rsv = NULL; 3189 spin_unlock(&root->orphan_lock); 3190 3191 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) && 3192 btrfs_root_refs(&root->root_item) > 0) { 3193 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 3194 root->root_key.objectid); 3195 if (ret) 3196 btrfs_abort_transaction(trans, root, ret); 3197 else 3198 clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, 3199 &root->state); 3200 } 3201 3202 if (block_rsv) { 3203 WARN_ON(block_rsv->size > 0); 3204 btrfs_free_block_rsv(root, block_rsv); 3205 } 3206 } 3207 3208 /* 3209 * This creates an orphan entry for the given inode in case something goes 3210 * wrong in the middle of an unlink/truncate. 3211 * 3212 * NOTE: caller of this function should reserve 5 units of metadata for 3213 * this function. 3214 */ 3215 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 3216 { 3217 struct btrfs_root *root = BTRFS_I(inode)->root; 3218 struct btrfs_block_rsv *block_rsv = NULL; 3219 int reserve = 0; 3220 int insert = 0; 3221 int ret; 3222 3223 if (!root->orphan_block_rsv) { 3224 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 3225 if (!block_rsv) 3226 return -ENOMEM; 3227 } 3228 3229 spin_lock(&root->orphan_lock); 3230 if (!root->orphan_block_rsv) { 3231 root->orphan_block_rsv = block_rsv; 3232 } else if (block_rsv) { 3233 btrfs_free_block_rsv(root, block_rsv); 3234 block_rsv = NULL; 3235 } 3236 3237 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3238 &BTRFS_I(inode)->runtime_flags)) { 3239 #if 0 3240 /* 3241 * For proper ENOSPC handling, we should do orphan 3242 * cleanup when mounting. But this introduces backward 3243 * compatibility issue. 3244 */ 3245 if (!xchg(&root->orphan_item_inserted, 1)) 3246 insert = 2; 3247 else 3248 insert = 1; 3249 #endif 3250 insert = 1; 3251 atomic_inc(&root->orphan_inodes); 3252 } 3253 3254 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3255 &BTRFS_I(inode)->runtime_flags)) 3256 reserve = 1; 3257 spin_unlock(&root->orphan_lock); 3258 3259 /* grab metadata reservation from transaction handle */ 3260 if (reserve) { 3261 ret = btrfs_orphan_reserve_metadata(trans, inode); 3262 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? 
JDM */ 3263 } 3264 3265 /* insert an orphan item to track this unlinked/truncated file */ 3266 if (insert >= 1) { 3267 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 3268 if (ret) { 3269 atomic_dec(&root->orphan_inodes); 3270 if (reserve) { 3271 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3272 &BTRFS_I(inode)->runtime_flags); 3273 btrfs_orphan_release_metadata(inode); 3274 } 3275 if (ret != -EEXIST) { 3276 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3277 &BTRFS_I(inode)->runtime_flags); 3278 btrfs_abort_transaction(trans, root, ret); 3279 return ret; 3280 } 3281 } 3282 ret = 0; 3283 } 3284 3285 /* insert an orphan item to track subvolume contains orphan files */ 3286 if (insert >= 2) { 3287 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 3288 root->root_key.objectid); 3289 if (ret && ret != -EEXIST) { 3290 btrfs_abort_transaction(trans, root, ret); 3291 return ret; 3292 } 3293 } 3294 return 0; 3295 } 3296 3297 /* 3298 * We have done the truncate/delete so we can go ahead and remove the orphan 3299 * item for this particular inode. 3300 */ 3301 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3302 struct inode *inode) 3303 { 3304 struct btrfs_root *root = BTRFS_I(inode)->root; 3305 int delete_item = 0; 3306 int release_rsv = 0; 3307 int ret = 0; 3308 3309 spin_lock(&root->orphan_lock); 3310 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3311 &BTRFS_I(inode)->runtime_flags)) 3312 delete_item = 1; 3313 3314 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3315 &BTRFS_I(inode)->runtime_flags)) 3316 release_rsv = 1; 3317 spin_unlock(&root->orphan_lock); 3318 3319 if (delete_item) { 3320 atomic_dec(&root->orphan_inodes); 3321 if (trans) 3322 ret = btrfs_del_orphan_item(trans, root, 3323 btrfs_ino(inode)); 3324 } 3325 3326 if (release_rsv) 3327 btrfs_orphan_release_metadata(inode); 3328 3329 return ret; 3330 } 3331 3332 /* 3333 * this cleans up any orphans that may be left on the list from the last use 3334 * of this root. 
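 * The offset of each orphan item is the inode number. Inodes that still
 * have links are truncated, link-count-zero inodes are simply iput() so
 * eviction deletes them, and items whose inode is already gone are
 * deleted directly.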
3335 */ 3336 int btrfs_orphan_cleanup(struct btrfs_root *root) 3337 { 3338 struct btrfs_path *path; 3339 struct extent_buffer *leaf; 3340 struct btrfs_key key, found_key; 3341 struct btrfs_trans_handle *trans; 3342 struct inode *inode; 3343 u64 last_objectid = 0; 3344 int ret = 0, nr_unlink = 0, nr_truncate = 0; 3345 3346 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 3347 return 0; 3348 3349 path = btrfs_alloc_path(); 3350 if (!path) { 3351 ret = -ENOMEM; 3352 goto out; 3353 } 3354 path->reada = -1; 3355 3356 key.objectid = BTRFS_ORPHAN_OBJECTID; 3357 key.type = BTRFS_ORPHAN_ITEM_KEY; 3358 key.offset = (u64)-1; 3359 3360 while (1) { 3361 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3362 if (ret < 0) 3363 goto out; 3364 3365 /* 3366 * if ret == 0 means we found what we were searching for, which 3367 * is weird, but possible, so only screw with path if we didn't 3368 * find the key and see if we have stuff that matches 3369 */ 3370 if (ret > 0) { 3371 ret = 0; 3372 if (path->slots[0] == 0) 3373 break; 3374 path->slots[0]--; 3375 } 3376 3377 /* pull out the item */ 3378 leaf = path->nodes[0]; 3379 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3380 3381 /* make sure the item matches what we want */ 3382 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3383 break; 3384 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3385 break; 3386 3387 /* release the path since we're done with it */ 3388 btrfs_release_path(path); 3389 3390 /* 3391 * this is where we are basically btrfs_lookup, without the 3392 * crossing root thing. we store the inode number in the 3393 * offset of the orphan item. 3394 */ 3395 3396 if (found_key.offset == last_objectid) { 3397 btrfs_err(root->fs_info, 3398 "Error removing orphan entry, stopping orphan cleanup"); 3399 ret = -EINVAL; 3400 goto out; 3401 } 3402 3403 last_objectid = found_key.offset; 3404 3405 found_key.objectid = found_key.offset; 3406 found_key.type = BTRFS_INODE_ITEM_KEY; 3407 found_key.offset = 0; 3408 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 3409 ret = PTR_ERR_OR_ZERO(inode); 3410 if (ret && ret != -ESTALE) 3411 goto out; 3412 3413 if (ret == -ESTALE && root == root->fs_info->tree_root) { 3414 struct btrfs_root *dead_root; 3415 struct btrfs_fs_info *fs_info = root->fs_info; 3416 int is_dead_root = 0; 3417 3418 /* 3419 * this is an orphan in the tree root. Currently these 3420 * could come from 2 sources: 3421 * a) a snapshot deletion in progress 3422 * b) a free space cache inode 3423 * We need to distinguish those two, as the snapshot 3424 * orphan must not get deleted. 3425 * find_dead_roots already ran before us, so if this 3426 * is a snapshot deletion, we should find the root 3427 * in the dead_roots list 3428 */ 3429 spin_lock(&fs_info->trans_lock); 3430 list_for_each_entry(dead_root, &fs_info->dead_roots, 3431 root_list) { 3432 if (dead_root->root_key.objectid == 3433 found_key.objectid) { 3434 is_dead_root = 1; 3435 break; 3436 } 3437 } 3438 spin_unlock(&fs_info->trans_lock); 3439 if (is_dead_root) { 3440 /* prevent this orphan from being found again */ 3441 key.offset = found_key.objectid - 1; 3442 continue; 3443 } 3444 } 3445 /* 3446 * Inode is already gone but the orphan item is still there, 3447 * kill the orphan item. 
3448 */ 3449 if (ret == -ESTALE) { 3450 trans = btrfs_start_transaction(root, 1); 3451 if (IS_ERR(trans)) { 3452 ret = PTR_ERR(trans); 3453 goto out; 3454 } 3455 btrfs_debug(root->fs_info, "auto deleting %Lu", 3456 found_key.objectid); 3457 ret = btrfs_del_orphan_item(trans, root, 3458 found_key.objectid); 3459 btrfs_end_transaction(trans, root); 3460 if (ret) 3461 goto out; 3462 continue; 3463 } 3464 3465 /* 3466 * add this inode to the orphan list so btrfs_orphan_del does 3467 * the proper thing when we hit it 3468 */ 3469 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3470 &BTRFS_I(inode)->runtime_flags); 3471 atomic_inc(&root->orphan_inodes); 3472 3473 /* if we have links, this was a truncate, lets do that */ 3474 if (inode->i_nlink) { 3475 if (WARN_ON(!S_ISREG(inode->i_mode))) { 3476 iput(inode); 3477 continue; 3478 } 3479 nr_truncate++; 3480 3481 /* 1 for the orphan item deletion. */ 3482 trans = btrfs_start_transaction(root, 1); 3483 if (IS_ERR(trans)) { 3484 iput(inode); 3485 ret = PTR_ERR(trans); 3486 goto out; 3487 } 3488 ret = btrfs_orphan_add(trans, inode); 3489 btrfs_end_transaction(trans, root); 3490 if (ret) { 3491 iput(inode); 3492 goto out; 3493 } 3494 3495 ret = btrfs_truncate(inode); 3496 if (ret) 3497 btrfs_orphan_del(NULL, inode); 3498 } else { 3499 nr_unlink++; 3500 } 3501 3502 /* this will do delete_inode and everything for us */ 3503 iput(inode); 3504 if (ret) 3505 goto out; 3506 } 3507 /* release the path since we're done with it */ 3508 btrfs_release_path(path); 3509 3510 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 3511 3512 if (root->orphan_block_rsv) 3513 btrfs_block_rsv_release(root, root->orphan_block_rsv, 3514 (u64)-1); 3515 3516 if (root->orphan_block_rsv || 3517 test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3518 trans = btrfs_join_transaction(root); 3519 if (!IS_ERR(trans)) 3520 btrfs_end_transaction(trans, root); 3521 } 3522 3523 if (nr_unlink) 3524 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink); 3525 if (nr_truncate) 3526 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate); 3527 3528 out: 3529 if (ret) 3530 btrfs_err(root->fs_info, 3531 "could not do orphan cleanup %d", ret); 3532 btrfs_free_path(path); 3533 return ret; 3534 } 3535 3536 /* 3537 * very simple check to peek ahead in the leaf looking for xattrs. If we 3538 * don't find any xattrs, we know there can't be any acls. 
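 * Returns 1 when an ACL xattr may exist (it will then be looked up
 * lazily) and 0 when it definitely cannot, in which case the caller
 * uses cache_no_acl().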
3539 * 3540 * slot is the slot the inode is in, objectid is the objectid of the inode 3541 */ 3542 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3543 int slot, u64 objectid, 3544 int *first_xattr_slot) 3545 { 3546 u32 nritems = btrfs_header_nritems(leaf); 3547 struct btrfs_key found_key; 3548 static u64 xattr_access = 0; 3549 static u64 xattr_default = 0; 3550 int scanned = 0; 3551 3552 if (!xattr_access) { 3553 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS, 3554 strlen(POSIX_ACL_XATTR_ACCESS)); 3555 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT, 3556 strlen(POSIX_ACL_XATTR_DEFAULT)); 3557 } 3558 3559 slot++; 3560 *first_xattr_slot = -1; 3561 while (slot < nritems) { 3562 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3563 3564 /* we found a different objectid, there must not be acls */ 3565 if (found_key.objectid != objectid) 3566 return 0; 3567 3568 /* we found an xattr, assume we've got an acl */ 3569 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3570 if (*first_xattr_slot == -1) 3571 *first_xattr_slot = slot; 3572 if (found_key.offset == xattr_access || 3573 found_key.offset == xattr_default) 3574 return 1; 3575 } 3576 3577 /* 3578 * we found a key greater than an xattr key, there can't 3579 * be any acls later on 3580 */ 3581 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3582 return 0; 3583 3584 slot++; 3585 scanned++; 3586 3587 /* 3588 * it goes inode, inode backrefs, xattrs, extents, 3589 * so if there are a ton of hard links to an inode there can 3590 * be a lot of backrefs. Don't waste time searching too hard, 3591 * this is just an optimization 3592 */ 3593 if (scanned >= 8) 3594 break; 3595 } 3596 /* we hit the end of the leaf before we found an xattr or 3597 * something larger than an xattr. We have to assume the inode 3598 * has acls 3599 */ 3600 if (*first_xattr_slot == -1) 3601 *first_xattr_slot = slot; 3602 return 1; 3603 } 3604 3605 /* 3606 * read an inode from the btree into the in-memory inode 3607 */ 3608 static void btrfs_read_locked_inode(struct inode *inode) 3609 { 3610 struct btrfs_path *path; 3611 struct extent_buffer *leaf; 3612 struct btrfs_inode_item *inode_item; 3613 struct btrfs_root *root = BTRFS_I(inode)->root; 3614 struct btrfs_key location; 3615 unsigned long ptr; 3616 int maybe_acls; 3617 u32 rdev; 3618 int ret; 3619 bool filled = false; 3620 int first_xattr_slot; 3621 3622 ret = btrfs_fill_inode(inode, &rdev); 3623 if (!ret) 3624 filled = true; 3625 3626 path = btrfs_alloc_path(); 3627 if (!path) 3628 goto make_bad; 3629 3630 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3631 3632 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3633 if (ret) 3634 goto make_bad; 3635 3636 leaf = path->nodes[0]; 3637 3638 if (filled) 3639 goto cache_index; 3640 3641 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3642 struct btrfs_inode_item); 3643 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3644 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3645 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3646 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3647 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 3648 3649 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3650 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3651 3652 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3653 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3654 3655 inode->i_ctime.tv_sec = 
btrfs_timespec_sec(leaf, &inode_item->ctime); 3656 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3657 3658 BTRFS_I(inode)->i_otime.tv_sec = 3659 btrfs_timespec_sec(leaf, &inode_item->otime); 3660 BTRFS_I(inode)->i_otime.tv_nsec = 3661 btrfs_timespec_nsec(leaf, &inode_item->otime); 3662 3663 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3664 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3665 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3666 3667 inode->i_version = btrfs_inode_sequence(leaf, inode_item); 3668 inode->i_generation = BTRFS_I(inode)->generation; 3669 inode->i_rdev = 0; 3670 rdev = btrfs_inode_rdev(leaf, inode_item); 3671 3672 BTRFS_I(inode)->index_cnt = (u64)-1; 3673 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 3674 3675 cache_index: 3676 /* 3677 * If we were modified in the current generation and evicted from memory 3678 * and then re-read we need to do a full sync since we don't have any 3679 * idea about which extents were modified before we were evicted from 3680 * cache. 3681 * 3682 * This is required for both inode re-read from disk and delayed inode 3683 * in delayed_nodes_tree. 3684 */ 3685 if (BTRFS_I(inode)->last_trans == root->fs_info->generation) 3686 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3687 &BTRFS_I(inode)->runtime_flags); 3688 3689 /* 3690 * We don't persist the id of the transaction where an unlink operation 3691 * against the inode was last made. So here we assume the inode might 3692 * have been evicted, and therefore the exact value of last_unlink_trans 3693 * lost, and set it to last_trans to avoid metadata inconsistencies 3694 * between the inode and its parent if the inode is fsync'ed and the log 3695 * replayed. For example, in the scenario: 3696 * 3697 * touch mydir/foo 3698 * ln mydir/foo mydir/bar 3699 * sync 3700 * unlink mydir/bar 3701 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3702 * xfs_io -c fsync mydir/foo 3703 * <power failure> 3704 * mount fs, triggers fsync log replay 3705 * 3706 * We must make sure that when we fsync our inode foo we also log its 3707 * parent inode, otherwise after log replay the parent still has the 3708 * dentry with the "bar" name but our inode foo has a link count of 1 3709 * and doesn't have an inode ref with the name "bar" anymore. 3710 * 3711 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3712 * but it guarantees correctness at the expense of ocassional full 3713 * transaction commits on fsync if our inode is a directory, or if our 3714 * inode is not a directory, logging its parent unnecessarily. 
3715 */ 3716 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3717 3718 path->slots[0]++; 3719 if (inode->i_nlink != 1 || 3720 path->slots[0] >= btrfs_header_nritems(leaf)) 3721 goto cache_acl; 3722 3723 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3724 if (location.objectid != btrfs_ino(inode)) 3725 goto cache_acl; 3726 3727 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3728 if (location.type == BTRFS_INODE_REF_KEY) { 3729 struct btrfs_inode_ref *ref; 3730 3731 ref = (struct btrfs_inode_ref *)ptr; 3732 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3733 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3734 struct btrfs_inode_extref *extref; 3735 3736 extref = (struct btrfs_inode_extref *)ptr; 3737 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3738 extref); 3739 } 3740 cache_acl: 3741 /* 3742 * try to precache a NULL acl entry for files that don't have 3743 * any xattrs or acls 3744 */ 3745 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3746 btrfs_ino(inode), &first_xattr_slot); 3747 if (first_xattr_slot != -1) { 3748 path->slots[0] = first_xattr_slot; 3749 ret = btrfs_load_inode_props(inode, path); 3750 if (ret) 3751 btrfs_err(root->fs_info, 3752 "error loading props for ino %llu (root %llu): %d", 3753 btrfs_ino(inode), 3754 root->root_key.objectid, ret); 3755 } 3756 btrfs_free_path(path); 3757 3758 if (!maybe_acls) 3759 cache_no_acl(inode); 3760 3761 switch (inode->i_mode & S_IFMT) { 3762 case S_IFREG: 3763 inode->i_mapping->a_ops = &btrfs_aops; 3764 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 3765 inode->i_fop = &btrfs_file_operations; 3766 inode->i_op = &btrfs_file_inode_operations; 3767 break; 3768 case S_IFDIR: 3769 inode->i_fop = &btrfs_dir_file_operations; 3770 if (root == root->fs_info->tree_root) 3771 inode->i_op = &btrfs_dir_ro_inode_operations; 3772 else 3773 inode->i_op = &btrfs_dir_inode_operations; 3774 break; 3775 case S_IFLNK: 3776 inode->i_op = &btrfs_symlink_inode_operations; 3777 inode->i_mapping->a_ops = &btrfs_symlink_aops; 3778 break; 3779 default: 3780 inode->i_op = &btrfs_special_inode_operations; 3781 init_special_inode(inode, inode->i_mode, rdev); 3782 break; 3783 } 3784 3785 btrfs_update_iflags(inode); 3786 return; 3787 3788 make_bad: 3789 btrfs_free_path(path); 3790 make_bad_inode(inode); 3791 } 3792 3793 /* 3794 * given a leaf and an inode, copy the inode fields into the leaf 3795 */ 3796 static void fill_inode_item(struct btrfs_trans_handle *trans, 3797 struct extent_buffer *leaf, 3798 struct btrfs_inode_item *item, 3799 struct inode *inode) 3800 { 3801 struct btrfs_map_token token; 3802 3803 btrfs_init_map_token(&token); 3804 3805 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3806 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3807 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, 3808 &token); 3809 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3810 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3811 3812 btrfs_set_token_timespec_sec(leaf, &item->atime, 3813 inode->i_atime.tv_sec, &token); 3814 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3815 inode->i_atime.tv_nsec, &token); 3816 3817 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3818 inode->i_mtime.tv_sec, &token); 3819 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3820 inode->i_mtime.tv_nsec, &token); 3821 3822 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3823 inode->i_ctime.tv_sec, &token); 3824 
btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3825 inode->i_ctime.tv_nsec, &token); 3826 3827 btrfs_set_token_timespec_sec(leaf, &item->otime, 3828 BTRFS_I(inode)->i_otime.tv_sec, &token); 3829 btrfs_set_token_timespec_nsec(leaf, &item->otime, 3830 BTRFS_I(inode)->i_otime.tv_nsec, &token); 3831 3832 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3833 &token); 3834 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3835 &token); 3836 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3837 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3838 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3839 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3840 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3841 } 3842 3843 /* 3844 * copy everything in the in-memory inode into the btree. 3845 */ 3846 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3847 struct btrfs_root *root, struct inode *inode) 3848 { 3849 struct btrfs_inode_item *inode_item; 3850 struct btrfs_path *path; 3851 struct extent_buffer *leaf; 3852 int ret; 3853 3854 path = btrfs_alloc_path(); 3855 if (!path) 3856 return -ENOMEM; 3857 3858 path->leave_spinning = 1; 3859 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 3860 1); 3861 if (ret) { 3862 if (ret > 0) 3863 ret = -ENOENT; 3864 goto failed; 3865 } 3866 3867 leaf = path->nodes[0]; 3868 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3869 struct btrfs_inode_item); 3870 3871 fill_inode_item(trans, leaf, inode_item, inode); 3872 btrfs_mark_buffer_dirty(leaf); 3873 btrfs_set_inode_last_trans(trans, inode); 3874 ret = 0; 3875 failed: 3876 btrfs_free_path(path); 3877 return ret; 3878 } 3879 3880 /* 3881 * copy everything in the in-memory inode into the btree. 3882 */ 3883 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3884 struct btrfs_root *root, struct inode *inode) 3885 { 3886 int ret; 3887 3888 /* 3889 * If the inode is a free space inode, we can deadlock during commit 3890 * if we put it into the delayed code. 3891 * 3892 * The data relocation inode should also be directly updated 3893 * without delay 3894 */ 3895 if (!btrfs_is_free_space_inode(inode) 3896 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3897 && !root->fs_info->log_root_recovering) { 3898 btrfs_update_root_times(trans, root); 3899 3900 ret = btrfs_delayed_update_inode(trans, root, inode); 3901 if (!ret) 3902 btrfs_set_inode_last_trans(trans, inode); 3903 return ret; 3904 } 3905 3906 return btrfs_update_inode_item(trans, root, inode); 3907 } 3908 3909 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3910 struct btrfs_root *root, 3911 struct inode *inode) 3912 { 3913 int ret; 3914 3915 ret = btrfs_update_inode(trans, root, inode); 3916 if (ret == -ENOSPC) 3917 return btrfs_update_inode_item(trans, root, inode); 3918 return ret; 3919 } 3920 3921 /* 3922 * unlink helper that gets used here in inode.c and in the tree logging 3923 * recovery code. 
It remove a link in a directory with a given name, and 3924 * also drops the back refs in the inode to the directory 3925 */ 3926 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3927 struct btrfs_root *root, 3928 struct inode *dir, struct inode *inode, 3929 const char *name, int name_len) 3930 { 3931 struct btrfs_path *path; 3932 int ret = 0; 3933 struct extent_buffer *leaf; 3934 struct btrfs_dir_item *di; 3935 struct btrfs_key key; 3936 u64 index; 3937 u64 ino = btrfs_ino(inode); 3938 u64 dir_ino = btrfs_ino(dir); 3939 3940 path = btrfs_alloc_path(); 3941 if (!path) { 3942 ret = -ENOMEM; 3943 goto out; 3944 } 3945 3946 path->leave_spinning = 1; 3947 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3948 name, name_len, -1); 3949 if (IS_ERR(di)) { 3950 ret = PTR_ERR(di); 3951 goto err; 3952 } 3953 if (!di) { 3954 ret = -ENOENT; 3955 goto err; 3956 } 3957 leaf = path->nodes[0]; 3958 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3959 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3960 if (ret) 3961 goto err; 3962 btrfs_release_path(path); 3963 3964 /* 3965 * If we don't have dir index, we have to get it by looking up 3966 * the inode ref, since we get the inode ref, remove it directly, 3967 * it is unnecessary to do delayed deletion. 3968 * 3969 * But if we have dir index, needn't search inode ref to get it. 3970 * Since the inode ref is close to the inode item, it is better 3971 * that we delay to delete it, and just do this deletion when 3972 * we update the inode item. 3973 */ 3974 if (BTRFS_I(inode)->dir_index) { 3975 ret = btrfs_delayed_delete_inode_ref(inode); 3976 if (!ret) { 3977 index = BTRFS_I(inode)->dir_index; 3978 goto skip_backref; 3979 } 3980 } 3981 3982 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 3983 dir_ino, &index); 3984 if (ret) { 3985 btrfs_info(root->fs_info, 3986 "failed to delete reference to %.*s, inode %llu parent %llu", 3987 name_len, name, ino, dir_ino); 3988 btrfs_abort_transaction(trans, root, ret); 3989 goto err; 3990 } 3991 skip_backref: 3992 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3993 if (ret) { 3994 btrfs_abort_transaction(trans, root, ret); 3995 goto err; 3996 } 3997 3998 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 3999 inode, dir_ino); 4000 if (ret != 0 && ret != -ENOENT) { 4001 btrfs_abort_transaction(trans, root, ret); 4002 goto err; 4003 } 4004 4005 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 4006 dir, index); 4007 if (ret == -ENOENT) 4008 ret = 0; 4009 else if (ret) 4010 btrfs_abort_transaction(trans, root, ret); 4011 err: 4012 btrfs_free_path(path); 4013 if (ret) 4014 goto out; 4015 4016 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 4017 inode_inc_iversion(inode); 4018 inode_inc_iversion(dir); 4019 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 4020 ret = btrfs_update_inode(trans, root, dir); 4021 out: 4022 return ret; 4023 } 4024 4025 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4026 struct btrfs_root *root, 4027 struct inode *dir, struct inode *inode, 4028 const char *name, int name_len) 4029 { 4030 int ret; 4031 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 4032 if (!ret) { 4033 drop_nlink(inode); 4034 ret = btrfs_update_inode(trans, root, inode); 4035 } 4036 return ret; 4037 } 4038 4039 /* 4040 * helper to start transaction for unlink and rmdir. 
4041 * 4042 * unlink and rmdir are special in btrfs, they do not always free space, so 4043 * if we cannot make our reservations the normal way try and see if there is 4044 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4045 * allow the unlink to occur. 4046 */ 4047 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4048 { 4049 struct btrfs_trans_handle *trans; 4050 struct btrfs_root *root = BTRFS_I(dir)->root; 4051 int ret; 4052 4053 /* 4054 * 1 for the possible orphan item 4055 * 1 for the dir item 4056 * 1 for the dir index 4057 * 1 for the inode ref 4058 * 1 for the inode 4059 */ 4060 trans = btrfs_start_transaction(root, 5); 4061 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 4062 return trans; 4063 4064 if (PTR_ERR(trans) == -ENOSPC) { 4065 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); 4066 4067 trans = btrfs_start_transaction(root, 0); 4068 if (IS_ERR(trans)) 4069 return trans; 4070 ret = btrfs_cond_migrate_bytes(root->fs_info, 4071 &root->fs_info->trans_block_rsv, 4072 num_bytes, 5); 4073 if (ret) { 4074 btrfs_end_transaction(trans, root); 4075 return ERR_PTR(ret); 4076 } 4077 trans->block_rsv = &root->fs_info->trans_block_rsv; 4078 trans->bytes_reserved = num_bytes; 4079 } 4080 return trans; 4081 } 4082 4083 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4084 { 4085 struct btrfs_root *root = BTRFS_I(dir)->root; 4086 struct btrfs_trans_handle *trans; 4087 struct inode *inode = d_inode(dentry); 4088 int ret; 4089 4090 trans = __unlink_start_trans(dir); 4091 if (IS_ERR(trans)) 4092 return PTR_ERR(trans); 4093 4094 btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0); 4095 4096 ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4097 dentry->d_name.name, dentry->d_name.len); 4098 if (ret) 4099 goto out; 4100 4101 if (inode->i_nlink == 0) { 4102 ret = btrfs_orphan_add(trans, inode); 4103 if (ret) 4104 goto out; 4105 } 4106 4107 out: 4108 btrfs_end_transaction(trans, root); 4109 btrfs_btree_balance_dirty(root); 4110 return ret; 4111 } 4112 4113 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4114 struct btrfs_root *root, 4115 struct inode *dir, u64 objectid, 4116 const char *name, int name_len) 4117 { 4118 struct btrfs_path *path; 4119 struct extent_buffer *leaf; 4120 struct btrfs_dir_item *di; 4121 struct btrfs_key key; 4122 u64 index; 4123 int ret; 4124 u64 dir_ino = btrfs_ino(dir); 4125 4126 path = btrfs_alloc_path(); 4127 if (!path) 4128 return -ENOMEM; 4129 4130 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4131 name, name_len, -1); 4132 if (IS_ERR_OR_NULL(di)) { 4133 if (!di) 4134 ret = -ENOENT; 4135 else 4136 ret = PTR_ERR(di); 4137 goto out; 4138 } 4139 4140 leaf = path->nodes[0]; 4141 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4142 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4143 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4144 if (ret) { 4145 btrfs_abort_transaction(trans, root, ret); 4146 goto out; 4147 } 4148 btrfs_release_path(path); 4149 4150 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 4151 objectid, root->root_key.objectid, 4152 dir_ino, &index, name, name_len); 4153 if (ret < 0) { 4154 if (ret != -ENOENT) { 4155 btrfs_abort_transaction(trans, root, ret); 4156 goto out; 4157 } 4158 di = btrfs_search_dir_index_item(root, path, dir_ino, 4159 name, name_len); 4160 if (IS_ERR_OR_NULL(di)) { 4161 if (!di) 4162 ret = -ENOENT; 4163 else 4164 ret = PTR_ERR(di); 4165 btrfs_abort_transaction(trans, root, ret); 4166 
goto out; 4167 } 4168 4169 leaf = path->nodes[0]; 4170 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4171 btrfs_release_path(path); 4172 index = key.offset; 4173 } 4174 btrfs_release_path(path); 4175 4176 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 4177 if (ret) { 4178 btrfs_abort_transaction(trans, root, ret); 4179 goto out; 4180 } 4181 4182 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 4183 inode_inc_iversion(dir); 4184 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 4185 ret = btrfs_update_inode_fallback(trans, root, dir); 4186 if (ret) 4187 btrfs_abort_transaction(trans, root, ret); 4188 out: 4189 btrfs_free_path(path); 4190 return ret; 4191 } 4192 4193 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4194 { 4195 struct inode *inode = d_inode(dentry); 4196 int err = 0; 4197 struct btrfs_root *root = BTRFS_I(dir)->root; 4198 struct btrfs_trans_handle *trans; 4199 4200 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4201 return -ENOTEMPTY; 4202 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) 4203 return -EPERM; 4204 4205 trans = __unlink_start_trans(dir); 4206 if (IS_ERR(trans)) 4207 return PTR_ERR(trans); 4208 4209 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4210 err = btrfs_unlink_subvol(trans, root, dir, 4211 BTRFS_I(inode)->location.objectid, 4212 dentry->d_name.name, 4213 dentry->d_name.len); 4214 goto out; 4215 } 4216 4217 err = btrfs_orphan_add(trans, inode); 4218 if (err) 4219 goto out; 4220 4221 /* now the directory is empty */ 4222 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4223 dentry->d_name.name, dentry->d_name.len); 4224 if (!err) 4225 btrfs_i_size_write(inode, 0); 4226 out: 4227 btrfs_end_transaction(trans, root); 4228 btrfs_btree_balance_dirty(root); 4229 4230 return err; 4231 } 4232 4233 static int truncate_space_check(struct btrfs_trans_handle *trans, 4234 struct btrfs_root *root, 4235 u64 bytes_deleted) 4236 { 4237 int ret; 4238 4239 bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted); 4240 ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv, 4241 bytes_deleted, BTRFS_RESERVE_NO_FLUSH); 4242 if (!ret) 4243 trans->bytes_reserved += bytes_deleted; 4244 return ret; 4245 4246 } 4247 4248 static int truncate_inline_extent(struct inode *inode, 4249 struct btrfs_path *path, 4250 struct btrfs_key *found_key, 4251 const u64 item_end, 4252 const u64 new_size) 4253 { 4254 struct extent_buffer *leaf = path->nodes[0]; 4255 int slot = path->slots[0]; 4256 struct btrfs_file_extent_item *fi; 4257 u32 size = (u32)(new_size - found_key->offset); 4258 struct btrfs_root *root = BTRFS_I(inode)->root; 4259 4260 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 4261 4262 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { 4263 loff_t offset = new_size; 4264 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); 4265 4266 /* 4267 * Zero out the remaining of the last page of our inline extent, 4268 * instead of directly truncating our inline extent here - that 4269 * would be much more complex (decompressing all the data, then 4270 * compressing the truncated data, which might be bigger than 4271 * the size of the inline extent, resize the extent, etc). 4272 * We release the path because to get the page we might need to 4273 * read the extent item from disk (data not in the page cache). 
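		 *
		 * In other words, for a compressed inline extent we leave the
		 * item alone and only zero the tail of its page in the page
		 * cache; e.g. with 4K pages and new_size == 1000 the call
		 * below ends up as roughly
		 *
		 *	btrfs_truncate_page(inode, 1000, 3096, 0);
		 *
		 * (offset = new_size, len = page_end - offset, front = 0).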
4274 */ 4275 btrfs_release_path(path); 4276 return btrfs_truncate_page(inode, offset, page_end - offset, 0); 4277 } 4278 4279 btrfs_set_file_extent_ram_bytes(leaf, fi, size); 4280 size = btrfs_file_extent_calc_inline_size(size); 4281 btrfs_truncate_item(root, path, size, 1); 4282 4283 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4284 inode_sub_bytes(inode, item_end + 1 - new_size); 4285 4286 return 0; 4287 } 4288 4289 /* 4290 * this can truncate away extent items, csum items and directory items. 4291 * It starts at a high offset and removes keys until it can't find 4292 * any higher than new_size 4293 * 4294 * csum items that cross the new i_size are truncated to the new size 4295 * as well. 4296 * 4297 * min_type is the minimum key type to truncate down to. If set to 0, this 4298 * will kill all the items on this inode, including the INODE_ITEM_KEY. 4299 */ 4300 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 4301 struct btrfs_root *root, 4302 struct inode *inode, 4303 u64 new_size, u32 min_type) 4304 { 4305 struct btrfs_path *path; 4306 struct extent_buffer *leaf; 4307 struct btrfs_file_extent_item *fi; 4308 struct btrfs_key key; 4309 struct btrfs_key found_key; 4310 u64 extent_start = 0; 4311 u64 extent_num_bytes = 0; 4312 u64 extent_offset = 0; 4313 u64 item_end = 0; 4314 u64 last_size = new_size; 4315 u32 found_type = (u8)-1; 4316 int found_extent; 4317 int del_item; 4318 int pending_del_nr = 0; 4319 int pending_del_slot = 0; 4320 int extent_type = -1; 4321 int ret; 4322 int err = 0; 4323 u64 ino = btrfs_ino(inode); 4324 u64 bytes_deleted = 0; 4325 bool be_nice = 0; 4326 bool should_throttle = 0; 4327 bool should_end = 0; 4328 4329 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 4330 4331 /* 4332 * for non-free space inodes and ref cows, we want to back off from 4333 * time to time 4334 */ 4335 if (!btrfs_is_free_space_inode(inode) && 4336 test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4337 be_nice = 1; 4338 4339 path = btrfs_alloc_path(); 4340 if (!path) 4341 return -ENOMEM; 4342 path->reada = -1; 4343 4344 /* 4345 * We want to drop from the next block forward in case this new size is 4346 * not block aligned since we will be keeping the last block of the 4347 * extent just the way it is. 4348 */ 4349 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4350 root == root->fs_info->tree_root) 4351 btrfs_drop_extent_cache(inode, ALIGN(new_size, 4352 root->sectorsize), (u64)-1, 0); 4353 4354 /* 4355 * This function is also used to drop the items in the log tree before 4356 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 4357 * it is used to drop the loged items. So we shouldn't kill the delayed 4358 * items. 4359 */ 4360 if (min_type == 0 && root == BTRFS_I(inode)->root) 4361 btrfs_kill_delayed_inode_items(inode); 4362 4363 key.objectid = ino; 4364 key.offset = (u64)-1; 4365 key.type = (u8)-1; 4366 4367 search_again: 4368 /* 4369 * with a 16K leaf size and 128MB extents, you can actually queue 4370 * up a huge file in a single leaf. 
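	 * Roughly: an EXTENT_DATA item costs on the order of 80 bytes of leaf
	 * space (the item header plus struct btrfs_file_extent_item), so a
	 * 16K leaf holds a couple of hundred of them, and at 128MB per extent
	 * that is tens of gigabytes of file data referenced from one leaf.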
Most of the time that 4371 * bytes_deleted is > 0, it will be huge by the time we get here 4372 */ 4373 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4374 if (btrfs_should_end_transaction(trans, root)) { 4375 err = -EAGAIN; 4376 goto error; 4377 } 4378 } 4379 4380 4381 path->leave_spinning = 1; 4382 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 4383 if (ret < 0) { 4384 err = ret; 4385 goto out; 4386 } 4387 4388 if (ret > 0) { 4389 /* there are no items in the tree for us to truncate, we're 4390 * done 4391 */ 4392 if (path->slots[0] == 0) 4393 goto out; 4394 path->slots[0]--; 4395 } 4396 4397 while (1) { 4398 fi = NULL; 4399 leaf = path->nodes[0]; 4400 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4401 found_type = found_key.type; 4402 4403 if (found_key.objectid != ino) 4404 break; 4405 4406 if (found_type < min_type) 4407 break; 4408 4409 item_end = found_key.offset; 4410 if (found_type == BTRFS_EXTENT_DATA_KEY) { 4411 fi = btrfs_item_ptr(leaf, path->slots[0], 4412 struct btrfs_file_extent_item); 4413 extent_type = btrfs_file_extent_type(leaf, fi); 4414 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4415 item_end += 4416 btrfs_file_extent_num_bytes(leaf, fi); 4417 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4418 item_end += btrfs_file_extent_inline_len(leaf, 4419 path->slots[0], fi); 4420 } 4421 item_end--; 4422 } 4423 if (found_type > min_type) { 4424 del_item = 1; 4425 } else { 4426 if (item_end < new_size) 4427 break; 4428 if (found_key.offset >= new_size) 4429 del_item = 1; 4430 else 4431 del_item = 0; 4432 } 4433 found_extent = 0; 4434 /* FIXME, shrink the extent if the ref count is only 1 */ 4435 if (found_type != BTRFS_EXTENT_DATA_KEY) 4436 goto delete; 4437 4438 if (del_item) 4439 last_size = found_key.offset; 4440 else 4441 last_size = new_size; 4442 4443 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4444 u64 num_dec; 4445 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 4446 if (!del_item) { 4447 u64 orig_num_bytes = 4448 btrfs_file_extent_num_bytes(leaf, fi); 4449 extent_num_bytes = ALIGN(new_size - 4450 found_key.offset, 4451 root->sectorsize); 4452 btrfs_set_file_extent_num_bytes(leaf, fi, 4453 extent_num_bytes); 4454 num_dec = (orig_num_bytes - 4455 extent_num_bytes); 4456 if (test_bit(BTRFS_ROOT_REF_COWS, 4457 &root->state) && 4458 extent_start != 0) 4459 inode_sub_bytes(inode, num_dec); 4460 btrfs_mark_buffer_dirty(leaf); 4461 } else { 4462 extent_num_bytes = 4463 btrfs_file_extent_disk_num_bytes(leaf, 4464 fi); 4465 extent_offset = found_key.offset - 4466 btrfs_file_extent_offset(leaf, fi); 4467 4468 /* FIXME blocksize != 4096 */ 4469 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4470 if (extent_start != 0) { 4471 found_extent = 1; 4472 if (test_bit(BTRFS_ROOT_REF_COWS, 4473 &root->state)) 4474 inode_sub_bytes(inode, num_dec); 4475 } 4476 } 4477 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4478 /* 4479 * we can't truncate inline items that have had 4480 * special encodings 4481 */ 4482 if (!del_item && 4483 btrfs_file_extent_encryption(leaf, fi) == 0 && 4484 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 4485 4486 /* 4487 * Need to release path in order to truncate a 4488 * compressed extent. So delete any accumulated 4489 * extent items so far. 
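			 *
			 * The batched deletion has to happen first because
			 * pending_del_slot/pending_del_nr are slot numbers in
			 * the leaf this path currently points at; once the
			 * path is released those slots mean nothing.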
4490 */ 4491 if (btrfs_file_extent_compression(leaf, fi) != 4492 BTRFS_COMPRESS_NONE && pending_del_nr) { 4493 err = btrfs_del_items(trans, root, path, 4494 pending_del_slot, 4495 pending_del_nr); 4496 if (err) { 4497 btrfs_abort_transaction(trans, 4498 root, 4499 err); 4500 goto error; 4501 } 4502 pending_del_nr = 0; 4503 } 4504 4505 err = truncate_inline_extent(inode, path, 4506 &found_key, 4507 item_end, 4508 new_size); 4509 if (err) { 4510 btrfs_abort_transaction(trans, 4511 root, err); 4512 goto error; 4513 } 4514 } else if (test_bit(BTRFS_ROOT_REF_COWS, 4515 &root->state)) { 4516 inode_sub_bytes(inode, item_end + 1 - new_size); 4517 } 4518 } 4519 delete: 4520 if (del_item) { 4521 if (!pending_del_nr) { 4522 /* no pending yet, add ourselves */ 4523 pending_del_slot = path->slots[0]; 4524 pending_del_nr = 1; 4525 } else if (pending_del_nr && 4526 path->slots[0] + 1 == pending_del_slot) { 4527 /* hop on the pending chunk */ 4528 pending_del_nr++; 4529 pending_del_slot = path->slots[0]; 4530 } else { 4531 BUG(); 4532 } 4533 } else { 4534 break; 4535 } 4536 should_throttle = 0; 4537 4538 if (found_extent && 4539 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4540 root == root->fs_info->tree_root)) { 4541 btrfs_set_path_blocking(path); 4542 bytes_deleted += extent_num_bytes; 4543 ret = btrfs_free_extent(trans, root, extent_start, 4544 extent_num_bytes, 0, 4545 btrfs_header_owner(leaf), 4546 ino, extent_offset); 4547 BUG_ON(ret); 4548 if (btrfs_should_throttle_delayed_refs(trans, root)) 4549 btrfs_async_run_delayed_refs(root, 4550 trans->delayed_ref_updates * 2, 0); 4551 if (be_nice) { 4552 if (truncate_space_check(trans, root, 4553 extent_num_bytes)) { 4554 should_end = 1; 4555 } 4556 if (btrfs_should_throttle_delayed_refs(trans, 4557 root)) { 4558 should_throttle = 1; 4559 } 4560 } 4561 } 4562 4563 if (found_type == BTRFS_INODE_ITEM_KEY) 4564 break; 4565 4566 if (path->slots[0] == 0 || 4567 path->slots[0] != pending_del_slot || 4568 should_throttle || should_end) { 4569 if (pending_del_nr) { 4570 ret = btrfs_del_items(trans, root, path, 4571 pending_del_slot, 4572 pending_del_nr); 4573 if (ret) { 4574 btrfs_abort_transaction(trans, 4575 root, ret); 4576 goto error; 4577 } 4578 pending_del_nr = 0; 4579 } 4580 btrfs_release_path(path); 4581 if (should_throttle) { 4582 unsigned long updates = trans->delayed_ref_updates; 4583 if (updates) { 4584 trans->delayed_ref_updates = 0; 4585 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4586 if (ret && !err) 4587 err = ret; 4588 } 4589 } 4590 /* 4591 * if we failed to refill our space rsv, bail out 4592 * and let the transaction restart 4593 */ 4594 if (should_end) { 4595 err = -EAGAIN; 4596 goto error; 4597 } 4598 goto search_again; 4599 } else { 4600 path->slots[0]--; 4601 } 4602 } 4603 out: 4604 if (pending_del_nr) { 4605 ret = btrfs_del_items(trans, root, path, pending_del_slot, 4606 pending_del_nr); 4607 if (ret) 4608 btrfs_abort_transaction(trans, root, ret); 4609 } 4610 error: 4611 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 4612 btrfs_ordered_update_i_size(inode, last_size, NULL); 4613 4614 btrfs_free_path(path); 4615 4616 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4617 unsigned long updates = trans->delayed_ref_updates; 4618 if (updates) { 4619 trans->delayed_ref_updates = 0; 4620 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4621 if (ret && !err) 4622 err = ret; 4623 } 4624 } 4625 return err; 4626 } 4627 4628 /* 4629 * btrfs_truncate_page - read, zero a chunk and write a page 4630 * @inode - inode that 
we're zeroing 4631 * @from - the offset to start zeroing 4632 * @len - the length to zero, 0 to zero the entire range respective to the 4633 * offset 4634 * @front - zero up to the offset instead of from the offset on 4635 * 4636 * This will find the page for the "from" offset and cow the page and zero the 4637 * part we want to zero. This is used with truncate and hole punching. 4638 */ 4639 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, 4640 int front) 4641 { 4642 struct address_space *mapping = inode->i_mapping; 4643 struct btrfs_root *root = BTRFS_I(inode)->root; 4644 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4645 struct btrfs_ordered_extent *ordered; 4646 struct extent_state *cached_state = NULL; 4647 char *kaddr; 4648 u32 blocksize = root->sectorsize; 4649 pgoff_t index = from >> PAGE_CACHE_SHIFT; 4650 unsigned offset = from & (PAGE_CACHE_SIZE-1); 4651 struct page *page; 4652 gfp_t mask = btrfs_alloc_write_mask(mapping); 4653 int ret = 0; 4654 u64 page_start; 4655 u64 page_end; 4656 4657 if ((offset & (blocksize - 1)) == 0 && 4658 (!len || ((len & (blocksize - 1)) == 0))) 4659 goto out; 4660 ret = btrfs_delalloc_reserve_space(inode, 4661 round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE); 4662 if (ret) 4663 goto out; 4664 4665 again: 4666 page = find_or_create_page(mapping, index, mask); 4667 if (!page) { 4668 btrfs_delalloc_release_space(inode, 4669 round_down(from, PAGE_CACHE_SIZE), 4670 PAGE_CACHE_SIZE); 4671 ret = -ENOMEM; 4672 goto out; 4673 } 4674 4675 page_start = page_offset(page); 4676 page_end = page_start + PAGE_CACHE_SIZE - 1; 4677 4678 if (!PageUptodate(page)) { 4679 ret = btrfs_readpage(NULL, page); 4680 lock_page(page); 4681 if (page->mapping != mapping) { 4682 unlock_page(page); 4683 page_cache_release(page); 4684 goto again; 4685 } 4686 if (!PageUptodate(page)) { 4687 ret = -EIO; 4688 goto out_unlock; 4689 } 4690 } 4691 wait_on_page_writeback(page); 4692 4693 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 4694 set_page_extent_mapped(page); 4695 4696 ordered = btrfs_lookup_ordered_extent(inode, page_start); 4697 if (ordered) { 4698 unlock_extent_cached(io_tree, page_start, page_end, 4699 &cached_state, GFP_NOFS); 4700 unlock_page(page); 4701 page_cache_release(page); 4702 btrfs_start_ordered_extent(inode, ordered, 1); 4703 btrfs_put_ordered_extent(ordered); 4704 goto again; 4705 } 4706 4707 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 4708 EXTENT_DIRTY | EXTENT_DELALLOC | 4709 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4710 0, 0, &cached_state, GFP_NOFS); 4711 4712 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 4713 &cached_state); 4714 if (ret) { 4715 unlock_extent_cached(io_tree, page_start, page_end, 4716 &cached_state, GFP_NOFS); 4717 goto out_unlock; 4718 } 4719 4720 if (offset != PAGE_CACHE_SIZE) { 4721 if (!len) 4722 len = PAGE_CACHE_SIZE - offset; 4723 kaddr = kmap(page); 4724 if (front) 4725 memset(kaddr, 0, offset); 4726 else 4727 memset(kaddr + offset, 0, len); 4728 flush_dcache_page(page); 4729 kunmap(page); 4730 } 4731 ClearPageChecked(page); 4732 set_page_dirty(page); 4733 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, 4734 GFP_NOFS); 4735 4736 out_unlock: 4737 if (ret) 4738 btrfs_delalloc_release_space(inode, page_start, 4739 PAGE_CACHE_SIZE); 4740 unlock_page(page); 4741 page_cache_release(page); 4742 out: 4743 return ret; 4744 } 4745 4746 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, 4747 u64 offset, u64 len) 4748 
{ 4749 struct btrfs_trans_handle *trans; 4750 int ret; 4751 4752 /* 4753 * Still need to make sure the inode looks like it's been updated so 4754 * that any holes get logged if we fsync. 4755 */ 4756 if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) { 4757 BTRFS_I(inode)->last_trans = root->fs_info->generation; 4758 BTRFS_I(inode)->last_sub_trans = root->log_transid; 4759 BTRFS_I(inode)->last_log_commit = root->last_log_commit; 4760 return 0; 4761 } 4762 4763 /* 4764 * 1 - for the one we're dropping 4765 * 1 - for the one we're adding 4766 * 1 - for updating the inode. 4767 */ 4768 trans = btrfs_start_transaction(root, 3); 4769 if (IS_ERR(trans)) 4770 return PTR_ERR(trans); 4771 4772 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1); 4773 if (ret) { 4774 btrfs_abort_transaction(trans, root, ret); 4775 btrfs_end_transaction(trans, root); 4776 return ret; 4777 } 4778 4779 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, 4780 0, 0, len, 0, len, 0, 0, 0); 4781 if (ret) 4782 btrfs_abort_transaction(trans, root, ret); 4783 else 4784 btrfs_update_inode(trans, root, inode); 4785 btrfs_end_transaction(trans, root); 4786 return ret; 4787 } 4788 4789 /* 4790 * This function puts in dummy file extents for the area we're creating a hole 4791 * for. So if we are truncating this file to a larger size we need to insert 4792 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4793 * the range between oldsize and size 4794 */ 4795 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 4796 { 4797 struct btrfs_root *root = BTRFS_I(inode)->root; 4798 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4799 struct extent_map *em = NULL; 4800 struct extent_state *cached_state = NULL; 4801 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4802 u64 hole_start = ALIGN(oldsize, root->sectorsize); 4803 u64 block_end = ALIGN(size, root->sectorsize); 4804 u64 last_byte; 4805 u64 cur_offset; 4806 u64 hole_size; 4807 int err = 0; 4808 4809 /* 4810 * If our size started in the middle of a page we need to zero out the 4811 * rest of the page before we expand the i_size, otherwise we could 4812 * expose stale data. 
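	 *
	 * For example, with 4K pages, growing a 3000 byte file first zeroes
	 * bytes 3000-4095 of the old last page via the call below,
	 *
	 *	btrfs_truncate_page(inode, oldsize, 0, 0);
	 *
	 * where len == 0 means "zero to the end of the page", and only then
	 * inserts hole extents for the new range.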
4813 */ 4814 err = btrfs_truncate_page(inode, oldsize, 0, 0); 4815 if (err) 4816 return err; 4817 4818 if (size <= hole_start) 4819 return 0; 4820 4821 while (1) { 4822 struct btrfs_ordered_extent *ordered; 4823 4824 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 4825 &cached_state); 4826 ordered = btrfs_lookup_ordered_range(inode, hole_start, 4827 block_end - hole_start); 4828 if (!ordered) 4829 break; 4830 unlock_extent_cached(io_tree, hole_start, block_end - 1, 4831 &cached_state, GFP_NOFS); 4832 btrfs_start_ordered_extent(inode, ordered, 1); 4833 btrfs_put_ordered_extent(ordered); 4834 } 4835 4836 cur_offset = hole_start; 4837 while (1) { 4838 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4839 block_end - cur_offset, 0); 4840 if (IS_ERR(em)) { 4841 err = PTR_ERR(em); 4842 em = NULL; 4843 break; 4844 } 4845 last_byte = min(extent_map_end(em), block_end); 4846 last_byte = ALIGN(last_byte , root->sectorsize); 4847 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4848 struct extent_map *hole_em; 4849 hole_size = last_byte - cur_offset; 4850 4851 err = maybe_insert_hole(root, inode, cur_offset, 4852 hole_size); 4853 if (err) 4854 break; 4855 btrfs_drop_extent_cache(inode, cur_offset, 4856 cur_offset + hole_size - 1, 0); 4857 hole_em = alloc_extent_map(); 4858 if (!hole_em) { 4859 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4860 &BTRFS_I(inode)->runtime_flags); 4861 goto next; 4862 } 4863 hole_em->start = cur_offset; 4864 hole_em->len = hole_size; 4865 hole_em->orig_start = cur_offset; 4866 4867 hole_em->block_start = EXTENT_MAP_HOLE; 4868 hole_em->block_len = 0; 4869 hole_em->orig_block_len = 0; 4870 hole_em->ram_bytes = hole_size; 4871 hole_em->bdev = root->fs_info->fs_devices->latest_bdev; 4872 hole_em->compress_type = BTRFS_COMPRESS_NONE; 4873 hole_em->generation = root->fs_info->generation; 4874 4875 while (1) { 4876 write_lock(&em_tree->lock); 4877 err = add_extent_mapping(em_tree, hole_em, 1); 4878 write_unlock(&em_tree->lock); 4879 if (err != -EEXIST) 4880 break; 4881 btrfs_drop_extent_cache(inode, cur_offset, 4882 cur_offset + 4883 hole_size - 1, 0); 4884 } 4885 free_extent_map(hole_em); 4886 } 4887 next: 4888 free_extent_map(em); 4889 em = NULL; 4890 cur_offset = last_byte; 4891 if (cur_offset >= block_end) 4892 break; 4893 } 4894 free_extent_map(em); 4895 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 4896 GFP_NOFS); 4897 return err; 4898 } 4899 4900 static int wait_snapshoting_atomic_t(atomic_t *a) 4901 { 4902 schedule(); 4903 return 0; 4904 } 4905 4906 static void wait_for_snapshot_creation(struct btrfs_root *root) 4907 { 4908 while (true) { 4909 int ret; 4910 4911 ret = btrfs_start_write_no_snapshoting(root); 4912 if (ret) 4913 break; 4914 wait_on_atomic_t(&root->will_be_snapshoted, 4915 wait_snapshoting_atomic_t, 4916 TASK_UNINTERRUPTIBLE); 4917 } 4918 } 4919 4920 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4921 { 4922 struct btrfs_root *root = BTRFS_I(inode)->root; 4923 struct btrfs_trans_handle *trans; 4924 loff_t oldsize = i_size_read(inode); 4925 loff_t newsize = attr->ia_size; 4926 int mask = attr->ia_valid; 4927 int ret; 4928 4929 /* 4930 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 4931 * special case where we need to update the times despite not having 4932 * these flags set. For all other operations the VFS set these flags 4933 * explicitly if it wants a timestamp update. 
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_ctime = inode->i_mtime =
				current_fs_time(inode->i_sb);
	}

	if (newsize > oldsize) {
		truncate_pagecache(inode, newsize);
		/*
		 * Don't do an expanding truncate while snapshoting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		wait_for_snapshot_creation(root);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_end_write_no_snapshoting(root);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_end_write_no_snapshoting(root);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_write_no_snapshoting(root);
		btrfs_end_transaction(trans, root);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		/*
		 * 1 for the orphan item we're going to add
		 * 1 for the orphan item deletion.
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/*
		 * We need to do this in case we fail at _any_ point during the
		 * actual truncate. Once we do the truncate_setsize we could
		 * invalidate pages which forces any outstanding ordered io to
		 * be instantly completed which will give us extents that need
		 * to be truncated. If we fail to get an orphan inode down we
		 * could have left over extents that were never meant to live,
		 * so we need to guarantee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, inode);
		btrfs_end_transaction(trans, root);
		if (ret)
			return ret;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);

		/* Disable nonlocked read DIO to avoid the endless truncate */
		btrfs_inode_block_unlocked_dio(inode);
		inode_dio_wait(inode);
		btrfs_inode_resume_unlocked_dio(inode);

		ret = btrfs_truncate(inode);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * failed to truncate, disk_i_size is only adjusted down
			 * as we remove extents, so it should represent the true
			 * size of the inode, so reset the in-memory size and
			 * delete our orphan entry.
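			 *
			 * The orphan item is also what keeps this safe across
			 * a crash: if we die before the truncate finishes, the
			 * next mount finds the orphan item and completes the
			 * size reduction during btrfs_orphan_cleanup().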
5020 */ 5021 trans = btrfs_join_transaction(root); 5022 if (IS_ERR(trans)) { 5023 btrfs_orphan_del(NULL, inode); 5024 return ret; 5025 } 5026 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5027 err = btrfs_orphan_del(trans, inode); 5028 if (err) 5029 btrfs_abort_transaction(trans, root, err); 5030 btrfs_end_transaction(trans, root); 5031 } 5032 } 5033 5034 return ret; 5035 } 5036 5037 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 5038 { 5039 struct inode *inode = d_inode(dentry); 5040 struct btrfs_root *root = BTRFS_I(inode)->root; 5041 int err; 5042 5043 if (btrfs_root_readonly(root)) 5044 return -EROFS; 5045 5046 err = inode_change_ok(inode, attr); 5047 if (err) 5048 return err; 5049 5050 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5051 err = btrfs_setsize(inode, attr); 5052 if (err) 5053 return err; 5054 } 5055 5056 if (attr->ia_valid) { 5057 setattr_copy(inode, attr); 5058 inode_inc_iversion(inode); 5059 err = btrfs_dirty_inode(inode); 5060 5061 if (!err && attr->ia_valid & ATTR_MODE) 5062 err = posix_acl_chmod(inode, inode->i_mode); 5063 } 5064 5065 return err; 5066 } 5067 5068 /* 5069 * While truncating the inode pages during eviction, we get the VFS calling 5070 * btrfs_invalidatepage() against each page of the inode. This is slow because 5071 * the calls to btrfs_invalidatepage() result in a huge amount of calls to 5072 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting 5073 * extent_state structures over and over, wasting lots of time. 5074 * 5075 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all 5076 * those expensive operations on a per page basis and do only the ordered io 5077 * finishing, while we release here the extent_map and extent_state structures, 5078 * without the excessive merging and splitting. 5079 */ 5080 static void evict_inode_truncate_pages(struct inode *inode) 5081 { 5082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5083 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; 5084 struct rb_node *node; 5085 5086 ASSERT(inode->i_state & I_FREEING); 5087 truncate_inode_pages_final(&inode->i_data); 5088 5089 write_lock(&map_tree->lock); 5090 while (!RB_EMPTY_ROOT(&map_tree->map)) { 5091 struct extent_map *em; 5092 5093 node = rb_first(&map_tree->map); 5094 em = rb_entry(node, struct extent_map, rb_node); 5095 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 5096 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 5097 remove_extent_mapping(map_tree, em); 5098 free_extent_map(em); 5099 if (need_resched()) { 5100 write_unlock(&map_tree->lock); 5101 cond_resched(); 5102 write_lock(&map_tree->lock); 5103 } 5104 } 5105 write_unlock(&map_tree->lock); 5106 5107 /* 5108 * Keep looping until we have no more ranges in the io tree. 5109 * We can have ongoing bios started by readpages (called from readahead) 5110 * that have their endio callback (extent_io.c:end_bio_extent_readpage) 5111 * still in progress (unlocked the pages in the bio but did not yet 5112 * unlocked the ranges in the io tree). Therefore this means some 5113 * ranges can still be locked and eviction started because before 5114 * submitting those bios, which are executed by a separate task (work 5115 * queue kthread), inode references (inode->i_count) were not taken 5116 * (which would be dropped in the end io callback of each bio). 
5117 * Therefore here we effectively end up waiting for those bios and 5118 * anyone else holding locked ranges without having bumped the inode's 5119 * reference count - if we don't do it, when they access the inode's 5120 * io_tree to unlock a range it may be too late, leading to an 5121 * use-after-free issue. 5122 */ 5123 spin_lock(&io_tree->lock); 5124 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5125 struct extent_state *state; 5126 struct extent_state *cached_state = NULL; 5127 u64 start; 5128 u64 end; 5129 5130 node = rb_first(&io_tree->state); 5131 state = rb_entry(node, struct extent_state, rb_node); 5132 start = state->start; 5133 end = state->end; 5134 spin_unlock(&io_tree->lock); 5135 5136 lock_extent_bits(io_tree, start, end, 0, &cached_state); 5137 5138 /* 5139 * If still has DELALLOC flag, the extent didn't reach disk, 5140 * and its reserved space won't be freed by delayed_ref. 5141 * So we need to free its reserved space here. 5142 * (Refer to comment in btrfs_invalidatepage, case 2) 5143 * 5144 * Note, end is the bytenr of last byte, so we need + 1 here. 5145 */ 5146 if (state->state & EXTENT_DELALLOC) 5147 btrfs_qgroup_free_data(inode, start, end - start + 1); 5148 5149 clear_extent_bit(io_tree, start, end, 5150 EXTENT_LOCKED | EXTENT_DIRTY | 5151 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 5152 EXTENT_DEFRAG, 1, 1, 5153 &cached_state, GFP_NOFS); 5154 5155 cond_resched(); 5156 spin_lock(&io_tree->lock); 5157 } 5158 spin_unlock(&io_tree->lock); 5159 } 5160 5161 void btrfs_evict_inode(struct inode *inode) 5162 { 5163 struct btrfs_trans_handle *trans; 5164 struct btrfs_root *root = BTRFS_I(inode)->root; 5165 struct btrfs_block_rsv *rsv, *global_rsv; 5166 int steal_from_global = 0; 5167 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 5168 int ret; 5169 5170 trace_btrfs_inode_evict(inode); 5171 5172 evict_inode_truncate_pages(inode); 5173 5174 if (inode->i_nlink && 5175 ((btrfs_root_refs(&root->root_item) != 0 && 5176 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5177 btrfs_is_free_space_inode(inode))) 5178 goto no_delete; 5179 5180 if (is_bad_inode(inode)) { 5181 btrfs_orphan_del(NULL, inode); 5182 goto no_delete; 5183 } 5184 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ 5185 if (!special_file(inode->i_mode)) 5186 btrfs_wait_ordered_range(inode, 0, (u64)-1); 5187 5188 btrfs_free_io_failure_record(inode, 0, (u64)-1); 5189 5190 if (root->fs_info->log_root_recovering) { 5191 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 5192 &BTRFS_I(inode)->runtime_flags)); 5193 goto no_delete; 5194 } 5195 5196 if (inode->i_nlink > 0) { 5197 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5198 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5199 goto no_delete; 5200 } 5201 5202 ret = btrfs_commit_inode_delayed_inode(inode); 5203 if (ret) { 5204 btrfs_orphan_del(NULL, inode); 5205 goto no_delete; 5206 } 5207 5208 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 5209 if (!rsv) { 5210 btrfs_orphan_del(NULL, inode); 5211 goto no_delete; 5212 } 5213 rsv->size = min_size; 5214 rsv->failfast = 1; 5215 global_rsv = &root->fs_info->global_block_rsv; 5216 5217 btrfs_i_size_write(inode, 0); 5218 5219 /* 5220 * This is a bit simpler than btrfs_truncate since we've already 5221 * reserved our space for our orphan item in the unlink, so we just 5222 * need to reserve some slack space in case we add bytes and update 5223 * inode item when doing the truncate. 
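	 *
	 * min_size is btrfs_calc_trunc_metadata_size(root, 1), i.e. one
	 * item's worth of metadata reservation, and the loop below keeps
	 * retrying btrfs_truncate_inode_items() (which returns -ENOSPC or
	 * -EAGAIN when it has to bail out early) until the inode is gone.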
5224 */ 5225 while (1) { 5226 ret = btrfs_block_rsv_refill(root, rsv, min_size, 5227 BTRFS_RESERVE_FLUSH_LIMIT); 5228 5229 /* 5230 * Try and steal from the global reserve since we will 5231 * likely not use this space anyway, we want to try as 5232 * hard as possible to get this to work. 5233 */ 5234 if (ret) 5235 steal_from_global++; 5236 else 5237 steal_from_global = 0; 5238 ret = 0; 5239 5240 /* 5241 * steal_from_global == 0: we reserved stuff, hooray! 5242 * steal_from_global == 1: we didn't reserve stuff, boo! 5243 * steal_from_global == 2: we've committed, still not a lot of 5244 * room but maybe we'll have room in the global reserve this 5245 * time. 5246 * steal_from_global == 3: abandon all hope! 5247 */ 5248 if (steal_from_global > 2) { 5249 btrfs_warn(root->fs_info, 5250 "Could not get space for a delete, will truncate on mount %d", 5251 ret); 5252 btrfs_orphan_del(NULL, inode); 5253 btrfs_free_block_rsv(root, rsv); 5254 goto no_delete; 5255 } 5256 5257 trans = btrfs_join_transaction(root); 5258 if (IS_ERR(trans)) { 5259 btrfs_orphan_del(NULL, inode); 5260 btrfs_free_block_rsv(root, rsv); 5261 goto no_delete; 5262 } 5263 5264 /* 5265 * We can't just steal from the global reserve, we need tomake 5266 * sure there is room to do it, if not we need to commit and try 5267 * again. 5268 */ 5269 if (steal_from_global) { 5270 if (!btrfs_check_space_for_delayed_refs(trans, root)) 5271 ret = btrfs_block_rsv_migrate(global_rsv, rsv, 5272 min_size); 5273 else 5274 ret = -ENOSPC; 5275 } 5276 5277 /* 5278 * Couldn't steal from the global reserve, we have too much 5279 * pending stuff built up, commit the transaction and try it 5280 * again. 5281 */ 5282 if (ret) { 5283 ret = btrfs_commit_transaction(trans, root); 5284 if (ret) { 5285 btrfs_orphan_del(NULL, inode); 5286 btrfs_free_block_rsv(root, rsv); 5287 goto no_delete; 5288 } 5289 continue; 5290 } else { 5291 steal_from_global = 0; 5292 } 5293 5294 trans->block_rsv = rsv; 5295 5296 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 5297 if (ret != -ENOSPC && ret != -EAGAIN) 5298 break; 5299 5300 trans->block_rsv = &root->fs_info->trans_block_rsv; 5301 btrfs_end_transaction(trans, root); 5302 trans = NULL; 5303 btrfs_btree_balance_dirty(root); 5304 } 5305 5306 btrfs_free_block_rsv(root, rsv); 5307 5308 /* 5309 * Errors here aren't a big deal, it just means we leave orphan items 5310 * in the tree. They will be cleaned up on the next mount. 5311 */ 5312 if (ret == 0) { 5313 trans->block_rsv = root->orphan_block_rsv; 5314 btrfs_orphan_del(trans, inode); 5315 } else { 5316 btrfs_orphan_del(NULL, inode); 5317 } 5318 5319 trans->block_rsv = &root->fs_info->trans_block_rsv; 5320 if (!(root == root->fs_info->tree_root || 5321 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 5322 btrfs_return_ino(root, btrfs_ino(inode)); 5323 5324 btrfs_end_transaction(trans, root); 5325 btrfs_btree_balance_dirty(root); 5326 no_delete: 5327 btrfs_remove_delayed_node(inode); 5328 clear_inode(inode); 5329 return; 5330 } 5331 5332 /* 5333 * this returns the key found in the dir entry in the location pointer. 5334 * If no dir entries were found, location->objectid is 0. 
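 * Callers are expected to treat an objectid of 0 as "no such entry"; see
 * btrfs_lookup_dentry() below, which turns it into -ENOENT.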
5335 */ 5336 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5337 struct btrfs_key *location) 5338 { 5339 const char *name = dentry->d_name.name; 5340 int namelen = dentry->d_name.len; 5341 struct btrfs_dir_item *di; 5342 struct btrfs_path *path; 5343 struct btrfs_root *root = BTRFS_I(dir)->root; 5344 int ret = 0; 5345 5346 path = btrfs_alloc_path(); 5347 if (!path) 5348 return -ENOMEM; 5349 5350 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 5351 namelen, 0); 5352 if (IS_ERR(di)) 5353 ret = PTR_ERR(di); 5354 5355 if (IS_ERR_OR_NULL(di)) 5356 goto out_err; 5357 5358 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5359 out: 5360 btrfs_free_path(path); 5361 return ret; 5362 out_err: 5363 location->objectid = 0; 5364 goto out; 5365 } 5366 5367 /* 5368 * when we hit a tree root in a directory, the btrfs part of the inode 5369 * needs to be changed to reflect the root directory of the tree root. This 5370 * is kind of like crossing a mount point. 5371 */ 5372 static int fixup_tree_root_location(struct btrfs_root *root, 5373 struct inode *dir, 5374 struct dentry *dentry, 5375 struct btrfs_key *location, 5376 struct btrfs_root **sub_root) 5377 { 5378 struct btrfs_path *path; 5379 struct btrfs_root *new_root; 5380 struct btrfs_root_ref *ref; 5381 struct extent_buffer *leaf; 5382 struct btrfs_key key; 5383 int ret; 5384 int err = 0; 5385 5386 path = btrfs_alloc_path(); 5387 if (!path) { 5388 err = -ENOMEM; 5389 goto out; 5390 } 5391 5392 err = -ENOENT; 5393 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5394 key.type = BTRFS_ROOT_REF_KEY; 5395 key.offset = location->objectid; 5396 5397 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, 5398 0, 0); 5399 if (ret) { 5400 if (ret < 0) 5401 err = ret; 5402 goto out; 5403 } 5404 5405 leaf = path->nodes[0]; 5406 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5407 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5408 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5409 goto out; 5410 5411 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5412 (unsigned long)(ref + 1), 5413 dentry->d_name.len); 5414 if (ret) 5415 goto out; 5416 5417 btrfs_release_path(path); 5418 5419 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 5420 if (IS_ERR(new_root)) { 5421 err = PTR_ERR(new_root); 5422 goto out; 5423 } 5424 5425 *sub_root = new_root; 5426 location->objectid = btrfs_root_dirid(&new_root->root_item); 5427 location->type = BTRFS_INODE_ITEM_KEY; 5428 location->offset = 0; 5429 err = 0; 5430 out: 5431 btrfs_free_path(path); 5432 return err; 5433 } 5434 5435 static void inode_tree_add(struct inode *inode) 5436 { 5437 struct btrfs_root *root = BTRFS_I(inode)->root; 5438 struct btrfs_inode *entry; 5439 struct rb_node **p; 5440 struct rb_node *parent; 5441 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5442 u64 ino = btrfs_ino(inode); 5443 5444 if (inode_unhashed(inode)) 5445 return; 5446 parent = NULL; 5447 spin_lock(&root->inode_lock); 5448 p = &root->inode_tree.rb_node; 5449 while (*p) { 5450 parent = *p; 5451 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5452 5453 if (ino < btrfs_ino(&entry->vfs_inode)) 5454 p = &parent->rb_left; 5455 else if (ino > btrfs_ino(&entry->vfs_inode)) 5456 p = &parent->rb_right; 5457 else { 5458 WARN_ON(!(entry->vfs_inode.i_state & 5459 (I_WILL_FREE | I_FREEING))); 5460 rb_replace_node(parent, new, &root->inode_tree); 5461 RB_CLEAR_NODE(parent); 5462 spin_unlock(&root->inode_lock); 5463 return; 5464 
} 5465 } 5466 rb_link_node(new, parent, p); 5467 rb_insert_color(new, &root->inode_tree); 5468 spin_unlock(&root->inode_lock); 5469 } 5470 5471 static void inode_tree_del(struct inode *inode) 5472 { 5473 struct btrfs_root *root = BTRFS_I(inode)->root; 5474 int empty = 0; 5475 5476 spin_lock(&root->inode_lock); 5477 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 5478 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 5479 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 5480 empty = RB_EMPTY_ROOT(&root->inode_tree); 5481 } 5482 spin_unlock(&root->inode_lock); 5483 5484 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5485 synchronize_srcu(&root->fs_info->subvol_srcu); 5486 spin_lock(&root->inode_lock); 5487 empty = RB_EMPTY_ROOT(&root->inode_tree); 5488 spin_unlock(&root->inode_lock); 5489 if (empty) 5490 btrfs_add_dead_root(root); 5491 } 5492 } 5493 5494 void btrfs_invalidate_inodes(struct btrfs_root *root) 5495 { 5496 struct rb_node *node; 5497 struct rb_node *prev; 5498 struct btrfs_inode *entry; 5499 struct inode *inode; 5500 u64 objectid = 0; 5501 5502 if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 5503 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 5504 5505 spin_lock(&root->inode_lock); 5506 again: 5507 node = root->inode_tree.rb_node; 5508 prev = NULL; 5509 while (node) { 5510 prev = node; 5511 entry = rb_entry(node, struct btrfs_inode, rb_node); 5512 5513 if (objectid < btrfs_ino(&entry->vfs_inode)) 5514 node = node->rb_left; 5515 else if (objectid > btrfs_ino(&entry->vfs_inode)) 5516 node = node->rb_right; 5517 else 5518 break; 5519 } 5520 if (!node) { 5521 while (prev) { 5522 entry = rb_entry(prev, struct btrfs_inode, rb_node); 5523 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 5524 node = prev; 5525 break; 5526 } 5527 prev = rb_next(prev); 5528 } 5529 } 5530 while (node) { 5531 entry = rb_entry(node, struct btrfs_inode, rb_node); 5532 objectid = btrfs_ino(&entry->vfs_inode) + 1; 5533 inode = igrab(&entry->vfs_inode); 5534 if (inode) { 5535 spin_unlock(&root->inode_lock); 5536 if (atomic_read(&inode->i_count) > 1) 5537 d_prune_aliases(inode); 5538 /* 5539 * btrfs_drop_inode will have it removed from 5540 * the inode cache when its usage count 5541 * hits zero. 
5542 */ 5543 iput(inode); 5544 cond_resched(); 5545 spin_lock(&root->inode_lock); 5546 goto again; 5547 } 5548 5549 if (cond_resched_lock(&root->inode_lock)) 5550 goto again; 5551 5552 node = rb_next(node); 5553 } 5554 spin_unlock(&root->inode_lock); 5555 } 5556 5557 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5558 { 5559 struct btrfs_iget_args *args = p; 5560 inode->i_ino = args->location->objectid; 5561 memcpy(&BTRFS_I(inode)->location, args->location, 5562 sizeof(*args->location)); 5563 BTRFS_I(inode)->root = args->root; 5564 return 0; 5565 } 5566 5567 static int btrfs_find_actor(struct inode *inode, void *opaque) 5568 { 5569 struct btrfs_iget_args *args = opaque; 5570 return args->location->objectid == BTRFS_I(inode)->location.objectid && 5571 args->root == BTRFS_I(inode)->root; 5572 } 5573 5574 static struct inode *btrfs_iget_locked(struct super_block *s, 5575 struct btrfs_key *location, 5576 struct btrfs_root *root) 5577 { 5578 struct inode *inode; 5579 struct btrfs_iget_args args; 5580 unsigned long hashval = btrfs_inode_hash(location->objectid, root); 5581 5582 args.location = location; 5583 args.root = root; 5584 5585 inode = iget5_locked(s, hashval, btrfs_find_actor, 5586 btrfs_init_locked_inode, 5587 (void *)&args); 5588 return inode; 5589 } 5590 5591 /* Get an inode object given its location and corresponding root. 5592 * Returns in *is_new if the inode was read from disk 5593 */ 5594 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5595 struct btrfs_root *root, int *new) 5596 { 5597 struct inode *inode; 5598 5599 inode = btrfs_iget_locked(s, location, root); 5600 if (!inode) 5601 return ERR_PTR(-ENOMEM); 5602 5603 if (inode->i_state & I_NEW) { 5604 btrfs_read_locked_inode(inode); 5605 if (!is_bad_inode(inode)) { 5606 inode_tree_add(inode); 5607 unlock_new_inode(inode); 5608 if (new) 5609 *new = 1; 5610 } else { 5611 unlock_new_inode(inode); 5612 iput(inode); 5613 inode = ERR_PTR(-ESTALE); 5614 } 5615 } 5616 5617 return inode; 5618 } 5619 5620 static struct inode *new_simple_dir(struct super_block *s, 5621 struct btrfs_key *key, 5622 struct btrfs_root *root) 5623 { 5624 struct inode *inode = new_inode(s); 5625 5626 if (!inode) 5627 return ERR_PTR(-ENOMEM); 5628 5629 BTRFS_I(inode)->root = root; 5630 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5631 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5632 5633 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5634 inode->i_op = &btrfs_dir_ro_inode_operations; 5635 inode->i_fop = &simple_dir_operations; 5636 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5637 inode->i_mtime = CURRENT_TIME; 5638 inode->i_atime = inode->i_mtime; 5639 inode->i_ctime = inode->i_mtime; 5640 BTRFS_I(inode)->i_otime = inode->i_mtime; 5641 5642 return inode; 5643 } 5644 5645 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5646 { 5647 struct inode *inode; 5648 struct btrfs_root *root = BTRFS_I(dir)->root; 5649 struct btrfs_root *sub_root = root; 5650 struct btrfs_key location; 5651 int index; 5652 int ret = 0; 5653 5654 if (dentry->d_name.len > BTRFS_NAME_LEN) 5655 return ERR_PTR(-ENAMETOOLONG); 5656 5657 ret = btrfs_inode_by_name(dir, dentry, &location); 5658 if (ret < 0) 5659 return ERR_PTR(ret); 5660 5661 if (location.objectid == 0) 5662 return ERR_PTR(-ENOENT); 5663 5664 if (location.type == BTRFS_INODE_ITEM_KEY) { 5665 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 5666 return inode; 5667 } 5668 5669 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); 
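	/*
	 * The name resolves to a subvolume root rather than a regular inode,
	 * so cross into that subvolume much like crossing a mount point:
	 * fixup_tree_root_location() rewrites the key to the root directory
	 * of the subvolume and hands back its btrfs_root.
	 */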
5670 5671 index = srcu_read_lock(&root->fs_info->subvol_srcu); 5672 ret = fixup_tree_root_location(root, dir, dentry, 5673 &location, &sub_root); 5674 if (ret < 0) { 5675 if (ret != -ENOENT) 5676 inode = ERR_PTR(ret); 5677 else 5678 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5679 } else { 5680 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 5681 } 5682 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 5683 5684 if (!IS_ERR(inode) && root != sub_root) { 5685 down_read(&root->fs_info->cleanup_work_sem); 5686 if (!(inode->i_sb->s_flags & MS_RDONLY)) 5687 ret = btrfs_orphan_cleanup(sub_root); 5688 up_read(&root->fs_info->cleanup_work_sem); 5689 if (ret) { 5690 iput(inode); 5691 inode = ERR_PTR(ret); 5692 } 5693 } 5694 5695 return inode; 5696 } 5697 5698 static int btrfs_dentry_delete(const struct dentry *dentry) 5699 { 5700 struct btrfs_root *root; 5701 struct inode *inode = d_inode(dentry); 5702 5703 if (!inode && !IS_ROOT(dentry)) 5704 inode = d_inode(dentry->d_parent); 5705 5706 if (inode) { 5707 root = BTRFS_I(inode)->root; 5708 if (btrfs_root_refs(&root->root_item) == 0) 5709 return 1; 5710 5711 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5712 return 1; 5713 } 5714 return 0; 5715 } 5716 5717 static void btrfs_dentry_release(struct dentry *dentry) 5718 { 5719 kfree(dentry->d_fsdata); 5720 } 5721 5722 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5723 unsigned int flags) 5724 { 5725 struct inode *inode; 5726 5727 inode = btrfs_lookup_dentry(dir, dentry); 5728 if (IS_ERR(inode)) { 5729 if (PTR_ERR(inode) == -ENOENT) 5730 inode = NULL; 5731 else 5732 return ERR_CAST(inode); 5733 } 5734 5735 return d_splice_alias(inode, dentry); 5736 } 5737 5738 unsigned char btrfs_filetype_table[] = { 5739 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 5740 }; 5741 5742 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5743 { 5744 struct inode *inode = file_inode(file); 5745 struct btrfs_root *root = BTRFS_I(inode)->root; 5746 struct btrfs_item *item; 5747 struct btrfs_dir_item *di; 5748 struct btrfs_key key; 5749 struct btrfs_key found_key; 5750 struct btrfs_path *path; 5751 struct list_head ins_list; 5752 struct list_head del_list; 5753 int ret; 5754 struct extent_buffer *leaf; 5755 int slot; 5756 unsigned char d_type; 5757 int over = 0; 5758 u32 di_cur; 5759 u32 di_total; 5760 u32 di_len; 5761 int key_type = BTRFS_DIR_INDEX_KEY; 5762 char tmp_name[32]; 5763 char *name_ptr; 5764 int name_len; 5765 int is_curr = 0; /* ctx->pos points to the current index? 
*/ 5766 5767 /* FIXME, use a real flag for deciding about the key type */ 5768 if (root->fs_info->tree_root == root) 5769 key_type = BTRFS_DIR_ITEM_KEY; 5770 5771 if (!dir_emit_dots(file, ctx)) 5772 return 0; 5773 5774 path = btrfs_alloc_path(); 5775 if (!path) 5776 return -ENOMEM; 5777 5778 path->reada = 1; 5779 5780 if (key_type == BTRFS_DIR_INDEX_KEY) { 5781 INIT_LIST_HEAD(&ins_list); 5782 INIT_LIST_HEAD(&del_list); 5783 btrfs_get_delayed_items(inode, &ins_list, &del_list); 5784 } 5785 5786 key.type = key_type; 5787 key.offset = ctx->pos; 5788 key.objectid = btrfs_ino(inode); 5789 5790 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5791 if (ret < 0) 5792 goto err; 5793 5794 while (1) { 5795 leaf = path->nodes[0]; 5796 slot = path->slots[0]; 5797 if (slot >= btrfs_header_nritems(leaf)) { 5798 ret = btrfs_next_leaf(root, path); 5799 if (ret < 0) 5800 goto err; 5801 else if (ret > 0) 5802 break; 5803 continue; 5804 } 5805 5806 item = btrfs_item_nr(slot); 5807 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5808 5809 if (found_key.objectid != key.objectid) 5810 break; 5811 if (found_key.type != key_type) 5812 break; 5813 if (found_key.offset < ctx->pos) 5814 goto next; 5815 if (key_type == BTRFS_DIR_INDEX_KEY && 5816 btrfs_should_delete_dir_index(&del_list, 5817 found_key.offset)) 5818 goto next; 5819 5820 ctx->pos = found_key.offset; 5821 is_curr = 1; 5822 5823 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 5824 di_cur = 0; 5825 di_total = btrfs_item_size(leaf, item); 5826 5827 while (di_cur < di_total) { 5828 struct btrfs_key location; 5829 5830 if (verify_dir_item(root, leaf, di)) 5831 break; 5832 5833 name_len = btrfs_dir_name_len(leaf, di); 5834 if (name_len <= sizeof(tmp_name)) { 5835 name_ptr = tmp_name; 5836 } else { 5837 name_ptr = kmalloc(name_len, GFP_NOFS); 5838 if (!name_ptr) { 5839 ret = -ENOMEM; 5840 goto err; 5841 } 5842 } 5843 read_extent_buffer(leaf, name_ptr, 5844 (unsigned long)(di + 1), name_len); 5845 5846 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 5847 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5848 5849 5850 /* is this a reference to our own snapshot? If so 5851 * skip it. 5852 * 5853 * In contrast to old kernels, we insert the snapshot's 5854 * dir item and dir index after it has been created, so 5855 * we won't find a reference to our own snapshot. We 5856 * still keep the following code for backward 5857 * compatibility. 5858 */ 5859 if (location.type == BTRFS_ROOT_ITEM_KEY && 5860 location.objectid == root->root_key.objectid) { 5861 over = 0; 5862 goto skip; 5863 } 5864 over = !dir_emit(ctx, name_ptr, name_len, 5865 location.objectid, d_type); 5866 5867 skip: 5868 if (name_ptr != tmp_name) 5869 kfree(name_ptr); 5870 5871 if (over) 5872 goto nopos; 5873 di_len = btrfs_dir_name_len(leaf, di) + 5874 btrfs_dir_data_len(leaf, di) + sizeof(*di); 5875 di_cur += di_len; 5876 di = (struct btrfs_dir_item *)((char *)di + di_len); 5877 } 5878 next: 5879 path->slots[0]++; 5880 } 5881 5882 if (key_type == BTRFS_DIR_INDEX_KEY) { 5883 if (is_curr) 5884 ctx->pos++; 5885 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5886 if (ret) 5887 goto nopos; 5888 } 5889 5890 /* Reached end of directory/root. Bump pos past the last item. */ 5891 ctx->pos++; 5892 5893 /* 5894 * Stop new entries from being returned after we return the last 5895 * entry. 5896 * 5897 * New directory entries are assigned a strictly increasing 5898 * offset. This means that new entries created during readdir 5899 * are *guaranteed* to be seen in the future by that readdir. 
5900 * This has broken buggy programs which operate on names as 5901 * they're returned by readdir. Until we re-use freed offsets 5902 * we have this hack to stop new entries from being returned 5903 * under the assumption that they'll never reach this huge 5904 * offset. 5905 * 5906 * This is being careful not to overflow 32bit loff_t unless the 5907 * last entry requires it because doing so has broken 32bit apps 5908 * in the past. 5909 */ 5910 if (key_type == BTRFS_DIR_INDEX_KEY) { 5911 if (ctx->pos >= INT_MAX) 5912 ctx->pos = LLONG_MAX; 5913 else 5914 ctx->pos = INT_MAX; 5915 } 5916 nopos: 5917 ret = 0; 5918 err: 5919 if (key_type == BTRFS_DIR_INDEX_KEY) 5920 btrfs_put_delayed_items(&ins_list, &del_list); 5921 btrfs_free_path(path); 5922 return ret; 5923 } 5924 5925 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 5926 { 5927 struct btrfs_root *root = BTRFS_I(inode)->root; 5928 struct btrfs_trans_handle *trans; 5929 int ret = 0; 5930 bool nolock = false; 5931 5932 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5933 return 0; 5934 5935 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) 5936 nolock = true; 5937 5938 if (wbc->sync_mode == WB_SYNC_ALL) { 5939 if (nolock) 5940 trans = btrfs_join_transaction_nolock(root); 5941 else 5942 trans = btrfs_join_transaction(root); 5943 if (IS_ERR(trans)) 5944 return PTR_ERR(trans); 5945 ret = btrfs_commit_transaction(trans, root); 5946 } 5947 return ret; 5948 } 5949 5950 /* 5951 * This is somewhat expensive, updating the tree every time the 5952 * inode changes. But, it is most likely to find the inode in cache. 5953 * FIXME, needs more benchmarking...there are no reasons other than performance 5954 * to keep or drop this code. 5955 */ 5956 static int btrfs_dirty_inode(struct inode *inode) 5957 { 5958 struct btrfs_root *root = BTRFS_I(inode)->root; 5959 struct btrfs_trans_handle *trans; 5960 int ret; 5961 5962 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5963 return 0; 5964 5965 trans = btrfs_join_transaction(root); 5966 if (IS_ERR(trans)) 5967 return PTR_ERR(trans); 5968 5969 ret = btrfs_update_inode(trans, root, inode); 5970 if (ret && ret == -ENOSPC) { 5971 /* whoops, lets try again with the full transaction */ 5972 btrfs_end_transaction(trans, root); 5973 trans = btrfs_start_transaction(root, 1); 5974 if (IS_ERR(trans)) 5975 return PTR_ERR(trans); 5976 5977 ret = btrfs_update_inode(trans, root, inode); 5978 } 5979 btrfs_end_transaction(trans, root); 5980 if (BTRFS_I(inode)->delayed_node) 5981 btrfs_balance_delayed_items(root); 5982 5983 return ret; 5984 } 5985 5986 /* 5987 * This is a copy of file_update_time. We need this so we can return error on 5988 * ENOSPC for updating the inode in the case of file write and mmap writes. 
5989 */ 5990 static int btrfs_update_time(struct inode *inode, struct timespec *now, 5991 int flags) 5992 { 5993 struct btrfs_root *root = BTRFS_I(inode)->root; 5994 5995 if (btrfs_root_readonly(root)) 5996 return -EROFS; 5997 5998 if (flags & S_VERSION) 5999 inode_inc_iversion(inode); 6000 if (flags & S_CTIME) 6001 inode->i_ctime = *now; 6002 if (flags & S_MTIME) 6003 inode->i_mtime = *now; 6004 if (flags & S_ATIME) 6005 inode->i_atime = *now; 6006 return btrfs_dirty_inode(inode); 6007 } 6008 6009 /* 6010 * find the highest existing sequence number in a directory 6011 * and then set the in-memory index_cnt variable to reflect 6012 * free sequence numbers 6013 */ 6014 static int btrfs_set_inode_index_count(struct inode *inode) 6015 { 6016 struct btrfs_root *root = BTRFS_I(inode)->root; 6017 struct btrfs_key key, found_key; 6018 struct btrfs_path *path; 6019 struct extent_buffer *leaf; 6020 int ret; 6021 6022 key.objectid = btrfs_ino(inode); 6023 key.type = BTRFS_DIR_INDEX_KEY; 6024 key.offset = (u64)-1; 6025 6026 path = btrfs_alloc_path(); 6027 if (!path) 6028 return -ENOMEM; 6029 6030 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6031 if (ret < 0) 6032 goto out; 6033 /* FIXME: we should be able to handle this */ 6034 if (ret == 0) 6035 goto out; 6036 ret = 0; 6037 6038 /* 6039 * MAGIC NUMBER EXPLANATION: 6040 * since we search a directory based on f_pos we have to start at 2 6041 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody 6042 * else has to start at 2 6043 */ 6044 if (path->slots[0] == 0) { 6045 BTRFS_I(inode)->index_cnt = 2; 6046 goto out; 6047 } 6048 6049 path->slots[0]--; 6050 6051 leaf = path->nodes[0]; 6052 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6053 6054 if (found_key.objectid != btrfs_ino(inode) || 6055 found_key.type != BTRFS_DIR_INDEX_KEY) { 6056 BTRFS_I(inode)->index_cnt = 2; 6057 goto out; 6058 } 6059 6060 BTRFS_I(inode)->index_cnt = found_key.offset + 1; 6061 out: 6062 btrfs_free_path(path); 6063 return ret; 6064 } 6065 6066 /* 6067 * helper to find a free sequence number in a given directory. This current 6068 * code is very simple, later versions will do smarter things in the btree 6069 */ 6070 int btrfs_set_inode_index(struct inode *dir, u64 *index) 6071 { 6072 int ret = 0; 6073 6074 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 6075 ret = btrfs_inode_delayed_dir_index_count(dir); 6076 if (ret) { 6077 ret = btrfs_set_inode_index_count(dir); 6078 if (ret) 6079 return ret; 6080 } 6081 } 6082 6083 *index = BTRFS_I(dir)->index_cnt; 6084 BTRFS_I(dir)->index_cnt++; 6085 6086 return ret; 6087 } 6088 6089 static int btrfs_insert_inode_locked(struct inode *inode) 6090 { 6091 struct btrfs_iget_args args; 6092 args.location = &BTRFS_I(inode)->location; 6093 args.root = BTRFS_I(inode)->root; 6094 6095 return insert_inode_locked4(inode, 6096 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6097 btrfs_find_actor, &args); 6098 } 6099 6100 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 6101 struct btrfs_root *root, 6102 struct inode *dir, 6103 const char *name, int name_len, 6104 u64 ref_objectid, u64 objectid, 6105 umode_t mode, u64 *index) 6106 { 6107 struct inode *inode; 6108 struct btrfs_inode_item *inode_item; 6109 struct btrfs_key *location; 6110 struct btrfs_path *path; 6111 struct btrfs_inode_ref *ref; 6112 struct btrfs_key key[2]; 6113 u32 sizes[2]; 6114 int nitems = name ? 
2 : 1; 6115 unsigned long ptr; 6116 int ret; 6117 6118 path = btrfs_alloc_path(); 6119 if (!path) 6120 return ERR_PTR(-ENOMEM); 6121 6122 inode = new_inode(root->fs_info->sb); 6123 if (!inode) { 6124 btrfs_free_path(path); 6125 return ERR_PTR(-ENOMEM); 6126 } 6127 6128 /* 6129 * For O_TMPFILE, set the link count to 0, so that after this point, 6130 * we fill in the inode item with the correct link count. 6131 */ 6132 if (!name) 6133 set_nlink(inode, 0); 6134 6135 /* 6136 * we have to initialize this early, so we can reclaim the inode 6137 * number if we fail afterwards in this function. 6138 */ 6139 inode->i_ino = objectid; 6140 6141 if (dir && name) { 6142 trace_btrfs_inode_request(dir); 6143 6144 ret = btrfs_set_inode_index(dir, index); 6145 if (ret) { 6146 btrfs_free_path(path); 6147 iput(inode); 6148 return ERR_PTR(ret); 6149 } 6150 } else if (dir) { 6151 *index = 0; 6152 } 6153 /* 6154 * index_cnt is ignored for everything but a dir, 6155 * btrfs_set_inode_index_count has an explanation for the magic 6156 * number 6157 */ 6158 BTRFS_I(inode)->index_cnt = 2; 6159 BTRFS_I(inode)->dir_index = *index; 6160 BTRFS_I(inode)->root = root; 6161 BTRFS_I(inode)->generation = trans->transid; 6162 inode->i_generation = BTRFS_I(inode)->generation; 6163 6164 /* 6165 * We could have gotten an inode number from somebody who was fsynced 6166 * and then removed in this same transaction, so let's just set full 6167 * sync since it will be a full sync anyway and this will blow away the 6168 * old info in the log. 6169 */ 6170 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 6171 6172 key[0].objectid = objectid; 6173 key[0].type = BTRFS_INODE_ITEM_KEY; 6174 key[0].offset = 0; 6175 6176 sizes[0] = sizeof(struct btrfs_inode_item); 6177 6178 if (name) { 6179 /* 6180 * Start new inodes with an inode_ref. This is slightly more 6181 * efficient for small numbers of hard links since they will 6182 * be packed into one item. Extended refs will kick in if we 6183 * add more hard links than can fit in the ref item.
6184 */ 6185 key[1].objectid = objectid; 6186 key[1].type = BTRFS_INODE_REF_KEY; 6187 key[1].offset = ref_objectid; 6188 6189 sizes[1] = name_len + sizeof(*ref); 6190 } 6191 6192 location = &BTRFS_I(inode)->location; 6193 location->objectid = objectid; 6194 location->offset = 0; 6195 location->type = BTRFS_INODE_ITEM_KEY; 6196 6197 ret = btrfs_insert_inode_locked(inode); 6198 if (ret < 0) 6199 goto fail; 6200 6201 path->leave_spinning = 1; 6202 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); 6203 if (ret != 0) 6204 goto fail_unlock; 6205 6206 inode_init_owner(inode, dir, mode); 6207 inode_set_bytes(inode, 0); 6208 6209 inode->i_mtime = CURRENT_TIME; 6210 inode->i_atime = inode->i_mtime; 6211 inode->i_ctime = inode->i_mtime; 6212 BTRFS_I(inode)->i_otime = inode->i_mtime; 6213 6214 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6215 struct btrfs_inode_item); 6216 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, 6217 sizeof(*inode_item)); 6218 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6219 6220 if (name) { 6221 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6222 struct btrfs_inode_ref); 6223 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6224 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 6225 ptr = (unsigned long)(ref + 1); 6226 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6227 } 6228 6229 btrfs_mark_buffer_dirty(path->nodes[0]); 6230 btrfs_free_path(path); 6231 6232 btrfs_inherit_iflags(inode, dir); 6233 6234 if (S_ISREG(mode)) { 6235 if (btrfs_test_opt(root, NODATASUM)) 6236 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6237 if (btrfs_test_opt(root, NODATACOW)) 6238 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6239 BTRFS_INODE_NODATASUM; 6240 } 6241 6242 inode_tree_add(inode); 6243 6244 trace_btrfs_inode_new(inode); 6245 btrfs_set_inode_last_trans(trans, inode); 6246 6247 btrfs_update_root_times(trans, root); 6248 6249 ret = btrfs_inode_inherit_props(trans, inode, dir); 6250 if (ret) 6251 btrfs_err(root->fs_info, 6252 "error inheriting props for ino %llu (root %llu): %d", 6253 btrfs_ino(inode), root->root_key.objectid, ret); 6254 6255 return inode; 6256 6257 fail_unlock: 6258 unlock_new_inode(inode); 6259 fail: 6260 if (dir && name) 6261 BTRFS_I(dir)->index_cnt--; 6262 btrfs_free_path(path); 6263 iput(inode); 6264 return ERR_PTR(ret); 6265 } 6266 6267 static inline u8 btrfs_inode_type(struct inode *inode) 6268 { 6269 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; 6270 } 6271 6272 /* 6273 * utility function to add 'inode' into 'parent_inode' with 6274 * a give name and a given sequence number. 6275 * if 'add_backref' is true, also insert a backref from the 6276 * inode to the parent directory. 
6277 */ 6278 int btrfs_add_link(struct btrfs_trans_handle *trans, 6279 struct inode *parent_inode, struct inode *inode, 6280 const char *name, int name_len, int add_backref, u64 index) 6281 { 6282 int ret = 0; 6283 struct btrfs_key key; 6284 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 6285 u64 ino = btrfs_ino(inode); 6286 u64 parent_ino = btrfs_ino(parent_inode); 6287 6288 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6289 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 6290 } else { 6291 key.objectid = ino; 6292 key.type = BTRFS_INODE_ITEM_KEY; 6293 key.offset = 0; 6294 } 6295 6296 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6297 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 6298 key.objectid, root->root_key.objectid, 6299 parent_ino, index, name, name_len); 6300 } else if (add_backref) { 6301 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6302 parent_ino, index); 6303 } 6304 6305 /* Nothing to clean up yet */ 6306 if (ret) 6307 return ret; 6308 6309 ret = btrfs_insert_dir_item(trans, root, name, name_len, 6310 parent_inode, &key, 6311 btrfs_inode_type(inode), index); 6312 if (ret == -EEXIST || ret == -EOVERFLOW) 6313 goto fail_dir_item; 6314 else if (ret) { 6315 btrfs_abort_transaction(trans, root, ret); 6316 return ret; 6317 } 6318 6319 btrfs_i_size_write(parent_inode, parent_inode->i_size + 6320 name_len * 2); 6321 inode_inc_iversion(parent_inode); 6322 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 6323 ret = btrfs_update_inode(trans, root, parent_inode); 6324 if (ret) 6325 btrfs_abort_transaction(trans, root, ret); 6326 return ret; 6327 6328 fail_dir_item: 6329 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6330 u64 local_index; 6331 int err; 6332 err = btrfs_del_root_ref(trans, root->fs_info->tree_root, 6333 key.objectid, root->root_key.objectid, 6334 parent_ino, &local_index, name, name_len); 6335 6336 } else if (add_backref) { 6337 u64 local_index; 6338 int err; 6339 6340 err = btrfs_del_inode_ref(trans, root, name, name_len, 6341 ino, parent_ino, &local_index); 6342 } 6343 return ret; 6344 } 6345 6346 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 6347 struct inode *dir, struct dentry *dentry, 6348 struct inode *inode, int backref, u64 index) 6349 { 6350 int err = btrfs_add_link(trans, dir, inode, 6351 dentry->d_name.name, dentry->d_name.len, 6352 backref, index); 6353 if (err > 0) 6354 err = -EEXIST; 6355 return err; 6356 } 6357 6358 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 6359 umode_t mode, dev_t rdev) 6360 { 6361 struct btrfs_trans_handle *trans; 6362 struct btrfs_root *root = BTRFS_I(dir)->root; 6363 struct inode *inode = NULL; 6364 int err; 6365 int drop_inode = 0; 6366 u64 objectid; 6367 u64 index = 0; 6368 6369 /* 6370 * 2 for inode item and ref 6371 * 2 for dir items 6372 * 1 for xattr if selinux is on 6373 */ 6374 trans = btrfs_start_transaction(root, 5); 6375 if (IS_ERR(trans)) 6376 return PTR_ERR(trans); 6377 6378 err = btrfs_find_free_ino(root, &objectid); 6379 if (err) 6380 goto out_unlock; 6381 6382 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6383 dentry->d_name.len, btrfs_ino(dir), objectid, 6384 mode, &index); 6385 if (IS_ERR(inode)) { 6386 err = PTR_ERR(inode); 6387 goto out_unlock; 6388 } 6389 6390 /* 6391 * If the active LSM wants to access the inode during 6392 * d_instantiate it needs these. Smack checks to see 6393 * if the filesystem supports xattrs by looking at the 6394 * ops vector. 
6395 */ 6396 inode->i_op = &btrfs_special_inode_operations; 6397 init_special_inode(inode, inode->i_mode, rdev); 6398 6399 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6400 if (err) 6401 goto out_unlock_inode; 6402 6403 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6404 if (err) { 6405 goto out_unlock_inode; 6406 } else { 6407 btrfs_update_inode(trans, root, inode); 6408 unlock_new_inode(inode); 6409 d_instantiate(dentry, inode); 6410 } 6411 6412 out_unlock: 6413 btrfs_end_transaction(trans, root); 6414 btrfs_balance_delayed_items(root); 6415 btrfs_btree_balance_dirty(root); 6416 if (drop_inode) { 6417 inode_dec_link_count(inode); 6418 iput(inode); 6419 } 6420 return err; 6421 6422 out_unlock_inode: 6423 drop_inode = 1; 6424 unlock_new_inode(inode); 6425 goto out_unlock; 6426 6427 } 6428 6429 static int btrfs_create(struct inode *dir, struct dentry *dentry, 6430 umode_t mode, bool excl) 6431 { 6432 struct btrfs_trans_handle *trans; 6433 struct btrfs_root *root = BTRFS_I(dir)->root; 6434 struct inode *inode = NULL; 6435 int drop_inode_on_err = 0; 6436 int err; 6437 u64 objectid; 6438 u64 index = 0; 6439 6440 /* 6441 * 2 for inode item and ref 6442 * 2 for dir items 6443 * 1 for xattr if selinux is on 6444 */ 6445 trans = btrfs_start_transaction(root, 5); 6446 if (IS_ERR(trans)) 6447 return PTR_ERR(trans); 6448 6449 err = btrfs_find_free_ino(root, &objectid); 6450 if (err) 6451 goto out_unlock; 6452 6453 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6454 dentry->d_name.len, btrfs_ino(dir), objectid, 6455 mode, &index); 6456 if (IS_ERR(inode)) { 6457 err = PTR_ERR(inode); 6458 goto out_unlock; 6459 } 6460 drop_inode_on_err = 1; 6461 /* 6462 * If the active LSM wants to access the inode during 6463 * d_instantiate it needs these. Smack checks to see 6464 * if the filesystem supports xattrs by looking at the 6465 * ops vector. 
6466 */ 6467 inode->i_fop = &btrfs_file_operations; 6468 inode->i_op = &btrfs_file_inode_operations; 6469 inode->i_mapping->a_ops = &btrfs_aops; 6470 6471 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6472 if (err) 6473 goto out_unlock_inode; 6474 6475 err = btrfs_update_inode(trans, root, inode); 6476 if (err) 6477 goto out_unlock_inode; 6478 6479 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6480 if (err) 6481 goto out_unlock_inode; 6482 6483 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6484 unlock_new_inode(inode); 6485 d_instantiate(dentry, inode); 6486 6487 out_unlock: 6488 btrfs_end_transaction(trans, root); 6489 if (err && drop_inode_on_err) { 6490 inode_dec_link_count(inode); 6491 iput(inode); 6492 } 6493 btrfs_balance_delayed_items(root); 6494 btrfs_btree_balance_dirty(root); 6495 return err; 6496 6497 out_unlock_inode: 6498 unlock_new_inode(inode); 6499 goto out_unlock; 6500 6501 } 6502 6503 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6504 struct dentry *dentry) 6505 { 6506 struct btrfs_trans_handle *trans; 6507 struct btrfs_root *root = BTRFS_I(dir)->root; 6508 struct inode *inode = d_inode(old_dentry); 6509 u64 index; 6510 int err; 6511 int drop_inode = 0; 6512 6513 /* do not allow sys_link's with other subvols of the same device */ 6514 if (root->objectid != BTRFS_I(inode)->root->objectid) 6515 return -EXDEV; 6516 6517 if (inode->i_nlink >= BTRFS_LINK_MAX) 6518 return -EMLINK; 6519 6520 err = btrfs_set_inode_index(dir, &index); 6521 if (err) 6522 goto fail; 6523 6524 /* 6525 * 2 items for inode and inode ref 6526 * 2 items for dir items 6527 * 1 item for parent inode 6528 */ 6529 trans = btrfs_start_transaction(root, 5); 6530 if (IS_ERR(trans)) { 6531 err = PTR_ERR(trans); 6532 goto fail; 6533 } 6534 6535 /* There are several dir indexes for this inode, clear the cache. */ 6536 BTRFS_I(inode)->dir_index = 0ULL; 6537 inc_nlink(inode); 6538 inode_inc_iversion(inode); 6539 inode->i_ctime = CURRENT_TIME; 6540 ihold(inode); 6541 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6542 6543 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 6544 6545 if (err) { 6546 drop_inode = 1; 6547 } else { 6548 struct dentry *parent = dentry->d_parent; 6549 err = btrfs_update_inode(trans, root, inode); 6550 if (err) 6551 goto fail; 6552 if (inode->i_nlink == 1) { 6553 /* 6554 * If new hard link count is 1, it's a file created 6555 * with open(2) O_TMPFILE flag. 
6556 */ 6557 err = btrfs_orphan_del(trans, inode); 6558 if (err) 6559 goto fail; 6560 } 6561 d_instantiate(dentry, inode); 6562 btrfs_log_new_name(trans, inode, NULL, parent); 6563 } 6564 6565 btrfs_end_transaction(trans, root); 6566 btrfs_balance_delayed_items(root); 6567 fail: 6568 if (drop_inode) { 6569 inode_dec_link_count(inode); 6570 iput(inode); 6571 } 6572 btrfs_btree_balance_dirty(root); 6573 return err; 6574 } 6575 6576 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 6577 { 6578 struct inode *inode = NULL; 6579 struct btrfs_trans_handle *trans; 6580 struct btrfs_root *root = BTRFS_I(dir)->root; 6581 int err = 0; 6582 int drop_on_err = 0; 6583 u64 objectid = 0; 6584 u64 index = 0; 6585 6586 /* 6587 * 2 items for inode and ref 6588 * 2 items for dir items 6589 * 1 for xattr if selinux is on 6590 */ 6591 trans = btrfs_start_transaction(root, 5); 6592 if (IS_ERR(trans)) 6593 return PTR_ERR(trans); 6594 6595 err = btrfs_find_free_ino(root, &objectid); 6596 if (err) 6597 goto out_fail; 6598 6599 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6600 dentry->d_name.len, btrfs_ino(dir), objectid, 6601 S_IFDIR | mode, &index); 6602 if (IS_ERR(inode)) { 6603 err = PTR_ERR(inode); 6604 goto out_fail; 6605 } 6606 6607 drop_on_err = 1; 6608 /* these must be set before we unlock the inode */ 6609 inode->i_op = &btrfs_dir_inode_operations; 6610 inode->i_fop = &btrfs_dir_file_operations; 6611 6612 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6613 if (err) 6614 goto out_fail_inode; 6615 6616 btrfs_i_size_write(inode, 0); 6617 err = btrfs_update_inode(trans, root, inode); 6618 if (err) 6619 goto out_fail_inode; 6620 6621 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, 6622 dentry->d_name.len, 0, index); 6623 if (err) 6624 goto out_fail_inode; 6625 6626 d_instantiate(dentry, inode); 6627 /* 6628 * mkdir is special. We're unlocking after we call d_instantiate 6629 * to avoid a race with nfsd calling d_instantiate. 6630 */ 6631 unlock_new_inode(inode); 6632 drop_on_err = 0; 6633 6634 out_fail: 6635 btrfs_end_transaction(trans, root); 6636 if (drop_on_err) { 6637 inode_dec_link_count(inode); 6638 iput(inode); 6639 } 6640 btrfs_balance_delayed_items(root); 6641 btrfs_btree_balance_dirty(root); 6642 return err; 6643 6644 out_fail_inode: 6645 unlock_new_inode(inode); 6646 goto out_fail; 6647 } 6648 6649 /* Find next extent map of a given extent map, caller needs to ensure locks */ 6650 static struct extent_map *next_extent_map(struct extent_map *em) 6651 { 6652 struct rb_node *next; 6653 6654 next = rb_next(&em->rb_node); 6655 if (!next) 6656 return NULL; 6657 return container_of(next, struct extent_map, rb_node); 6658 } 6659 6660 static struct extent_map *prev_extent_map(struct extent_map *em) 6661 { 6662 struct rb_node *prev; 6663 6664 prev = rb_prev(&em->rb_node); 6665 if (!prev) 6666 return NULL; 6667 return container_of(prev, struct extent_map, rb_node); 6668 } 6669 6670 /* helper for btfs_get_extent. Given an existing extent in the tree, 6671 * the existing extent is the nearest extent to map_start, 6672 * and an extent that you want to insert, deal with overlap and insert 6673 * the best fitted new extent into the tree. 
6674 */ 6675 static int merge_extent_mapping(struct extent_map_tree *em_tree, 6676 struct extent_map *existing, 6677 struct extent_map *em, 6678 u64 map_start) 6679 { 6680 struct extent_map *prev; 6681 struct extent_map *next; 6682 u64 start; 6683 u64 end; 6684 u64 start_diff; 6685 6686 BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); 6687 6688 if (existing->start > map_start) { 6689 next = existing; 6690 prev = prev_extent_map(next); 6691 } else { 6692 prev = existing; 6693 next = next_extent_map(prev); 6694 } 6695 6696 start = prev ? extent_map_end(prev) : em->start; 6697 start = max_t(u64, start, em->start); 6698 end = next ? next->start : extent_map_end(em); 6699 end = min_t(u64, end, extent_map_end(em)); 6700 start_diff = start - em->start; 6701 em->start = start; 6702 em->len = end - start; 6703 if (em->block_start < EXTENT_MAP_LAST_BYTE && 6704 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 6705 em->block_start += start_diff; 6706 em->block_len -= start_diff; 6707 } 6708 return add_extent_mapping(em_tree, em, 0); 6709 } 6710 6711 static noinline int uncompress_inline(struct btrfs_path *path, 6712 struct inode *inode, struct page *page, 6713 size_t pg_offset, u64 extent_offset, 6714 struct btrfs_file_extent_item *item) 6715 { 6716 int ret; 6717 struct extent_buffer *leaf = path->nodes[0]; 6718 char *tmp; 6719 size_t max_size; 6720 unsigned long inline_size; 6721 unsigned long ptr; 6722 int compress_type; 6723 6724 WARN_ON(pg_offset != 0); 6725 compress_type = btrfs_file_extent_compression(leaf, item); 6726 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6727 inline_size = btrfs_file_extent_inline_item_len(leaf, 6728 btrfs_item_nr(path->slots[0])); 6729 tmp = kmalloc(inline_size, GFP_NOFS); 6730 if (!tmp) 6731 return -ENOMEM; 6732 ptr = btrfs_file_extent_inline_start(item); 6733 6734 read_extent_buffer(leaf, tmp, ptr, inline_size); 6735 6736 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6737 ret = btrfs_decompress(compress_type, tmp, page, 6738 extent_offset, inline_size, max_size); 6739 kfree(tmp); 6740 return ret; 6741 } 6742 6743 /* 6744 * a bit scary, this does extent mapping from logical file offset to the disk. 6745 * the ugly parts come from merging extents from the disk with the in-ram 6746 * representation. This gets more complex because of the data=ordered code, 6747 * where the in-ram extents might be locked pending data=ordered completion. 6748 * 6749 * This also copies inline extents directly into the page. 
6750 */ 6751 6752 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 6753 size_t pg_offset, u64 start, u64 len, 6754 int create) 6755 { 6756 int ret; 6757 int err = 0; 6758 u64 extent_start = 0; 6759 u64 extent_end = 0; 6760 u64 objectid = btrfs_ino(inode); 6761 u32 found_type; 6762 struct btrfs_path *path = NULL; 6763 struct btrfs_root *root = BTRFS_I(inode)->root; 6764 struct btrfs_file_extent_item *item; 6765 struct extent_buffer *leaf; 6766 struct btrfs_key found_key; 6767 struct extent_map *em = NULL; 6768 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 6769 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6770 struct btrfs_trans_handle *trans = NULL; 6771 const bool new_inline = !page || create; 6772 6773 again: 6774 read_lock(&em_tree->lock); 6775 em = lookup_extent_mapping(em_tree, start, len); 6776 if (em) 6777 em->bdev = root->fs_info->fs_devices->latest_bdev; 6778 read_unlock(&em_tree->lock); 6779 6780 if (em) { 6781 if (em->start > start || em->start + em->len <= start) 6782 free_extent_map(em); 6783 else if (em->block_start == EXTENT_MAP_INLINE && page) 6784 free_extent_map(em); 6785 else 6786 goto out; 6787 } 6788 em = alloc_extent_map(); 6789 if (!em) { 6790 err = -ENOMEM; 6791 goto out; 6792 } 6793 em->bdev = root->fs_info->fs_devices->latest_bdev; 6794 em->start = EXTENT_MAP_HOLE; 6795 em->orig_start = EXTENT_MAP_HOLE; 6796 em->len = (u64)-1; 6797 em->block_len = (u64)-1; 6798 6799 if (!path) { 6800 path = btrfs_alloc_path(); 6801 if (!path) { 6802 err = -ENOMEM; 6803 goto out; 6804 } 6805 /* 6806 * Chances are we'll be called again, so go ahead and do 6807 * readahead 6808 */ 6809 path->reada = 1; 6810 } 6811 6812 ret = btrfs_lookup_file_extent(trans, root, path, 6813 objectid, start, trans != NULL); 6814 if (ret < 0) { 6815 err = ret; 6816 goto out; 6817 } 6818 6819 if (ret != 0) { 6820 if (path->slots[0] == 0) 6821 goto not_found; 6822 path->slots[0]--; 6823 } 6824 6825 leaf = path->nodes[0]; 6826 item = btrfs_item_ptr(leaf, path->slots[0], 6827 struct btrfs_file_extent_item); 6828 /* are we inside the extent that was found? */ 6829 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6830 found_type = found_key.type; 6831 if (found_key.objectid != objectid || 6832 found_type != BTRFS_EXTENT_DATA_KEY) { 6833 /* 6834 * If we backup past the first extent we want to move forward 6835 * and see if there is an extent in front of us, otherwise we'll 6836 * say there is a hole for our whole search range which can 6837 * cause problems. 
6838 */ 6839 extent_end = start; 6840 goto next; 6841 } 6842 6843 found_type = btrfs_file_extent_type(leaf, item); 6844 extent_start = found_key.offset; 6845 if (found_type == BTRFS_FILE_EXTENT_REG || 6846 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6847 extent_end = extent_start + 6848 btrfs_file_extent_num_bytes(leaf, item); 6849 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6850 size_t size; 6851 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6852 extent_end = ALIGN(extent_start + size, root->sectorsize); 6853 } 6854 next: 6855 if (start >= extent_end) { 6856 path->slots[0]++; 6857 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6858 ret = btrfs_next_leaf(root, path); 6859 if (ret < 0) { 6860 err = ret; 6861 goto out; 6862 } 6863 if (ret > 0) 6864 goto not_found; 6865 leaf = path->nodes[0]; 6866 } 6867 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6868 if (found_key.objectid != objectid || 6869 found_key.type != BTRFS_EXTENT_DATA_KEY) 6870 goto not_found; 6871 if (start + len <= found_key.offset) 6872 goto not_found; 6873 if (start > found_key.offset) 6874 goto next; 6875 em->start = start; 6876 em->orig_start = start; 6877 em->len = found_key.offset - start; 6878 goto not_found_em; 6879 } 6880 6881 btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); 6882 6883 if (found_type == BTRFS_FILE_EXTENT_REG || 6884 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6885 goto insert; 6886 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6887 unsigned long ptr; 6888 char *map; 6889 size_t size; 6890 size_t extent_offset; 6891 size_t copy_size; 6892 6893 if (new_inline) 6894 goto out; 6895 6896 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6897 extent_offset = page_offset(page) + pg_offset - extent_start; 6898 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6899 size - extent_offset); 6900 em->start = extent_start + extent_offset; 6901 em->len = ALIGN(copy_size, root->sectorsize); 6902 em->orig_block_len = em->len; 6903 em->orig_start = em->start; 6904 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 6905 if (create == 0 && !PageUptodate(page)) { 6906 if (btrfs_file_extent_compression(leaf, item) != 6907 BTRFS_COMPRESS_NONE) { 6908 ret = uncompress_inline(path, inode, page, 6909 pg_offset, 6910 extent_offset, item); 6911 if (ret) { 6912 err = ret; 6913 goto out; 6914 } 6915 } else { 6916 map = kmap(page); 6917 read_extent_buffer(leaf, map + pg_offset, ptr, 6918 copy_size); 6919 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6920 memset(map + pg_offset + copy_size, 0, 6921 PAGE_CACHE_SIZE - pg_offset - 6922 copy_size); 6923 } 6924 kunmap(page); 6925 } 6926 flush_dcache_page(page); 6927 } else if (create && PageUptodate(page)) { 6928 BUG(); 6929 if (!trans) { 6930 kunmap(page); 6931 free_extent_map(em); 6932 em = NULL; 6933 6934 btrfs_release_path(path); 6935 trans = btrfs_join_transaction(root); 6936 6937 if (IS_ERR(trans)) 6938 return ERR_CAST(trans); 6939 goto again; 6940 } 6941 map = kmap(page); 6942 write_extent_buffer(leaf, map + pg_offset, ptr, 6943 copy_size); 6944 kunmap(page); 6945 btrfs_mark_buffer_dirty(leaf); 6946 } 6947 set_extent_uptodate(io_tree, em->start, 6948 extent_map_end(em) - 1, NULL, GFP_NOFS); 6949 goto insert; 6950 } 6951 not_found: 6952 em->start = start; 6953 em->orig_start = start; 6954 em->len = len; 6955 not_found_em: 6956 em->block_start = EXTENT_MAP_HOLE; 6957 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 6958 insert: 6959 btrfs_release_path(path); 6960 if (em->start > start || 
extent_map_end(em) <= start) { 6961 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]", 6962 em->start, em->len, start, len); 6963 err = -EIO; 6964 goto out; 6965 } 6966 6967 err = 0; 6968 write_lock(&em_tree->lock); 6969 ret = add_extent_mapping(em_tree, em, 0); 6970 /* it is possible that someone inserted the extent into the tree 6971 * while we had the lock dropped. It is also possible that 6972 * an overlapping map exists in the tree 6973 */ 6974 if (ret == -EEXIST) { 6975 struct extent_map *existing; 6976 6977 ret = 0; 6978 6979 existing = search_extent_mapping(em_tree, start, len); 6980 /* 6981 * existing will always be non-NULL, since there must be 6982 * extent causing the -EEXIST. 6983 */ 6984 if (start >= extent_map_end(existing) || 6985 start <= existing->start) { 6986 /* 6987 * The existing extent map is the one nearest to 6988 * the [start, start + len) range which overlaps 6989 */ 6990 err = merge_extent_mapping(em_tree, existing, 6991 em, start); 6992 free_extent_map(existing); 6993 if (err) { 6994 free_extent_map(em); 6995 em = NULL; 6996 } 6997 } else { 6998 free_extent_map(em); 6999 em = existing; 7000 err = 0; 7001 } 7002 } 7003 write_unlock(&em_tree->lock); 7004 out: 7005 7006 trace_btrfs_get_extent(root, em); 7007 7008 btrfs_free_path(path); 7009 if (trans) { 7010 ret = btrfs_end_transaction(trans, root); 7011 if (!err) 7012 err = ret; 7013 } 7014 if (err) { 7015 free_extent_map(em); 7016 return ERR_PTR(err); 7017 } 7018 BUG_ON(!em); /* Error is always set */ 7019 return em; 7020 } 7021 7022 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 7023 size_t pg_offset, u64 start, u64 len, 7024 int create) 7025 { 7026 struct extent_map *em; 7027 struct extent_map *hole_em = NULL; 7028 u64 range_start = start; 7029 u64 end; 7030 u64 found; 7031 u64 found_end; 7032 int err = 0; 7033 7034 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 7035 if (IS_ERR(em)) 7036 return em; 7037 if (em) { 7038 /* 7039 * if our em maps to 7040 * - a hole or 7041 * - a pre-alloc extent, 7042 * there might actually be delalloc bytes behind it. 
7043 */ 7044 if (em->block_start != EXTENT_MAP_HOLE && 7045 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7046 return em; 7047 else 7048 hole_em = em; 7049 } 7050 7051 /* check to see if we've wrapped (len == -1 or similar) */ 7052 end = start + len; 7053 if (end < start) 7054 end = (u64)-1; 7055 else 7056 end -= 1; 7057 7058 em = NULL; 7059 7060 /* ok, we didn't find anything, lets look for delalloc */ 7061 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, 7062 end, len, EXTENT_DELALLOC, 1); 7063 found_end = range_start + found; 7064 if (found_end < range_start) 7065 found_end = (u64)-1; 7066 7067 /* 7068 * we didn't find anything useful, return 7069 * the original results from get_extent() 7070 */ 7071 if (range_start > end || found_end <= start) { 7072 em = hole_em; 7073 hole_em = NULL; 7074 goto out; 7075 } 7076 7077 /* adjust the range_start to make sure it doesn't 7078 * go backwards from the start they passed in 7079 */ 7080 range_start = max(start, range_start); 7081 found = found_end - range_start; 7082 7083 if (found > 0) { 7084 u64 hole_start = start; 7085 u64 hole_len = len; 7086 7087 em = alloc_extent_map(); 7088 if (!em) { 7089 err = -ENOMEM; 7090 goto out; 7091 } 7092 /* 7093 * when btrfs_get_extent can't find anything it 7094 * returns one huge hole 7095 * 7096 * make sure what it found really fits our range, and 7097 * adjust to make sure it is based on the start from 7098 * the caller 7099 */ 7100 if (hole_em) { 7101 u64 calc_end = extent_map_end(hole_em); 7102 7103 if (calc_end <= start || (hole_em->start > end)) { 7104 free_extent_map(hole_em); 7105 hole_em = NULL; 7106 } else { 7107 hole_start = max(hole_em->start, start); 7108 hole_len = calc_end - hole_start; 7109 } 7110 } 7111 em->bdev = NULL; 7112 if (hole_em && range_start > hole_start) { 7113 /* our hole starts before our delalloc, so we 7114 * have to return just the parts of the hole 7115 * that go until the delalloc starts 7116 */ 7117 em->len = min(hole_len, 7118 range_start - hole_start); 7119 em->start = hole_start; 7120 em->orig_start = hole_start; 7121 /* 7122 * don't adjust block start at all, 7123 * it is fixed at EXTENT_MAP_HOLE 7124 */ 7125 em->block_start = hole_em->block_start; 7126 em->block_len = hole_len; 7127 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 7128 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 7129 } else { 7130 em->start = range_start; 7131 em->len = found; 7132 em->orig_start = range_start; 7133 em->block_start = EXTENT_MAP_DELALLOC; 7134 em->block_len = found; 7135 } 7136 } else if (hole_em) { 7137 return hole_em; 7138 } 7139 out: 7140 7141 free_extent_map(hole_em); 7142 if (err) { 7143 free_extent_map(em); 7144 return ERR_PTR(err); 7145 } 7146 return em; 7147 } 7148 7149 static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 7150 u64 start, u64 len) 7151 { 7152 struct btrfs_root *root = BTRFS_I(inode)->root; 7153 struct extent_map *em; 7154 struct btrfs_key ins; 7155 u64 alloc_hint; 7156 int ret; 7157 7158 alloc_hint = get_extent_allocation_hint(inode, start, len); 7159 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 7160 alloc_hint, &ins, 1, 1); 7161 if (ret) 7162 return ERR_PTR(ret); 7163 7164 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, 7165 ins.offset, ins.offset, ins.offset, 0); 7166 if (IS_ERR(em)) { 7167 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7168 return em; 7169 } 7170 7171 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 7172 ins.offset, ins.offset, 0); 7173 if (ret) 
{ 7174 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7175 free_extent_map(em); 7176 return ERR_PTR(ret); 7177 } 7178 7179 return em; 7180 } 7181 7182 /* 7183 * returns 1 when the nocow is safe, < 1 on error, 0 if the 7184 * block must be cow'd 7185 */ 7186 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7187 u64 *orig_start, u64 *orig_block_len, 7188 u64 *ram_bytes) 7189 { 7190 struct btrfs_trans_handle *trans; 7191 struct btrfs_path *path; 7192 int ret; 7193 struct extent_buffer *leaf; 7194 struct btrfs_root *root = BTRFS_I(inode)->root; 7195 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7196 struct btrfs_file_extent_item *fi; 7197 struct btrfs_key key; 7198 u64 disk_bytenr; 7199 u64 backref_offset; 7200 u64 extent_end; 7201 u64 num_bytes; 7202 int slot; 7203 int found_type; 7204 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); 7205 7206 path = btrfs_alloc_path(); 7207 if (!path) 7208 return -ENOMEM; 7209 7210 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 7211 offset, 0); 7212 if (ret < 0) 7213 goto out; 7214 7215 slot = path->slots[0]; 7216 if (ret == 1) { 7217 if (slot == 0) { 7218 /* can't find the item, must cow */ 7219 ret = 0; 7220 goto out; 7221 } 7222 slot--; 7223 } 7224 ret = 0; 7225 leaf = path->nodes[0]; 7226 btrfs_item_key_to_cpu(leaf, &key, slot); 7227 if (key.objectid != btrfs_ino(inode) || 7228 key.type != BTRFS_EXTENT_DATA_KEY) { 7229 /* not our file or wrong item type, must cow */ 7230 goto out; 7231 } 7232 7233 if (key.offset > offset) { 7234 /* Wrong offset, must cow */ 7235 goto out; 7236 } 7237 7238 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 7239 found_type = btrfs_file_extent_type(leaf, fi); 7240 if (found_type != BTRFS_FILE_EXTENT_REG && 7241 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 7242 /* not a regular extent, must cow */ 7243 goto out; 7244 } 7245 7246 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) 7247 goto out; 7248 7249 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 7250 if (extent_end <= offset) 7251 goto out; 7252 7253 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7254 if (disk_bytenr == 0) 7255 goto out; 7256 7257 if (btrfs_file_extent_compression(leaf, fi) || 7258 btrfs_file_extent_encryption(leaf, fi) || 7259 btrfs_file_extent_other_encoding(leaf, fi)) 7260 goto out; 7261 7262 backref_offset = btrfs_file_extent_offset(leaf, fi); 7263 7264 if (orig_start) { 7265 *orig_start = key.offset - backref_offset; 7266 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 7267 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7268 } 7269 7270 if (btrfs_extent_readonly(root, disk_bytenr)) 7271 goto out; 7272 7273 num_bytes = min(offset + *len, extent_end) - offset; 7274 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7275 u64 range_end; 7276 7277 range_end = round_up(offset + num_bytes, root->sectorsize) - 1; 7278 ret = test_range_bit(io_tree, offset, range_end, 7279 EXTENT_DELALLOC, 0, NULL); 7280 if (ret) { 7281 ret = -EAGAIN; 7282 goto out; 7283 } 7284 } 7285 7286 btrfs_release_path(path); 7287 7288 /* 7289 * look for other files referencing this extent, if we 7290 * find any we must cow 7291 */ 7292 trans = btrfs_join_transaction(root); 7293 if (IS_ERR(trans)) { 7294 ret = 0; 7295 goto out; 7296 } 7297 7298 ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), 7299 key.offset - backref_offset, disk_bytenr); 7300 btrfs_end_transaction(trans, root); 7301 if (ret) { 7302 ret = 0; 7303 goto out; 7304 
} 7305 7306 /* 7307 * adjust disk_bytenr and num_bytes to cover just the bytes 7308 * in this extent we are about to write. If there 7309 * are any csums in that range we have to cow in order 7310 * to keep the csums correct 7311 */ 7312 disk_bytenr += backref_offset; 7313 disk_bytenr += offset - key.offset; 7314 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 7315 goto out; 7316 /* 7317 * all of the above have passed, it is safe to overwrite this extent 7318 * without cow 7319 */ 7320 *len = num_bytes; 7321 ret = 1; 7322 out: 7323 btrfs_free_path(path); 7324 return ret; 7325 } 7326 7327 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) 7328 { 7329 struct radix_tree_root *root = &inode->i_mapping->page_tree; 7330 int found = false; 7331 void **pagep = NULL; 7332 struct page *page = NULL; 7333 int start_idx; 7334 int end_idx; 7335 7336 start_idx = start >> PAGE_CACHE_SHIFT; 7337 7338 /* 7339 * end is the last byte in the last page. end == start is legal 7340 */ 7341 end_idx = end >> PAGE_CACHE_SHIFT; 7342 7343 rcu_read_lock(); 7344 7345 /* Most of the code in this while loop is lifted from 7346 * find_get_page. It's been modified to begin searching from a 7347 * page and return just the first page found in that range. If the 7348 * found idx is less than or equal to the end idx then we know that 7349 * a page exists. If no pages are found or if those pages are 7350 * outside of the range then we're fine (yay!) */ 7351 while (page == NULL && 7352 radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) { 7353 page = radix_tree_deref_slot(pagep); 7354 if (unlikely(!page)) 7355 break; 7356 7357 if (radix_tree_exception(page)) { 7358 if (radix_tree_deref_retry(page)) { 7359 page = NULL; 7360 continue; 7361 } 7362 /* 7363 * Otherwise, shmem/tmpfs must be storing a swap entry 7364 * here as an exceptional entry: so return it without 7365 * attempting to raise page count. 7366 */ 7367 page = NULL; 7368 break; /* TODO: Is this relevant for this use case? */ 7369 } 7370 7371 if (!page_cache_get_speculative(page)) { 7372 page = NULL; 7373 continue; 7374 } 7375 7376 /* 7377 * Has the page moved? 7378 * This is part of the lockless pagecache protocol. See 7379 * include/linux/pagemap.h for details. 7380 */ 7381 if (unlikely(page != *pagep)) { 7382 page_cache_release(page); 7383 page = NULL; 7384 } 7385 } 7386 7387 if (page) { 7388 if (page->index <= end_idx) 7389 found = true; 7390 page_cache_release(page); 7391 } 7392 7393 rcu_read_unlock(); 7394 return found; 7395 } 7396 7397 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7398 struct extent_state **cached_state, int writing) 7399 { 7400 struct btrfs_ordered_extent *ordered; 7401 int ret = 0; 7402 7403 while (1) { 7404 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7405 0, cached_state); 7406 /* 7407 * We're concerned with the entire range that we're going to be 7408 * doing DIO to, so we need to make sure theres no ordered 7409 * extents in this range. 7410 */ 7411 ordered = btrfs_lookup_ordered_range(inode, lockstart, 7412 lockend - lockstart + 1); 7413 7414 /* 7415 * We need to make sure there are no buffered pages in this 7416 * range either, we could have raced between the invalidate in 7417 * generic_file_direct_write and locking the extent. The 7418 * invalidate needs to happen so that reads after a write do not 7419 * get stale data. 
7420 */ 7421 if (!ordered && 7422 (!writing || 7423 !btrfs_page_exists_in_range(inode, lockstart, lockend))) 7424 break; 7425 7426 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7427 cached_state, GFP_NOFS); 7428 7429 if (ordered) { 7430 btrfs_start_ordered_extent(inode, ordered, 1); 7431 btrfs_put_ordered_extent(ordered); 7432 } else { 7433 /* Screw you mmap */ 7434 ret = btrfs_fdatawrite_range(inode, lockstart, lockend); 7435 if (ret) 7436 break; 7437 ret = filemap_fdatawait_range(inode->i_mapping, 7438 lockstart, 7439 lockend); 7440 if (ret) 7441 break; 7442 7443 /* 7444 * If we found a page that couldn't be invalidated just 7445 * fall back to buffered. 7446 */ 7447 ret = invalidate_inode_pages2_range(inode->i_mapping, 7448 lockstart >> PAGE_CACHE_SHIFT, 7449 lockend >> PAGE_CACHE_SHIFT); 7450 if (ret) 7451 break; 7452 } 7453 7454 cond_resched(); 7455 } 7456 7457 return ret; 7458 } 7459 7460 static struct extent_map *create_pinned_em(struct inode *inode, u64 start, 7461 u64 len, u64 orig_start, 7462 u64 block_start, u64 block_len, 7463 u64 orig_block_len, u64 ram_bytes, 7464 int type) 7465 { 7466 struct extent_map_tree *em_tree; 7467 struct extent_map *em; 7468 struct btrfs_root *root = BTRFS_I(inode)->root; 7469 int ret; 7470 7471 em_tree = &BTRFS_I(inode)->extent_tree; 7472 em = alloc_extent_map(); 7473 if (!em) 7474 return ERR_PTR(-ENOMEM); 7475 7476 em->start = start; 7477 em->orig_start = orig_start; 7478 em->mod_start = start; 7479 em->mod_len = len; 7480 em->len = len; 7481 em->block_len = block_len; 7482 em->block_start = block_start; 7483 em->bdev = root->fs_info->fs_devices->latest_bdev; 7484 em->orig_block_len = orig_block_len; 7485 em->ram_bytes = ram_bytes; 7486 em->generation = -1; 7487 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7488 if (type == BTRFS_ORDERED_PREALLOC) 7489 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7490 7491 do { 7492 btrfs_drop_extent_cache(inode, em->start, 7493 em->start + em->len - 1, 0); 7494 write_lock(&em_tree->lock); 7495 ret = add_extent_mapping(em_tree, em, 1); 7496 write_unlock(&em_tree->lock); 7497 } while (ret == -EEXIST); 7498 7499 if (ret) { 7500 free_extent_map(em); 7501 return ERR_PTR(ret); 7502 } 7503 7504 return em; 7505 } 7506 7507 struct btrfs_dio_data { 7508 u64 outstanding_extents; 7509 u64 reserve; 7510 }; 7511 7512 static void adjust_dio_outstanding_extents(struct inode *inode, 7513 struct btrfs_dio_data *dio_data, 7514 const u64 len) 7515 { 7516 unsigned num_extents; 7517 7518 num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1, 7519 BTRFS_MAX_EXTENT_SIZE); 7520 /* 7521 * If we have an outstanding_extents count still set then we're 7522 * within our reservation, otherwise we need to adjust our inode 7523 * counter appropriately. 
7524 */ 7525 if (dio_data->outstanding_extents) { 7526 dio_data->outstanding_extents -= num_extents; 7527 } else { 7528 spin_lock(&BTRFS_I(inode)->lock); 7529 BTRFS_I(inode)->outstanding_extents += num_extents; 7530 spin_unlock(&BTRFS_I(inode)->lock); 7531 } 7532 } 7533 7534 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7535 struct buffer_head *bh_result, int create) 7536 { 7537 struct extent_map *em; 7538 struct btrfs_root *root = BTRFS_I(inode)->root; 7539 struct extent_state *cached_state = NULL; 7540 struct btrfs_dio_data *dio_data = NULL; 7541 u64 start = iblock << inode->i_blkbits; 7542 u64 lockstart, lockend; 7543 u64 len = bh_result->b_size; 7544 int unlock_bits = EXTENT_LOCKED; 7545 int ret = 0; 7546 7547 if (create) 7548 unlock_bits |= EXTENT_DIRTY; 7549 else 7550 len = min_t(u64, len, root->sectorsize); 7551 7552 lockstart = start; 7553 lockend = start + len - 1; 7554 7555 if (current->journal_info) { 7556 /* 7557 * Need to pull our outstanding extents and set journal_info to NULL so 7558 * that anything that needs to check if there's a transaction doesn't get 7559 * confused. 7560 */ 7561 dio_data = current->journal_info; 7562 current->journal_info = NULL; 7563 } 7564 7565 /* 7566 * If this errors out it's because we couldn't invalidate pagecache for 7567 * this range and we need to fall back to buffered. 7568 */ 7569 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, 7570 create)) { 7571 ret = -ENOTBLK; 7572 goto err; 7573 } 7574 7575 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 7576 if (IS_ERR(em)) { 7577 ret = PTR_ERR(em); 7578 goto unlock_err; 7579 } 7580 7581 /* 7582 * Ok, for INLINE and COMPRESSED extents we need to fall back on buffered 7583 * io. INLINE is special, and we could probably kludge it in here, but 7584 * it's still buffered so for safety let's just fall back to the generic 7585 * buffered path. 7586 * 7587 * For COMPRESSED we _have_ to read the entire extent in so we can 7588 * decompress it, so there will be buffering required no matter what we 7589 * do, so go ahead and fall back to buffered. 7590 * 7591 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7592 * to buffered IO. Don't blame me, this is the price we pay for using 7593 * the generic code. 7594 */ 7595 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7596 em->block_start == EXTENT_MAP_INLINE) { 7597 free_extent_map(em); 7598 ret = -ENOTBLK; 7599 goto unlock_err; 7600 } 7601 7602 /* Just a good old fashioned hole, return */ 7603 if (!create && (em->block_start == EXTENT_MAP_HOLE || 7604 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 7605 free_extent_map(em); 7606 goto unlock_err; 7607 } 7608 7609 /* 7610 * We don't allocate a new extent in the following cases 7611 * 7612 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7613 * existing extent. 7614 * 2) The extent is marked as PREALLOC. We're good to go here and can 7615 * just use the extent.
7616 * 7617 */ 7618 if (!create) { 7619 len = min(len, em->len - (start - em->start)); 7620 lockstart = start + len; 7621 goto unlock; 7622 } 7623 7624 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7625 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7626 em->block_start != EXTENT_MAP_HOLE)) { 7627 int type; 7628 u64 block_start, orig_start, orig_block_len, ram_bytes; 7629 7630 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7631 type = BTRFS_ORDERED_PREALLOC; 7632 else 7633 type = BTRFS_ORDERED_NOCOW; 7634 len = min(len, em->len - (start - em->start)); 7635 block_start = em->block_start + (start - em->start); 7636 7637 if (can_nocow_extent(inode, start, &len, &orig_start, 7638 &orig_block_len, &ram_bytes) == 1) { 7639 if (type == BTRFS_ORDERED_PREALLOC) { 7640 free_extent_map(em); 7641 em = create_pinned_em(inode, start, len, 7642 orig_start, 7643 block_start, len, 7644 orig_block_len, 7645 ram_bytes, type); 7646 if (IS_ERR(em)) { 7647 ret = PTR_ERR(em); 7648 goto unlock_err; 7649 } 7650 } 7651 7652 ret = btrfs_add_ordered_extent_dio(inode, start, 7653 block_start, len, len, type); 7654 if (ret) { 7655 free_extent_map(em); 7656 goto unlock_err; 7657 } 7658 goto unlock; 7659 } 7660 } 7661 7662 /* 7663 * this will cow the extent, reset the len in case we changed 7664 * it above 7665 */ 7666 len = bh_result->b_size; 7667 free_extent_map(em); 7668 em = btrfs_new_extent_direct(inode, start, len); 7669 if (IS_ERR(em)) { 7670 ret = PTR_ERR(em); 7671 goto unlock_err; 7672 } 7673 len = min(len, em->len - (start - em->start)); 7674 unlock: 7675 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7676 inode->i_blkbits; 7677 bh_result->b_size = len; 7678 bh_result->b_bdev = em->bdev; 7679 set_buffer_mapped(bh_result); 7680 if (create) { 7681 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7682 set_buffer_new(bh_result); 7683 7684 /* 7685 * Need to update the i_size under the extent lock so buffered 7686 * readers will get the updated i_size when we unlock. 7687 */ 7688 if (start + len > i_size_read(inode)) 7689 i_size_write(inode, start + len); 7690 7691 adjust_dio_outstanding_extents(inode, dio_data, len); 7692 btrfs_free_reserved_data_space(inode, start, len); 7693 WARN_ON(dio_data->reserve < len); 7694 dio_data->reserve -= len; 7695 current->journal_info = dio_data; 7696 } 7697 7698 /* 7699 * In the case of write we need to clear and unlock the entire range, 7700 * in the case of read we need to unlock only the end area that we 7701 * aren't using if there is any leftover space. 7702 */ 7703 if (lockstart < lockend) { 7704 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 7705 lockend, unlock_bits, 1, 0, 7706 &cached_state, GFP_NOFS); 7707 } else { 7708 free_extent_state(cached_state); 7709 } 7710 7711 free_extent_map(em); 7712 7713 return 0; 7714 7715 unlock_err: 7716 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7717 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7718 err: 7719 if (dio_data) 7720 current->journal_info = dio_data; 7721 /* 7722 * Compensate the delalloc release we do in btrfs_direct_IO() when we 7723 * write less data than expected, so that we don't underflow our inode's 7724 * outstanding extents counter.
7725 */ 7726 if (create && dio_data) 7727 adjust_dio_outstanding_extents(inode, dio_data, len); 7728 7729 return ret; 7730 } 7731 7732 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7733 int rw, int mirror_num) 7734 { 7735 struct btrfs_root *root = BTRFS_I(inode)->root; 7736 int ret; 7737 7738 BUG_ON(rw & REQ_WRITE); 7739 7740 bio_get(bio); 7741 7742 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 7743 BTRFS_WQ_ENDIO_DIO_REPAIR); 7744 if (ret) 7745 goto err; 7746 7747 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 7748 err: 7749 bio_put(bio); 7750 return ret; 7751 } 7752 7753 static int btrfs_check_dio_repairable(struct inode *inode, 7754 struct bio *failed_bio, 7755 struct io_failure_record *failrec, 7756 int failed_mirror) 7757 { 7758 int num_copies; 7759 7760 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 7761 failrec->logical, failrec->len); 7762 if (num_copies == 1) { 7763 /* 7764 * we only have a single copy of the data, so don't bother with 7765 * all the retry and error correction code that follows. no 7766 * matter what the error is, it is very likely to persist. 7767 */ 7768 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 7769 num_copies, failrec->this_mirror, failed_mirror); 7770 return 0; 7771 } 7772 7773 failrec->failed_mirror = failed_mirror; 7774 failrec->this_mirror++; 7775 if (failrec->this_mirror == failed_mirror) 7776 failrec->this_mirror++; 7777 7778 if (failrec->this_mirror > num_copies) { 7779 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 7780 num_copies, failrec->this_mirror, failed_mirror); 7781 return 0; 7782 } 7783 7784 return 1; 7785 } 7786 7787 static int dio_read_error(struct inode *inode, struct bio *failed_bio, 7788 struct page *page, u64 start, u64 end, 7789 int failed_mirror, bio_end_io_t *repair_endio, 7790 void *repair_arg) 7791 { 7792 struct io_failure_record *failrec; 7793 struct bio *bio; 7794 int isector; 7795 int read_mode; 7796 int ret; 7797 7798 BUG_ON(failed_bio->bi_rw & REQ_WRITE); 7799 7800 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 7801 if (ret) 7802 return ret; 7803 7804 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, 7805 failed_mirror); 7806 if (!ret) { 7807 free_io_failure(inode, failrec); 7808 return -EIO; 7809 } 7810 7811 if (failed_bio->bi_vcnt > 1) 7812 read_mode = READ_SYNC | REQ_FAILFAST_DEV; 7813 else 7814 read_mode = READ_SYNC; 7815 7816 isector = start - btrfs_io_bio(failed_bio)->logical; 7817 isector >>= inode->i_sb->s_blocksize_bits; 7818 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 7819 0, isector, repair_endio, repair_arg); 7820 if (!bio) { 7821 free_io_failure(inode, failrec); 7822 return -EIO; 7823 } 7824 7825 btrfs_debug(BTRFS_I(inode)->root->fs_info, 7826 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", 7827 read_mode, failrec->this_mirror, failrec->in_validation); 7828 7829 ret = submit_dio_repair_bio(inode, bio, read_mode, 7830 failrec->this_mirror); 7831 if (ret) { 7832 free_io_failure(inode, failrec); 7833 bio_put(bio); 7834 } 7835 7836 return ret; 7837 } 7838 7839 struct btrfs_retry_complete { 7840 struct completion done; 7841 struct inode *inode; 7842 u64 start; 7843 int uptodate; 7844 }; 7845 7846 static void btrfs_retry_endio_nocsum(struct bio *bio) 7847 { 7848 struct btrfs_retry_complete *done = bio->bi_private; 7849 struct bio_vec *bvec; 7850 int i; 7851 7852 if (bio->bi_error) 
7853 goto end; 7854 7855 done->uptodate = 1; 7856 bio_for_each_segment_all(bvec, bio, i) 7857 clean_io_failure(done->inode, done->start, bvec->bv_page, 0); 7858 end: 7859 complete(&done->done); 7860 bio_put(bio); 7861 } 7862 7863 static int __btrfs_correct_data_nocsum(struct inode *inode, 7864 struct btrfs_io_bio *io_bio) 7865 { 7866 struct bio_vec *bvec; 7867 struct btrfs_retry_complete done; 7868 u64 start; 7869 int i; 7870 int ret; 7871 7872 start = io_bio->logical; 7873 done.inode = inode; 7874 7875 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7876 try_again: 7877 done.uptodate = 0; 7878 done.start = start; 7879 init_completion(&done.done); 7880 7881 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7882 start + bvec->bv_len - 1, 7883 io_bio->mirror_num, 7884 btrfs_retry_endio_nocsum, &done); 7885 if (ret) 7886 return ret; 7887 7888 wait_for_completion(&done.done); 7889 7890 if (!done.uptodate) { 7891 /* We might have another mirror, so try again */ 7892 goto try_again; 7893 } 7894 7895 start += bvec->bv_len; 7896 } 7897 7898 return 0; 7899 } 7900 7901 static void btrfs_retry_endio(struct bio *bio) 7902 { 7903 struct btrfs_retry_complete *done = bio->bi_private; 7904 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7905 struct bio_vec *bvec; 7906 int uptodate; 7907 int ret; 7908 int i; 7909 7910 if (bio->bi_error) 7911 goto end; 7912 7913 uptodate = 1; 7914 bio_for_each_segment_all(bvec, bio, i) { 7915 ret = __readpage_endio_check(done->inode, io_bio, i, 7916 bvec->bv_page, 0, 7917 done->start, bvec->bv_len); 7918 if (!ret) 7919 clean_io_failure(done->inode, done->start, 7920 bvec->bv_page, 0); 7921 else 7922 uptodate = 0; 7923 } 7924 7925 done->uptodate = uptodate; 7926 end: 7927 complete(&done->done); 7928 bio_put(bio); 7929 } 7930 7931 static int __btrfs_subio_endio_read(struct inode *inode, 7932 struct btrfs_io_bio *io_bio, int err) 7933 { 7934 struct bio_vec *bvec; 7935 struct btrfs_retry_complete done; 7936 u64 start; 7937 u64 offset = 0; 7938 int i; 7939 int ret; 7940 7941 err = 0; 7942 start = io_bio->logical; 7943 done.inode = inode; 7944 7945 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7946 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7947 0, start, bvec->bv_len); 7948 if (likely(!ret)) 7949 goto next; 7950 try_again: 7951 done.uptodate = 0; 7952 done.start = start; 7953 init_completion(&done.done); 7954 7955 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7956 start + bvec->bv_len - 1, 7957 io_bio->mirror_num, 7958 btrfs_retry_endio, &done); 7959 if (ret) { 7960 err = ret; 7961 goto next; 7962 } 7963 7964 wait_for_completion(&done.done); 7965 7966 if (!done.uptodate) { 7967 /* We might have another mirror, so try again */ 7968 goto try_again; 7969 } 7970 next: 7971 offset += bvec->bv_len; 7972 start += bvec->bv_len; 7973 } 7974 7975 return err; 7976 } 7977 7978 static int btrfs_subio_endio_read(struct inode *inode, 7979 struct btrfs_io_bio *io_bio, int err) 7980 { 7981 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 7982 7983 if (skip_csum) { 7984 if (unlikely(err)) 7985 return __btrfs_correct_data_nocsum(inode, io_bio); 7986 else 7987 return 0; 7988 } else { 7989 return __btrfs_subio_endio_read(inode, io_bio, err); 7990 } 7991 } 7992 7993 static void btrfs_endio_direct_read(struct bio *bio) 7994 { 7995 struct btrfs_dio_private *dip = bio->bi_private; 7996 struct inode *inode = dip->inode; 7997 struct bio *dio_bio; 7998 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7999 int err = bio->bi_error; 8000 
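/* If the original dio_bio was submitted without being split into per-stripe bios (BTRFS_DIO_ORIG_BIO_SUBMITTED), csum verification and any read repair for the whole range happens here rather than in the per-bio end_io handler. */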
8001 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 8002 err = btrfs_subio_endio_read(inode, io_bio, err); 8003 8004 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 8005 dip->logical_offset + dip->bytes - 1); 8006 dio_bio = dip->dio_bio; 8007 8008 kfree(dip); 8009 8010 dio_end_io(dio_bio, bio->bi_error); 8011 8012 if (io_bio->end_io) 8013 io_bio->end_io(io_bio, err); 8014 bio_put(bio); 8015 } 8016 8017 static void btrfs_endio_direct_write(struct bio *bio) 8018 { 8019 struct btrfs_dio_private *dip = bio->bi_private; 8020 struct inode *inode = dip->inode; 8021 struct btrfs_root *root = BTRFS_I(inode)->root; 8022 struct btrfs_ordered_extent *ordered = NULL; 8023 u64 ordered_offset = dip->logical_offset; 8024 u64 ordered_bytes = dip->bytes; 8025 struct bio *dio_bio; 8026 int ret; 8027 8028 again: 8029 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 8030 &ordered_offset, 8031 ordered_bytes, 8032 !bio->bi_error); 8033 if (!ret) 8034 goto out_test; 8035 8036 btrfs_init_work(&ordered->work, btrfs_endio_write_helper, 8037 finish_ordered_fn, NULL, NULL); 8038 btrfs_queue_work(root->fs_info->endio_write_workers, 8039 &ordered->work); 8040 out_test: 8041 /* 8042 * our bio might span multiple ordered extents. If we haven't 8043 * completed the accounting for the whole dio, go back and try again 8044 */ 8045 if (ordered_offset < dip->logical_offset + dip->bytes) { 8046 ordered_bytes = dip->logical_offset + dip->bytes - 8047 ordered_offset; 8048 ordered = NULL; 8049 goto again; 8050 } 8051 dio_bio = dip->dio_bio; 8052 8053 kfree(dip); 8054 8055 dio_end_io(dio_bio, bio->bi_error); 8056 bio_put(bio); 8057 } 8058 8059 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, 8060 struct bio *bio, int mirror_num, 8061 unsigned long bio_flags, u64 offset) 8062 { 8063 int ret; 8064 struct btrfs_root *root = BTRFS_I(inode)->root; 8065 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 8066 BUG_ON(ret); /* -ENOMEM */ 8067 return 0; 8068 } 8069 8070 static void btrfs_end_dio_bio(struct bio *bio) 8071 { 8072 struct btrfs_dio_private *dip = bio->bi_private; 8073 int err = bio->bi_error; 8074 8075 if (err) 8076 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 8077 "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d", 8078 btrfs_ino(dip->inode), bio->bi_rw, 8079 (unsigned long long)bio->bi_iter.bi_sector, 8080 bio->bi_iter.bi_size, err); 8081 8082 if (dip->subio_endio) 8083 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); 8084 8085 if (err) { 8086 dip->errors = 1; 8087 8088 /* 8089 * before atomic variable goto zero, we must make sure 8090 * dip->errors is perceived to be set. 
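 * The smp_mb__before_atomic() below pairs with atomic_dec_and_test(&dip->pending_bios), so whichever bio drops the count to zero is guaranteed to see dip->errors before it completes the original bio.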
8091 */ 8092 smp_mb__before_atomic(); 8093 } 8094 8095 /* if there are more bios still pending for this dio, just exit */ 8096 if (!atomic_dec_and_test(&dip->pending_bios)) 8097 goto out; 8098 8099 if (dip->errors) { 8100 bio_io_error(dip->orig_bio); 8101 } else { 8102 dip->dio_bio->bi_error = 0; 8103 bio_endio(dip->orig_bio); 8104 } 8105 out: 8106 bio_put(bio); 8107 } 8108 8109 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, 8110 u64 first_sector, gfp_t gfp_flags) 8111 { 8112 struct bio *bio; 8113 bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags); 8114 if (bio) 8115 bio_associate_current(bio); 8116 return bio; 8117 } 8118 8119 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root, 8120 struct inode *inode, 8121 struct btrfs_dio_private *dip, 8122 struct bio *bio, 8123 u64 file_offset) 8124 { 8125 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8126 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 8127 int ret; 8128 8129 /* 8130 * We load all the csum data we need when we submit 8131 * the first bio to reduce the csum tree search and 8132 * contention. 8133 */ 8134 if (dip->logical_offset == file_offset) { 8135 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio, 8136 file_offset); 8137 if (ret) 8138 return ret; 8139 } 8140 8141 if (bio == dip->orig_bio) 8142 return 0; 8143 8144 file_offset -= dip->logical_offset; 8145 file_offset >>= inode->i_sb->s_blocksize_bits; 8146 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); 8147 8148 return 0; 8149 } 8150 8151 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 8152 int rw, u64 file_offset, int skip_sum, 8153 int async_submit) 8154 { 8155 struct btrfs_dio_private *dip = bio->bi_private; 8156 int write = rw & REQ_WRITE; 8157 struct btrfs_root *root = BTRFS_I(inode)->root; 8158 int ret; 8159 8160 if (async_submit) 8161 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 8162 8163 bio_get(bio); 8164 8165 if (!write) { 8166 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 8167 BTRFS_WQ_ENDIO_DATA); 8168 if (ret) 8169 goto err; 8170 } 8171 8172 if (skip_sum) 8173 goto map; 8174 8175 if (write && async_submit) { 8176 ret = btrfs_wq_submit_bio(root->fs_info, 8177 inode, rw, bio, 0, 0, 8178 file_offset, 8179 __btrfs_submit_bio_start_direct_io, 8180 __btrfs_submit_bio_done); 8181 goto err; 8182 } else if (write) { 8183 /* 8184 * If we aren't doing async submit, calculate the csum of the 8185 * bio now. 
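 * (Reads fall through to the else branch below instead and look up the expected csums, so the data can be verified when the bio completes.)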
8186 */ 8187 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); 8188 if (ret) 8189 goto err; 8190 } else { 8191 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio, 8192 file_offset); 8193 if (ret) 8194 goto err; 8195 } 8196 map: 8197 ret = btrfs_map_bio(root, rw, bio, 0, async_submit); 8198 err: 8199 bio_put(bio); 8200 return ret; 8201 } 8202 8203 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, 8204 int skip_sum) 8205 { 8206 struct inode *inode = dip->inode; 8207 struct btrfs_root *root = BTRFS_I(inode)->root; 8208 struct bio *bio; 8209 struct bio *orig_bio = dip->orig_bio; 8210 struct bio_vec *bvec = orig_bio->bi_io_vec; 8211 u64 start_sector = orig_bio->bi_iter.bi_sector; 8212 u64 file_offset = dip->logical_offset; 8213 u64 submit_len = 0; 8214 u64 map_length; 8215 int nr_pages = 0; 8216 int ret; 8217 int async_submit = 0; 8218 8219 map_length = orig_bio->bi_iter.bi_size; 8220 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 8221 &map_length, NULL, 0); 8222 if (ret) 8223 return -EIO; 8224 8225 if (map_length >= orig_bio->bi_iter.bi_size) { 8226 bio = orig_bio; 8227 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; 8228 goto submit; 8229 } 8230 8231 /* async crcs make it difficult to collect full stripe writes. */ 8232 if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) 8233 async_submit = 0; 8234 else 8235 async_submit = 1; 8236 8237 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 8238 if (!bio) 8239 return -ENOMEM; 8240 8241 bio->bi_private = dip; 8242 bio->bi_end_io = btrfs_end_dio_bio; 8243 btrfs_io_bio(bio)->logical = file_offset; 8244 atomic_inc(&dip->pending_bios); 8245 8246 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 8247 if (map_length < submit_len + bvec->bv_len || 8248 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 8249 bvec->bv_offset) < bvec->bv_len) { 8250 /* 8251 * inc the count before we submit the bio so 8252 * we know the end IO handler won't happen before 8253 * we inc the count. Otherwise, the dip might get freed 8254 * before we're done setting it up 8255 */ 8256 atomic_inc(&dip->pending_bios); 8257 ret = __btrfs_submit_dio_bio(bio, inode, rw, 8258 file_offset, skip_sum, 8259 async_submit); 8260 if (ret) { 8261 bio_put(bio); 8262 atomic_dec(&dip->pending_bios); 8263 goto out_err; 8264 } 8265 8266 start_sector += submit_len >> 9; 8267 file_offset += submit_len; 8268 8269 submit_len = 0; 8270 nr_pages = 0; 8271 8272 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, 8273 start_sector, GFP_NOFS); 8274 if (!bio) 8275 goto out_err; 8276 bio->bi_private = dip; 8277 bio->bi_end_io = btrfs_end_dio_bio; 8278 btrfs_io_bio(bio)->logical = file_offset; 8279 8280 map_length = orig_bio->bi_iter.bi_size; 8281 ret = btrfs_map_block(root->fs_info, rw, 8282 start_sector << 9, 8283 &map_length, NULL, 0); 8284 if (ret) { 8285 bio_put(bio); 8286 goto out_err; 8287 } 8288 } else { 8289 submit_len += bvec->bv_len; 8290 nr_pages++; 8291 bvec++; 8292 } 8293 } 8294 8295 submit: 8296 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 8297 async_submit); 8298 if (!ret) 8299 return 0; 8300 8301 bio_put(bio); 8302 out_err: 8303 dip->errors = 1; 8304 /* 8305 * before atomic variable goto zero, we must 8306 * make sure dip->errors is perceived to be set. 
8307 */ 8308 smp_mb__before_atomic(); 8309 if (atomic_dec_and_test(&dip->pending_bios)) 8310 bio_io_error(dip->orig_bio); 8311 8312 /* bio_end_io() will handle error, so we needn't return it */ 8313 return 0; 8314 } 8315 8316 static void btrfs_submit_direct(int rw, struct bio *dio_bio, 8317 struct inode *inode, loff_t file_offset) 8318 { 8319 struct btrfs_dio_private *dip = NULL; 8320 struct bio *io_bio = NULL; 8321 struct btrfs_io_bio *btrfs_bio; 8322 int skip_sum; 8323 int write = rw & REQ_WRITE; 8324 int ret = 0; 8325 8326 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 8327 8328 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); 8329 if (!io_bio) { 8330 ret = -ENOMEM; 8331 goto free_ordered; 8332 } 8333 8334 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8335 if (!dip) { 8336 ret = -ENOMEM; 8337 goto free_ordered; 8338 } 8339 8340 dip->private = dio_bio->bi_private; 8341 dip->inode = inode; 8342 dip->logical_offset = file_offset; 8343 dip->bytes = dio_bio->bi_iter.bi_size; 8344 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 8345 io_bio->bi_private = dip; 8346 dip->orig_bio = io_bio; 8347 dip->dio_bio = dio_bio; 8348 atomic_set(&dip->pending_bios, 0); 8349 btrfs_bio = btrfs_io_bio(io_bio); 8350 btrfs_bio->logical = file_offset; 8351 8352 if (write) { 8353 io_bio->bi_end_io = btrfs_endio_direct_write; 8354 } else { 8355 io_bio->bi_end_io = btrfs_endio_direct_read; 8356 dip->subio_endio = btrfs_subio_endio_read; 8357 } 8358 8359 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 8360 if (!ret) 8361 return; 8362 8363 if (btrfs_bio->end_io) 8364 btrfs_bio->end_io(btrfs_bio, ret); 8365 8366 free_ordered: 8367 /* 8368 * If we arrived here it means either we failed to submit the dip 8369 * or we either failed to clone the dio_bio or failed to allocate the 8370 * dip. If we cloned the dio_bio and allocated the dip, we can just 8371 * call bio_endio against our io_bio so that we get proper resource 8372 * cleanup if we fail to submit the dip, otherwise, we must do the 8373 * same as btrfs_endio_direct_[write|read] because we can't call these 8374 * callbacks - they require an allocated dip and a clone of dio_bio. 8375 */ 8376 if (io_bio && dip) { 8377 io_bio->bi_error = -EIO; 8378 bio_endio(io_bio); 8379 /* 8380 * The end io callbacks free our dip, do the final put on io_bio 8381 * and all the cleanup and final put for dio_bio (through 8382 * dio_end_io()). 8383 */ 8384 dip = NULL; 8385 io_bio = NULL; 8386 } else { 8387 if (write) { 8388 struct btrfs_ordered_extent *ordered; 8389 8390 ordered = btrfs_lookup_ordered_extent(inode, 8391 file_offset); 8392 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); 8393 /* 8394 * Decrements our ref on the ordered extent and removes 8395 * the ordered extent from the inode's ordered tree, 8396 * doing all the proper resource cleanup such as for the 8397 * reserved space and waking up any waiters for this 8398 * ordered extent (through btrfs_remove_ordered_extent). 8399 */ 8400 btrfs_finish_ordered_io(ordered); 8401 } else { 8402 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 8403 file_offset + dio_bio->bi_iter.bi_size - 1); 8404 } 8405 dio_bio->bi_error = -EIO; 8406 /* 8407 * Releases and cleans up our dio_bio, no need to bio_put() 8408 * nor bio_endio()/bio_io_error() against dio_bio. 
8409 */ 8410 dio_end_io(dio_bio, ret); 8411 } 8412 if (io_bio) 8413 bio_put(io_bio); 8414 kfree(dip); 8415 } 8416 8417 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, 8418 const struct iov_iter *iter, loff_t offset) 8419 { 8420 int seg; 8421 int i; 8422 unsigned blocksize_mask = root->sectorsize - 1; 8423 ssize_t retval = -EINVAL; 8424 8425 if (offset & blocksize_mask) 8426 goto out; 8427 8428 if (iov_iter_alignment(iter) & blocksize_mask) 8429 goto out; 8430 8431 /* If this is a write we don't need to check anymore */ 8432 if (iov_iter_rw(iter) == WRITE) 8433 return 0; 8434 /* 8435 * Check to make sure we don't have duplicate iov_base's in this 8436 * iovec, if so return EINVAL, otherwise we'll get csum errors 8437 * when reading back. 8438 */ 8439 for (seg = 0; seg < iter->nr_segs; seg++) { 8440 for (i = seg + 1; i < iter->nr_segs; i++) { 8441 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 8442 goto out; 8443 } 8444 } 8445 retval = 0; 8446 out: 8447 return retval; 8448 } 8449 8450 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 8451 loff_t offset) 8452 { 8453 struct file *file = iocb->ki_filp; 8454 struct inode *inode = file->f_mapping->host; 8455 struct btrfs_root *root = BTRFS_I(inode)->root; 8456 struct btrfs_dio_data dio_data = { 0 }; 8457 size_t count = 0; 8458 int flags = 0; 8459 bool wakeup = true; 8460 bool relock = false; 8461 ssize_t ret; 8462 8463 if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset)) 8464 return 0; 8465 8466 inode_dio_begin(inode); 8467 smp_mb__after_atomic(); 8468 8469 /* 8470 * The generic stuff only does filemap_write_and_wait_range, which 8471 * isn't enough if we've written compressed pages to this area, so 8472 * we need to flush the dirty pages again to make absolutely sure 8473 * that any outstanding dirty pages are on disk. 8474 */ 8475 count = iov_iter_count(iter); 8476 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8477 &BTRFS_I(inode)->runtime_flags)) 8478 filemap_fdatawrite_range(inode->i_mapping, offset, 8479 offset + count - 1); 8480 8481 if (iov_iter_rw(iter) == WRITE) { 8482 /* 8483 * If the write DIO is beyond the EOF, we need to update 8484 * the isize, but it is protected by i_mutex, so we 8485 * cannot unlock the i_mutex in this case. 8486 */ 8487 if (offset + count <= inode->i_size) { 8488 mutex_unlock(&inode->i_mutex); 8489 relock = true; 8490 } 8491 ret = btrfs_delalloc_reserve_space(inode, offset, count); 8492 if (ret) 8493 goto out; 8494 dio_data.outstanding_extents = div64_u64(count + 8495 BTRFS_MAX_EXTENT_SIZE - 1, 8496 BTRFS_MAX_EXTENT_SIZE); 8497 8498 /* 8499 * We need to know how many extents we reserved so that we can 8500 * do the accounting properly if we go over the number we 8501 * originally calculated. Abuse current->journal_info for this.
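 * outstanding_extents above is the worst case count for this write: one extent per BTRFS_MAX_EXTENT_SIZE chunk, rounded up (with a 128M max extent size, say, a 300M write reserves 3 extents).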
8502 */ 8503 dio_data.reserve = round_up(count, root->sectorsize); 8504 current->journal_info = &dio_data; 8505 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8506 &BTRFS_I(inode)->runtime_flags)) { 8507 inode_dio_end(inode); 8508 flags = DIO_LOCKING | DIO_SKIP_HOLES; 8509 wakeup = false; 8510 } 8511 8512 ret = __blockdev_direct_IO(iocb, inode, 8513 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 8514 iter, offset, btrfs_get_blocks_direct, NULL, 8515 btrfs_submit_direct, flags); 8516 if (iov_iter_rw(iter) == WRITE) { 8517 current->journal_info = NULL; 8518 if (ret < 0 && ret != -EIOCBQUEUED) { 8519 if (dio_data.reserve) 8520 btrfs_delalloc_release_space(inode, offset, 8521 dio_data.reserve); 8522 } else if (ret >= 0 && (size_t)ret < count) 8523 btrfs_delalloc_release_space(inode, offset, 8524 count - (size_t)ret); 8525 } 8526 out: 8527 if (wakeup) 8528 inode_dio_end(inode); 8529 if (relock) 8530 mutex_lock(&inode->i_mutex); 8531 8532 return ret; 8533 } 8534 8535 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 8536 8537 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8538 __u64 start, __u64 len) 8539 { 8540 int ret; 8541 8542 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); 8543 if (ret) 8544 return ret; 8545 8546 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 8547 } 8548 8549 int btrfs_readpage(struct file *file, struct page *page) 8550 { 8551 struct extent_io_tree *tree; 8552 tree = &BTRFS_I(page->mapping->host)->io_tree; 8553 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 8554 } 8555 8556 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8557 { 8558 struct extent_io_tree *tree; 8559 8560 8561 if (current->flags & PF_MEMALLOC) { 8562 redirty_page_for_writepage(wbc, page); 8563 unlock_page(page); 8564 return 0; 8565 } 8566 tree = &BTRFS_I(page->mapping->host)->io_tree; 8567 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 8568 } 8569 8570 static int btrfs_writepages(struct address_space *mapping, 8571 struct writeback_control *wbc) 8572 { 8573 struct extent_io_tree *tree; 8574 8575 tree = &BTRFS_I(mapping->host)->io_tree; 8576 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 8577 } 8578 8579 static int 8580 btrfs_readpages(struct file *file, struct address_space *mapping, 8581 struct list_head *pages, unsigned nr_pages) 8582 { 8583 struct extent_io_tree *tree; 8584 tree = &BTRFS_I(mapping->host)->io_tree; 8585 return extent_readpages(tree, mapping, pages, nr_pages, 8586 btrfs_get_extent); 8587 } 8588 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8589 { 8590 struct extent_io_tree *tree; 8591 struct extent_map_tree *map; 8592 int ret; 8593 8594 tree = &BTRFS_I(page->mapping->host)->io_tree; 8595 map = &BTRFS_I(page->mapping->host)->extent_tree; 8596 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 8597 if (ret == 1) { 8598 ClearPagePrivate(page); 8599 set_page_private(page, 0); 8600 page_cache_release(page); 8601 } 8602 return ret; 8603 } 8604 8605 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8606 { 8607 if (PageWriteback(page) || PageDirty(page)) 8608 return 0; 8609 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 8610 } 8611 8612 static void btrfs_invalidatepage(struct page *page, unsigned int offset, 8613 unsigned int length) 8614 { 8615 struct inode *inode = page->mapping->host; 8616 struct extent_io_tree *tree; 8617 struct btrfs_ordered_extent *ordered; 8618 struct extent_state 
*cached_state = NULL; 8619 u64 page_start = page_offset(page); 8620 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8621 int inode_evicting = inode->i_state & I_FREEING; 8622 8623 /* 8624 * we have the page locked, so new writeback can't start, 8625 * and the dirty bit won't be cleared while we are here. 8626 * 8627 * Wait for IO on this page so that we can safely clear 8628 * the PagePrivate2 bit and do ordered accounting 8629 */ 8630 wait_on_page_writeback(page); 8631 8632 tree = &BTRFS_I(inode)->io_tree; 8633 if (offset) { 8634 btrfs_releasepage(page, GFP_NOFS); 8635 return; 8636 } 8637 8638 if (!inode_evicting) 8639 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 8640 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8641 if (ordered) { 8642 /* 8643 * IO on this page will never be started, so we need 8644 * to account for any ordered extents now 8645 */ 8646 if (!inode_evicting) 8647 clear_extent_bit(tree, page_start, page_end, 8648 EXTENT_DIRTY | EXTENT_DELALLOC | 8649 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8650 EXTENT_DEFRAG, 1, 0, &cached_state, 8651 GFP_NOFS); 8652 /* 8653 * whoever cleared the private bit is responsible 8654 * for the finish_ordered_io 8655 */ 8656 if (TestClearPagePrivate2(page)) { 8657 struct btrfs_ordered_inode_tree *tree; 8658 u64 new_len; 8659 8660 tree = &BTRFS_I(inode)->ordered_tree; 8661 8662 spin_lock_irq(&tree->lock); 8663 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8664 new_len = page_start - ordered->file_offset; 8665 if (new_len < ordered->truncated_len) 8666 ordered->truncated_len = new_len; 8667 spin_unlock_irq(&tree->lock); 8668 8669 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8670 page_start, 8671 PAGE_CACHE_SIZE, 1)) 8672 btrfs_finish_ordered_io(ordered); 8673 } 8674 btrfs_put_ordered_extent(ordered); 8675 if (!inode_evicting) { 8676 cached_state = NULL; 8677 lock_extent_bits(tree, page_start, page_end, 0, 8678 &cached_state); 8679 } 8680 } 8681 8682 /* 8683 * Qgroup reserved space handler 8684 * The page here will be in one of two states: 8685 * 1) Already written to disk 8686 * In this case, its reserved space was already released from the 8687 * data rsv map and will eventually be freed by the delayed_ref 8688 * handler, so even if we call qgroup_free_data() it won't decrease 8689 * the reserved space. 8690 * 2) Not written to disk 8691 * This means the reserved space should be freed here. 8692 */ 8693 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); 8694 if (!inode_evicting) { 8695 clear_extent_bit(tree, page_start, page_end, 8696 EXTENT_LOCKED | EXTENT_DIRTY | 8697 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8698 EXTENT_DEFRAG, 1, 1, 8699 &cached_state, GFP_NOFS); 8700 8701 __btrfs_releasepage(page, GFP_NOFS); 8702 } 8703 8704 ClearPageChecked(page); 8705 if (PagePrivate(page)) { 8706 ClearPagePrivate(page); 8707 set_page_private(page, 0); 8708 page_cache_release(page); 8709 } 8710 } 8711 8712 /* 8713 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8714 * called from a page fault handler when a page is first dirtied. Hence we must 8715 * be careful to check for EOF conditions here. We set the page up correctly 8716 * for a written page which means we get ENOSPC checking when writing into 8717 * holes and correct delalloc and unwritten extent mapping on filesystems that 8718 * support these features. 8719 * 8720 * We are not allowed to take the i_mutex here so we have to play games to 8721 * protect against truncate races as the page could now be beyond EOF.
Because 8722 * vmtruncate() writes the inode size before removing pages, once we have the 8723 * page lock we can determine safely if the page is beyond EOF. If it is not 8724 * beyond EOF, then the page is guaranteed safe against truncation until we 8725 * unlock the page. 8726 */ 8727 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 8728 { 8729 struct page *page = vmf->page; 8730 struct inode *inode = file_inode(vma->vm_file); 8731 struct btrfs_root *root = BTRFS_I(inode)->root; 8732 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8733 struct btrfs_ordered_extent *ordered; 8734 struct extent_state *cached_state = NULL; 8735 char *kaddr; 8736 unsigned long zero_start; 8737 loff_t size; 8738 int ret; 8739 int reserved = 0; 8740 u64 page_start; 8741 u64 page_end; 8742 8743 sb_start_pagefault(inode->i_sb); 8744 page_start = page_offset(page); 8745 page_end = page_start + PAGE_CACHE_SIZE - 1; 8746 8747 ret = btrfs_delalloc_reserve_space(inode, page_start, 8748 PAGE_CACHE_SIZE); 8749 if (!ret) { 8750 ret = file_update_time(vma->vm_file); 8751 reserved = 1; 8752 } 8753 if (ret) { 8754 if (ret == -ENOMEM) 8755 ret = VM_FAULT_OOM; 8756 else /* -ENOSPC, -EIO, etc */ 8757 ret = VM_FAULT_SIGBUS; 8758 if (reserved) 8759 goto out; 8760 goto out_noreserve; 8761 } 8762 8763 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8764 again: 8765 lock_page(page); 8766 size = i_size_read(inode); 8767 8768 if ((page->mapping != inode->i_mapping) || 8769 (page_start >= size)) { 8770 /* page got truncated out from underneath us */ 8771 goto out_unlock; 8772 } 8773 wait_on_page_writeback(page); 8774 8775 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 8776 set_page_extent_mapped(page); 8777 8778 /* 8779 * we can't set the delalloc bits if there are pending ordered 8780 * extents. Drop our locks and wait for them to finish 8781 */ 8782 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8783 if (ordered) { 8784 unlock_extent_cached(io_tree, page_start, page_end, 8785 &cached_state, GFP_NOFS); 8786 unlock_page(page); 8787 btrfs_start_ordered_extent(inode, ordered, 1); 8788 btrfs_put_ordered_extent(ordered); 8789 goto again; 8790 } 8791 8792 /* 8793 * XXX - page_mkwrite gets called every time the page is dirtied, even 8794 * if it was already dirty, so for space accounting reasons we need to 8795 * clear any delalloc bits for the range we are fixing to save. There 8796 * is probably a better way to do this, but for now keep consistent with 8797 * prepare_pages in the normal write path. 
8798 */ 8799 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 8800 EXTENT_DIRTY | EXTENT_DELALLOC | 8801 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 8802 0, 0, &cached_state, GFP_NOFS); 8803 8804 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 8805 &cached_state); 8806 if (ret) { 8807 unlock_extent_cached(io_tree, page_start, page_end, 8808 &cached_state, GFP_NOFS); 8809 ret = VM_FAULT_SIGBUS; 8810 goto out_unlock; 8811 } 8812 ret = 0; 8813 8814 /* page is wholly or partially inside EOF */ 8815 if (page_start + PAGE_CACHE_SIZE > size) 8816 zero_start = size & ~PAGE_CACHE_MASK; 8817 else 8818 zero_start = PAGE_CACHE_SIZE; 8819 8820 if (zero_start != PAGE_CACHE_SIZE) { 8821 kaddr = kmap(page); 8822 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8823 flush_dcache_page(page); 8824 kunmap(page); 8825 } 8826 ClearPageChecked(page); 8827 set_page_dirty(page); 8828 SetPageUptodate(page); 8829 8830 BTRFS_I(inode)->last_trans = root->fs_info->generation; 8831 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 8832 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; 8833 8834 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); 8835 8836 out_unlock: 8837 if (!ret) { 8838 sb_end_pagefault(inode->i_sb); 8839 return VM_FAULT_LOCKED; 8840 } 8841 unlock_page(page); 8842 out: 8843 btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE); 8844 out_noreserve: 8845 sb_end_pagefault(inode->i_sb); 8846 return ret; 8847 } 8848 8849 static int btrfs_truncate(struct inode *inode) 8850 { 8851 struct btrfs_root *root = BTRFS_I(inode)->root; 8852 struct btrfs_block_rsv *rsv; 8853 int ret = 0; 8854 int err = 0; 8855 struct btrfs_trans_handle *trans; 8856 u64 mask = root->sectorsize - 1; 8857 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 8858 8859 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 8860 (u64)-1); 8861 if (ret) 8862 return ret; 8863 8864 /* 8865 * Yes, ladies and gentlemen, this is indeed ugly. The fact is we have 8866 * 3 things going on here 8867 * 8868 * 1) We need to reserve space for our orphan item and the space to 8869 * delete our orphan item. Lord knows we don't want to have a dangling 8870 * orphan item because we didn't reserve space to remove it. 8871 * 8872 * 2) We need to reserve space to update our inode. 8873 * 8874 * 3) We need to have something to cache all the space that is going to 8875 * be freed up by the truncate operation, but also have some slack 8876 * space reserved in case it uses space during the truncate (thank you 8877 * very much snapshotting). 8878 * 8879 * And we need these to all be separate. The fact is we can use a lot of 8880 * space doing the truncate, and we have no earthly idea how much space 8881 * we will use, so we need the truncate reservation to be separate so it 8882 * doesn't end up using space reserved for updating the inode or 8883 * removing the orphan item. We also need to be able to stop the 8884 * transaction and start a new one, which means we need to be able to 8885 * update the inode several times, and we have no way of knowing how 8886 * many times that will be, so we can't just reserve 1 item for the 8887 * entirety of the operation, so that has to be done separately as well. 8888 * Then there is the orphan item, which does indeed need to be held on 8889 * to for the whole operation, and we need nobody to touch this reserved 8890 * space except the orphan code.
8891 * 8892 * So that leaves us with 8893 * 8894 * 1) root->orphan_block_rsv - for the orphan deletion. 8895 * 2) rsv - for the truncate reservation, which we will steal from the 8896 * transaction reservation. 8897 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for 8898 * updating the inode. 8899 */ 8900 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 8901 if (!rsv) 8902 return -ENOMEM; 8903 rsv->size = min_size; 8904 rsv->failfast = 1; 8905 8906 /* 8907 * 1 for the truncate slack space 8908 * 1 for updating the inode. 8909 */ 8910 trans = btrfs_start_transaction(root, 2); 8911 if (IS_ERR(trans)) { 8912 err = PTR_ERR(trans); 8913 goto out; 8914 } 8915 8916 /* Migrate the slack space for the truncate to our reserve */ 8917 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 8918 min_size); 8919 BUG_ON(ret); 8920 8921 /* 8922 * So if we truncate and then write and fsync we normally would just 8923 * write the extents that changed, which is a problem if we need to 8924 * first truncate that entire inode. So set this flag so we write out 8925 * all of the extents in the inode to the sync log so we're completely 8926 * safe. 8927 */ 8928 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 8929 trans->block_rsv = rsv; 8930 8931 while (1) { 8932 ret = btrfs_truncate_inode_items(trans, root, inode, 8933 inode->i_size, 8934 BTRFS_EXTENT_DATA_KEY); 8935 if (ret != -ENOSPC && ret != -EAGAIN) { 8936 err = ret; 8937 break; 8938 } 8939 8940 trans->block_rsv = &root->fs_info->trans_block_rsv; 8941 ret = btrfs_update_inode(trans, root, inode); 8942 if (ret) { 8943 err = ret; 8944 break; 8945 } 8946 8947 btrfs_end_transaction(trans, root); 8948 btrfs_btree_balance_dirty(root); 8949 8950 trans = btrfs_start_transaction(root, 2); 8951 if (IS_ERR(trans)) { 8952 ret = err = PTR_ERR(trans); 8953 trans = NULL; 8954 break; 8955 } 8956 8957 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, 8958 rsv, min_size); 8959 BUG_ON(ret); /* shouldn't happen */ 8960 trans->block_rsv = rsv; 8961 } 8962 8963 if (ret == 0 && inode->i_nlink > 0) { 8964 trans->block_rsv = root->orphan_block_rsv; 8965 ret = btrfs_orphan_del(trans, inode); 8966 if (ret) 8967 err = ret; 8968 } 8969 8970 if (trans) { 8971 trans->block_rsv = &root->fs_info->trans_block_rsv; 8972 ret = btrfs_update_inode(trans, root, inode); 8973 if (ret && !err) 8974 err = ret; 8975 8976 ret = btrfs_end_transaction(trans, root); 8977 btrfs_btree_balance_dirty(root); 8978 } 8979 8980 out: 8981 btrfs_free_block_rsv(root, rsv); 8982 8983 if (ret && !err) 8984 err = ret; 8985 8986 return err; 8987 } 8988 8989 /* 8990 * create a new subvolume directory/inode (helper for the ioctl). 
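 * The new inode gets objectid @new_dirid and becomes the top level directory of the subvolume; properties are inherited from @parent_root and the inode item is written out before we return.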
8991 */ 8992 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 8993 struct btrfs_root *new_root, 8994 struct btrfs_root *parent_root, 8995 u64 new_dirid) 8996 { 8997 struct inode *inode; 8998 int err; 8999 u64 index = 0; 9000 9001 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 9002 new_dirid, new_dirid, 9003 S_IFDIR | (~current_umask() & S_IRWXUGO), 9004 &index); 9005 if (IS_ERR(inode)) 9006 return PTR_ERR(inode); 9007 inode->i_op = &btrfs_dir_inode_operations; 9008 inode->i_fop = &btrfs_dir_file_operations; 9009 9010 set_nlink(inode, 1); 9011 btrfs_i_size_write(inode, 0); 9012 unlock_new_inode(inode); 9013 9014 err = btrfs_subvol_inherit_props(trans, new_root, parent_root); 9015 if (err) 9016 btrfs_err(new_root->fs_info, 9017 "error inheriting subvolume %llu properties: %d", 9018 new_root->root_key.objectid, err); 9019 9020 err = btrfs_update_inode(trans, new_root, inode); 9021 9022 iput(inode); 9023 return err; 9024 } 9025 9026 struct inode *btrfs_alloc_inode(struct super_block *sb) 9027 { 9028 struct btrfs_inode *ei; 9029 struct inode *inode; 9030 9031 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 9032 if (!ei) 9033 return NULL; 9034 9035 ei->root = NULL; 9036 ei->generation = 0; 9037 ei->last_trans = 0; 9038 ei->last_sub_trans = 0; 9039 ei->logged_trans = 0; 9040 ei->delalloc_bytes = 0; 9041 ei->defrag_bytes = 0; 9042 ei->disk_i_size = 0; 9043 ei->flags = 0; 9044 ei->csum_bytes = 0; 9045 ei->index_cnt = (u64)-1; 9046 ei->dir_index = 0; 9047 ei->last_unlink_trans = 0; 9048 ei->last_log_commit = 0; 9049 9050 spin_lock_init(&ei->lock); 9051 ei->outstanding_extents = 0; 9052 ei->reserved_extents = 0; 9053 9054 ei->runtime_flags = 0; 9055 ei->force_compress = BTRFS_COMPRESS_NONE; 9056 9057 ei->delayed_node = NULL; 9058 9059 ei->i_otime.tv_sec = 0; 9060 ei->i_otime.tv_nsec = 0; 9061 9062 inode = &ei->vfs_inode; 9063 extent_map_tree_init(&ei->extent_tree); 9064 extent_io_tree_init(&ei->io_tree, &inode->i_data); 9065 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 9066 ei->io_tree.track_uptodate = 1; 9067 ei->io_failure_tree.track_uptodate = 1; 9068 atomic_set(&ei->sync_writers, 0); 9069 mutex_init(&ei->log_mutex); 9070 mutex_init(&ei->delalloc_mutex); 9071 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 9072 INIT_LIST_HEAD(&ei->delalloc_inodes); 9073 RB_CLEAR_NODE(&ei->rb_node); 9074 9075 return inode; 9076 } 9077 9078 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 9079 void btrfs_test_destroy_inode(struct inode *inode) 9080 { 9081 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 9082 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9083 } 9084 #endif 9085 9086 static void btrfs_i_callback(struct rcu_head *head) 9087 { 9088 struct inode *inode = container_of(head, struct inode, i_rcu); 9089 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9090 } 9091 9092 void btrfs_destroy_inode(struct inode *inode) 9093 { 9094 struct btrfs_ordered_extent *ordered; 9095 struct btrfs_root *root = BTRFS_I(inode)->root; 9096 9097 WARN_ON(!hlist_empty(&inode->i_dentry)); 9098 WARN_ON(inode->i_data.nrpages); 9099 WARN_ON(BTRFS_I(inode)->outstanding_extents); 9100 WARN_ON(BTRFS_I(inode)->reserved_extents); 9101 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 9102 WARN_ON(BTRFS_I(inode)->csum_bytes); 9103 WARN_ON(BTRFS_I(inode)->defrag_bytes); 9104 9105 /* 9106 * This can happen where we create an inode, but somebody else also 9107 * created the same inode and we need to destroy the one we already 9108 * created. 
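 * Such an inode never had a root attached, so there is nothing to clean up beyond freeing it (the !root check below handles this).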
9109 */ 9110 if (!root) 9111 goto free; 9112 9113 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 9114 &BTRFS_I(inode)->runtime_flags)) { 9115 btrfs_info(root->fs_info, "inode %llu still on the orphan list", 9116 btrfs_ino(inode)); 9117 atomic_dec(&root->orphan_inodes); 9118 } 9119 9120 while (1) { 9121 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 9122 if (!ordered) 9123 break; 9124 else { 9125 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup", 9126 ordered->file_offset, ordered->len); 9127 btrfs_remove_ordered_extent(inode, ordered); 9128 btrfs_put_ordered_extent(ordered); 9129 btrfs_put_ordered_extent(ordered); 9130 } 9131 } 9132 btrfs_qgroup_check_reserved_leak(inode); 9133 inode_tree_del(inode); 9134 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 9135 free: 9136 call_rcu(&inode->i_rcu, btrfs_i_callback); 9137 } 9138 9139 int btrfs_drop_inode(struct inode *inode) 9140 { 9141 struct btrfs_root *root = BTRFS_I(inode)->root; 9142 9143 if (root == NULL) 9144 return 1; 9145 9146 /* the snap/subvol tree is on deleting */ 9147 if (btrfs_root_refs(&root->root_item) == 0) 9148 return 1; 9149 else 9150 return generic_drop_inode(inode); 9151 } 9152 9153 static void init_once(void *foo) 9154 { 9155 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 9156 9157 inode_init_once(&ei->vfs_inode); 9158 } 9159 9160 void btrfs_destroy_cachep(void) 9161 { 9162 /* 9163 * Make sure all delayed rcu free inodes are flushed before we 9164 * destroy cache. 9165 */ 9166 rcu_barrier(); 9167 if (btrfs_inode_cachep) 9168 kmem_cache_destroy(btrfs_inode_cachep); 9169 if (btrfs_trans_handle_cachep) 9170 kmem_cache_destroy(btrfs_trans_handle_cachep); 9171 if (btrfs_transaction_cachep) 9172 kmem_cache_destroy(btrfs_transaction_cachep); 9173 if (btrfs_path_cachep) 9174 kmem_cache_destroy(btrfs_path_cachep); 9175 if (btrfs_free_space_cachep) 9176 kmem_cache_destroy(btrfs_free_space_cachep); 9177 if (btrfs_delalloc_work_cachep) 9178 kmem_cache_destroy(btrfs_delalloc_work_cachep); 9179 } 9180 9181 int btrfs_init_cachep(void) 9182 { 9183 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 9184 sizeof(struct btrfs_inode), 0, 9185 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 9186 if (!btrfs_inode_cachep) 9187 goto fail; 9188 9189 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 9190 sizeof(struct btrfs_trans_handle), 0, 9191 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9192 if (!btrfs_trans_handle_cachep) 9193 goto fail; 9194 9195 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", 9196 sizeof(struct btrfs_transaction), 0, 9197 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9198 if (!btrfs_transaction_cachep) 9199 goto fail; 9200 9201 btrfs_path_cachep = kmem_cache_create("btrfs_path", 9202 sizeof(struct btrfs_path), 0, 9203 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9204 if (!btrfs_path_cachep) 9205 goto fail; 9206 9207 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 9208 sizeof(struct btrfs_free_space), 0, 9209 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9210 if (!btrfs_free_space_cachep) 9211 goto fail; 9212 9213 btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work", 9214 sizeof(struct btrfs_delalloc_work), 0, 9215 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, 9216 NULL); 9217 if (!btrfs_delalloc_work_cachep) 9218 goto fail; 9219 9220 return 0; 9221 fail: 9222 btrfs_destroy_cachep(); 9223 return -ENOMEM; 9224 } 9225 9226 static int btrfs_getattr(struct vfsmount *mnt, 9227 struct dentry *dentry, struct kstat *stat) 
9228 { 9229 u64 delalloc_bytes; 9230 struct inode *inode = d_inode(dentry); 9231 u32 blocksize = inode->i_sb->s_blocksize; 9232 9233 generic_fillattr(inode, stat); 9234 stat->dev = BTRFS_I(inode)->root->anon_dev; 9235 stat->blksize = PAGE_CACHE_SIZE; 9236 9237 spin_lock(&BTRFS_I(inode)->lock); 9238 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes; 9239 spin_unlock(&BTRFS_I(inode)->lock); 9240 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 9241 ALIGN(delalloc_bytes, blocksize)) >> 9; 9242 return 0; 9243 } 9244 9245 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 9246 struct inode *new_dir, struct dentry *new_dentry) 9247 { 9248 struct btrfs_trans_handle *trans; 9249 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9250 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9251 struct inode *new_inode = d_inode(new_dentry); 9252 struct inode *old_inode = d_inode(old_dentry); 9253 struct timespec ctime = CURRENT_TIME; 9254 u64 index = 0; 9255 u64 root_objectid; 9256 int ret; 9257 u64 old_ino = btrfs_ino(old_inode); 9258 9259 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9260 return -EPERM; 9261 9262 /* we only allow rename subvolume link between subvolumes */ 9263 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9264 return -EXDEV; 9265 9266 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9267 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) 9268 return -ENOTEMPTY; 9269 9270 if (S_ISDIR(old_inode->i_mode) && new_inode && 9271 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9272 return -ENOTEMPTY; 9273 9274 9275 /* check for collisions, even if the name isn't there */ 9276 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9277 new_dentry->d_name.name, 9278 new_dentry->d_name.len); 9279 9280 if (ret) { 9281 if (ret == -EEXIST) { 9282 /* we shouldn't get 9283 * eexist without a new_inode */ 9284 if (WARN_ON(!new_inode)) { 9285 return ret; 9286 } 9287 } else { 9288 /* maybe -EOVERFLOW */ 9289 return ret; 9290 } 9291 } 9292 ret = 0; 9293 9294 /* 9295 * we're using rename to replace one file with another. Start IO on it 9296 * now so we don't add too much work to the end of the transaction 9297 */ 9298 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9299 filemap_flush(old_inode->i_mapping); 9300 9301 /* close the racy window with snapshot create/destroy ioctl */ 9302 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9303 down_read(&root->fs_info->subvol_sem); 9304 /* 9305 * We want to reserve the absolute worst case amount of items. So if 9306 * both inodes are subvols and we need to unlink them then that would 9307 * require 4 item modifications, but if they are both normal inodes it 9308 * would require 5 item modifications, so we'll assume their normal 9309 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 9310 * should cover the worst case number of items we'll modify. 9311 */ 9312 trans = btrfs_start_transaction(root, 11); 9313 if (IS_ERR(trans)) { 9314 ret = PTR_ERR(trans); 9315 goto out_notrans; 9316 } 9317 9318 if (dest != root) 9319 btrfs_record_root_in_trans(trans, dest); 9320 9321 ret = btrfs_set_inode_index(new_dir, &index); 9322 if (ret) 9323 goto out_fail; 9324 9325 BTRFS_I(old_inode)->dir_index = 0ULL; 9326 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9327 /* force full log commit if subvolume involved. 
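 * The tree log cannot safely replay a subvolume root being renamed, so any log commit in this transaction must fall back to a full transaction commit.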
*/ 9328 btrfs_set_log_full_commit(root->fs_info, trans); 9329 } else { 9330 ret = btrfs_insert_inode_ref(trans, dest, 9331 new_dentry->d_name.name, 9332 new_dentry->d_name.len, 9333 old_ino, 9334 btrfs_ino(new_dir), index); 9335 if (ret) 9336 goto out_fail; 9337 /* 9338 * this is an ugly little race, but the rename is required 9339 * to make sure that if we crash, the inode is either at the 9340 * old name or the new one. pinning the log transaction lets 9341 * us make sure we don't allow a log commit to come in after 9342 * we unlink the name but before we add the new name back in. 9343 */ 9344 btrfs_pin_log_trans(root); 9345 } 9346 9347 inode_inc_iversion(old_dir); 9348 inode_inc_iversion(new_dir); 9349 inode_inc_iversion(old_inode); 9350 old_dir->i_ctime = old_dir->i_mtime = ctime; 9351 new_dir->i_ctime = new_dir->i_mtime = ctime; 9352 old_inode->i_ctime = ctime; 9353 9354 if (old_dentry->d_parent != new_dentry->d_parent) 9355 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 9356 9357 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9358 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 9359 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 9360 old_dentry->d_name.name, 9361 old_dentry->d_name.len); 9362 } else { 9363 ret = __btrfs_unlink_inode(trans, root, old_dir, 9364 d_inode(old_dentry), 9365 old_dentry->d_name.name, 9366 old_dentry->d_name.len); 9367 if (!ret) 9368 ret = btrfs_update_inode(trans, root, old_inode); 9369 } 9370 if (ret) { 9371 btrfs_abort_transaction(trans, root, ret); 9372 goto out_fail; 9373 } 9374 9375 if (new_inode) { 9376 inode_inc_iversion(new_inode); 9377 new_inode->i_ctime = CURRENT_TIME; 9378 if (unlikely(btrfs_ino(new_inode) == 9379 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9380 root_objectid = BTRFS_I(new_inode)->location.objectid; 9381 ret = btrfs_unlink_subvol(trans, dest, new_dir, 9382 root_objectid, 9383 new_dentry->d_name.name, 9384 new_dentry->d_name.len); 9385 BUG_ON(new_inode->i_nlink == 0); 9386 } else { 9387 ret = btrfs_unlink_inode(trans, dest, new_dir, 9388 d_inode(new_dentry), 9389 new_dentry->d_name.name, 9390 new_dentry->d_name.len); 9391 } 9392 if (!ret && new_inode->i_nlink == 0) 9393 ret = btrfs_orphan_add(trans, d_inode(new_dentry)); 9394 if (ret) { 9395 btrfs_abort_transaction(trans, root, ret); 9396 goto out_fail; 9397 } 9398 } 9399 9400 ret = btrfs_add_link(trans, new_dir, old_inode, 9401 new_dentry->d_name.name, 9402 new_dentry->d_name.len, 0, index); 9403 if (ret) { 9404 btrfs_abort_transaction(trans, root, ret); 9405 goto out_fail; 9406 } 9407 9408 if (old_inode->i_nlink == 1) 9409 BTRFS_I(old_inode)->dir_index = index; 9410 9411 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 9412 struct dentry *parent = new_dentry->d_parent; 9413 btrfs_log_new_name(trans, old_inode, old_dir, parent); 9414 btrfs_end_log_trans(root); 9415 } 9416 out_fail: 9417 btrfs_end_transaction(trans, root); 9418 out_notrans: 9419 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9420 up_read(&root->fs_info->subvol_sem); 9421 9422 return ret; 9423 } 9424 9425 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, 9426 struct inode *new_dir, struct dentry *new_dentry, 9427 unsigned int flags) 9428 { 9429 if (flags & ~RENAME_NOREPLACE) 9430 return -EINVAL; 9431 9432 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry); 9433 } 9434 9435 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9436 { 9437 struct btrfs_delalloc_work *delalloc_work; 9438 struct inode *inode; 9439 9440 delalloc_work = 
container_of(work, struct btrfs_delalloc_work, 9441 work); 9442 inode = delalloc_work->inode; 9443 if (delalloc_work->wait) { 9444 btrfs_wait_ordered_range(inode, 0, (u64)-1); 9445 } else { 9446 filemap_flush(inode->i_mapping); 9447 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9448 &BTRFS_I(inode)->runtime_flags)) 9449 filemap_flush(inode->i_mapping); 9450 } 9451 9452 if (delalloc_work->delay_iput) 9453 btrfs_add_delayed_iput(inode); 9454 else 9455 iput(inode); 9456 complete(&delalloc_work->completion); 9457 } 9458 9459 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 9460 int wait, int delay_iput) 9461 { 9462 struct btrfs_delalloc_work *work; 9463 9464 work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS); 9465 if (!work) 9466 return NULL; 9467 9468 init_completion(&work->completion); 9469 INIT_LIST_HEAD(&work->list); 9470 work->inode = inode; 9471 work->wait = wait; 9472 work->delay_iput = delay_iput; 9473 WARN_ON_ONCE(!inode); 9474 btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, 9475 btrfs_run_delalloc_work, NULL, NULL); 9476 9477 return work; 9478 } 9479 9480 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) 9481 { 9482 wait_for_completion(&work->completion); 9483 kmem_cache_free(btrfs_delalloc_work_cachep, work); 9484 } 9485 9486 /* 9487 * some fairly slow code that needs optimization. This walks the list 9488 * of all the inodes with pending delalloc and forces them to disk. 9489 */ 9490 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput, 9491 int nr) 9492 { 9493 struct btrfs_inode *binode; 9494 struct inode *inode; 9495 struct btrfs_delalloc_work *work, *next; 9496 struct list_head works; 9497 struct list_head splice; 9498 int ret = 0; 9499 9500 INIT_LIST_HEAD(&works); 9501 INIT_LIST_HEAD(&splice); 9502 9503 mutex_lock(&root->delalloc_mutex); 9504 spin_lock(&root->delalloc_lock); 9505 list_splice_init(&root->delalloc_inodes, &splice); 9506 while (!list_empty(&splice)) { 9507 binode = list_entry(splice.next, struct btrfs_inode, 9508 delalloc_inodes); 9509 9510 list_move_tail(&binode->delalloc_inodes, 9511 &root->delalloc_inodes); 9512 inode = igrab(&binode->vfs_inode); 9513 if (!inode) { 9514 cond_resched_lock(&root->delalloc_lock); 9515 continue; 9516 } 9517 spin_unlock(&root->delalloc_lock); 9518 9519 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 9520 if (!work) { 9521 if (delay_iput) 9522 btrfs_add_delayed_iput(inode); 9523 else 9524 iput(inode); 9525 ret = -ENOMEM; 9526 goto out; 9527 } 9528 list_add_tail(&work->list, &works); 9529 btrfs_queue_work(root->fs_info->flush_workers, 9530 &work->work); 9531 ret++; 9532 if (nr != -1 && ret >= nr) 9533 goto out; 9534 cond_resched(); 9535 spin_lock(&root->delalloc_lock); 9536 } 9537 spin_unlock(&root->delalloc_lock); 9538 9539 out: 9540 list_for_each_entry_safe(work, next, &works, list) { 9541 list_del_init(&work->list); 9542 btrfs_wait_and_free_delalloc_work(work); 9543 } 9544 9545 if (!list_empty_careful(&splice)) { 9546 spin_lock(&root->delalloc_lock); 9547 list_splice_tail(&splice, &root->delalloc_inodes); 9548 spin_unlock(&root->delalloc_lock); 9549 } 9550 mutex_unlock(&root->delalloc_mutex); 9551 return ret; 9552 } 9553 9554 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 9555 { 9556 int ret; 9557 9558 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 9559 return -EROFS; 9560 9561 ret = __start_delalloc_inodes(root, delay_iput, -1); 9562 if (ret > 0) 9563 ret = 0; 9564 /* 9565 * the filemap_flush 
will queue IO into the worker threads, but 9566 * we have to make sure the IO is actually started and that 9567 * ordered extents get created before we return 9568 */ 9569 atomic_inc(&root->fs_info->async_submit_draining); 9570 while (atomic_read(&root->fs_info->nr_async_submits) || 9571 atomic_read(&root->fs_info->async_delalloc_pages)) { 9572 wait_event(root->fs_info->async_submit_wait, 9573 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 9574 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 9575 } 9576 atomic_dec(&root->fs_info->async_submit_draining); 9577 return ret; 9578 } 9579 9580 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 9581 int nr) 9582 { 9583 struct btrfs_root *root; 9584 struct list_head splice; 9585 int ret; 9586 9587 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 9588 return -EROFS; 9589 9590 INIT_LIST_HEAD(&splice); 9591 9592 mutex_lock(&fs_info->delalloc_root_mutex); 9593 spin_lock(&fs_info->delalloc_root_lock); 9594 list_splice_init(&fs_info->delalloc_roots, &splice); 9595 while (!list_empty(&splice) && nr) { 9596 root = list_first_entry(&splice, struct btrfs_root, 9597 delalloc_root); 9598 root = btrfs_grab_fs_root(root); 9599 BUG_ON(!root); 9600 list_move_tail(&root->delalloc_root, 9601 &fs_info->delalloc_roots); 9602 spin_unlock(&fs_info->delalloc_root_lock); 9603 9604 ret = __start_delalloc_inodes(root, delay_iput, nr); 9605 btrfs_put_fs_root(root); 9606 if (ret < 0) 9607 goto out; 9608 9609 if (nr != -1) { 9610 nr -= ret; 9611 WARN_ON(nr < 0); 9612 } 9613 spin_lock(&fs_info->delalloc_root_lock); 9614 } 9615 spin_unlock(&fs_info->delalloc_root_lock); 9616 9617 ret = 0; 9618 atomic_inc(&fs_info->async_submit_draining); 9619 while (atomic_read(&fs_info->nr_async_submits) || 9620 atomic_read(&fs_info->async_delalloc_pages)) { 9621 wait_event(fs_info->async_submit_wait, 9622 (atomic_read(&fs_info->nr_async_submits) == 0 && 9623 atomic_read(&fs_info->async_delalloc_pages) == 0)); 9624 } 9625 atomic_dec(&fs_info->async_submit_draining); 9626 out: 9627 if (!list_empty_careful(&splice)) { 9628 spin_lock(&fs_info->delalloc_root_lock); 9629 list_splice_tail(&splice, &fs_info->delalloc_roots); 9630 spin_unlock(&fs_info->delalloc_root_lock); 9631 } 9632 mutex_unlock(&fs_info->delalloc_root_mutex); 9633 return ret; 9634 } 9635 9636 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 9637 const char *symname) 9638 { 9639 struct btrfs_trans_handle *trans; 9640 struct btrfs_root *root = BTRFS_I(dir)->root; 9641 struct btrfs_path *path; 9642 struct btrfs_key key; 9643 struct inode *inode = NULL; 9644 int err; 9645 int drop_inode = 0; 9646 u64 objectid; 9647 u64 index = 0; 9648 int name_len; 9649 int datasize; 9650 unsigned long ptr; 9651 struct btrfs_file_extent_item *ei; 9652 struct extent_buffer *leaf; 9653 9654 name_len = strlen(symname); 9655 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 9656 return -ENAMETOOLONG; 9657 9658 /* 9659 * 2 items for inode item and ref 9660 * 2 items for dir items 9661 * 1 item for xattr if selinux is on 9662 */ 9663 trans = btrfs_start_transaction(root, 5); 9664 if (IS_ERR(trans)) 9665 return PTR_ERR(trans); 9666 9667 err = btrfs_find_free_ino(root, &objectid); 9668 if (err) 9669 goto out_unlock; 9670 9671 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 9672 dentry->d_name.len, btrfs_ino(dir), objectid, 9673 S_IFLNK|S_IRWXUGO, &index); 9674 if (IS_ERR(inode)) { 9675 err = PTR_ERR(inode); 9676 goto out_unlock; 9677 } 9678 9679 /* 9680 * If the active LSM wants 
to access the inode during 9681 * d_instantiate it needs these. Smack checks to see 9682 * if the filesystem supports xattrs by looking at the 9683 * ops vector. 9684 */ 9685 inode->i_fop = &btrfs_file_operations; 9686 inode->i_op = &btrfs_file_inode_operations; 9687 inode->i_mapping->a_ops = &btrfs_aops; 9688 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9689 9690 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 9691 if (err) 9692 goto out_unlock_inode; 9693 9694 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 9695 if (err) 9696 goto out_unlock_inode; 9697 9698 path = btrfs_alloc_path(); 9699 if (!path) { 9700 err = -ENOMEM; 9701 goto out_unlock_inode; 9702 } 9703 key.objectid = btrfs_ino(inode); 9704 key.offset = 0; 9705 key.type = BTRFS_EXTENT_DATA_KEY; 9706 datasize = btrfs_file_extent_calc_inline_size(name_len); 9707 err = btrfs_insert_empty_item(trans, root, path, &key, 9708 datasize); 9709 if (err) { 9710 btrfs_free_path(path); 9711 goto out_unlock_inode; 9712 } 9713 leaf = path->nodes[0]; 9714 ei = btrfs_item_ptr(leaf, path->slots[0], 9715 struct btrfs_file_extent_item); 9716 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9717 btrfs_set_file_extent_type(leaf, ei, 9718 BTRFS_FILE_EXTENT_INLINE); 9719 btrfs_set_file_extent_encryption(leaf, ei, 0); 9720 btrfs_set_file_extent_compression(leaf, ei, 0); 9721 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9722 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9723 9724 ptr = btrfs_file_extent_inline_start(ei); 9725 write_extent_buffer(leaf, symname, ptr, name_len); 9726 btrfs_mark_buffer_dirty(leaf); 9727 btrfs_free_path(path); 9728 9729 inode->i_op = &btrfs_symlink_inode_operations; 9730 inode->i_mapping->a_ops = &btrfs_symlink_aops; 9731 inode_set_bytes(inode, name_len); 9732 btrfs_i_size_write(inode, name_len); 9733 err = btrfs_update_inode(trans, root, inode); 9734 if (err) { 9735 drop_inode = 1; 9736 goto out_unlock_inode; 9737 } 9738 9739 unlock_new_inode(inode); 9740 d_instantiate(dentry, inode); 9741 9742 out_unlock: 9743 btrfs_end_transaction(trans, root); 9744 if (drop_inode) { 9745 inode_dec_link_count(inode); 9746 iput(inode); 9747 } 9748 btrfs_btree_balance_dirty(root); 9749 return err; 9750 9751 out_unlock_inode: 9752 drop_inode = 1; 9753 unlock_new_inode(inode); 9754 goto out_unlock; 9755 } 9756 9757 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9758 u64 start, u64 num_bytes, u64 min_size, 9759 loff_t actual_len, u64 *alloc_hint, 9760 struct btrfs_trans_handle *trans) 9761 { 9762 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 9763 struct extent_map *em; 9764 struct btrfs_root *root = BTRFS_I(inode)->root; 9765 struct btrfs_key ins; 9766 u64 cur_offset = start; 9767 u64 i_size; 9768 u64 cur_bytes; 9769 u64 last_alloc = (u64)-1; 9770 int ret = 0; 9771 bool own_trans = true; 9772 9773 if (trans) 9774 own_trans = false; 9775 while (num_bytes > 0) { 9776 if (own_trans) { 9777 trans = btrfs_start_transaction(root, 3); 9778 if (IS_ERR(trans)) { 9779 ret = PTR_ERR(trans); 9780 break; 9781 } 9782 } 9783 9784 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); 9785 cur_bytes = max(cur_bytes, min_size); 9786 /* 9787 * If we are severely fragmented we could end up with really 9788 * small allocations, so if the allocator is returning small 9789 * chunks lets make its job easier by only searching for those 9790 * sized chunks. 
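 * For example, if the previous iteration only managed to allocate 4M, cap this request at 4M as well instead of asking for 256M again.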
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks let's make its job easier by only searching for
		 * those sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
					   *alloc_hint, &ins, 1, 0);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		last_alloc = ins.offset;
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_free_reserved_extent(root, ins.objectid,
						   ins.offset, 0);
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);

		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}
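
/*
 * O_TMPFILE support: create an unlinked inode.  The new inode is put on the
 * orphan list so it gets cleaned up if we crash before it is linked in or
 * released, and nlink is bumped to 1 only to keep d_tmpfile() happy (see the
 * comment ahead of set_nlink() below).
 */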
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	u64 objectid;
	u64 index;
	int ret = 0;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
				btrfs_ino(dir), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out_inode;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto out_inode;
	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		goto out_inode;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	unlock_new_inode(inode);
	d_tmpfile(dentry, inode);
	mark_inode_dirty(inode);

out:
	btrfs_end_transaction(trans, root);
	if (ret)
		iput(inode);
	btrfs_balance_delayed_items(root);
	btrfs_btree_balance_dirty(root);
	return ret;

out_inode:
	unlock_new_inode(inode);
	goto out;
}

/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
	int ret = 0;

	if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
		ret = -EIO;

	return ret;
}
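
/*
 * The remaining tables wire the btrfs callbacks into the VFS and into the
 * extent_io code: directories, regular files, special files and symlinks
 * each get their own inode_operations, and btrfs_aops deliberately omits
 * ->bmap (see the comment above btrfs_aops).
 */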
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename2	= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};