// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	u64 reserve;
	loff_t length;
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool sync;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);

/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: the caller must ensure that when an error happens, it must not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved
 * metadata to be released, which we want to happen only when finishing the
 * ordered extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;

	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;
		ClearPagePrivate2(page);
		put_page(page);
	}

	/*
	 * If the locked page belongs to the delalloc range being instantiated,
	 * skip it, since the first page of a range is going to be properly
	 * cleaned up by the caller of run_delalloc_range.
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		offset += PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}
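
/*
 * Example (illustrative, assuming 4K pages): with offset == 0, bytes == 64K
 * and locked_page covering [0, 4K - 1], the loop above clears PagePrivate2
 * on all sixteen pages, and because the locked page lies inside the range,
 * the ordered extent cleanup is then applied only to [4K, 64K - 1]; the
 * first page is finished by the caller of run_delalloc_range().
 */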

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = offset_in_page(start);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	size = ALIGN(size, root->fs_info->sectorsize);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, size);
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

fail:
	return ret;
}

/*
 * Conditionally insert an inline extent into the file.  This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(&inode->vfs_inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    (!compressed_size &&
	     (actual_end & (fs_info->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
				compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
				inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path, start, aligned_end,
				   NULL, 1, 1, extent_item_size,
				   &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, &inode->vfs_inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space, as an inline extent
	 * doesn't count as a data extent, so free the space directly here.
	 * At reserve time the space is always aligned to the page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
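
/*
 * Illustrative walk-through of the eligibility checks above, assuming the
 * common 4K sectorsize and the default max_inline of 2048: an uncompressed
 * 1000-byte file starting at offset 0 passes every test and is inlined,
 * while a 5000-byte file is rejected by "actual_end > fs_info->sectorsize"
 * and a write at offset 8192 is rejected by "start > 0" -- inline extents
 * can only describe data at the very start of the file.
 */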

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	atomic_t *pending;
};

struct async_cow {
	/* Number of chunks in flight; must be first in the structure */
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
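
/*
 * Layout note (illustrative): struct async_cow is allocated as one buffer
 * holding the refcount followed by the array of async_chunk, as done in
 * cow_file_range_async():
 *
 *	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
 *
 * Each chunk's ->pending points back at ctx->num_chunks, which is also the
 * first member of the allocation, so when the last chunk drops the count in
 * async_cow_free() the whole buffer can be freed through that pointer.
 */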

/*
 * Check if the inode has flags compatible with compression
 */
static inline bool inode_can_compress(struct btrfs_inode *inode)
{
	if (inode->flags & BTRFS_INODE_NODATACOW ||
	    inode->flags & BTRFS_INODE_NODATASUM)
		return false;
	return true;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u64 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
}

/*
 * We create compressed extents in two phases.  The first phase compresses a
 * range of pages that have already been locked (both pages and state bits
 * are locked).
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered
 * work queue takes care of making sure that happens in the same order things
 * were put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto
 * the work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order
 * that the flusher thread sent them down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock
	 * and unlock the page in truncate and fallocate, and then modify the
	 * i_size later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the end of
	 * the file is before the start of our current requested range of
	 * bytes, we bail out to the uncompressed cleanup code that can deal
	 * with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a very
	 * uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that isn't
	 * an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * We need to call clear_page_dirty_for_io on each page in the
		 * range.  Otherwise applications with the file mmap'd can
		 * wander in and change the page contents while we are
		 * compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);
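
		/*
		 * Illustrative note: the first argument packs the algorithm
		 * into the low 4 bits and the level into the bits above, e.g.
		 * zstd (BTRFS_COMPRESS_ZSTD == 3) at level 5 is encoded as
		 * (5 << 4) | 3 == 0x53, and btrfs_compress_pages() unpacks
		 * the two halves again.
		 */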

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* let's try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for
			 * this range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * We aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does sane
		 * things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * One last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size.
		 */
		total_in = ALIGN(total_in, PAGE_SIZE);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					 total_compressed, pages, nr_pages,
					 compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * The compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array.
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page) <= end)) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

/*
 * Phase two of compressed writeback.  This is the ordered portion of the
 * code, which only gets called in the order the work was queued.  We walk
 * all the async extents created by compress_file_range and send them down
 * to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = inode->root;
	struct extent_io_tree *io_tree = &inode->io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

retry:
		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			/* allocate blocks */
			ret = cow_file_range(inode, async_chunk->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(&inode->vfs_inode,
						  async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  WB_SYNC_ALL);
			else if (ret && async_chunk->locked_page)
				unlock_page(async_chunk->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		ret = btrfs_reserve_extent(root, async_extent->ram_size,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * We need to redirty the pages if we decide to
				 * fall back to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(&inode->vfs_inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		em = create_io_em(inode, async_extent->start,
				  async_extent->ram_size, /* len */
				  async_extent->start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  async_extent->ram_size, /* ram_bytes */
				  async_extent->compress_type,
				  BTRFS_ORDERED_COMPRESSED);
		if (IS_ERR(em))
			/* ret value is not necessary due to void function */
			goto out_free_reserve;
		free_extent_map(em);

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/* clear dirty, set writeback and unlock the pages */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		if (btrfs_submit_compressed_write(inode, async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages,
				    async_chunk->write_flags,
				    async_chunk->blkcg_css)) {
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->vfs_inode.i_mapping;
			btrfs_writepage_endio_finish_ordered(p, start, end, 0);

			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file, the
 * callbacks end up in this code.  The basic idea is to allocate extents on
 * disk for the range, and create ordered data structs in ram to track those
 * extents.
 *
 * locked_page is the page that writepage had locked already.  We use it to
 * make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with IO
 * when we return.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(inode, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for
			 * this range.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents.  Normally writeback for relocation
	 * data extents follows a NOCOW path because relocation preallocates
	 * the extents.  However, due to an operation such as scrub turning a
	 * block group to RO mode, it may fall back to COW mode, so we must
	 * make sure an extent allocated during COW has exactly the requested
	 * size and can not be split into smaller extents, otherwise relocation
	 * breaks and fails during the stage where it updates the bytenr of
	 * file extent items.
	 */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;
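
	/*
	 * Illustrative example: when relocating a 1M data extent, the whole
	 * 1M must be reallocated as one extent, so min_alloc_size is set to
	 * the full num_bytes and a partial reservation makes
	 * btrfs_reserve_extent() below fail instead of silently splitting
	 * the extent.  For regular writeback a single sector is an acceptable
	 * fallback, hence the sectorsize minimum.
	 */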

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
		PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount.  We must
	 * make sure extent_clear_unlock_delalloc() does not try to decrement
	 * again the data space_info's bytes_may_use counter, therefore we do
	 * not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}
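
/*
 * Illustrative example of the error cleanup above: suppose a 1M delalloc
 * range, the first 128K got an allocated extent and an ordered extent, and
 * then btrfs_reloc_clone_csums() failed.  At out_unlock, extent_reserved is
 * false again and start was already advanced by 128K, so only the remaining
 * 896K, which has no ordered extent, is passed EXTENT_CLEAR_DATA_RESV; the
 * first 128K is cleaned up by btrfs_finish_ordered_io() when its ordered
 * extent completes.
 */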

/*
 * Work queue callback to start compression on a file and pages.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * Work queue callback to submit previously compressed pages.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);

	/*
	 * ->inode could be NULL if async_cow_start has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in cow_file_range_async.
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);
	/*
	 * Since the pointer to 'pending' is at the beginning of the array of
	 * async_chunk's, freeing it ensures the whole array has been freed.
	 */
	if (atomic_dec_and_test(async_chunk->pending))
		kvfree(async_chunk->pending);
}

static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const unsigned int write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
			PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
			PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].pending = &ctx->num_chunks;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
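
/*
 * Illustrative example (assuming 4K pages): a compressible 1M delalloc range
 * is cut into two 512K async_chunks.  Each chunk gets its own inode
 * reference and work item, fs_info->async_delalloc_pages grows by 256 pages
 * in total, and only the first chunk carries locked_page.
 * async_cow_submit() later subtracts each chunk's pages again as it runs.
 */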

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = (inode->root->root_key.objectid ==
				   BTRFS_DATA_RELOC_TREE_OBJECTID);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write
	 * was made we did not have enough available data space and therefore
	 * we did not reserve data space for it, since we thought we could do
	 * NOCOW for the respective file range (either there is a prealloc
	 * extent or the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment
	 *    the bytes_may_use counter to compensate (when space is reserved
	 *    at buffered write time, the bytes_may_use counter is
	 *    incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range.  We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason.  Space caches and relocated data extents always get a
	 * prealloc extent for them, however scrub or balance may have set the
	 * block group that contains that extent to RO mode and therefore
	 * force COW when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 0, 0, NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1);
}
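
/*
 * Illustrative example: a 128K NOCOW write made with no data space available
 * leaves [start, end] marked EXTENT_NORESERVE.  If scrub later turns the
 * block group read-only and we end up here, count becomes 128K,
 * bytes_may_use is bumped by 128K to mimic the reservation a normal buffered
 * write would have taken, the EXTENT_NORESERVE bits are cleared, and
 * cow_file_range() then consumes that reservation like any other COW write.
 */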

/*
 * Run the NOCOW writeback path.  This checks for snapshots or COW copies of
 * the extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct page *locked_page,
				       const u64 start, const u64 end,
				       int *page_started, int force,
				       unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	const bool freespace_inode = btrfs_is_free_space_inode(inode);
	u64 ino = btrfs_ino(inode);
	bool nocow = false;
	u64 disk_bytenr = 0;

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	while (1) {
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		u64 extent_end;
		u64 extent_offset;
		u64 num_bytes = 0;
		u64 disk_num_bytes;
		u64 ram_bytes;
		int extent_type;

		nocow = false;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersect it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			/*
			 * If the extent we got ends before our current offset,
			 * skip to the next extent.
			 */
			if (extent_end <= cur_offset) {
				path->slots[0]++;
				goto next_slot;
			}
			/* Skip holes */
			if (disk_bytenr == 0)
				goto out_check;
			/* Skip compressed/encrypted/encoded extents */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			/*
			 * If the extent was created before the subvolume's
			 * last snapshot, it is shared, hence we can't do
			 * NOCOW.  This is the same check as in
			 * btrfs_cross_ref_exist but without calling
			 * btrfs_search_slot.
			 */
			if (!freespace_inode &&
			    btrfs_file_extent_generation(leaf, fi) <=
			    btrfs_root_last_snapshot(&root->root_item))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			/* If extent is RO, we must COW it */
			if (btrfs_extent_readonly(fs_info, disk_bytenr))
				goto out_check;
			ret = btrfs_cross_ref_exist(root, ino,
						    found_key.offset -
						    extent_offset, disk_bytenr, false);
			if (ret) {
				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}

				WARN_ON_ONCE(freespace_inode);
				goto out_check;
			}
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * If there are pending snapshots for this root, we
			 * fall back to the common COW path.
			 */
			if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
				goto out_check;
			/*
			 * Force COW if csums exist in the range.  This
			 * ensures that the csums for a given extent are
			 * either valid or do not exist.
			 */
			ret = csum_exist_in_range(fs_info, disk_bytenr,
						  num_bytes);
			if (ret) {
				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}
				WARN_ON_ONCE(freespace_inode);
				goto out_check;
			}
			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
				goto out_check;
			nocow = true;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset + ram_bytes;
			extent_end = ALIGN(extent_end, fs_info->sectorsize);
			/* Skip extents outside of our requested range */
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
		} else {
			/* If this triggers then we have a memory corruption */
			BUG();
		}
out_check:
		/*
		 * If nocow is false then record the beginning of the range
		 * that needs to be COWed
		 */
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);

		/*
		 * COW the range from cow_start to found_key.offset - 1.  The
		 * key contains the beginning of the first extent that can be
		 * NOCOW'ed, which follows a range that needs to be COW'ed.
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_page,
					      cow_start, found_key.offset - 1,
					      page_started, nr_written);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - extent_offset;
			struct extent_map *em;

			em = create_io_em(inode, cur_offset, num_bytes,
					  orig_start,
					  disk_bytenr, /* block_start */
					  num_bytes, /* block_len */
					  disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       disk_bytenr, num_bytes,
						       num_bytes,
						       BTRFS_ORDERED_PREALLOC);
			if (ret) {
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset + num_bytes - 1,
							0);
				goto error;
			}
		} else {
			ret = btrfs_add_ordered_extent(inode, cur_offset,
						       disk_bytenr, num_bytes,
						       num_bytes,
						       BTRFS_ORDERED_NOCOW);
			if (ret)
				goto error;
		}

		if (nocow)
			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
		nocow = false;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID)
			/*
			 * The error is handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in the error handler
			 * from freeing metadata of the created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call the
		 * error handler, as metadata for the created ordered extent
		 * will only be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_page, cow_start, end,
				      page_started, nr_written);
		if (ret)
			goto error;
	}

error:
	if (nocow)
		btrfs_dec_nocow_writers(fs_info, disk_bytenr);

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

static inline int need_force_cow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC))
		return 0;

	/*
	 * @defrag_bytes is a hint value, no spinlock is held here; if it is
	 * not zero, it means the file is being defragged.  Force COW if the
	 * given extent needs to be defragged.
	 */
	if (inode->defrag_bytes &&
	    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}

/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
		u64 start, u64 end, int *page_started, unsigned long *nr_written,
		struct writeback_control *wbc)
{
	int ret;
	int force_cow = need_force_cow(inode, start, end);

	if (inode->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (inode->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_can_compress(inode) ||
		   !inode_need_compress(inode, start, end)) {
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
					   page_started, nr_written);
	}
	if (ret)
		btrfs_cleanup_ordered_extents(inode, locked_page, start,
					      end - start + 1);
	return ret;
}
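
/*
 * Illustrative summary of the dispatch above:
 *
 *	NODATACOW inode, no forced COW  -> run_delalloc_nocow(force = 1)
 *	PREALLOC inode, no forced COW   -> run_delalloc_nocow(force = 0)
 *	compression not possible/needed -> cow_file_range()
 *	otherwise                       -> cow_file_range_async() (compress)
 */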
1861 */ 1862 new_size = orig->end - split + 1; 1863 num_extents = count_max_extents(new_size); 1864 new_size = split - orig->start; 1865 num_extents += count_max_extents(new_size); 1866 if (count_max_extents(size) >= num_extents) 1867 return; 1868 } 1869 1870 spin_lock(&BTRFS_I(inode)->lock); 1871 btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); 1872 spin_unlock(&BTRFS_I(inode)->lock); 1873 } 1874 1875 /* 1876 * Handle merged delayed allocation extents so we can keep track of new extents 1877 * that are just merged onto old extents, such as when we are doing sequential 1878 * writes, so we can properly account for the metadata space we'll need. 1879 */ 1880 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new, 1881 struct extent_state *other) 1882 { 1883 u64 new_size, old_size; 1884 u32 num_extents; 1885 1886 /* not delalloc, ignore it */ 1887 if (!(other->state & EXTENT_DELALLOC)) 1888 return; 1889 1890 if (new->start > other->start) 1891 new_size = new->end - other->start + 1; 1892 else 1893 new_size = other->end - new->start + 1; 1894 1895 /* we're not bigger than the max, unreserve the space and go */ 1896 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1897 spin_lock(&BTRFS_I(inode)->lock); 1898 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 1899 spin_unlock(&BTRFS_I(inode)->lock); 1900 return; 1901 } 1902 1903 /* 1904 * We have to add up either side to figure out how many extents were 1905 * accounted for before we merged into one big extent. If the number of 1906 * extents we accounted for is <= the amount we need for the new range 1907 * then we can return, otherwise drop. Think of it like this 1908 * 1909 * [ 4k][MAX_SIZE] 1910 * 1911 * So we've grown the extent by a MAX_SIZE extent, this would mean we 1912 * need 2 outstanding extents, on one side we have 1 and the other side 1913 * we have 1 so they are == and we can return. But in this case 1914 * 1915 * [MAX_SIZE+4k][MAX_SIZE+4k] 1916 * 1917 * Each range on their own accounts for 2 extents, but merged together 1918 * they are only 3 extents worth of accounting, so we need to drop in 1919 * this case. 
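 * Note that a merge of two adjacent ranges can reduce the required count
 * by at most one, which is why dropping a single outstanding extent below
 * is always enough.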
1920 */ 1921 old_size = other->end - other->start + 1; 1922 num_extents = count_max_extents(old_size); 1923 old_size = new->end - new->start + 1; 1924 num_extents += count_max_extents(old_size); 1925 if (count_max_extents(new_size) >= num_extents) 1926 return; 1927 1928 spin_lock(&BTRFS_I(inode)->lock); 1929 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 1930 spin_unlock(&BTRFS_I(inode)->lock); 1931 } 1932 1933 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 1934 struct inode *inode) 1935 { 1936 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1937 1938 spin_lock(&root->delalloc_lock); 1939 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1940 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1941 &root->delalloc_inodes); 1942 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1943 &BTRFS_I(inode)->runtime_flags); 1944 root->nr_delalloc_inodes++; 1945 if (root->nr_delalloc_inodes == 1) { 1946 spin_lock(&fs_info->delalloc_root_lock); 1947 BUG_ON(!list_empty(&root->delalloc_root)); 1948 list_add_tail(&root->delalloc_root, 1949 &fs_info->delalloc_roots); 1950 spin_unlock(&fs_info->delalloc_root_lock); 1951 } 1952 } 1953 spin_unlock(&root->delalloc_lock); 1954 } 1955 1956 1957 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 1958 struct btrfs_inode *inode) 1959 { 1960 struct btrfs_fs_info *fs_info = root->fs_info; 1961 1962 if (!list_empty(&inode->delalloc_inodes)) { 1963 list_del_init(&inode->delalloc_inodes); 1964 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1965 &inode->runtime_flags); 1966 root->nr_delalloc_inodes--; 1967 if (!root->nr_delalloc_inodes) { 1968 ASSERT(list_empty(&root->delalloc_inodes)); 1969 spin_lock(&fs_info->delalloc_root_lock); 1970 BUG_ON(list_empty(&root->delalloc_root)); 1971 list_del_init(&root->delalloc_root); 1972 spin_unlock(&fs_info->delalloc_root_lock); 1973 } 1974 } 1975 } 1976 1977 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 1978 struct btrfs_inode *inode) 1979 { 1980 spin_lock(&root->delalloc_lock); 1981 __btrfs_del_delalloc_inode(root, inode); 1982 spin_unlock(&root->delalloc_lock); 1983 } 1984 1985 /* 1986 * Properly track delayed allocation bytes in the inode and maintain the 1987 * list of inodes that have pending delalloc work to be done.
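 * This is the hook that runs when the EXTENT_DELALLOC bit is set on a
 * range in the inode's io_tree.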
1988 */ 1989 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, 1990 unsigned *bits) 1991 { 1992 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1993 1994 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) 1995 WARN_ON(1); 1996 /* 1997 * set_bit and clear bit hooks normally require _irqsave/restore 1998 * but in this case, we are only testing for the DELALLOC 1999 * bit, which is only set or cleared with irqs on 2000 */ 2001 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 2002 struct btrfs_root *root = BTRFS_I(inode)->root; 2003 u64 len = state->end + 1 - state->start; 2004 u32 num_extents = count_max_extents(len); 2005 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); 2006 2007 spin_lock(&BTRFS_I(inode)->lock); 2008 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents); 2009 spin_unlock(&BTRFS_I(inode)->lock); 2010 2011 /* For sanity tests */ 2012 if (btrfs_is_testing(fs_info)) 2013 return; 2014 2015 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2016 fs_info->delalloc_batch); 2017 spin_lock(&BTRFS_I(inode)->lock); 2018 BTRFS_I(inode)->delalloc_bytes += len; 2019 if (*bits & EXTENT_DEFRAG) 2020 BTRFS_I(inode)->defrag_bytes += len; 2021 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2022 &BTRFS_I(inode)->runtime_flags)) 2023 btrfs_add_delalloc_inodes(root, inode); 2024 spin_unlock(&BTRFS_I(inode)->lock); 2025 } 2026 2027 if (!(state->state & EXTENT_DELALLOC_NEW) && 2028 (*bits & EXTENT_DELALLOC_NEW)) { 2029 spin_lock(&BTRFS_I(inode)->lock); 2030 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - 2031 state->start; 2032 spin_unlock(&BTRFS_I(inode)->lock); 2033 } 2034 } 2035 2036 /* 2037 * Once a range is no longer delalloc this function ensures that proper 2038 * accounting happens. 2039 */ 2040 void btrfs_clear_delalloc_extent(struct inode *vfs_inode, 2041 struct extent_state *state, unsigned *bits) 2042 { 2043 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 2044 struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb); 2045 u64 len = state->end + 1 - state->start; 2046 u32 num_extents = count_max_extents(len); 2047 2048 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { 2049 spin_lock(&inode->lock); 2050 inode->defrag_bytes -= len; 2051 spin_unlock(&inode->lock); 2052 } 2053 2054 /* 2055 * set_bit and clear bit hooks normally require _irqsave/restore 2056 * but in this case, we are only testing for the DELALLOC 2057 * bit, which is only set or cleared with irqs on 2058 */ 2059 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 2060 struct btrfs_root *root = inode->root; 2061 bool do_list = !btrfs_is_free_space_inode(inode); 2062 2063 spin_lock(&inode->lock); 2064 btrfs_mod_outstanding_extents(inode, -num_extents); 2065 spin_unlock(&inode->lock); 2066 2067 /* 2068 * We don't reserve metadata space for space cache inodes so we 2069 * don't need to call delalloc_release_metadata if there is an 2070 * error. 2071 */ 2072 if (*bits & EXTENT_CLEAR_META_RESV && 2073 root != fs_info->tree_root) 2074 btrfs_delalloc_release_metadata(inode, len, false); 2075 2076 /* For sanity tests. 
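 * The dummy fs_info used by the sanity tests does not have the delalloc
 * counters set up, so bail out before touching them.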
*/ 2077 if (btrfs_is_testing(fs_info)) 2078 return; 2079 2080 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && 2081 do_list && !(state->state & EXTENT_NORESERVE) && 2082 (*bits & EXTENT_CLEAR_DATA_RESV)) 2083 btrfs_free_reserved_data_space_noquota(fs_info, len); 2084 2085 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2086 fs_info->delalloc_batch); 2087 spin_lock(&inode->lock); 2088 inode->delalloc_bytes -= len; 2089 if (do_list && inode->delalloc_bytes == 0 && 2090 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2091 &inode->runtime_flags)) 2092 btrfs_del_delalloc_inode(root, inode); 2093 spin_unlock(&inode->lock); 2094 } 2095 2096 if ((state->state & EXTENT_DELALLOC_NEW) && 2097 (*bits & EXTENT_DELALLOC_NEW)) { 2098 spin_lock(&inode->lock); 2099 ASSERT(inode->new_delalloc_bytes >= len); 2100 inode->new_delalloc_bytes -= len; 2101 spin_unlock(&inode->lock); 2102 } 2103 } 2104 2105 /* 2106 * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit 2107 * in a chunk's stripe. This function ensures that bios do not span a 2108 * stripe/chunk 2109 * 2110 * @page - The page we are about to add to the bio 2111 * @size - size we want to add to the bio 2112 * @bio - bio we want to ensure is smaller than a stripe 2113 * @bio_flags - flags of the bio 2114 * 2115 * return 1 if page cannot be added to the bio 2116 * return 0 if page can be added to the bio 2117 * return error otherwise 2118 */ 2119 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio, 2120 unsigned long bio_flags) 2121 { 2122 struct inode *inode = page->mapping->host; 2123 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2124 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 2125 u64 length = 0; 2126 u64 map_length; 2127 int ret; 2128 struct btrfs_io_geometry geom; 2129 2130 if (bio_flags & EXTENT_BIO_COMPRESSED) 2131 return 0; 2132 2133 length = bio->bi_iter.bi_size; 2134 map_length = length; 2135 ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length, 2136 &geom); 2137 if (ret < 0) 2138 return ret; 2139 2140 if (geom.len < length + size) 2141 return 1; 2142 return 0; 2143 } 2144 2145 /* 2146 * in order to insert checksums into the metadata in large chunks, 2147 * we wait until bio submission time. All the pages in the bio are 2148 * checksummed and sums are attached onto the ordered extent record. 2149 * 2150 * At IO completion time the csums attached on the ordered extent record 2151 * are inserted into the btree 2152 */ 2153 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio, 2154 u64 bio_offset) 2155 { 2156 struct inode *inode = private_data; 2157 2158 return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); 2159 } 2160 2161 /* 2162 * extent_io.c submission hook. This does the right thing for csum calculation 2163 * on write, or reading the csums from the tree before a read.
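 * Compressed reads are handed off to btrfs_submit_compressed_read()
 * instead of being mapped directly.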
2164 * 2165 * Rules about async/sync submit, 2166 * a) read: sync submit 2167 * 2168 * b) write without checksum: sync submit 2169 * 2170 * c) write with checksum: 2171 * c-1) if bio is issued by fsync: sync submit 2172 * (sync_writers != 0) 2173 * 2174 * c-2) if root is reloc root: sync submit 2175 * (only in case of buffered IO) 2176 * 2177 * c-3) otherwise: async submit 2178 */ 2179 blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio, 2180 int mirror_num, unsigned long bio_flags) 2181 2182 { 2183 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2184 struct btrfs_root *root = BTRFS_I(inode)->root; 2185 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; 2186 blk_status_t ret = 0; 2187 int skip_sum; 2188 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 2189 2190 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 2191 2192 if (btrfs_is_free_space_inode(BTRFS_I(inode))) 2193 metadata = BTRFS_WQ_ENDIO_FREE_SPACE; 2194 2195 if (bio_op(bio) != REQ_OP_WRITE) { 2196 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata); 2197 if (ret) 2198 goto out; 2199 2200 if (bio_flags & EXTENT_BIO_COMPRESSED) { 2201 ret = btrfs_submit_compressed_read(inode, bio, 2202 mirror_num, 2203 bio_flags); 2204 goto out; 2205 } else if (!skip_sum) { 2206 ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL); 2207 if (ret) 2208 goto out; 2209 } 2210 goto mapit; 2211 } else if (async && !skip_sum) { 2212 /* csum items have already been cloned */ 2213 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 2214 goto mapit; 2215 /* we're doing a write, do the async checksumming */ 2216 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags, 2217 0, inode, btrfs_submit_bio_start); 2218 goto out; 2219 } else if (!skip_sum) { 2220 ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); 2221 if (ret) 2222 goto out; 2223 } 2224 2225 mapit: 2226 ret = btrfs_map_bio(fs_info, bio, mirror_num); 2227 2228 out: 2229 if (ret) { 2230 bio->bi_status = ret; 2231 bio_endio(bio); 2232 } 2233 return ret; 2234 } 2235 2236 /* 2237 * given a list of ordered sums record them in the inode. This happens 2238 * at IO completion time based on sums calculated at bio submission time. 
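 * A failure inserting any one of the sums stops the walk and is returned
 * to the caller, which then aborts the transaction.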
2239 */ 2240 static int add_pending_csums(struct btrfs_trans_handle *trans, 2241 struct list_head *list) 2242 { 2243 struct btrfs_ordered_sum *sum; 2244 int ret; 2245 2246 list_for_each_entry(sum, list, list) { 2247 trans->adding_csums = true; 2248 ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum); 2249 trans->adding_csums = false; 2250 if (ret) 2251 return ret; 2252 } 2253 return 0; 2254 } 2255 2256 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2257 const u64 start, 2258 const u64 len, 2259 struct extent_state **cached_state) 2260 { 2261 u64 search_start = start; 2262 const u64 end = start + len - 1; 2263 2264 while (search_start < end) { 2265 const u64 search_len = end - search_start + 1; 2266 struct extent_map *em; 2267 u64 em_len; 2268 int ret = 0; 2269 2270 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2271 if (IS_ERR(em)) 2272 return PTR_ERR(em); 2273 2274 if (em->block_start != EXTENT_MAP_HOLE) 2275 goto next; 2276 2277 em_len = em->len; 2278 if (em->start < search_start) 2279 em_len -= search_start - em->start; 2280 if (em_len > search_len) 2281 em_len = search_len; 2282 2283 ret = set_extent_bit(&inode->io_tree, search_start, 2284 search_start + em_len - 1, 2285 EXTENT_DELALLOC_NEW, 2286 NULL, cached_state, GFP_NOFS); 2287 next: 2288 search_start = extent_map_end(em); 2289 free_extent_map(em); 2290 if (ret) 2291 return ret; 2292 } 2293 return 0; 2294 } 2295 2296 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2297 unsigned int extra_bits, 2298 struct extent_state **cached_state) 2299 { 2300 WARN_ON(PAGE_ALIGNED(end)); 2301 2302 if (start >= i_size_read(&inode->vfs_inode) && 2303 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2304 /* 2305 * There can't be any extents following eof in this case so just 2306 * set the delalloc new bit for the range directly. 2307 */ 2308 extra_bits |= EXTENT_DELALLOC_NEW; 2309 } else { 2310 int ret; 2311 2312 ret = btrfs_find_new_delalloc_bytes(inode, start, 2313 end + 1 - start, 2314 cached_state); 2315 if (ret) 2316 return ret; 2317 } 2318 2319 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, 2320 cached_state); 2321 } 2322 2323 /* see btrfs_writepage_start_hook for details on why this is required */ 2324 struct btrfs_writepage_fixup { 2325 struct page *page; 2326 struct inode *inode; 2327 struct btrfs_work work; 2328 }; 2329 2330 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2331 { 2332 struct btrfs_writepage_fixup *fixup; 2333 struct btrfs_ordered_extent *ordered; 2334 struct extent_state *cached_state = NULL; 2335 struct extent_changeset *data_reserved = NULL; 2336 struct page *page; 2337 struct btrfs_inode *inode; 2338 u64 page_start; 2339 u64 page_end; 2340 int ret = 0; 2341 bool free_delalloc_space = true; 2342 2343 fixup = container_of(work, struct btrfs_writepage_fixup, work); 2344 page = fixup->page; 2345 inode = BTRFS_I(fixup->inode); 2346 page_start = page_offset(page); 2347 page_end = page_offset(page) + PAGE_SIZE - 1; 2348 2349 /* 2350 * This is similar to page_mkwrite, we need to reserve the space before 2351 * we take the page lock. 2352 */ 2353 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2354 PAGE_SIZE); 2355 again: 2356 lock_page(page); 2357 2358 /* 2359 * Before we queued this fixup, we took a reference on the page. 2360 * page->mapping may go NULL, but it shouldn't be moved to a different 2361 * address space. 
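 * (We still hold our own reference on the page, taken when the fixup work
 * was queued, so the page itself cannot be freed.)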
2362 */ 2363 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2364 /* 2365 * Unfortunately this is a little tricky, either 2366 * 2367 * 1) We got here and our page had already been dealt with and 2368 * we reserved our space, thus ret == 0, so we need to just 2369 * drop our space reservation and bail. This can happen the 2370 * first time we come into the fixup worker, or could happen 2371 * while waiting for the ordered extent. 2372 * 2) Our page was already dealt with, but we happened to get an 2373 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2374 * this case we obviously don't have anything to release, but 2375 * because the page was already dealt with we don't want to 2376 * mark the page with an error, so make sure we're resetting 2377 * ret to 0. This is why we have this check _before_ the ret 2378 * check, because we do not want to have a surprise ENOSPC 2379 * when the page was already properly dealt with. 2380 */ 2381 if (!ret) { 2382 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2383 btrfs_delalloc_release_space(inode, data_reserved, 2384 page_start, PAGE_SIZE, 2385 true); 2386 } 2387 ret = 0; 2388 goto out_page; 2389 } 2390 2391 /* 2392 * We can't mess with the page state unless it is locked, so now that 2393 * it is locked bail if we failed to make our space reservation. 2394 */ 2395 if (ret) 2396 goto out_page; 2397 2398 lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state); 2399 2400 /* already ordered? We're done */ 2401 if (PagePrivate2(page)) 2402 goto out_reserved; 2403 2404 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2405 if (ordered) { 2406 unlock_extent_cached(&inode->io_tree, page_start, page_end, 2407 &cached_state); 2408 unlock_page(page); 2409 btrfs_start_ordered_extent(ordered, 1); 2410 btrfs_put_ordered_extent(ordered); 2411 goto again; 2412 } 2413 2414 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2415 &cached_state); 2416 if (ret) 2417 goto out_reserved; 2418 2419 /* 2420 * Everything went as planned, we're now the owner of a dirty page with 2421 * delayed allocation bits set and space reserved for our COW 2422 * destination. 2423 * 2424 * The page was dirty when we started, nothing should have cleaned it. 2425 */ 2426 BUG_ON(!PageDirty(page)); 2427 free_delalloc_space = false; 2428 out_reserved: 2429 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2430 if (free_delalloc_space) 2431 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2432 PAGE_SIZE, true); 2433 unlock_extent_cached(&inode->io_tree, page_start, page_end, 2434 &cached_state); 2435 out_page: 2436 if (ret) { 2437 /* 2438 * We hit ENOSPC or other errors. Update the mapping and page 2439 * to reflect the errors and clean the page. 2440 */ 2441 mapping_set_error(page->mapping, ret); 2442 end_extent_writepage(page, ret, page_start, page_end); 2443 clear_page_dirty_for_io(page); 2444 SetPageError(page); 2445 } 2446 ClearPageChecked(page); 2447 unlock_page(page); 2448 put_page(page); 2449 kfree(fixup); 2450 extent_changeset_free(data_reserved); 2451 /* 2452 * As a precaution, do a delayed iput in case it would be the last iput 2453 * that could need flushing space. Recursing back to fixup worker would 2454 * deadlock. 2455 */ 2456 btrfs_add_delayed_iput(&inode->vfs_inode); 2457 } 2458 2459 /* 2460 * There are a few paths in the higher layers of the kernel that directly 2461 * set the page dirty bit without asking the filesystem if it is a 2462 * good idea. 
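 * (A typical example is a get_user_pages() user marking the page dirty
 * after a DMA transfer into it completes.)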
This causes problems because we want to make sure COW 2463 * properly happens and the data=ordered rules are followed. 2464 * 2465 * In our case any range that doesn't have the ORDERED bit set 2466 * hasn't been properly setup for IO. We kick off an async process 2467 * to fix it up. The async helper will wait for ordered extents, set 2468 * the delalloc bit and make it safe to write the page. 2469 */ 2470 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) 2471 { 2472 struct inode *inode = page->mapping->host; 2473 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2474 struct btrfs_writepage_fixup *fixup; 2475 2476 /* this page is properly in the ordered list */ 2477 if (TestClearPagePrivate2(page)) 2478 return 0; 2479 2480 /* 2481 * PageChecked is set below when we create a fixup worker for this page, 2482 * don't try to create another one if we're already PageChecked() 2483 * 2484 * The extent_io writepage code will redirty the page if we send back 2485 * EAGAIN. 2486 */ 2487 if (PageChecked(page)) 2488 return -EAGAIN; 2489 2490 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2491 if (!fixup) 2492 return -EAGAIN; 2493 2494 /* 2495 * We are already holding a reference to this inode from 2496 * write_cache_pages. We need to hold it because the space reservation 2497 * takes place outside of the page lock, and we can't trust 2498 * page->mapping outside of the page lock. 2499 */ 2500 ihold(inode); 2501 SetPageChecked(page); 2502 get_page(page); 2503 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 2504 fixup->page = page; 2505 fixup->inode = inode; 2506 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2507 2508 return -EAGAIN; 2509 } 2510 2511 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2512 struct btrfs_inode *inode, u64 file_pos, 2513 struct btrfs_file_extent_item *stack_fi, 2514 u64 qgroup_reserved) 2515 { 2516 struct btrfs_root *root = inode->root; 2517 struct btrfs_path *path; 2518 struct extent_buffer *leaf; 2519 struct btrfs_key ins; 2520 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2521 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2522 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2523 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2524 int extent_inserted = 0; 2525 int ret; 2526 2527 path = btrfs_alloc_path(); 2528 if (!path) 2529 return -ENOMEM; 2530 2531 /* 2532 * we may be replacing one extent in the tree with another. 2533 * The new extent is pinned in the extent map, and we don't want 2534 * to drop it from the cache until it is completely in the btree. 2535 * 2536 * So, tell btrfs_drop_extents to leave this extent in the cache. 2537 * the caller is expected to unpin it and allow it to be merged 2538 * with the others. 
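 * That unpin is done by btrfs_finish_ordered_io(), which calls
 * unpin_extent_cache() once the file extent item is safely in the tree.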
2539 */ 2540 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, 2541 file_pos + num_bytes, NULL, 0, 2542 1, sizeof(*stack_fi), &extent_inserted); 2543 if (ret) 2544 goto out; 2545 2546 if (!extent_inserted) { 2547 ins.objectid = btrfs_ino(inode); 2548 ins.offset = file_pos; 2549 ins.type = BTRFS_EXTENT_DATA_KEY; 2550 2551 path->leave_spinning = 1; 2552 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2553 sizeof(*stack_fi)); 2554 if (ret) 2555 goto out; 2556 } 2557 leaf = path->nodes[0]; 2558 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2559 write_extent_buffer(leaf, stack_fi, 2560 btrfs_item_ptr_offset(leaf, path->slots[0]), 2561 sizeof(struct btrfs_file_extent_item)); 2562 2563 btrfs_mark_buffer_dirty(leaf); 2564 btrfs_release_path(path); 2565 2566 inode_add_bytes(&inode->vfs_inode, num_bytes); 2567 2568 ins.objectid = disk_bytenr; 2569 ins.offset = disk_num_bytes; 2570 ins.type = BTRFS_EXTENT_ITEM_KEY; 2571 2572 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2573 if (ret) 2574 goto out; 2575 2576 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 2577 file_pos, qgroup_reserved, &ins); 2578 out: 2579 btrfs_free_path(path); 2580 2581 return ret; 2582 } 2583 2584 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 2585 u64 start, u64 len) 2586 { 2587 struct btrfs_block_group *cache; 2588 2589 cache = btrfs_lookup_block_group(fs_info, start); 2590 ASSERT(cache); 2591 2592 spin_lock(&cache->lock); 2593 cache->delalloc_bytes -= len; 2594 spin_unlock(&cache->lock); 2595 2596 btrfs_put_block_group(cache); 2597 } 2598 2599 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 2600 struct btrfs_ordered_extent *oe) 2601 { 2602 struct btrfs_file_extent_item stack_fi; 2603 u64 logical_len; 2604 2605 memset(&stack_fi, 0, sizeof(stack_fi)); 2606 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 2607 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 2608 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 2609 oe->disk_num_bytes); 2610 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) 2611 logical_len = oe->truncated_len; 2612 else 2613 logical_len = oe->num_bytes; 2614 btrfs_set_stack_file_extent_num_bytes(&stack_fi, logical_len); 2615 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, logical_len); 2616 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 2617 /* Encryption and other encoding is reserved and all 0 */ 2618 2619 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 2620 oe->file_offset, &stack_fi, 2621 oe->qgroup_rsv); 2622 } 2623 2624 /* 2625 * As ordered data IO finishes, this gets called so we can finish 2626 * an ordered extent if the range of bytes in the file it covers are 2627 * fully written. 
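 * This is where the file extent item is inserted (or the prealloc extent
 * marked written), the pending checksums are added and the on-disk i_size
 * is updated.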
2628 */ 2629 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 2630 { 2631 struct inode *inode = ordered_extent->inode; 2632 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2633 struct btrfs_root *root = BTRFS_I(inode)->root; 2634 struct btrfs_trans_handle *trans = NULL; 2635 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2636 struct extent_state *cached_state = NULL; 2637 u64 start, end; 2638 int compress_type = 0; 2639 int ret = 0; 2640 u64 logical_len = ordered_extent->num_bytes; 2641 bool freespace_inode; 2642 bool truncated = false; 2643 bool range_locked = false; 2644 bool clear_new_delalloc_bytes = false; 2645 bool clear_reserved_extent = true; 2646 unsigned int clear_bits; 2647 2648 start = ordered_extent->file_offset; 2649 end = start + ordered_extent->num_bytes - 1; 2650 2651 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2652 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 2653 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) 2654 clear_new_delalloc_bytes = true; 2655 2656 freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); 2657 2658 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 2659 ret = -EIO; 2660 goto out; 2661 } 2662 2663 btrfs_free_io_failure_record(BTRFS_I(inode), start, end); 2664 2665 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 2666 truncated = true; 2667 logical_len = ordered_extent->truncated_len; 2668 /* Truncated the entire extent, don't bother adding */ 2669 if (!logical_len) 2670 goto out; 2671 } 2672 2673 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 2674 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 2675 2676 btrfs_inode_safe_disk_i_size_write(inode, 0); 2677 if (freespace_inode) 2678 trans = btrfs_join_transaction_spacecache(root); 2679 else 2680 trans = btrfs_join_transaction(root); 2681 if (IS_ERR(trans)) { 2682 ret = PTR_ERR(trans); 2683 trans = NULL; 2684 goto out; 2685 } 2686 trans->block_rsv = &BTRFS_I(inode)->block_rsv; 2687 ret = btrfs_update_inode_fallback(trans, root, inode); 2688 if (ret) /* -ENOMEM or corruption */ 2689 btrfs_abort_transaction(trans, ret); 2690 goto out; 2691 } 2692 2693 range_locked = true; 2694 lock_extent_bits(io_tree, start, end, &cached_state); 2695 2696 if (freespace_inode) 2697 trans = btrfs_join_transaction_spacecache(root); 2698 else 2699 trans = btrfs_join_transaction(root); 2700 if (IS_ERR(trans)) { 2701 ret = PTR_ERR(trans); 2702 trans = NULL; 2703 goto out; 2704 } 2705 2706 trans->block_rsv = &BTRFS_I(inode)->block_rsv; 2707 2708 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 2709 compress_type = ordered_extent->compress_type; 2710 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 2711 BUG_ON(compress_type); 2712 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), 2713 ordered_extent->file_offset, 2714 ordered_extent->file_offset + 2715 logical_len); 2716 } else { 2717 BUG_ON(root == fs_info->tree_root); 2718 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 2719 if (!ret) { 2720 clear_reserved_extent = false; 2721 btrfs_release_delalloc_bytes(fs_info, 2722 ordered_extent->disk_bytenr, 2723 ordered_extent->disk_num_bytes); 2724 } 2725 } 2726 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2727 ordered_extent->file_offset, 2728 ordered_extent->num_bytes, trans->transid); 2729 if (ret < 0) { 2730 btrfs_abort_transaction(trans, ret); 2731 goto out; 2732 } 2733 2734 ret = add_pending_csums(trans, 
&ordered_extent->list); 2735 if (ret) { 2736 btrfs_abort_transaction(trans, ret); 2737 goto out; 2738 } 2739 2740 btrfs_inode_safe_disk_i_size_write(inode, 0); 2741 ret = btrfs_update_inode_fallback(trans, root, inode); 2742 if (ret) { /* -ENOMEM or corruption */ 2743 btrfs_abort_transaction(trans, ret); 2744 goto out; 2745 } 2746 ret = 0; 2747 out: 2748 clear_bits = EXTENT_DEFRAG; 2749 if (range_locked) 2750 clear_bits |= EXTENT_LOCKED; 2751 if (clear_new_delalloc_bytes) 2752 clear_bits |= EXTENT_DELALLOC_NEW; 2753 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 2754 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0, 2755 &cached_state); 2756 2757 if (trans) 2758 btrfs_end_transaction(trans); 2759 2760 if (ret || truncated) { 2761 u64 unwritten_start = start; 2762 2763 if (truncated) 2764 unwritten_start += logical_len; 2765 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 2766 2767 /* Drop the cache for the part of the extent we didn't write. */ 2768 btrfs_drop_extent_cache(BTRFS_I(inode), unwritten_start, end, 0); 2769 2770 /* 2771 * If the ordered extent had an IOERR or something else went 2772 * wrong we need to return the space for this ordered extent 2773 * back to the allocator. We only free the extent in the 2774 * truncated case if we didn't write out the extent at all. 2775 * 2776 * If we made it past insert_reserved_file_extent before we 2777 * errored out then we don't need to do this as the accounting 2778 * has already been done. 2779 */ 2780 if ((ret || !logical_len) && 2781 clear_reserved_extent && 2782 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2783 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 2784 /* 2785 * Discard the range before returning it back to the 2786 * free space pool 2787 */ 2788 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 2789 btrfs_discard_extent(fs_info, 2790 ordered_extent->disk_bytenr, 2791 ordered_extent->disk_num_bytes, 2792 NULL); 2793 btrfs_free_reserved_extent(fs_info, 2794 ordered_extent->disk_bytenr, 2795 ordered_extent->disk_num_bytes, 1); 2796 } 2797 } 2798 2799 /* 2800 * This needs to be done to make sure anybody waiting knows we are done 2801 * updating everything for this ordered extent. 
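 * (Waiters in btrfs_start_ordered_extent() block until the ordered extent
 * has been removed from the tree and flagged complete.)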
2802 */ 2803 btrfs_remove_ordered_extent(BTRFS_I(inode), ordered_extent); 2804 2805 /* once for us */ 2806 btrfs_put_ordered_extent(ordered_extent); 2807 /* once for the tree */ 2808 btrfs_put_ordered_extent(ordered_extent); 2809 2810 return ret; 2811 } 2812 2813 static void finish_ordered_fn(struct btrfs_work *work) 2814 { 2815 struct btrfs_ordered_extent *ordered_extent; 2816 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 2817 btrfs_finish_ordered_io(ordered_extent); 2818 } 2819 2820 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, 2821 u64 end, int uptodate) 2822 { 2823 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); 2824 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2825 struct btrfs_ordered_extent *ordered_extent = NULL; 2826 struct btrfs_workqueue *wq; 2827 2828 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 2829 2830 ClearPagePrivate2(page); 2831 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 2832 end - start + 1, uptodate)) 2833 return; 2834 2835 if (btrfs_is_free_space_inode(inode)) 2836 wq = fs_info->endio_freespace_worker; 2837 else 2838 wq = fs_info->endio_write_workers; 2839 2840 btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); 2841 btrfs_queue_work(wq, &ordered_extent->work); 2842 } 2843 2844 static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio, 2845 int icsum, struct page *page, int pgoff, u64 start, 2846 size_t len) 2847 { 2848 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2849 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 2850 char *kaddr; 2851 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 2852 u8 *csum_expected; 2853 u8 csum[BTRFS_CSUM_SIZE]; 2854 2855 csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size; 2856 2857 kaddr = kmap_atomic(page); 2858 shash->tfm = fs_info->csum_shash; 2859 2860 crypto_shash_digest(shash, kaddr + pgoff, len, csum); 2861 2862 if (memcmp(csum, csum_expected, csum_size)) 2863 goto zeroit; 2864 2865 kunmap_atomic(kaddr); 2866 return 0; 2867 zeroit: 2868 btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, 2869 io_bio->mirror_num); 2870 if (io_bio->device) 2871 btrfs_dev_stat_inc_and_print(io_bio->device, 2872 BTRFS_DEV_STAT_CORRUPTION_ERRS); 2873 memset(kaddr + pgoff, 1, len); 2874 flush_dcache_page(page); 2875 kunmap_atomic(kaddr); 2876 return -EIO; 2877 } 2878 2879 /* 2880 * when reads are done, we need to check csums to verify the data is correct 2881 * if there's a match, we allow the bio to finish. If not, the code in 2882 * extent_io.c will try to find good copies for us. 
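 * Returns 0 when the checksum matches or verification is skipped, and
 * -EIO on a mismatch, in which case the bad range is also memset so stale
 * data is never exposed to the reader.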
2883 */ 2884 int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u64 phy_offset, 2885 struct page *page, u64 start, u64 end, int mirror) 2886 { 2887 size_t offset = start - page_offset(page); 2888 struct inode *inode = page->mapping->host; 2889 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2890 struct btrfs_root *root = BTRFS_I(inode)->root; 2891 2892 if (PageChecked(page)) { 2893 ClearPageChecked(page); 2894 return 0; 2895 } 2896 2897 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 2898 return 0; 2899 2900 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 2901 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 2902 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM); 2903 return 0; 2904 } 2905 2906 phy_offset >>= inode->i_sb->s_blocksize_bits; 2907 return check_data_csum(inode, io_bio, phy_offset, page, offset, start, 2908 (size_t)(end - start + 1)); 2909 } 2910 2911 /* 2912 * btrfs_add_delayed_iput - perform a delayed iput on @inode 2913 * 2914 * @inode: The inode we want to perform iput on 2915 * 2916 * This function uses the generic vfs_inode::i_count to track whether we should 2917 * just decrement it (in case it's > 1) or if this is the last iput then link 2918 * the inode to the delayed iput machinery. Delayed iputs are processed at 2919 * transaction commit time/superblock commit/cleaner kthread. 2920 */ 2921 void btrfs_add_delayed_iput(struct inode *inode) 2922 { 2923 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2924 struct btrfs_inode *binode = BTRFS_I(inode); 2925 2926 if (atomic_add_unless(&inode->i_count, -1, 1)) 2927 return; 2928 2929 atomic_inc(&fs_info->nr_delayed_iputs); 2930 spin_lock(&fs_info->delayed_iput_lock); 2931 ASSERT(list_empty(&binode->delayed_iput)); 2932 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 2933 spin_unlock(&fs_info->delayed_iput_lock); 2934 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 2935 wake_up_process(fs_info->cleaner_kthread); 2936 } 2937 2938 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 2939 struct btrfs_inode *inode) 2940 { 2941 list_del_init(&inode->delayed_iput); 2942 spin_unlock(&fs_info->delayed_iput_lock); 2943 iput(&inode->vfs_inode); 2944 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 2945 wake_up(&fs_info->delayed_iputs_wait); 2946 spin_lock(&fs_info->delayed_iput_lock); 2947 } 2948 2949 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 2950 struct btrfs_inode *inode) 2951 { 2952 if (!list_empty(&inode->delayed_iput)) { 2953 spin_lock(&fs_info->delayed_iput_lock); 2954 if (!list_empty(&inode->delayed_iput)) 2955 run_delayed_iput_locked(fs_info, inode); 2956 spin_unlock(&fs_info->delayed_iput_lock); 2957 } 2958 } 2959 2960 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 2961 { 2962 2963 spin_lock(&fs_info->delayed_iput_lock); 2964 while (!list_empty(&fs_info->delayed_iputs)) { 2965 struct btrfs_inode *inode; 2966 2967 inode = list_first_entry(&fs_info->delayed_iputs, 2968 struct btrfs_inode, delayed_iput); 2969 run_delayed_iput_locked(fs_info, inode); 2970 } 2971 spin_unlock(&fs_info->delayed_iput_lock); 2972 } 2973 2974 /** 2975 * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running 2976 * @fs_info - the fs_info for this fs 2977 * @return - EINTR if we were killed, 0 if nothing's pending 2978 * 2979 * This will wait on any delayed iputs that are currently running with KILLABLE 2980 * set. 
Once they are all done running we will return, unless we are killed in 2981 * which case we return EINTR. This helps in user operations like fallocate etc 2982 * that might get blocked on the iputs. 2983 */ 2984 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 2985 { 2986 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 2987 atomic_read(&fs_info->nr_delayed_iputs) == 0); 2988 if (ret) 2989 return -EINTR; 2990 return 0; 2991 } 2992 2993 /* 2994 * This creates an orphan entry for the given inode in case something goes wrong 2995 * in the middle of an unlink. 2996 */ 2997 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 2998 struct btrfs_inode *inode) 2999 { 3000 int ret; 3001 3002 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3003 if (ret && ret != -EEXIST) { 3004 btrfs_abort_transaction(trans, ret); 3005 return ret; 3006 } 3007 3008 return 0; 3009 } 3010 3011 /* 3012 * We have done the delete so we can go ahead and remove the orphan item for 3013 * this particular inode. 3014 */ 3015 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3016 struct btrfs_inode *inode) 3017 { 3018 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3019 } 3020 3021 /* 3022 * this cleans up any orphans that may be left on the list from the last use 3023 * of this root. 3024 */ 3025 int btrfs_orphan_cleanup(struct btrfs_root *root) 3026 { 3027 struct btrfs_fs_info *fs_info = root->fs_info; 3028 struct btrfs_path *path; 3029 struct extent_buffer *leaf; 3030 struct btrfs_key key, found_key; 3031 struct btrfs_trans_handle *trans; 3032 struct inode *inode; 3033 u64 last_objectid = 0; 3034 int ret = 0, nr_unlink = 0; 3035 3036 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 3037 return 0; 3038 3039 path = btrfs_alloc_path(); 3040 if (!path) { 3041 ret = -ENOMEM; 3042 goto out; 3043 } 3044 path->reada = READA_BACK; 3045 3046 key.objectid = BTRFS_ORPHAN_OBJECTID; 3047 key.type = BTRFS_ORPHAN_ITEM_KEY; 3048 key.offset = (u64)-1; 3049 3050 while (1) { 3051 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3052 if (ret < 0) 3053 goto out; 3054 3055 /* 3056 * if ret == 0 means we found what we were searching for, which 3057 * is weird, but possible, so only screw with path if we didn't 3058 * find the key and see if we have stuff that matches 3059 */ 3060 if (ret > 0) { 3061 ret = 0; 3062 if (path->slots[0] == 0) 3063 break; 3064 path->slots[0]--; 3065 } 3066 3067 /* pull out the item */ 3068 leaf = path->nodes[0]; 3069 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3070 3071 /* make sure the item matches what we want */ 3072 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3073 break; 3074 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3075 break; 3076 3077 /* release the path since we're done with it */ 3078 btrfs_release_path(path); 3079 3080 /* 3081 * this is where we are basically btrfs_lookup, without the 3082 * crossing root thing. we store the inode number in the 3083 * offset of the orphan item. 
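 * If we see the same offset twice in a row, the previous orphan item was
 * never removed, so we bail out below instead of looping forever.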
3084 */ 3085 3086 if (found_key.offset == last_objectid) { 3087 btrfs_err(fs_info, 3088 "Error removing orphan entry, stopping orphan cleanup"); 3089 ret = -EINVAL; 3090 goto out; 3091 } 3092 3093 last_objectid = found_key.offset; 3094 3095 found_key.objectid = found_key.offset; 3096 found_key.type = BTRFS_INODE_ITEM_KEY; 3097 found_key.offset = 0; 3098 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3099 ret = PTR_ERR_OR_ZERO(inode); 3100 if (ret && ret != -ENOENT) 3101 goto out; 3102 3103 if (ret == -ENOENT && root == fs_info->tree_root) { 3104 struct btrfs_root *dead_root; 3105 int is_dead_root = 0; 3106 3107 /* 3108 * this is an orphan in the tree root. Currently these 3109 * could come from 2 sources: 3110 * a) a snapshot deletion in progress 3111 * b) a free space cache inode 3112 * We need to distinguish those two, as the snapshot 3113 * orphan must not get deleted. 3114 * find_dead_roots already ran before us, so if this 3115 * is a snapshot deletion, we should find the root 3116 * in the fs_roots radix tree. 3117 */ 3118 3119 spin_lock(&fs_info->fs_roots_radix_lock); 3120 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3121 (unsigned long)found_key.objectid); 3122 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3123 is_dead_root = 1; 3124 spin_unlock(&fs_info->fs_roots_radix_lock); 3125 3126 if (is_dead_root) { 3127 /* prevent this orphan from being found again */ 3128 key.offset = found_key.objectid - 1; 3129 continue; 3130 } 3131 3132 } 3133 3134 /* 3135 * If we have an inode with links, there are a couple of 3136 * possibilities. Old kernels (before v3.12) used to create an 3137 * orphan item for truncate indicating that there were possibly 3138 * extent items past i_size that needed to be deleted. In v3.12, 3139 * truncate was changed to update i_size in sync with the extent 3140 * items, but the (useless) orphan item was still created. Since 3141 * v4.18, we don't create the orphan item for truncate at all. 3142 * 3143 * So, this item could mean that we need to do a truncate, but 3144 * only if this filesystem was last used on a pre-v3.12 kernel 3145 * and was not cleanly unmounted. The odds of that are quite 3146 * slim, and it's a pain to do the truncate now, so just delete 3147 * the orphan item. 3148 * 3149 * It's also possible that this orphan item was supposed to be 3150 * deleted but wasn't. The inode number may have been reused, 3151 * but either way, we can delete the orphan item. 
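 * The ret == -ENOENT case, where the inode itself is already gone, is
 * handled by the same branch below.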
3152 */ 3153 if (ret == -ENOENT || inode->i_nlink) { 3154 if (!ret) 3155 iput(inode); 3156 trans = btrfs_start_transaction(root, 1); 3157 if (IS_ERR(trans)) { 3158 ret = PTR_ERR(trans); 3159 goto out; 3160 } 3161 btrfs_debug(fs_info, "auto deleting %Lu", 3162 found_key.objectid); 3163 ret = btrfs_del_orphan_item(trans, root, 3164 found_key.objectid); 3165 btrfs_end_transaction(trans); 3166 if (ret) 3167 goto out; 3168 continue; 3169 } 3170 3171 nr_unlink++; 3172 3173 /* this will do delete_inode and everything for us */ 3174 iput(inode); 3175 } 3176 /* release the path since we're done with it */ 3177 btrfs_release_path(path); 3178 3179 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 3180 3181 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3182 trans = btrfs_join_transaction(root); 3183 if (!IS_ERR(trans)) 3184 btrfs_end_transaction(trans); 3185 } 3186 3187 if (nr_unlink) 3188 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3189 3190 out: 3191 if (ret) 3192 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3193 btrfs_free_path(path); 3194 return ret; 3195 } 3196 3197 /* 3198 * very simple check to peek ahead in the leaf looking for xattrs. If we 3199 * don't find any xattrs, we know there can't be any acls. 3200 * 3201 * slot is the slot the inode is in, objectid is the objectid of the inode 3202 */ 3203 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3204 int slot, u64 objectid, 3205 int *first_xattr_slot) 3206 { 3207 u32 nritems = btrfs_header_nritems(leaf); 3208 struct btrfs_key found_key; 3209 static u64 xattr_access = 0; 3210 static u64 xattr_default = 0; 3211 int scanned = 0; 3212 3213 if (!xattr_access) { 3214 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3215 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3216 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3217 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3218 } 3219 3220 slot++; 3221 *first_xattr_slot = -1; 3222 while (slot < nritems) { 3223 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3224 3225 /* we found a different objectid, there must not be acls */ 3226 if (found_key.objectid != objectid) 3227 return 0; 3228 3229 /* we found an xattr, assume we've got an acl */ 3230 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3231 if (*first_xattr_slot == -1) 3232 *first_xattr_slot = slot; 3233 if (found_key.offset == xattr_access || 3234 found_key.offset == xattr_default) 3235 return 1; 3236 } 3237 3238 /* 3239 * we found a key greater than an xattr key, there can't 3240 * be any acls later on 3241 */ 3242 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3243 return 0; 3244 3245 slot++; 3246 scanned++; 3247 3248 /* 3249 * it goes inode, inode backrefs, xattrs, extents, 3250 * so if there are a ton of hard links to an inode there can 3251 * be a lot of backrefs. Don't waste time searching too hard, 3252 * this is just an optimization 3253 */ 3254 if (scanned >= 8) 3255 break; 3256 } 3257 /* we hit the end of the leaf before we found an xattr or 3258 * something larger than an xattr. 
We have to assume the inode 3259 * has acls 3260 */ 3261 if (*first_xattr_slot == -1) 3262 *first_xattr_slot = slot; 3263 return 1; 3264 } 3265 3266 /* 3267 * read an inode from the btree into the in-memory inode 3268 */ 3269 static int btrfs_read_locked_inode(struct inode *inode, 3270 struct btrfs_path *in_path) 3271 { 3272 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3273 struct btrfs_path *path = in_path; 3274 struct extent_buffer *leaf; 3275 struct btrfs_inode_item *inode_item; 3276 struct btrfs_root *root = BTRFS_I(inode)->root; 3277 struct btrfs_key location; 3278 unsigned long ptr; 3279 int maybe_acls; 3280 u32 rdev; 3281 int ret; 3282 bool filled = false; 3283 int first_xattr_slot; 3284 3285 ret = btrfs_fill_inode(inode, &rdev); 3286 if (!ret) 3287 filled = true; 3288 3289 if (!path) { 3290 path = btrfs_alloc_path(); 3291 if (!path) 3292 return -ENOMEM; 3293 } 3294 3295 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3296 3297 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3298 if (ret) { 3299 if (path != in_path) 3300 btrfs_free_path(path); 3301 return ret; 3302 } 3303 3304 leaf = path->nodes[0]; 3305 3306 if (filled) 3307 goto cache_index; 3308 3309 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3310 struct btrfs_inode_item); 3311 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3312 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3313 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3314 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3315 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3316 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3317 round_up(i_size_read(inode), fs_info->sectorsize)); 3318 3319 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3320 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3321 3322 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3323 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3324 3325 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3326 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3327 3328 BTRFS_I(inode)->i_otime.tv_sec = 3329 btrfs_timespec_sec(leaf, &inode_item->otime); 3330 BTRFS_I(inode)->i_otime.tv_nsec = 3331 btrfs_timespec_nsec(leaf, &inode_item->otime); 3332 3333 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3334 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3335 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3336 3337 inode_set_iversion_queried(inode, 3338 btrfs_inode_sequence(leaf, inode_item)); 3339 inode->i_generation = BTRFS_I(inode)->generation; 3340 inode->i_rdev = 0; 3341 rdev = btrfs_inode_rdev(leaf, inode_item); 3342 3343 BTRFS_I(inode)->index_cnt = (u64)-1; 3344 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 3345 3346 cache_index: 3347 /* 3348 * If we were modified in the current generation and evicted from memory 3349 * and then re-read we need to do a full sync since we don't have any 3350 * idea about which extents were modified before we were evicted from 3351 * cache. 3352 * 3353 * This is required for both inode re-read from disk and delayed inode 3354 * in delayed_nodes_tree. 
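 * The flag makes the next fsync of this inode log it in full instead of
 * incrementally.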
3355 */ 3356 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3357 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3358 &BTRFS_I(inode)->runtime_flags); 3359 3360 /* 3361 * We don't persist the id of the transaction where an unlink operation 3362 * against the inode was last made. So here we assume the inode might 3363 * have been evicted, and therefore the exact value of last_unlink_trans 3364 * lost, and set it to last_trans to avoid metadata inconsistencies 3365 * between the inode and its parent if the inode is fsync'ed and the log 3366 * replayed. For example, in the scenario: 3367 * 3368 * touch mydir/foo 3369 * ln mydir/foo mydir/bar 3370 * sync 3371 * unlink mydir/bar 3372 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3373 * xfs_io -c fsync mydir/foo 3374 * <power failure> 3375 * mount fs, triggers fsync log replay 3376 * 3377 * We must make sure that when we fsync our inode foo we also log its 3378 * parent inode, otherwise after log replay the parent still has the 3379 * dentry with the "bar" name but our inode foo has a link count of 1 3380 * and doesn't have an inode ref with the name "bar" anymore. 3381 * 3382 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3383 * but it guarantees correctness at the expense of occasional full 3384 * transaction commits on fsync if our inode is a directory, or if our 3385 * inode is not a directory, logging its parent unnecessarily. 3386 */ 3387 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3388 3389 /* 3390 * Same logic as for last_unlink_trans. We don't persist the generation 3391 * of the last transaction where this inode was used for a reflink 3392 * operation, so after eviction and reloading the inode we must be 3393 * pessimistic and assume the last transaction that modified the inode. 
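 * Being pessimistic like this may cause an fsync after a reflink to log
 * more than strictly necessary, but never less.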
3394 */ 3395 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3396 3397 path->slots[0]++; 3398 if (inode->i_nlink != 1 || 3399 path->slots[0] >= btrfs_header_nritems(leaf)) 3400 goto cache_acl; 3401 3402 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3403 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3404 goto cache_acl; 3405 3406 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3407 if (location.type == BTRFS_INODE_REF_KEY) { 3408 struct btrfs_inode_ref *ref; 3409 3410 ref = (struct btrfs_inode_ref *)ptr; 3411 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3412 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3413 struct btrfs_inode_extref *extref; 3414 3415 extref = (struct btrfs_inode_extref *)ptr; 3416 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3417 extref); 3418 } 3419 cache_acl: 3420 /* 3421 * try to precache a NULL acl entry for files that don't have 3422 * any xattrs or acls 3423 */ 3424 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3425 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3426 if (first_xattr_slot != -1) { 3427 path->slots[0] = first_xattr_slot; 3428 ret = btrfs_load_inode_props(inode, path); 3429 if (ret) 3430 btrfs_err(fs_info, 3431 "error loading props for ino %llu (root %llu): %d", 3432 btrfs_ino(BTRFS_I(inode)), 3433 root->root_key.objectid, ret); 3434 } 3435 if (path != in_path) 3436 btrfs_free_path(path); 3437 3438 if (!maybe_acls) 3439 cache_no_acl(inode); 3440 3441 switch (inode->i_mode & S_IFMT) { 3442 case S_IFREG: 3443 inode->i_mapping->a_ops = &btrfs_aops; 3444 inode->i_fop = &btrfs_file_operations; 3445 inode->i_op = &btrfs_file_inode_operations; 3446 break; 3447 case S_IFDIR: 3448 inode->i_fop = &btrfs_dir_file_operations; 3449 inode->i_op = &btrfs_dir_inode_operations; 3450 break; 3451 case S_IFLNK: 3452 inode->i_op = &btrfs_symlink_inode_operations; 3453 inode_nohighmem(inode); 3454 inode->i_mapping->a_ops = &btrfs_aops; 3455 break; 3456 default: 3457 inode->i_op = &btrfs_special_inode_operations; 3458 init_special_inode(inode, inode->i_mode, rdev); 3459 break; 3460 } 3461 3462 btrfs_sync_inode_flags_to_i_flags(inode); 3463 return 0; 3464 } 3465 3466 /* 3467 * given a leaf and an inode, copy the inode fields into the leaf 3468 */ 3469 static void fill_inode_item(struct btrfs_trans_handle *trans, 3470 struct extent_buffer *leaf, 3471 struct btrfs_inode_item *item, 3472 struct inode *inode) 3473 { 3474 struct btrfs_map_token token; 3475 3476 btrfs_init_map_token(&token, leaf); 3477 3478 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3479 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3480 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 3481 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3482 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3483 3484 btrfs_set_token_timespec_sec(&token, &item->atime, 3485 inode->i_atime.tv_sec); 3486 btrfs_set_token_timespec_nsec(&token, &item->atime, 3487 inode->i_atime.tv_nsec); 3488 3489 btrfs_set_token_timespec_sec(&token, &item->mtime, 3490 inode->i_mtime.tv_sec); 3491 btrfs_set_token_timespec_nsec(&token, &item->mtime, 3492 inode->i_mtime.tv_nsec); 3493 3494 btrfs_set_token_timespec_sec(&token, &item->ctime, 3495 inode->i_ctime.tv_sec); 3496 btrfs_set_token_timespec_nsec(&token, &item->ctime, 3497 inode->i_ctime.tv_nsec); 3498 3499 btrfs_set_token_timespec_sec(&token, &item->otime, 3500 BTRFS_I(inode)->i_otime.tv_sec); 3501 btrfs_set_token_timespec_nsec(&token, 
&item->otime, 3502 BTRFS_I(inode)->i_otime.tv_nsec); 3503 3504 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 3505 btrfs_set_token_inode_generation(&token, item, 3506 BTRFS_I(inode)->generation); 3507 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3508 btrfs_set_token_inode_transid(&token, item, trans->transid); 3509 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3510 btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); 3511 btrfs_set_token_inode_block_group(&token, item, 0); 3512 } 3513 3514 /* 3515 * copy everything in the in-memory inode into the btree. 3516 */ 3517 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3518 struct btrfs_root *root, struct inode *inode) 3519 { 3520 struct btrfs_inode_item *inode_item; 3521 struct btrfs_path *path; 3522 struct extent_buffer *leaf; 3523 int ret; 3524 3525 path = btrfs_alloc_path(); 3526 if (!path) 3527 return -ENOMEM; 3528 3529 path->leave_spinning = 1; 3530 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 3531 1); 3532 if (ret) { 3533 if (ret > 0) 3534 ret = -ENOENT; 3535 goto failed; 3536 } 3537 3538 leaf = path->nodes[0]; 3539 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3540 struct btrfs_inode_item); 3541 3542 fill_inode_item(trans, leaf, inode_item, inode); 3543 btrfs_mark_buffer_dirty(leaf); 3544 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 3545 ret = 0; 3546 failed: 3547 btrfs_free_path(path); 3548 return ret; 3549 } 3550 3551 /* 3552 * copy everything in the in-memory inode into the btree. 3553 */ 3554 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3555 struct btrfs_root *root, struct inode *inode) 3556 { 3557 struct btrfs_fs_info *fs_info = root->fs_info; 3558 int ret; 3559 3560 /* 3561 * If the inode is a free space inode, we can deadlock during commit 3562 * if we put it into the delayed code. 3563 * 3564 * The data relocation inode should also be directly updated 3565 * without delay 3566 */ 3567 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) 3568 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3569 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 3570 btrfs_update_root_times(trans, root); 3571 3572 ret = btrfs_delayed_update_inode(trans, root, inode); 3573 if (!ret) 3574 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 3575 return ret; 3576 } 3577 3578 return btrfs_update_inode_item(trans, root, inode); 3579 } 3580 3581 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3582 struct btrfs_root *root, 3583 struct inode *inode) 3584 { 3585 int ret; 3586 3587 ret = btrfs_update_inode(trans, root, inode); 3588 if (ret == -ENOSPC) 3589 return btrfs_update_inode_item(trans, root, inode); 3590 return ret; 3591 } 3592 3593 /* 3594 * unlink helper that gets used here in inode.c and in the tree logging 3595 * recovery code. 
It removes a link in a directory with a given name, and
3596 * also drops the back refs in the inode to the directory
3597 */
3598 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3599 struct btrfs_root *root,
3600 struct btrfs_inode *dir,
3601 struct btrfs_inode *inode,
3602 const char *name, int name_len)
3603 {
3604 struct btrfs_fs_info *fs_info = root->fs_info;
3605 struct btrfs_path *path;
3606 int ret = 0;
3607 struct btrfs_dir_item *di;
3608 u64 index;
3609 u64 ino = btrfs_ino(inode);
3610 u64 dir_ino = btrfs_ino(dir);
3611
3612 path = btrfs_alloc_path();
3613 if (!path) {
3614 ret = -ENOMEM;
3615 goto out;
3616 }
3617
3618 path->leave_spinning = 1;
3619 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3620 name, name_len, -1);
3621 if (IS_ERR_OR_NULL(di)) {
3622 ret = di ? PTR_ERR(di) : -ENOENT;
3623 goto err;
3624 }
3625 ret = btrfs_delete_one_dir_name(trans, root, path, di);
3626 if (ret)
3627 goto err;
3628 btrfs_release_path(path);
3629
3630 /*
3631 * If we don't have the dir index, we have to get it by looking up
3632 * the inode ref; since that lookup fetches the inode ref anyway,
3633 * we remove the ref directly and a delayed deletion is unnecessary.
3634 *
3635 * But if we do have the dir index, there is no need to search for
3636 * the inode ref. Since the inode ref is close to the inode item,
3637 * it is better to delay its deletion and do it when we update the
3638 * inode item.
3639 */
3640 if (inode->dir_index) {
3641 ret = btrfs_delayed_delete_inode_ref(inode);
3642 if (!ret) {
3643 index = inode->dir_index;
3644 goto skip_backref;
3645 }
3646 }
3647
3648 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3649 dir_ino, &index);
3650 if (ret) {
3651 btrfs_info(fs_info,
3652 "failed to delete reference to %.*s, inode %llu parent %llu",
3653 name_len, name, ino, dir_ino);
3654 btrfs_abort_transaction(trans, ret);
3655 goto err;
3656 }
3657 skip_backref:
3658 ret = btrfs_delete_delayed_dir_index(trans, dir, index);
3659 if (ret) {
3660 btrfs_abort_transaction(trans, ret);
3661 goto err;
3662 }
3663
3664 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3665 dir_ino);
3666 if (ret != 0 && ret != -ENOENT) {
3667 btrfs_abort_transaction(trans, ret);
3668 goto err;
3669 }
3670
3671 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3672 index);
3673 if (ret == -ENOENT)
3674 ret = 0;
3675 else if (ret)
3676 btrfs_abort_transaction(trans, ret);
3677
3678 /*
3679 * If we have a pending delayed iput we could end up with the final iput
3680 * being run in btrfs-cleaner context. If we have enough of these built
3681 * up we can end up burning a lot of time in btrfs-cleaner without any
3682 * way to throttle the unlinks. Since we're currently holding a ref on
3683 * the inode we can run the delayed iput here without any issues as the
3684 * final iput won't be done until after we drop the ref we're currently
3685 * holding.
3686 */ 3687 btrfs_run_delayed_iput(fs_info, inode); 3688 err: 3689 btrfs_free_path(path); 3690 if (ret) 3691 goto out; 3692 3693 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); 3694 inode_inc_iversion(&inode->vfs_inode); 3695 inode_inc_iversion(&dir->vfs_inode); 3696 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = 3697 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 3698 ret = btrfs_update_inode(trans, root, &dir->vfs_inode); 3699 out: 3700 return ret; 3701 } 3702 3703 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3704 struct btrfs_root *root, 3705 struct btrfs_inode *dir, struct btrfs_inode *inode, 3706 const char *name, int name_len) 3707 { 3708 int ret; 3709 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 3710 if (!ret) { 3711 drop_nlink(&inode->vfs_inode); 3712 ret = btrfs_update_inode(trans, root, &inode->vfs_inode); 3713 } 3714 return ret; 3715 } 3716 3717 /* 3718 * helper to start transaction for unlink and rmdir. 3719 * 3720 * unlink and rmdir are special in btrfs, they do not always free space, so 3721 * if we cannot make our reservations the normal way try and see if there is 3722 * plenty of slack room in the global reserve to migrate, otherwise we cannot 3723 * allow the unlink to occur. 3724 */ 3725 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 3726 { 3727 struct btrfs_root *root = BTRFS_I(dir)->root; 3728 3729 /* 3730 * 1 for the possible orphan item 3731 * 1 for the dir item 3732 * 1 for the dir index 3733 * 1 for the inode ref 3734 * 1 for the inode 3735 */ 3736 return btrfs_start_transaction_fallback_global_rsv(root, 5); 3737 } 3738 3739 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 3740 { 3741 struct btrfs_root *root = BTRFS_I(dir)->root; 3742 struct btrfs_trans_handle *trans; 3743 struct inode *inode = d_inode(dentry); 3744 int ret; 3745 3746 trans = __unlink_start_trans(dir); 3747 if (IS_ERR(trans)) 3748 return PTR_ERR(trans); 3749 3750 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 3751 0); 3752 3753 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 3754 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 3755 dentry->d_name.len); 3756 if (ret) 3757 goto out; 3758 3759 if (inode->i_nlink == 0) { 3760 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 3761 if (ret) 3762 goto out; 3763 } 3764 3765 out: 3766 btrfs_end_transaction(trans); 3767 btrfs_btree_balance_dirty(root->fs_info); 3768 return ret; 3769 } 3770 3771 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3772 struct inode *dir, struct dentry *dentry) 3773 { 3774 struct btrfs_root *root = BTRFS_I(dir)->root; 3775 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 3776 struct btrfs_path *path; 3777 struct extent_buffer *leaf; 3778 struct btrfs_dir_item *di; 3779 struct btrfs_key key; 3780 const char *name = dentry->d_name.name; 3781 int name_len = dentry->d_name.len; 3782 u64 index; 3783 int ret; 3784 u64 objectid; 3785 u64 dir_ino = btrfs_ino(BTRFS_I(dir)); 3786 3787 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 3788 objectid = inode->root->root_key.objectid; 3789 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 3790 objectid = inode->location.objectid; 3791 } else { 3792 WARN_ON(1); 3793 return -EINVAL; 3794 } 3795 3796 path = btrfs_alloc_path(); 3797 if (!path) 3798 return -ENOMEM; 3799 3800 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3801 name, name_len, -1); 3802 if (IS_ERR_OR_NULL(di)) { 3803 ret = di ? 
PTR_ERR(di) : -ENOENT;
3804 goto out;
3805 }
3806
3807 leaf = path->nodes[0];
3808 btrfs_dir_item_key_to_cpu(leaf, di, &key);
3809 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3810 ret = btrfs_delete_one_dir_name(trans, root, path, di);
3811 if (ret) {
3812 btrfs_abort_transaction(trans, ret);
3813 goto out;
3814 }
3815 btrfs_release_path(path);
3816
3817 /*
3818 * This is a placeholder inode for a subvolume we didn't have a
3819 * reference to at the time of the snapshot creation. In the meantime
3820 * we could have renamed the real subvol link into our snapshot, so
3821 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
3822 * Instead, simply look up the dir_index_item for this entry so we can
3823 * remove it. Otherwise we know we have a ref to the root and we can
3824 * call btrfs_del_root_ref, and it _shouldn't_ fail.
3825 */
3826 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
3827 di = btrfs_search_dir_index_item(root, path, dir_ino,
3828 name, name_len);
3829 if (IS_ERR_OR_NULL(di)) {
3830 if (!di)
3831 ret = -ENOENT;
3832 else
3833 ret = PTR_ERR(di);
3834 btrfs_abort_transaction(trans, ret);
3835 goto out;
3836 }
3837
3838 leaf = path->nodes[0];
3839 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3840 index = key.offset;
3841 btrfs_release_path(path);
3842 } else {
3843 ret = btrfs_del_root_ref(trans, objectid,
3844 root->root_key.objectid, dir_ino,
3845 &index, name, name_len);
3846 if (ret) {
3847 btrfs_abort_transaction(trans, ret);
3848 goto out;
3849 }
3850 }
3851
3852 ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
3853 if (ret) {
3854 btrfs_abort_transaction(trans, ret);
3855 goto out;
3856 }
3857
3858 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
3859 inode_inc_iversion(dir);
3860 dir->i_mtime = dir->i_ctime = current_time(dir);
3861 ret = btrfs_update_inode_fallback(trans, root, dir);
3862 if (ret)
3863 btrfs_abort_transaction(trans, ret);
3864 out:
3865 btrfs_free_path(path);
3866 return ret;
3867 }
3868
3869 /*
3870 * Helper to check if the subvolume references other subvolumes or if it is
3871 * the default subvolume.
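*
* A sketch of the two checks below (a restatement of the code, not new
* semantics): first the "default" dir item in the tree root is compared
* against this root's objectid; then we search the tree of tree roots
* for the last key of the form
*
*	(root->root_key.objectid, BTRFS_ROOT_REF_KEY, (u64)-1)
*
* and if the slot in front of the insertion point still matches that
* objectid and key type, another subvolume is nested inside this one
* and we return -ENOTEMPTY.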
3872 */ 3873 static noinline int may_destroy_subvol(struct btrfs_root *root) 3874 { 3875 struct btrfs_fs_info *fs_info = root->fs_info; 3876 struct btrfs_path *path; 3877 struct btrfs_dir_item *di; 3878 struct btrfs_key key; 3879 u64 dir_id; 3880 int ret; 3881 3882 path = btrfs_alloc_path(); 3883 if (!path) 3884 return -ENOMEM; 3885 3886 /* Make sure this root isn't set as the default subvol */ 3887 dir_id = btrfs_super_root_dir(fs_info->super_copy); 3888 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 3889 dir_id, "default", 7, 0); 3890 if (di && !IS_ERR(di)) { 3891 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 3892 if (key.objectid == root->root_key.objectid) { 3893 ret = -EPERM; 3894 btrfs_err(fs_info, 3895 "deleting default subvolume %llu is not allowed", 3896 key.objectid); 3897 goto out; 3898 } 3899 btrfs_release_path(path); 3900 } 3901 3902 key.objectid = root->root_key.objectid; 3903 key.type = BTRFS_ROOT_REF_KEY; 3904 key.offset = (u64)-1; 3905 3906 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 3907 if (ret < 0) 3908 goto out; 3909 BUG_ON(ret == 0); 3910 3911 ret = 0; 3912 if (path->slots[0] > 0) { 3913 path->slots[0]--; 3914 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3915 if (key.objectid == root->root_key.objectid && 3916 key.type == BTRFS_ROOT_REF_KEY) 3917 ret = -ENOTEMPTY; 3918 } 3919 out: 3920 btrfs_free_path(path); 3921 return ret; 3922 } 3923 3924 /* Delete all dentries for inodes belonging to the root */ 3925 static void btrfs_prune_dentries(struct btrfs_root *root) 3926 { 3927 struct btrfs_fs_info *fs_info = root->fs_info; 3928 struct rb_node *node; 3929 struct rb_node *prev; 3930 struct btrfs_inode *entry; 3931 struct inode *inode; 3932 u64 objectid = 0; 3933 3934 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 3935 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 3936 3937 spin_lock(&root->inode_lock); 3938 again: 3939 node = root->inode_tree.rb_node; 3940 prev = NULL; 3941 while (node) { 3942 prev = node; 3943 entry = rb_entry(node, struct btrfs_inode, rb_node); 3944 3945 if (objectid < btrfs_ino(entry)) 3946 node = node->rb_left; 3947 else if (objectid > btrfs_ino(entry)) 3948 node = node->rb_right; 3949 else 3950 break; 3951 } 3952 if (!node) { 3953 while (prev) { 3954 entry = rb_entry(prev, struct btrfs_inode, rb_node); 3955 if (objectid <= btrfs_ino(entry)) { 3956 node = prev; 3957 break; 3958 } 3959 prev = rb_next(prev); 3960 } 3961 } 3962 while (node) { 3963 entry = rb_entry(node, struct btrfs_inode, rb_node); 3964 objectid = btrfs_ino(entry) + 1; 3965 inode = igrab(&entry->vfs_inode); 3966 if (inode) { 3967 spin_unlock(&root->inode_lock); 3968 if (atomic_read(&inode->i_count) > 1) 3969 d_prune_aliases(inode); 3970 /* 3971 * btrfs_drop_inode will have it removed from the inode 3972 * cache when its usage count hits zero. 
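*
* Hedged note on the loop above: the final iput() may free a
* btrfs_inode that is still linked into root->inode_tree, so iput() is
* only called after dropping root->inode_lock, and the walk restarts
* via 'goto again' from the last seen objectid + 1 (saved before the
* lock was released).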
3973 */
3974 iput(inode);
3975 cond_resched();
3976 spin_lock(&root->inode_lock);
3977 goto again;
3978 }
3979
3980 if (cond_resched_lock(&root->inode_lock))
3981 goto again;
3982
3983 node = rb_next(node);
3984 }
3985 spin_unlock(&root->inode_lock);
3986 }
3987
3988 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
3989 {
3990 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
3991 struct btrfs_root *root = BTRFS_I(dir)->root;
3992 struct inode *inode = d_inode(dentry);
3993 struct btrfs_root *dest = BTRFS_I(inode)->root;
3994 struct btrfs_trans_handle *trans;
3995 struct btrfs_block_rsv block_rsv;
3996 u64 root_flags;
3997 int ret;
3998 int err;
3999
4000 /*
4001 * Don't allow deleting a subvolume while a send is in progress. This is
4002 * inside the inode lock so the error handling that has to drop the bit
4003 * again is not run concurrently.
4004 */
4005 spin_lock(&dest->root_item_lock);
4006 if (dest->send_in_progress) {
4007 spin_unlock(&dest->root_item_lock);
4008 btrfs_warn(fs_info,
4009 "attempt to delete subvolume %llu during send",
4010 dest->root_key.objectid);
4011 return -EPERM;
4012 }
4013 root_flags = btrfs_root_flags(&dest->root_item);
4014 btrfs_set_root_flags(&dest->root_item,
4015 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4016 spin_unlock(&dest->root_item_lock);
4017
4018 down_write(&fs_info->subvol_sem);
4019
4020 err = may_destroy_subvol(dest);
4021 if (err)
4022 goto out_up_write;
4023
4024 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4025 /*
4026 * One for dir inode,
4027 * two for dir entries,
4028 * two for root ref/backref.
4029 */
4030 err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4031 if (err)
4032 goto out_up_write;
4033
4034 trans = btrfs_start_transaction(root, 0);
4035 if (IS_ERR(trans)) {
4036 err = PTR_ERR(trans);
4037 goto out_release;
4038 }
4039 trans->block_rsv = &block_rsv;
4040 trans->bytes_reserved = block_rsv.size;
4041
4042 btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4043
4044 ret = btrfs_unlink_subvol(trans, dir, dentry);
4045 if (ret) {
4046 err = ret;
4047 btrfs_abort_transaction(trans, ret);
4048 goto out_end_trans;
4049 }
4050
4051 btrfs_record_root_in_trans(trans, dest);
4052
4053 memset(&dest->root_item.drop_progress, 0,
4054 sizeof(dest->root_item.drop_progress));
4055 dest->root_item.drop_level = 0;
4056 btrfs_set_root_refs(&dest->root_item, 0);
4057
4058 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4059 ret = btrfs_insert_orphan_item(trans,
4060 fs_info->tree_root,
4061 dest->root_key.objectid);
4062 if (ret) {
4063 btrfs_abort_transaction(trans, ret);
4064 err = ret;
4065 goto out_end_trans;
4066 }
4067 }
4068
4069 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4070 BTRFS_UUID_KEY_SUBVOL,
4071 dest->root_key.objectid);
4072 if (ret && ret != -ENOENT) {
4073 btrfs_abort_transaction(trans, ret);
4074 err = ret;
4075 goto out_end_trans;
4076 }
4077 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4078 ret = btrfs_uuid_tree_remove(trans,
4079 dest->root_item.received_uuid,
4080 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4081 dest->root_key.objectid);
4082 if (ret && ret != -ENOENT) {
4083 btrfs_abort_transaction(trans, ret);
4084 err = ret;
4085 goto out_end_trans;
4086 }
4087 }
4088
4089 free_anon_bdev(dest->anon_dev);
4090 dest->anon_dev = 0;
4091 out_end_trans:
4092 trans->block_rsv = NULL;
4093 trans->bytes_reserved = 0;
4094 ret = btrfs_end_transaction(trans);
4095 if (ret && !err)
4096 err = ret;
4097 inode->i_flags |= S_DEAD;
4098
out_release: 4099 btrfs_subvolume_release_metadata(root, &block_rsv); 4100 out_up_write: 4101 up_write(&fs_info->subvol_sem); 4102 if (err) { 4103 spin_lock(&dest->root_item_lock); 4104 root_flags = btrfs_root_flags(&dest->root_item); 4105 btrfs_set_root_flags(&dest->root_item, 4106 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4107 spin_unlock(&dest->root_item_lock); 4108 } else { 4109 d_invalidate(dentry); 4110 btrfs_prune_dentries(dest); 4111 ASSERT(dest->send_in_progress == 0); 4112 4113 /* the last ref */ 4114 if (dest->ino_cache_inode) { 4115 iput(dest->ino_cache_inode); 4116 dest->ino_cache_inode = NULL; 4117 } 4118 } 4119 4120 return err; 4121 } 4122 4123 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4124 { 4125 struct inode *inode = d_inode(dentry); 4126 int err = 0; 4127 struct btrfs_root *root = BTRFS_I(dir)->root; 4128 struct btrfs_trans_handle *trans; 4129 u64 last_unlink_trans; 4130 4131 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4132 return -ENOTEMPTY; 4133 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) 4134 return btrfs_delete_subvolume(dir, dentry); 4135 4136 trans = __unlink_start_trans(dir); 4137 if (IS_ERR(trans)) 4138 return PTR_ERR(trans); 4139 4140 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4141 err = btrfs_unlink_subvol(trans, dir, dentry); 4142 goto out; 4143 } 4144 4145 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4146 if (err) 4147 goto out; 4148 4149 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4150 4151 /* now the directory is empty */ 4152 err = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 4153 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 4154 dentry->d_name.len); 4155 if (!err) { 4156 btrfs_i_size_write(BTRFS_I(inode), 0); 4157 /* 4158 * Propagate the last_unlink_trans value of the deleted dir to 4159 * its parent directory. This is to prevent an unrecoverable 4160 * log tree in the case we do something like this: 4161 * 1) create dir foo 4162 * 2) create snapshot under dir foo 4163 * 3) delete the snapshot 4164 * 4) rmdir foo 4165 * 5) mkdir foo 4166 * 6) fsync foo or some file inside foo 4167 */ 4168 if (last_unlink_trans >= trans->transid) 4169 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4170 } 4171 out: 4172 btrfs_end_transaction(trans); 4173 btrfs_btree_balance_dirty(root->fs_info); 4174 4175 return err; 4176 } 4177 4178 /* 4179 * Return this if we need to call truncate_block for the last bit of the 4180 * truncate. 4181 */ 4182 #define NEED_TRUNCATE_BLOCK 1 4183 4184 /* 4185 * this can truncate away extent items, csum items and directory items. 4186 * It starts at a high offset and removes keys until it can't find 4187 * any higher than new_size 4188 * 4189 * csum items that cross the new i_size are truncated to the new size 4190 * as well. 4191 * 4192 * min_type is the minimum key type to truncate down to. If set to 0, this 4193 * will kill all the items on this inode, including the INODE_ITEM_KEY. 
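*
* Usage sketch (mirroring the eviction path later in this file):
* dropping every item of an inode is
*
*	ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
*
* while a regular truncate to new_size passes
* min_type = BTRFS_EXTENT_DATA_KEY so the inode item itself survives;
* the BUG_ON below enforces that pairing whenever new_size > 0.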
4194 */ 4195 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 4196 struct btrfs_root *root, 4197 struct inode *inode, 4198 u64 new_size, u32 min_type) 4199 { 4200 struct btrfs_fs_info *fs_info = root->fs_info; 4201 struct btrfs_path *path; 4202 struct extent_buffer *leaf; 4203 struct btrfs_file_extent_item *fi; 4204 struct btrfs_key key; 4205 struct btrfs_key found_key; 4206 u64 extent_start = 0; 4207 u64 extent_num_bytes = 0; 4208 u64 extent_offset = 0; 4209 u64 item_end = 0; 4210 u64 last_size = new_size; 4211 u32 found_type = (u8)-1; 4212 int found_extent; 4213 int del_item; 4214 int pending_del_nr = 0; 4215 int pending_del_slot = 0; 4216 int extent_type = -1; 4217 int ret; 4218 u64 ino = btrfs_ino(BTRFS_I(inode)); 4219 u64 bytes_deleted = 0; 4220 bool be_nice = false; 4221 bool should_throttle = false; 4222 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 4223 struct extent_state *cached_state = NULL; 4224 4225 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 4226 4227 /* 4228 * For non-free space inodes and non-shareable roots, we want to back 4229 * off from time to time. This means all inodes in subvolume roots, 4230 * reloc roots, and data reloc roots. 4231 */ 4232 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && 4233 test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 4234 be_nice = true; 4235 4236 path = btrfs_alloc_path(); 4237 if (!path) 4238 return -ENOMEM; 4239 path->reada = READA_BACK; 4240 4241 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4242 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, 4243 &cached_state); 4244 4245 /* 4246 * We want to drop from the next block forward in case this 4247 * new size is not block aligned since we will be keeping the 4248 * last block of the extent just the way it is. 4249 */ 4250 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, 4251 fs_info->sectorsize), 4252 (u64)-1, 0); 4253 } 4254 4255 /* 4256 * This function is also used to drop the items in the log tree before 4257 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 4258 * it is used to drop the logged items. So we shouldn't kill the delayed 4259 * items. 4260 */ 4261 if (min_type == 0 && root == BTRFS_I(inode)->root) 4262 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 4263 4264 key.objectid = ino; 4265 key.offset = (u64)-1; 4266 key.type = (u8)-1; 4267 4268 search_again: 4269 /* 4270 * with a 16K leaf size and 128MB extents, you can actually queue 4271 * up a huge file in a single leaf. 
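(Back-of-envelope, using assumed on-disk sizes: at ~53 bytes per file
* extent item plus ~25 bytes of item header, a 16K leaf holds roughly
* 200 extent items, which at 128MB per extent describes on the order of
* 25GB of file data.)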
Most of the time that 4272 * bytes_deleted is > 0, it will be huge by the time we get here 4273 */ 4274 if (be_nice && bytes_deleted > SZ_32M && 4275 btrfs_should_end_transaction(trans)) { 4276 ret = -EAGAIN; 4277 goto out; 4278 } 4279 4280 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 4281 if (ret < 0) 4282 goto out; 4283 4284 if (ret > 0) { 4285 ret = 0; 4286 /* there are no items in the tree for us to truncate, we're 4287 * done 4288 */ 4289 if (path->slots[0] == 0) 4290 goto out; 4291 path->slots[0]--; 4292 } 4293 4294 while (1) { 4295 u64 clear_start = 0, clear_len = 0; 4296 4297 fi = NULL; 4298 leaf = path->nodes[0]; 4299 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4300 found_type = found_key.type; 4301 4302 if (found_key.objectid != ino) 4303 break; 4304 4305 if (found_type < min_type) 4306 break; 4307 4308 item_end = found_key.offset; 4309 if (found_type == BTRFS_EXTENT_DATA_KEY) { 4310 fi = btrfs_item_ptr(leaf, path->slots[0], 4311 struct btrfs_file_extent_item); 4312 extent_type = btrfs_file_extent_type(leaf, fi); 4313 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4314 item_end += 4315 btrfs_file_extent_num_bytes(leaf, fi); 4316 4317 trace_btrfs_truncate_show_fi_regular( 4318 BTRFS_I(inode), leaf, fi, 4319 found_key.offset); 4320 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4321 item_end += btrfs_file_extent_ram_bytes(leaf, 4322 fi); 4323 4324 trace_btrfs_truncate_show_fi_inline( 4325 BTRFS_I(inode), leaf, fi, path->slots[0], 4326 found_key.offset); 4327 } 4328 item_end--; 4329 } 4330 if (found_type > min_type) { 4331 del_item = 1; 4332 } else { 4333 if (item_end < new_size) 4334 break; 4335 if (found_key.offset >= new_size) 4336 del_item = 1; 4337 else 4338 del_item = 0; 4339 } 4340 found_extent = 0; 4341 /* FIXME, shrink the extent if the ref count is only 1 */ 4342 if (found_type != BTRFS_EXTENT_DATA_KEY) 4343 goto delete; 4344 4345 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4346 u64 num_dec; 4347 4348 clear_start = found_key.offset; 4349 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 4350 if (!del_item) { 4351 u64 orig_num_bytes = 4352 btrfs_file_extent_num_bytes(leaf, fi); 4353 extent_num_bytes = ALIGN(new_size - 4354 found_key.offset, 4355 fs_info->sectorsize); 4356 clear_start = ALIGN(new_size, fs_info->sectorsize); 4357 btrfs_set_file_extent_num_bytes(leaf, fi, 4358 extent_num_bytes); 4359 num_dec = (orig_num_bytes - 4360 extent_num_bytes); 4361 if (test_bit(BTRFS_ROOT_SHAREABLE, 4362 &root->state) && 4363 extent_start != 0) 4364 inode_sub_bytes(inode, num_dec); 4365 btrfs_mark_buffer_dirty(leaf); 4366 } else { 4367 extent_num_bytes = 4368 btrfs_file_extent_disk_num_bytes(leaf, 4369 fi); 4370 extent_offset = found_key.offset - 4371 btrfs_file_extent_offset(leaf, fi); 4372 4373 /* FIXME blocksize != 4096 */ 4374 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4375 if (extent_start != 0) { 4376 found_extent = 1; 4377 if (test_bit(BTRFS_ROOT_SHAREABLE, 4378 &root->state)) 4379 inode_sub_bytes(inode, num_dec); 4380 } 4381 } 4382 clear_len = num_dec; 4383 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4384 /* 4385 * we can't truncate inline items that have had 4386 * special encodings 4387 */ 4388 if (!del_item && 4389 btrfs_file_extent_encryption(leaf, fi) == 0 && 4390 btrfs_file_extent_other_encoding(leaf, fi) == 0 && 4391 btrfs_file_extent_compression(leaf, fi) == 0) { 4392 u32 size = (u32)(new_size - found_key.offset); 4393 4394 btrfs_set_file_extent_ram_bytes(leaf, fi, size); 4395 size = 
btrfs_file_extent_calc_inline_size(size);
4396 btrfs_truncate_item(path, size, 1);
4397 } else if (!del_item) {
4398 /*
4399 * We have to bail so the last_size is set to
4400 * just before this extent.
4401 */
4402 ret = NEED_TRUNCATE_BLOCK;
4403 break;
4404 } else {
4405 /*
4406 * Inline extents are special, we just treat
4407 * them as a full sector worth in the file
4408 * extent tree just for simplicity's sake.
4409 */
4410 clear_len = fs_info->sectorsize;
4411 }
4412
4413 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
4414 inode_sub_bytes(inode, item_end + 1 - new_size);
4415 }
4416 delete:
4417 /*
4418 * We use btrfs_truncate_inode_items() to clean up log trees for
4419 * multiple fsyncs, and in this case we don't want to clear the
4420 * file extent range because it's just the log.
4421 */
4422 if (root == BTRFS_I(inode)->root) {
4423 ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
4424 clear_start, clear_len);
4425 if (ret) {
4426 btrfs_abort_transaction(trans, ret);
4427 break;
4428 }
4429 }
4430
4431 if (del_item)
4432 last_size = found_key.offset;
4433 else
4434 last_size = new_size;
4435 if (del_item) {
4436 if (!pending_del_nr) {
4437 /* no pending yet, add ourselves */
4438 pending_del_slot = path->slots[0];
4439 pending_del_nr = 1;
4440 } else if (pending_del_nr &&
4441 path->slots[0] + 1 == pending_del_slot) {
4442 /* hop on the pending chunk */
4443 pending_del_nr++;
4444 pending_del_slot = path->slots[0];
4445 } else {
4446 BUG();
4447 }
4448 } else {
4449 break;
4450 }
4451 should_throttle = false;
4452
4453 if (found_extent &&
4454 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4455 struct btrfs_ref ref = { 0 };
4456
4457 bytes_deleted += extent_num_bytes;
4458
4459 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
4460 extent_start, extent_num_bytes, 0);
4461 ref.real_root = root->root_key.objectid;
4462 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
4463 ino, extent_offset);
4464 ret = btrfs_free_extent(trans, &ref);
4465 if (ret) {
4466 btrfs_abort_transaction(trans, ret);
4467 break;
4468 }
4469 if (be_nice) {
4470 if (btrfs_should_throttle_delayed_refs(trans))
4471 should_throttle = true;
4472 }
4473 }
4474
4475 if (found_type == BTRFS_INODE_ITEM_KEY)
4476 break;
4477
4478 if (path->slots[0] == 0 ||
4479 path->slots[0] != pending_del_slot ||
4480 should_throttle) {
4481 if (pending_del_nr) {
4482 ret = btrfs_del_items(trans, root, path,
4483 pending_del_slot,
4484 pending_del_nr);
4485 if (ret) {
4486 btrfs_abort_transaction(trans, ret);
4487 break;
4488 }
4489 pending_del_nr = 0;
4490 }
4491 btrfs_release_path(path);
4492
4493 /*
4494 * We can generate a lot of delayed refs, so we need to
4495 * throttle every once in a while and make sure we're
4496 * adding enough space to keep up with the work we are
4497 * generating. Since we hold a transaction here we
4498 * can't flush, and we don't want to FLUSH_LIMIT because
4499 * we could have generated too many delayed refs to
4500 * actually allocate, so just bail if we're short and
4501 * let the normal reservation dance happen higher up.
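*
* Concretely, the step below restates this as (a sketch of the existing
* logic, not new behaviour):
*
*	if (should_throttle &&
*	    btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_NO_FLUSH))
*		bail out with -EAGAIN;
*
* btrfs_evict_inode() below loops on -EAGAIN, ending the transaction and
* balancing dirty metadata between attempts.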
4502 */ 4503 if (should_throttle) { 4504 ret = btrfs_delayed_refs_rsv_refill(fs_info, 4505 BTRFS_RESERVE_NO_FLUSH); 4506 if (ret) { 4507 ret = -EAGAIN; 4508 break; 4509 } 4510 } 4511 goto search_again; 4512 } else { 4513 path->slots[0]--; 4514 } 4515 } 4516 out: 4517 if (ret >= 0 && pending_del_nr) { 4518 int err; 4519 4520 err = btrfs_del_items(trans, root, path, pending_del_slot, 4521 pending_del_nr); 4522 if (err) { 4523 btrfs_abort_transaction(trans, err); 4524 ret = err; 4525 } 4526 } 4527 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4528 ASSERT(last_size >= new_size); 4529 if (!ret && last_size > new_size) 4530 last_size = new_size; 4531 btrfs_inode_safe_disk_i_size_write(inode, last_size); 4532 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, 4533 (u64)-1, &cached_state); 4534 } 4535 4536 btrfs_free_path(path); 4537 return ret; 4538 } 4539 4540 /* 4541 * btrfs_truncate_block - read, zero a chunk and write a block 4542 * @inode - inode that we're zeroing 4543 * @from - the offset to start zeroing 4544 * @len - the length to zero, 0 to zero the entire range respective to the 4545 * offset 4546 * @front - zero up to the offset instead of from the offset on 4547 * 4548 * This will find the block for the "from" offset and cow the block and zero the 4549 * part we want to zero. This is used with truncate and hole punching. 4550 */ 4551 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, 4552 int front) 4553 { 4554 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 4555 struct address_space *mapping = inode->i_mapping; 4556 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4557 struct btrfs_ordered_extent *ordered; 4558 struct extent_state *cached_state = NULL; 4559 struct extent_changeset *data_reserved = NULL; 4560 char *kaddr; 4561 bool only_release_metadata = false; 4562 u32 blocksize = fs_info->sectorsize; 4563 pgoff_t index = from >> PAGE_SHIFT; 4564 unsigned offset = from & (blocksize - 1); 4565 struct page *page; 4566 gfp_t mask = btrfs_alloc_write_mask(mapping); 4567 size_t write_bytes = blocksize; 4568 int ret = 0; 4569 u64 block_start; 4570 u64 block_end; 4571 4572 if (IS_ALIGNED(offset, blocksize) && 4573 (!len || IS_ALIGNED(len, blocksize))) 4574 goto out; 4575 4576 block_start = round_down(from, blocksize); 4577 block_end = block_start + blocksize - 1; 4578 4579 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 4580 block_start, blocksize); 4581 if (ret < 0) { 4582 if (btrfs_check_nocow_lock(BTRFS_I(inode), block_start, 4583 &write_bytes) > 0) { 4584 /* For nocow case, no need to reserve data space */ 4585 only_release_metadata = true; 4586 } else { 4587 goto out; 4588 } 4589 } 4590 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), blocksize); 4591 if (ret < 0) { 4592 if (!only_release_metadata) 4593 btrfs_free_reserved_data_space(BTRFS_I(inode), 4594 data_reserved, block_start, blocksize); 4595 goto out; 4596 } 4597 again: 4598 page = find_or_create_page(mapping, index, mask); 4599 if (!page) { 4600 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, 4601 block_start, blocksize, true); 4602 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); 4603 ret = -ENOMEM; 4604 goto out; 4605 } 4606 4607 if (!PageUptodate(page)) { 4608 ret = btrfs_readpage(NULL, page); 4609 lock_page(page); 4610 if (page->mapping != mapping) { 4611 unlock_page(page); 4612 put_page(page); 4613 goto again; 4614 } 4615 if (!PageUptodate(page)) { 4616 ret = -EIO; 4617 goto out_unlock; 4618 } 4619 } 4620 
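/*
 * Note (descriptive addition): the page is locked and up to date at this
 * point; waiting for writeback below avoids zeroing bytes while an older
 * version of the block may still be in flight to disk.
 */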
wait_on_page_writeback(page); 4621 4622 lock_extent_bits(io_tree, block_start, block_end, &cached_state); 4623 set_page_extent_mapped(page); 4624 4625 ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), block_start); 4626 if (ordered) { 4627 unlock_extent_cached(io_tree, block_start, block_end, 4628 &cached_state); 4629 unlock_page(page); 4630 put_page(page); 4631 btrfs_start_ordered_extent(ordered, 1); 4632 btrfs_put_ordered_extent(ordered); 4633 goto again; 4634 } 4635 4636 clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, 4637 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4638 0, 0, &cached_state); 4639 4640 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), block_start, block_end, 0, 4641 &cached_state); 4642 if (ret) { 4643 unlock_extent_cached(io_tree, block_start, block_end, 4644 &cached_state); 4645 goto out_unlock; 4646 } 4647 4648 if (offset != blocksize) { 4649 if (!len) 4650 len = blocksize - offset; 4651 kaddr = kmap(page); 4652 if (front) 4653 memset(kaddr + (block_start - page_offset(page)), 4654 0, offset); 4655 else 4656 memset(kaddr + (block_start - page_offset(page)) + offset, 4657 0, len); 4658 flush_dcache_page(page); 4659 kunmap(page); 4660 } 4661 ClearPageChecked(page); 4662 set_page_dirty(page); 4663 unlock_extent_cached(io_tree, block_start, block_end, &cached_state); 4664 4665 if (only_release_metadata) 4666 set_extent_bit(&BTRFS_I(inode)->io_tree, block_start, 4667 block_end, EXTENT_NORESERVE, NULL, NULL, 4668 GFP_NOFS); 4669 4670 out_unlock: 4671 if (ret) { 4672 if (only_release_metadata) 4673 btrfs_delalloc_release_metadata(BTRFS_I(inode), 4674 blocksize, true); 4675 else 4676 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, 4677 block_start, blocksize, true); 4678 } 4679 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); 4680 unlock_page(page); 4681 put_page(page); 4682 out: 4683 if (only_release_metadata) 4684 btrfs_check_nocow_unlock(BTRFS_I(inode)); 4685 extent_changeset_free(data_reserved); 4686 return ret; 4687 } 4688 4689 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, 4690 u64 offset, u64 len) 4691 { 4692 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 4693 struct btrfs_trans_handle *trans; 4694 int ret; 4695 4696 /* 4697 * Still need to make sure the inode looks like it's been updated so 4698 * that any holes get logged if we fsync. 4699 */ 4700 if (btrfs_fs_incompat(fs_info, NO_HOLES)) { 4701 BTRFS_I(inode)->last_trans = fs_info->generation; 4702 BTRFS_I(inode)->last_sub_trans = root->log_transid; 4703 BTRFS_I(inode)->last_log_commit = root->last_log_commit; 4704 return 0; 4705 } 4706 4707 /* 4708 * 1 - for the one we're dropping 4709 * 1 - for the one we're adding 4710 * 1 - for updating the inode. 4711 */ 4712 trans = btrfs_start_transaction(root, 3); 4713 if (IS_ERR(trans)) 4714 return PTR_ERR(trans); 4715 4716 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1); 4717 if (ret) { 4718 btrfs_abort_transaction(trans, ret); 4719 btrfs_end_transaction(trans); 4720 return ret; 4721 } 4722 4723 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), 4724 offset, 0, 0, len, 0, len, 0, 0, 0); 4725 if (ret) 4726 btrfs_abort_transaction(trans, ret); 4727 else 4728 btrfs_update_inode(trans, root, inode); 4729 btrfs_end_transaction(trans); 4730 return ret; 4731 } 4732 4733 /* 4734 * This function puts in dummy file extents for the area we're creating a hole 4735 * for. 
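(For scale, assuming a 4K sectorsize: expanding i_size from 7K to 1M
* gives hole_start = ALIGN(7K, 4K) = 8K and block_end = 1M, so the loop
* below fills [8K, 1M) with hole extents.)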
So if we are truncating this file to a larger size we need to insert
4736 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4737 * for the range between oldsize and size.
4738 */
4739 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4740 {
4741 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4742 struct btrfs_root *root = BTRFS_I(inode)->root;
4743 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4744 struct extent_map *em = NULL;
4745 struct extent_state *cached_state = NULL;
4746 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4747 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4748 u64 block_end = ALIGN(size, fs_info->sectorsize);
4749 u64 last_byte;
4750 u64 cur_offset;
4751 u64 hole_size;
4752 int err = 0;
4753
4754 /*
4755 * If our size started in the middle of a block we need to zero out the
4756 * rest of the block before we expand the i_size, otherwise we could
4757 * expose stale data.
4758 */
4759 err = btrfs_truncate_block(inode, oldsize, 0, 0);
4760 if (err)
4761 return err;
4762
4763 if (size <= hole_start)
4764 return 0;
4765
4766 btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
4767 block_end - 1, &cached_state);
4768 cur_offset = hole_start;
4769 while (1) {
4770 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
4771 block_end - cur_offset);
4772 if (IS_ERR(em)) {
4773 err = PTR_ERR(em);
4774 em = NULL;
4775 break;
4776 }
4777 last_byte = min(extent_map_end(em), block_end);
4778 last_byte = ALIGN(last_byte, fs_info->sectorsize);
4779 hole_size = last_byte - cur_offset;
4780
4781 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4782 struct extent_map *hole_em;
4783
4784 err = maybe_insert_hole(root, inode, cur_offset,
4785 hole_size);
4786 if (err)
4787 break;
4788
4789 err = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
4790 cur_offset, hole_size);
4791 if (err)
4792 break;
4793
4794 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
4795 cur_offset + hole_size - 1, 0);
4796 hole_em = alloc_extent_map();
4797 if (!hole_em) {
4798 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4799 &BTRFS_I(inode)->runtime_flags);
4800 goto next;
4801 }
4802 hole_em->start = cur_offset;
4803 hole_em->len = hole_size;
4804 hole_em->orig_start = cur_offset;
4805
4806 hole_em->block_start = EXTENT_MAP_HOLE;
4807 hole_em->block_len = 0;
4808 hole_em->orig_block_len = 0;
4809 hole_em->ram_bytes = hole_size;
4810 hole_em->compress_type = BTRFS_COMPRESS_NONE;
4811 hole_em->generation = fs_info->generation;
4812
4813 while (1) {
4814 write_lock(&em_tree->lock);
4815 err = add_extent_mapping(em_tree, hole_em, 1);
4816 write_unlock(&em_tree->lock);
4817 if (err != -EEXIST)
4818 break;
4819 btrfs_drop_extent_cache(BTRFS_I(inode),
4820 cur_offset,
4821 cur_offset +
4822 hole_size - 1, 0);
4823 }
4824 free_extent_map(hole_em);
4825 } else {
4826 err = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
4827 cur_offset, hole_size);
4828 if (err)
4829 break;
4830 }
4831 next:
4832 free_extent_map(em);
4833 em = NULL;
4834 cur_offset = last_byte;
4835 if (cur_offset >= block_end)
4836 break;
4837 }
4838 free_extent_map(em);
4839 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
4840 return err;
4841 }
4842
4843 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4844 {
4845 struct btrfs_root *root = BTRFS_I(inode)->root;
4846 struct btrfs_trans_handle *trans;
4847 loff_t oldsize = i_size_read(inode);
4848 loff_t newsize = attr->ia_size;
4849 int mask =
attr->ia_valid; 4850 int ret; 4851 4852 /* 4853 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 4854 * special case where we need to update the times despite not having 4855 * these flags set. For all other operations the VFS set these flags 4856 * explicitly if it wants a timestamp update. 4857 */ 4858 if (newsize != oldsize) { 4859 inode_inc_iversion(inode); 4860 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) 4861 inode->i_ctime = inode->i_mtime = 4862 current_time(inode); 4863 } 4864 4865 if (newsize > oldsize) { 4866 /* 4867 * Don't do an expanding truncate while snapshotting is ongoing. 4868 * This is to ensure the snapshot captures a fully consistent 4869 * state of this file - if the snapshot captures this expanding 4870 * truncation, it must capture all writes that happened before 4871 * this truncation. 4872 */ 4873 btrfs_drew_write_lock(&root->snapshot_lock); 4874 ret = btrfs_cont_expand(inode, oldsize, newsize); 4875 if (ret) { 4876 btrfs_drew_write_unlock(&root->snapshot_lock); 4877 return ret; 4878 } 4879 4880 trans = btrfs_start_transaction(root, 1); 4881 if (IS_ERR(trans)) { 4882 btrfs_drew_write_unlock(&root->snapshot_lock); 4883 return PTR_ERR(trans); 4884 } 4885 4886 i_size_write(inode, newsize); 4887 btrfs_inode_safe_disk_i_size_write(inode, 0); 4888 pagecache_isize_extended(inode, oldsize, newsize); 4889 ret = btrfs_update_inode(trans, root, inode); 4890 btrfs_drew_write_unlock(&root->snapshot_lock); 4891 btrfs_end_transaction(trans); 4892 } else { 4893 4894 /* 4895 * We're truncating a file that used to have good data down to 4896 * zero. Make sure any new writes to the file get on disk 4897 * on close. 4898 */ 4899 if (newsize == 0) 4900 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 4901 &BTRFS_I(inode)->runtime_flags); 4902 4903 truncate_setsize(inode, newsize); 4904 4905 inode_dio_wait(inode); 4906 4907 ret = btrfs_truncate(inode, newsize == oldsize); 4908 if (ret && inode->i_nlink) { 4909 int err; 4910 4911 /* 4912 * Truncate failed, so fix up the in-memory size. We 4913 * adjusted disk_i_size down as we removed extents, so 4914 * wait for disk_i_size to be stable and then update the 4915 * in-memory size to match. 4916 */ 4917 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 4918 if (err) 4919 return err; 4920 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 4921 } 4922 } 4923 4924 return ret; 4925 } 4926 4927 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 4928 { 4929 struct inode *inode = d_inode(dentry); 4930 struct btrfs_root *root = BTRFS_I(inode)->root; 4931 int err; 4932 4933 if (btrfs_root_readonly(root)) 4934 return -EROFS; 4935 4936 err = setattr_prepare(dentry, attr); 4937 if (err) 4938 return err; 4939 4940 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 4941 err = btrfs_setsize(inode, attr); 4942 if (err) 4943 return err; 4944 } 4945 4946 if (attr->ia_valid) { 4947 setattr_copy(inode, attr); 4948 inode_inc_iversion(inode); 4949 err = btrfs_dirty_inode(inode); 4950 4951 if (!err && attr->ia_valid & ATTR_MODE) 4952 err = posix_acl_chmod(inode, inode->i_mode); 4953 } 4954 4955 return err; 4956 } 4957 4958 /* 4959 * While truncating the inode pages during eviction, we get the VFS calling 4960 * btrfs_invalidatepage() against each page of the inode. This is slow because 4961 * the calls to btrfs_invalidatepage() result in a huge amount of calls to 4962 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting 4963 * extent_state structures over and over, wasting lots of time. 
4964 *
4965 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
4966 * those expensive operations on a per-page basis and do only the ordered io
4967 * finishing, while we release here the extent_map and extent_state structures,
4968 * without the excessive merging and splitting.
4969 */
4970 static void evict_inode_truncate_pages(struct inode *inode)
4971 {
4972 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4973 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
4974 struct rb_node *node;
4975
4976 ASSERT(inode->i_state & I_FREEING);
4977 truncate_inode_pages_final(&inode->i_data);
4978
4979 write_lock(&map_tree->lock);
4980 while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
4981 struct extent_map *em;
4982
4983 node = rb_first_cached(&map_tree->map);
4984 em = rb_entry(node, struct extent_map, rb_node);
4985 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
4986 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
4987 remove_extent_mapping(map_tree, em);
4988 free_extent_map(em);
4989 if (need_resched()) {
4990 write_unlock(&map_tree->lock);
4991 cond_resched();
4992 write_lock(&map_tree->lock);
4993 }
4994 }
4995 write_unlock(&map_tree->lock);
4996
4997 /*
4998 * Keep looping until we have no more ranges in the io tree.
4999 * We can have ongoing bios started by readahead that have
5000 * their endio callback (extent_io.c:end_bio_extent_readpage)
5001 * still in progress (unlocked the pages in the bio but have not yet
5002 * unlocked the ranges in the io tree). This means some ranges can
5003 * still be locked while eviction has started, because before
5004 * submitting those bios, which are executed by a separate task (work
5005 * queue kthread), inode references (inode->i_count) were not taken
5006 * (they would be dropped in the end io callback of each bio).
5007 * Therefore here we effectively end up waiting for those bios and
5008 * anyone else holding locked ranges without having bumped the inode's
5009 * reference count - if we don't do it, when they access the inode's
5010 * io_tree to unlock a range it may be too late, leading to a
5011 * use-after-free issue.
5012 */
5013 spin_lock(&io_tree->lock);
5014 while (!RB_EMPTY_ROOT(&io_tree->state)) {
5015 struct extent_state *state;
5016 struct extent_state *cached_state = NULL;
5017 u64 start;
5018 u64 end;
5019 unsigned state_flags;
5020
5021 node = rb_first(&io_tree->state);
5022 state = rb_entry(node, struct extent_state, rb_node);
5023 start = state->start;
5024 end = state->end;
5025 state_flags = state->state;
5026 spin_unlock(&io_tree->lock);
5027
5028 lock_extent_bits(io_tree, start, end, &cached_state);
5029
5030 /*
5031 * If it still has the DELALLOC flag, the extent didn't reach
5032 * disk, and its reserved space won't be freed by delayed_ref.
5033 * So we need to free its reserved space here.
5034 * (Refer to comment in btrfs_invalidatepage, case 2)
5035 *
5036 * Note: end is the bytenr of the last byte, so we need + 1 here.
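*
* In numbers: a still-delalloc extent state covering [start, end] makes
* the call below release end - start + 1 bytes of qgroup-reserved data
* space; for a single 4K block (end == start + 4095) that is 4096 bytes.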
5037 */ 5038 if (state_flags & EXTENT_DELALLOC) 5039 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5040 end - start + 1); 5041 5042 clear_extent_bit(io_tree, start, end, 5043 EXTENT_LOCKED | EXTENT_DELALLOC | 5044 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, 5045 &cached_state); 5046 5047 cond_resched(); 5048 spin_lock(&io_tree->lock); 5049 } 5050 spin_unlock(&io_tree->lock); 5051 } 5052 5053 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5054 struct btrfs_block_rsv *rsv) 5055 { 5056 struct btrfs_fs_info *fs_info = root->fs_info; 5057 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5058 struct btrfs_trans_handle *trans; 5059 u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); 5060 int ret; 5061 5062 /* 5063 * Eviction should be taking place at some place safe because of our 5064 * delayed iputs. However the normal flushing code will run delayed 5065 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5066 * 5067 * We reserve the delayed_refs_extra here again because we can't use 5068 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5069 * above. We reserve our extra bit here because we generate a ton of 5070 * delayed refs activity by truncating. 5071 * 5072 * If we cannot make our reservation we'll attempt to steal from the 5073 * global reserve, because we really want to be able to free up space. 5074 */ 5075 ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra, 5076 BTRFS_RESERVE_FLUSH_EVICT); 5077 if (ret) { 5078 /* 5079 * Try to steal from the global reserve if there is space for 5080 * it. 5081 */ 5082 if (btrfs_check_space_for_delayed_refs(fs_info) || 5083 btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) { 5084 btrfs_warn(fs_info, 5085 "could not allocate space for delete; will truncate on mount"); 5086 return ERR_PTR(-ENOSPC); 5087 } 5088 delayed_refs_extra = 0; 5089 } 5090 5091 trans = btrfs_join_transaction(root); 5092 if (IS_ERR(trans)) 5093 return trans; 5094 5095 if (delayed_refs_extra) { 5096 trans->block_rsv = &fs_info->trans_block_rsv; 5097 trans->bytes_reserved = delayed_refs_extra; 5098 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5099 delayed_refs_extra, 1); 5100 } 5101 return trans; 5102 } 5103 5104 void btrfs_evict_inode(struct inode *inode) 5105 { 5106 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5107 struct btrfs_trans_handle *trans; 5108 struct btrfs_root *root = BTRFS_I(inode)->root; 5109 struct btrfs_block_rsv *rsv; 5110 int ret; 5111 5112 trace_btrfs_inode_evict(inode); 5113 5114 if (!root) { 5115 clear_inode(inode); 5116 return; 5117 } 5118 5119 evict_inode_truncate_pages(inode); 5120 5121 if (inode->i_nlink && 5122 ((btrfs_root_refs(&root->root_item) != 0 && 5123 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5124 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5125 goto no_delete; 5126 5127 if (is_bad_inode(inode)) 5128 goto no_delete; 5129 5130 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); 5131 5132 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5133 goto no_delete; 5134 5135 if (inode->i_nlink > 0) { 5136 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5137 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5138 goto no_delete; 5139 } 5140 5141 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5142 if (ret) 5143 goto no_delete; 5144 5145 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5146 if (!rsv) 5147 goto no_delete; 5148 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 
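/*
 * Descriptive note, assuming the usual block_rsv semantics: the rsv is
 * sized for a single metadata unit (btrfs_calc_metadata_size() above), and
 * failfast below makes reservations from it fail immediately instead of
 * flushing, so each truncation step either runs fully reserved or the loop
 * below retries after evict_refill_and_join() tops the rsv up again.
 */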
5149 rsv->failfast = 1; 5150 5151 btrfs_i_size_write(BTRFS_I(inode), 0); 5152 5153 while (1) { 5154 trans = evict_refill_and_join(root, rsv); 5155 if (IS_ERR(trans)) 5156 goto free_rsv; 5157 5158 trans->block_rsv = rsv; 5159 5160 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 5161 trans->block_rsv = &fs_info->trans_block_rsv; 5162 btrfs_end_transaction(trans); 5163 btrfs_btree_balance_dirty(fs_info); 5164 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5165 goto free_rsv; 5166 else if (!ret) 5167 break; 5168 } 5169 5170 /* 5171 * Errors here aren't a big deal, it just means we leave orphan items in 5172 * the tree. They will be cleaned up on the next mount. If the inode 5173 * number gets reused, cleanup deletes the orphan item without doing 5174 * anything, and unlink reuses the existing orphan item. 5175 * 5176 * If it turns out that we are dropping too many of these, we might want 5177 * to add a mechanism for retrying these after a commit. 5178 */ 5179 trans = evict_refill_and_join(root, rsv); 5180 if (!IS_ERR(trans)) { 5181 trans->block_rsv = rsv; 5182 btrfs_orphan_del(trans, BTRFS_I(inode)); 5183 trans->block_rsv = &fs_info->trans_block_rsv; 5184 btrfs_end_transaction(trans); 5185 } 5186 5187 if (!(root == fs_info->tree_root || 5188 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 5189 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode))); 5190 5191 free_rsv: 5192 btrfs_free_block_rsv(fs_info, rsv); 5193 no_delete: 5194 /* 5195 * If we didn't successfully delete, the orphan item will still be in 5196 * the tree and we'll retry on the next mount. Again, we might also want 5197 * to retry these periodically in the future. 5198 */ 5199 btrfs_remove_delayed_node(BTRFS_I(inode)); 5200 clear_inode(inode); 5201 } 5202 5203 /* 5204 * Return the key found in the dir entry in the location pointer, fill @type 5205 * with BTRFS_FT_*, and return 0. 5206 * 5207 * If no dir entries were found, returns -ENOENT. 5208 * If found a corrupted location in dir entry, returns -EUCLEAN. 5209 */ 5210 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5211 struct btrfs_key *location, u8 *type) 5212 { 5213 const char *name = dentry->d_name.name; 5214 int namelen = dentry->d_name.len; 5215 struct btrfs_dir_item *di; 5216 struct btrfs_path *path; 5217 struct btrfs_root *root = BTRFS_I(dir)->root; 5218 int ret = 0; 5219 5220 path = btrfs_alloc_path(); 5221 if (!path) 5222 return -ENOMEM; 5223 5224 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), 5225 name, namelen, 0); 5226 if (IS_ERR_OR_NULL(di)) { 5227 ret = di ? PTR_ERR(di) : -ENOENT; 5228 goto out; 5229 } 5230 5231 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5232 if (location->type != BTRFS_INODE_ITEM_KEY && 5233 location->type != BTRFS_ROOT_ITEM_KEY) { 5234 ret = -EUCLEAN; 5235 btrfs_warn(root->fs_info, 5236 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5237 __func__, name, btrfs_ino(BTRFS_I(dir)), 5238 location->objectid, location->type, location->offset); 5239 } 5240 if (!ret) 5241 *type = btrfs_dir_type(path->nodes[0], di); 5242 out: 5243 btrfs_free_path(path); 5244 return ret; 5245 } 5246 5247 /* 5248 * when we hit a tree root in a directory, the btrfs part of the inode 5249 * needs to be changed to reflect the root directory of the tree root. This 5250 * is kind of like crossing a mount point. 
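*
* Sketch of the fixup below (a restatement of the code): look up the key
*
*	(parent root objectid, BTRFS_ROOT_REF_KEY, subvolume objectid)
*
* in the tree of tree roots, verify that the ref's dirid and name match
* the dentry, then rewrite @location to the subvolume's own root dir:
*
*	location->objectid = btrfs_root_dirid(&new_root->root_item);
*	location->type = BTRFS_INODE_ITEM_KEY;
*	location->offset = 0;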
5251 */ 5252 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5253 struct inode *dir, 5254 struct dentry *dentry, 5255 struct btrfs_key *location, 5256 struct btrfs_root **sub_root) 5257 { 5258 struct btrfs_path *path; 5259 struct btrfs_root *new_root; 5260 struct btrfs_root_ref *ref; 5261 struct extent_buffer *leaf; 5262 struct btrfs_key key; 5263 int ret; 5264 int err = 0; 5265 5266 path = btrfs_alloc_path(); 5267 if (!path) { 5268 err = -ENOMEM; 5269 goto out; 5270 } 5271 5272 err = -ENOENT; 5273 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5274 key.type = BTRFS_ROOT_REF_KEY; 5275 key.offset = location->objectid; 5276 5277 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5278 if (ret) { 5279 if (ret < 0) 5280 err = ret; 5281 goto out; 5282 } 5283 5284 leaf = path->nodes[0]; 5285 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5286 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || 5287 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5288 goto out; 5289 5290 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5291 (unsigned long)(ref + 1), 5292 dentry->d_name.len); 5293 if (ret) 5294 goto out; 5295 5296 btrfs_release_path(path); 5297 5298 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5299 if (IS_ERR(new_root)) { 5300 err = PTR_ERR(new_root); 5301 goto out; 5302 } 5303 5304 *sub_root = new_root; 5305 location->objectid = btrfs_root_dirid(&new_root->root_item); 5306 location->type = BTRFS_INODE_ITEM_KEY; 5307 location->offset = 0; 5308 err = 0; 5309 out: 5310 btrfs_free_path(path); 5311 return err; 5312 } 5313 5314 static void inode_tree_add(struct inode *inode) 5315 { 5316 struct btrfs_root *root = BTRFS_I(inode)->root; 5317 struct btrfs_inode *entry; 5318 struct rb_node **p; 5319 struct rb_node *parent; 5320 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5321 u64 ino = btrfs_ino(BTRFS_I(inode)); 5322 5323 if (inode_unhashed(inode)) 5324 return; 5325 parent = NULL; 5326 spin_lock(&root->inode_lock); 5327 p = &root->inode_tree.rb_node; 5328 while (*p) { 5329 parent = *p; 5330 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5331 5332 if (ino < btrfs_ino(entry)) 5333 p = &parent->rb_left; 5334 else if (ino > btrfs_ino(entry)) 5335 p = &parent->rb_right; 5336 else { 5337 WARN_ON(!(entry->vfs_inode.i_state & 5338 (I_WILL_FREE | I_FREEING))); 5339 rb_replace_node(parent, new, &root->inode_tree); 5340 RB_CLEAR_NODE(parent); 5341 spin_unlock(&root->inode_lock); 5342 return; 5343 } 5344 } 5345 rb_link_node(new, parent, p); 5346 rb_insert_color(new, &root->inode_tree); 5347 spin_unlock(&root->inode_lock); 5348 } 5349 5350 static void inode_tree_del(struct btrfs_inode *inode) 5351 { 5352 struct btrfs_root *root = inode->root; 5353 int empty = 0; 5354 5355 spin_lock(&root->inode_lock); 5356 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5357 rb_erase(&inode->rb_node, &root->inode_tree); 5358 RB_CLEAR_NODE(&inode->rb_node); 5359 empty = RB_EMPTY_ROOT(&root->inode_tree); 5360 } 5361 spin_unlock(&root->inode_lock); 5362 5363 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5364 spin_lock(&root->inode_lock); 5365 empty = RB_EMPTY_ROOT(&root->inode_tree); 5366 spin_unlock(&root->inode_lock); 5367 if (empty) 5368 btrfs_add_dead_root(root); 5369 } 5370 } 5371 5372 5373 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5374 { 5375 struct btrfs_iget_args *args = p; 5376 5377 inode->i_ino = args->ino; 5378 BTRFS_I(inode)->location.objectid = args->ino; 5379 
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5380 BTRFS_I(inode)->location.offset = 0; 5381 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5382 BUG_ON(args->root && !BTRFS_I(inode)->root); 5383 return 0; 5384 } 5385 5386 static int btrfs_find_actor(struct inode *inode, void *opaque) 5387 { 5388 struct btrfs_iget_args *args = opaque; 5389 5390 return args->ino == BTRFS_I(inode)->location.objectid && 5391 args->root == BTRFS_I(inode)->root; 5392 } 5393 5394 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5395 struct btrfs_root *root) 5396 { 5397 struct inode *inode; 5398 struct btrfs_iget_args args; 5399 unsigned long hashval = btrfs_inode_hash(ino, root); 5400 5401 args.ino = ino; 5402 args.root = root; 5403 5404 inode = iget5_locked(s, hashval, btrfs_find_actor, 5405 btrfs_init_locked_inode, 5406 (void *)&args); 5407 return inode; 5408 } 5409 5410 /* 5411 * Get an inode object given its inode number and corresponding root. 5412 * Path can be preallocated to prevent recursing back to iget through 5413 * allocator. NULL is also valid but may require an additional allocation 5414 * later. 5415 */ 5416 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5417 struct btrfs_root *root, struct btrfs_path *path) 5418 { 5419 struct inode *inode; 5420 5421 inode = btrfs_iget_locked(s, ino, root); 5422 if (!inode) 5423 return ERR_PTR(-ENOMEM); 5424 5425 if (inode->i_state & I_NEW) { 5426 int ret; 5427 5428 ret = btrfs_read_locked_inode(inode, path); 5429 if (!ret) { 5430 inode_tree_add(inode); 5431 unlock_new_inode(inode); 5432 } else { 5433 iget_failed(inode); 5434 /* 5435 * ret > 0 can come from btrfs_search_slot called by 5436 * btrfs_read_locked_inode, this means the inode item 5437 * was not found. 5438 */ 5439 if (ret > 0) 5440 ret = -ENOENT; 5441 inode = ERR_PTR(ret); 5442 } 5443 } 5444 5445 return inode; 5446 } 5447 5448 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5449 { 5450 return btrfs_iget_path(s, ino, root, NULL); 5451 } 5452 5453 static struct inode *new_simple_dir(struct super_block *s, 5454 struct btrfs_key *key, 5455 struct btrfs_root *root) 5456 { 5457 struct inode *inode = new_inode(s); 5458 5459 if (!inode) 5460 return ERR_PTR(-ENOMEM); 5461 5462 BTRFS_I(inode)->root = btrfs_grab_root(root); 5463 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5464 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5465 5466 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5467 /* 5468 * We only need lookup, the rest is read-only and there's no inode 5469 * associated with the dentry 5470 */ 5471 inode->i_op = &simple_dir_inode_operations; 5472 inode->i_opflags &= ~IOP_XATTR; 5473 inode->i_fop = &simple_dir_operations; 5474 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5475 inode->i_mtime = current_time(inode); 5476 inode->i_atime = inode->i_mtime; 5477 inode->i_ctime = inode->i_mtime; 5478 BTRFS_I(inode)->i_otime = inode->i_mtime; 5479 5480 return inode; 5481 } 5482 5483 static inline u8 btrfs_inode_type(struct inode *inode) 5484 { 5485 /* 5486 * Compile-time asserts that generic FT_* types still match 5487 * BTRFS_FT_* types 5488 */ 5489 BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN); 5490 BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE); 5491 BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR); 5492 BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV); 5493 BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV); 5494 BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO); 5495 BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK); 5496 
BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK); 5497 5498 return fs_umode_to_ftype(inode->i_mode); 5499 } 5500 5501 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5502 { 5503 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5504 struct inode *inode; 5505 struct btrfs_root *root = BTRFS_I(dir)->root; 5506 struct btrfs_root *sub_root = root; 5507 struct btrfs_key location; 5508 u8 di_type = 0; 5509 int ret = 0; 5510 5511 if (dentry->d_name.len > BTRFS_NAME_LEN) 5512 return ERR_PTR(-ENAMETOOLONG); 5513 5514 ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); 5515 if (ret < 0) 5516 return ERR_PTR(ret); 5517 5518 if (location.type == BTRFS_INODE_ITEM_KEY) { 5519 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5520 if (IS_ERR(inode)) 5521 return inode; 5522 5523 /* Do extra check against inode mode with di_type */ 5524 if (btrfs_inode_type(inode) != di_type) { 5525 btrfs_crit(fs_info, 5526 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5527 inode->i_mode, btrfs_inode_type(inode), 5528 di_type); 5529 iput(inode); 5530 return ERR_PTR(-EUCLEAN); 5531 } 5532 return inode; 5533 } 5534 5535 ret = fixup_tree_root_location(fs_info, dir, dentry, 5536 &location, &sub_root); 5537 if (ret < 0) { 5538 if (ret != -ENOENT) 5539 inode = ERR_PTR(ret); 5540 else 5541 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5542 } else { 5543 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5544 } 5545 if (root != sub_root) 5546 btrfs_put_root(sub_root); 5547 5548 if (!IS_ERR(inode) && root != sub_root) { 5549 down_read(&fs_info->cleanup_work_sem); 5550 if (!sb_rdonly(inode->i_sb)) 5551 ret = btrfs_orphan_cleanup(sub_root); 5552 up_read(&fs_info->cleanup_work_sem); 5553 if (ret) { 5554 iput(inode); 5555 inode = ERR_PTR(ret); 5556 } 5557 } 5558 5559 return inode; 5560 } 5561 5562 static int btrfs_dentry_delete(const struct dentry *dentry) 5563 { 5564 struct btrfs_root *root; 5565 struct inode *inode = d_inode(dentry); 5566 5567 if (!inode && !IS_ROOT(dentry)) 5568 inode = d_inode(dentry->d_parent); 5569 5570 if (inode) { 5571 root = BTRFS_I(inode)->root; 5572 if (btrfs_root_refs(&root->root_item) == 0) 5573 return 1; 5574 5575 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5576 return 1; 5577 } 5578 return 0; 5579 } 5580 5581 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5582 unsigned int flags) 5583 { 5584 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5585 5586 if (inode == ERR_PTR(-ENOENT)) 5587 inode = NULL; 5588 return d_splice_alias(inode, dentry); 5589 } 5590 5591 /* 5592 * All this infrastructure exists because dir_emit can fault, and we are holding 5593 * the tree lock when doing readdir. For now just allocate a buffer and copy 5594 * our information into that, and then dir_emit from the buffer. This is 5595 * similar to what NFS does, only we don't keep the buffer around in pagecache 5596 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5597 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5598 * tree lock. 
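 *
 * As an illustrative sketch (not part of the original comment): entries are
 * packed into private->filldir_buf as a struct dir_entry header immediately
 * followed by the name bytes, with no alignment padding, e.g.
 *
 *   [ino=257 offset=2 type=REG name_len=3]"foo"[ino=258 offset=3 type=DIR name_len=3]"bar"
 *
 * btrfs_filldir() below walks the buffer the same way, advancing by
 * sizeof(struct dir_entry) + name_len per entry, which is why all field
 * access goes through get_unaligned()/put_unaligned(). The ino/offset values
 * above are made up.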
5599 */ 5600 static int btrfs_opendir(struct inode *inode, struct file *file) 5601 { 5602 struct btrfs_file_private *private; 5603 5604 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5605 if (!private) 5606 return -ENOMEM; 5607 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5608 if (!private->filldir_buf) { 5609 kfree(private); 5610 return -ENOMEM; 5611 } 5612 file->private_data = private; 5613 return 0; 5614 } 5615 5616 struct dir_entry { 5617 u64 ino; 5618 u64 offset; 5619 unsigned type; 5620 int name_len; 5621 }; 5622 5623 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5624 { 5625 while (entries--) { 5626 struct dir_entry *entry = addr; 5627 char *name = (char *)(entry + 1); 5628 5629 ctx->pos = get_unaligned(&entry->offset); 5630 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5631 get_unaligned(&entry->ino), 5632 get_unaligned(&entry->type))) 5633 return 1; 5634 addr += sizeof(struct dir_entry) + 5635 get_unaligned(&entry->name_len); 5636 ctx->pos++; 5637 } 5638 return 0; 5639 } 5640 5641 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5642 { 5643 struct inode *inode = file_inode(file); 5644 struct btrfs_root *root = BTRFS_I(inode)->root; 5645 struct btrfs_file_private *private = file->private_data; 5646 struct btrfs_dir_item *di; 5647 struct btrfs_key key; 5648 struct btrfs_key found_key; 5649 struct btrfs_path *path; 5650 void *addr; 5651 struct list_head ins_list; 5652 struct list_head del_list; 5653 int ret; 5654 struct extent_buffer *leaf; 5655 int slot; 5656 char *name_ptr; 5657 int name_len; 5658 int entries = 0; 5659 int total_len = 0; 5660 bool put = false; 5661 struct btrfs_key location; 5662 5663 if (!dir_emit_dots(file, ctx)) 5664 return 0; 5665 5666 path = btrfs_alloc_path(); 5667 if (!path) 5668 return -ENOMEM; 5669 5670 addr = private->filldir_buf; 5671 path->reada = READA_FORWARD; 5672 5673 INIT_LIST_HEAD(&ins_list); 5674 INIT_LIST_HEAD(&del_list); 5675 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 5676 5677 again: 5678 key.type = BTRFS_DIR_INDEX_KEY; 5679 key.offset = ctx->pos; 5680 key.objectid = btrfs_ino(BTRFS_I(inode)); 5681 5682 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5683 if (ret < 0) 5684 goto err; 5685 5686 while (1) { 5687 struct dir_entry *entry; 5688 5689 leaf = path->nodes[0]; 5690 slot = path->slots[0]; 5691 if (slot >= btrfs_header_nritems(leaf)) { 5692 ret = btrfs_next_leaf(root, path); 5693 if (ret < 0) 5694 goto err; 5695 else if (ret > 0) 5696 break; 5697 continue; 5698 } 5699 5700 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5701 5702 if (found_key.objectid != key.objectid) 5703 break; 5704 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5705 break; 5706 if (found_key.offset < ctx->pos) 5707 goto next; 5708 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5709 goto next; 5710 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 5711 name_len = btrfs_dir_name_len(leaf, di); 5712 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5713 PAGE_SIZE) { 5714 btrfs_release_path(path); 5715 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5716 if (ret) 5717 goto nopos; 5718 addr = private->filldir_buf; 5719 entries = 0; 5720 total_len = 0; 5721 goto again; 5722 } 5723 5724 entry = addr; 5725 put_unaligned(name_len, &entry->name_len); 5726 name_ptr = (char *)(entry + 1); 5727 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), 5728 name_len); 5729 
put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)), 5730 &entry->type); 5731 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5732 put_unaligned(location.objectid, &entry->ino); 5733 put_unaligned(found_key.offset, &entry->offset); 5734 entries++; 5735 addr += sizeof(struct dir_entry) + name_len; 5736 total_len += sizeof(struct dir_entry) + name_len; 5737 next: 5738 path->slots[0]++; 5739 } 5740 btrfs_release_path(path); 5741 5742 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5743 if (ret) 5744 goto nopos; 5745 5746 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5747 if (ret) 5748 goto nopos; 5749 5750 /* 5751 * Stop new entries from being returned after we return the last 5752 * entry. 5753 * 5754 * New directory entries are assigned a strictly increasing 5755 * offset. This means that new entries created during readdir 5756 * are *guaranteed* to be seen in the future by that readdir. 5757 * This has broken buggy programs which operate on names as 5758 * they're returned by readdir. Until we re-use freed offsets 5759 * we have this hack to stop new entries from being returned 5760 * under the assumption that they'll never reach this huge 5761 * offset. 5762 * 5763 * This is being careful not to overflow 32bit loff_t unless the 5764 * last entry requires it because doing so has broken 32bit apps 5765 * in the past. 5766 */ 5767 if (ctx->pos >= INT_MAX) 5768 ctx->pos = LLONG_MAX; 5769 else 5770 ctx->pos = INT_MAX; 5771 nopos: 5772 ret = 0; 5773 err: 5774 if (put) 5775 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 5776 btrfs_free_path(path); 5777 return ret; 5778 } 5779 5780 /* 5781 * This is somewhat expensive, as it updates the tree every time the 5782 * inode changes. But the inode is most likely to be found in cache. 5783 * FIXME: needs more benchmarking; there is no reason other than performance 5784 * to keep or drop this code. 5785 */ 5786 static int btrfs_dirty_inode(struct inode *inode) 5787 { 5788 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5789 struct btrfs_root *root = BTRFS_I(inode)->root; 5790 struct btrfs_trans_handle *trans; 5791 int ret; 5792 5793 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5794 return 0; 5795 5796 trans = btrfs_join_transaction(root); 5797 if (IS_ERR(trans)) 5798 return PTR_ERR(trans); 5799 5800 ret = btrfs_update_inode(trans, root, inode); 5801 if (ret == -ENOSPC) { 5802 /* whoops, let's try again with the full transaction */ 5803 btrfs_end_transaction(trans); 5804 trans = btrfs_start_transaction(root, 1); 5805 if (IS_ERR(trans)) 5806 return PTR_ERR(trans); 5807 5808 ret = btrfs_update_inode(trans, root, inode); 5809 } 5810 btrfs_end_transaction(trans); 5811 if (BTRFS_I(inode)->delayed_node) 5812 btrfs_balance_delayed_items(fs_info); 5813 5814 return ret; 5815 } 5816 5817 /* 5818 * This is a copy of file_update_time. We need it so that we can return an 5819 * error on ENOSPC when updating the inode for file writes and mmap writes. 5820 */ 5821 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 5822 int flags) 5823 { 5824 struct btrfs_root *root = BTRFS_I(inode)->root; 5825 bool dirty = flags & ~S_VERSION; 5826 5827 if (btrfs_root_readonly(root)) 5828 return -EROFS; 5829 5830 if (flags & S_VERSION) 5831 dirty |= inode_maybe_inc_iversion(inode, dirty); 5832 if (flags & S_CTIME) 5833 inode->i_ctime = *now; 5834 if (flags & S_MTIME) 5835 inode->i_mtime = *now; 5836 if (flags & S_ATIME) 5837 inode->i_atime = *now; 5838 return dirty ?
btrfs_dirty_inode(inode) : 0; 5839 } 5840 5841 /* 5842 * find the highest existing sequence number in a directory 5843 * and then set the in-memory index_cnt variable to reflect 5844 * free sequence numbers 5845 */ 5846 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5847 { 5848 struct btrfs_root *root = inode->root; 5849 struct btrfs_key key, found_key; 5850 struct btrfs_path *path; 5851 struct extent_buffer *leaf; 5852 int ret; 5853 5854 key.objectid = btrfs_ino(inode); 5855 key.type = BTRFS_DIR_INDEX_KEY; 5856 key.offset = (u64)-1; 5857 5858 path = btrfs_alloc_path(); 5859 if (!path) 5860 return -ENOMEM; 5861 5862 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5863 if (ret < 0) 5864 goto out; 5865 /* FIXME: we should be able to handle this */ 5866 if (ret == 0) 5867 goto out; 5868 ret = 0; 5869 5870 /* 5871 * MAGIC NUMBER EXPLANATION: 5872 * since we search a directory based on f_pos we have to start at 2 5873 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody 5874 * else has to start at 2 5875 */ 5876 if (path->slots[0] == 0) { 5877 inode->index_cnt = 2; 5878 goto out; 5879 } 5880 5881 path->slots[0]--; 5882 5883 leaf = path->nodes[0]; 5884 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5885 5886 if (found_key.objectid != btrfs_ino(inode) || 5887 found_key.type != BTRFS_DIR_INDEX_KEY) { 5888 inode->index_cnt = 2; 5889 goto out; 5890 } 5891 5892 inode->index_cnt = found_key.offset + 1; 5893 out: 5894 btrfs_free_path(path); 5895 return ret; 5896 } 5897 5898 /* 5899 * helper to find a free sequence number in a given directory. This current 5900 * code is very simple, later versions will do smarter things in the btree 5901 */ 5902 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 5903 { 5904 int ret = 0; 5905 5906 if (dir->index_cnt == (u64)-1) { 5907 ret = btrfs_inode_delayed_dir_index_count(dir); 5908 if (ret) { 5909 ret = btrfs_set_inode_index_count(dir); 5910 if (ret) 5911 return ret; 5912 } 5913 } 5914 5915 *index = dir->index_cnt; 5916 dir->index_cnt++; 5917 5918 return ret; 5919 } 5920 5921 static int btrfs_insert_inode_locked(struct inode *inode) 5922 { 5923 struct btrfs_iget_args args; 5924 5925 args.ino = BTRFS_I(inode)->location.objectid; 5926 args.root = BTRFS_I(inode)->root; 5927 5928 return insert_inode_locked4(inode, 5929 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 5930 btrfs_find_actor, &args); 5931 } 5932 5933 /* 5934 * Inherit flags from the parent inode. 5935 * 5936 * Currently only the compression flags and the cow flags are inherited. 
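 *
 * (Note, grounded in the code below: when NODATACOW is inherited by a
 * regular file, NODATASUM is set as well, since in-place writes cannot
 * keep data checksums consistent.)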
5937 */ 5938 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) 5939 { 5940 unsigned int flags; 5941 5942 if (!dir) 5943 return; 5944 5945 flags = BTRFS_I(dir)->flags; 5946 5947 if (flags & BTRFS_INODE_NOCOMPRESS) { 5948 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; 5949 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 5950 } else if (flags & BTRFS_INODE_COMPRESS) { 5951 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; 5952 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; 5953 } 5954 5955 if (flags & BTRFS_INODE_NODATACOW) { 5956 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; 5957 if (S_ISREG(inode->i_mode)) 5958 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 5959 } 5960 5961 btrfs_sync_inode_flags_to_i_flags(inode); 5962 } 5963 5964 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 5965 struct btrfs_root *root, 5966 struct inode *dir, 5967 const char *name, int name_len, 5968 u64 ref_objectid, u64 objectid, 5969 umode_t mode, u64 *index) 5970 { 5971 struct btrfs_fs_info *fs_info = root->fs_info; 5972 struct inode *inode; 5973 struct btrfs_inode_item *inode_item; 5974 struct btrfs_key *location; 5975 struct btrfs_path *path; 5976 struct btrfs_inode_ref *ref; 5977 struct btrfs_key key[2]; 5978 u32 sizes[2]; 5979 int nitems = name ? 2 : 1; 5980 unsigned long ptr; 5981 unsigned int nofs_flag; 5982 int ret; 5983 5984 path = btrfs_alloc_path(); 5985 if (!path) 5986 return ERR_PTR(-ENOMEM); 5987 5988 nofs_flag = memalloc_nofs_save(); 5989 inode = new_inode(fs_info->sb); 5990 memalloc_nofs_restore(nofs_flag); 5991 if (!inode) { 5992 btrfs_free_path(path); 5993 return ERR_PTR(-ENOMEM); 5994 } 5995 5996 /* 5997 * O_TMPFILE, set link count to 0, so that after this point, 5998 * we fill in an inode item with the correct link count. 5999 */ 6000 if (!name) 6001 set_nlink(inode, 0); 6002 6003 /* 6004 * we have to initialize this early, so we can reclaim the inode 6005 * number if we fail afterwards in this function. 6006 */ 6007 inode->i_ino = objectid; 6008 6009 if (dir && name) { 6010 trace_btrfs_inode_request(dir); 6011 6012 ret = btrfs_set_inode_index(BTRFS_I(dir), index); 6013 if (ret) { 6014 btrfs_free_path(path); 6015 iput(inode); 6016 return ERR_PTR(ret); 6017 } 6018 } else if (dir) { 6019 *index = 0; 6020 } 6021 /* 6022 * index_cnt is ignored for everything but a dir, 6023 * btrfs_set_inode_index_count has an explanation for the magic 6024 * number 6025 */ 6026 BTRFS_I(inode)->index_cnt = 2; 6027 BTRFS_I(inode)->dir_index = *index; 6028 BTRFS_I(inode)->root = btrfs_grab_root(root); 6029 BTRFS_I(inode)->generation = trans->transid; 6030 inode->i_generation = BTRFS_I(inode)->generation; 6031 6032 /* 6033 * We could have gotten an inode number from somebody who was fsynced 6034 * and then removed in this same transaction, so let's just set full 6035 * sync since it will be a full sync anyway and this will blow away the 6036 * old info in the log. 6037 */ 6038 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 6039 6040 key[0].objectid = objectid; 6041 key[0].type = BTRFS_INODE_ITEM_KEY; 6042 key[0].offset = 0; 6043 6044 sizes[0] = sizeof(struct btrfs_inode_item); 6045 6046 if (name) { 6047 /* 6048 * Start new inodes with an inode_ref. This is slightly more 6049 * efficient for small numbers of hard links since they will 6050 * be packed into one item. Extended refs will kick in if we 6051 * add more hard links than can fit in the ref item. 
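 *
 * Illustration (hypothetical names and index values): hard links "a" and
 * "b" created in the same directory share a single INODE_REF item whose
 * payload is two back-to-back (btrfs_inode_ref header, name) pairs:
 *
 *   [index=2 name_len=1]"a"[index=3 name_len=1]"b"
 *
 * Links from a different parent directory live in a separate item, since
 * the key offset is the parent's objectid.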
6052 */ 6053 key[1].objectid = objectid; 6054 key[1].type = BTRFS_INODE_REF_KEY; 6055 key[1].offset = ref_objectid; 6056 6057 sizes[1] = name_len + sizeof(*ref); 6058 } 6059 6060 location = &BTRFS_I(inode)->location; 6061 location->objectid = objectid; 6062 location->offset = 0; 6063 location->type = BTRFS_INODE_ITEM_KEY; 6064 6065 ret = btrfs_insert_inode_locked(inode); 6066 if (ret < 0) { 6067 iput(inode); 6068 goto fail; 6069 } 6070 6071 path->leave_spinning = 1; 6072 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); 6073 if (ret != 0) 6074 goto fail_unlock; 6075 6076 inode_init_owner(inode, dir, mode); 6077 inode_set_bytes(inode, 0); 6078 6079 inode->i_mtime = current_time(inode); 6080 inode->i_atime = inode->i_mtime; 6081 inode->i_ctime = inode->i_mtime; 6082 BTRFS_I(inode)->i_otime = inode->i_mtime; 6083 6084 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6085 struct btrfs_inode_item); 6086 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6087 sizeof(*inode_item)); 6088 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6089 6090 if (name) { 6091 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6092 struct btrfs_inode_ref); 6093 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6094 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 6095 ptr = (unsigned long)(ref + 1); 6096 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6097 } 6098 6099 btrfs_mark_buffer_dirty(path->nodes[0]); 6100 btrfs_free_path(path); 6101 6102 btrfs_inherit_iflags(inode, dir); 6103 6104 if (S_ISREG(mode)) { 6105 if (btrfs_test_opt(fs_info, NODATASUM)) 6106 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6107 if (btrfs_test_opt(fs_info, NODATACOW)) 6108 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6109 BTRFS_INODE_NODATASUM; 6110 } 6111 6112 inode_tree_add(inode); 6113 6114 trace_btrfs_inode_new(inode); 6115 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6116 6117 btrfs_update_root_times(trans, root); 6118 6119 ret = btrfs_inode_inherit_props(trans, inode, dir); 6120 if (ret) 6121 btrfs_err(fs_info, 6122 "error inheriting props for ino %llu (root %llu): %d", 6123 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); 6124 6125 return inode; 6126 6127 fail_unlock: 6128 discard_new_inode(inode); 6129 fail: 6130 if (dir && name) 6131 BTRFS_I(dir)->index_cnt--; 6132 btrfs_free_path(path); 6133 return ERR_PTR(ret); 6134 } 6135 6136 /* 6137 * utility function to add 'inode' into 'parent_inode' with 6138 * a given name and a given sequence number. 6139 * if 'add_backref' is true, also insert a backref from the 6140 * inode to the parent directory.
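 *
 * (A sketch of what the body below inserts: a root ref for subvolume
 * roots or, if 'add_backref' is set, an inode backref; then a dir item
 * and a dir index item keyed by the sequence number; finally the parent
 * directory's size and timestamps are updated.)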
6141 */ 6142 int btrfs_add_link(struct btrfs_trans_handle *trans, 6143 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6144 const char *name, int name_len, int add_backref, u64 index) 6145 { 6146 int ret = 0; 6147 struct btrfs_key key; 6148 struct btrfs_root *root = parent_inode->root; 6149 u64 ino = btrfs_ino(inode); 6150 u64 parent_ino = btrfs_ino(parent_inode); 6151 6152 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6153 memcpy(&key, &inode->root->root_key, sizeof(key)); 6154 } else { 6155 key.objectid = ino; 6156 key.type = BTRFS_INODE_ITEM_KEY; 6157 key.offset = 0; 6158 } 6159 6160 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6161 ret = btrfs_add_root_ref(trans, key.objectid, 6162 root->root_key.objectid, parent_ino, 6163 index, name, name_len); 6164 } else if (add_backref) { 6165 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6166 parent_ino, index); 6167 } 6168 6169 /* Nothing to clean up yet */ 6170 if (ret) 6171 return ret; 6172 6173 ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key, 6174 btrfs_inode_type(&inode->vfs_inode), index); 6175 if (ret == -EEXIST || ret == -EOVERFLOW) 6176 goto fail_dir_item; 6177 else if (ret) { 6178 btrfs_abort_transaction(trans, ret); 6179 return ret; 6180 } 6181 6182 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6183 name_len * 2); 6184 inode_inc_iversion(&parent_inode->vfs_inode); 6185 /* 6186 * If we are replaying a log tree, we do not want to update the mtime 6187 * and ctime of the parent directory with the current time, since the 6188 * log replay procedure is responsible for setting them to their correct 6189 * values (the ones it had when the fsync was done). 6190 */ 6191 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6192 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6193 6194 parent_inode->vfs_inode.i_mtime = now; 6195 parent_inode->vfs_inode.i_ctime = now; 6196 } 6197 ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); 6198 if (ret) 6199 btrfs_abort_transaction(trans, ret); 6200 return ret; 6201 6202 fail_dir_item: 6203 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6204 u64 local_index; 6205 int err; 6206 err = btrfs_del_root_ref(trans, key.objectid, 6207 root->root_key.objectid, parent_ino, 6208 &local_index, name, name_len); 6209 if (err) 6210 btrfs_abort_transaction(trans, err); 6211 } else if (add_backref) { 6212 u64 local_index; 6213 int err; 6214 6215 err = btrfs_del_inode_ref(trans, root, name, name_len, 6216 ino, parent_ino, &local_index); 6217 if (err) 6218 btrfs_abort_transaction(trans, err); 6219 } 6220 6221 /* Return the original error code */ 6222 return ret; 6223 } 6224 6225 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 6226 struct btrfs_inode *dir, struct dentry *dentry, 6227 struct btrfs_inode *inode, int backref, u64 index) 6228 { 6229 int err = btrfs_add_link(trans, dir, inode, 6230 dentry->d_name.name, dentry->d_name.len, 6231 backref, index); 6232 if (err > 0) 6233 err = -EEXIST; 6234 return err; 6235 } 6236 6237 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 6238 umode_t mode, dev_t rdev) 6239 { 6240 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6241 struct btrfs_trans_handle *trans; 6242 struct btrfs_root *root = BTRFS_I(dir)->root; 6243 struct inode *inode = NULL; 6244 int err; 6245 u64 objectid; 6246 u64 index = 0; 6247 6248 /* 6249 * 2 for inode item and ref 6250 * 2 for dir items 6251 * 1 for xattr if selinux is on 6252 */ 6253 trans = 
btrfs_start_transaction(root, 5); 6254 if (IS_ERR(trans)) 6255 return PTR_ERR(trans); 6256 6257 err = btrfs_find_free_ino(root, &objectid); 6258 if (err) 6259 goto out_unlock; 6260 6261 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6262 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6263 mode, &index); 6264 if (IS_ERR(inode)) { 6265 err = PTR_ERR(inode); 6266 inode = NULL; 6267 goto out_unlock; 6268 } 6269 6270 /* 6271 * If the active LSM wants to access the inode during 6272 * d_instantiate it needs these. Smack checks to see 6273 * if the filesystem supports xattrs by looking at the 6274 * ops vector. 6275 */ 6276 inode->i_op = &btrfs_special_inode_operations; 6277 init_special_inode(inode, inode->i_mode, rdev); 6278 6279 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6280 if (err) 6281 goto out_unlock; 6282 6283 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6284 0, index); 6285 if (err) 6286 goto out_unlock; 6287 6288 btrfs_update_inode(trans, root, inode); 6289 d_instantiate_new(dentry, inode); 6290 6291 out_unlock: 6292 btrfs_end_transaction(trans); 6293 btrfs_btree_balance_dirty(fs_info); 6294 if (err && inode) { 6295 inode_dec_link_count(inode); 6296 discard_new_inode(inode); 6297 } 6298 return err; 6299 } 6300 6301 static int btrfs_create(struct inode *dir, struct dentry *dentry, 6302 umode_t mode, bool excl) 6303 { 6304 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6305 struct btrfs_trans_handle *trans; 6306 struct btrfs_root *root = BTRFS_I(dir)->root; 6307 struct inode *inode = NULL; 6308 int err; 6309 u64 objectid; 6310 u64 index = 0; 6311 6312 /* 6313 * 2 for inode item and ref 6314 * 2 for dir items 6315 * 1 for xattr if selinux is on 6316 */ 6317 trans = btrfs_start_transaction(root, 5); 6318 if (IS_ERR(trans)) 6319 return PTR_ERR(trans); 6320 6321 err = btrfs_find_free_ino(root, &objectid); 6322 if (err) 6323 goto out_unlock; 6324 6325 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6326 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6327 mode, &index); 6328 if (IS_ERR(inode)) { 6329 err = PTR_ERR(inode); 6330 inode = NULL; 6331 goto out_unlock; 6332 } 6333 /* 6334 * If the active LSM wants to access the inode during 6335 * d_instantiate it needs these. Smack checks to see 6336 * if the filesystem supports xattrs by looking at the 6337 * ops vector. 
6338 */ 6339 inode->i_fop = &btrfs_file_operations; 6340 inode->i_op = &btrfs_file_inode_operations; 6341 inode->i_mapping->a_ops = &btrfs_aops; 6342 6343 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6344 if (err) 6345 goto out_unlock; 6346 6347 err = btrfs_update_inode(trans, root, inode); 6348 if (err) 6349 goto out_unlock; 6350 6351 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6352 0, index); 6353 if (err) 6354 goto out_unlock; 6355 6356 d_instantiate_new(dentry, inode); 6357 6358 out_unlock: 6359 btrfs_end_transaction(trans); 6360 if (err && inode) { 6361 inode_dec_link_count(inode); 6362 discard_new_inode(inode); 6363 } 6364 btrfs_btree_balance_dirty(fs_info); 6365 return err; 6366 } 6367 6368 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6369 struct dentry *dentry) 6370 { 6371 struct btrfs_trans_handle *trans = NULL; 6372 struct btrfs_root *root = BTRFS_I(dir)->root; 6373 struct inode *inode = d_inode(old_dentry); 6374 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6375 u64 index; 6376 int err; 6377 int drop_inode = 0; 6378 6379 /* do not allow sys_link's with other subvols of the same device */ 6380 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6381 return -EXDEV; 6382 6383 if (inode->i_nlink >= BTRFS_LINK_MAX) 6384 return -EMLINK; 6385 6386 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6387 if (err) 6388 goto fail; 6389 6390 /* 6391 * 2 items for inode and inode ref 6392 * 2 items for dir items 6393 * 1 item for parent inode 6394 * 1 item for orphan item deletion if O_TMPFILE 6395 */ 6396 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6397 if (IS_ERR(trans)) { 6398 err = PTR_ERR(trans); 6399 trans = NULL; 6400 goto fail; 6401 } 6402 6403 /* There are several dir indexes for this inode, clear the cache. */ 6404 BTRFS_I(inode)->dir_index = 0ULL; 6405 inc_nlink(inode); 6406 inode_inc_iversion(inode); 6407 inode->i_ctime = current_time(inode); 6408 ihold(inode); 6409 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6410 6411 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6412 1, index); 6413 6414 if (err) { 6415 drop_inode = 1; 6416 } else { 6417 struct dentry *parent = dentry->d_parent; 6418 6419 err = btrfs_update_inode(trans, root, inode); 6420 if (err) 6421 goto fail; 6422 if (inode->i_nlink == 1) { 6423 /* 6424 * If new hard link count is 1, it's a file created 6425 * with open(2) O_TMPFILE flag. 
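 *
 * (Background note, not from the original comment: O_TMPFILE inodes
 * are created with a link count of 0 and an orphan item, so they get
 * cleaned up if we crash before they are linked. Now that the inode
 * gained a name, that orphan item must go away, which is what the
 * btrfs_orphan_del() call below does.)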
6426 */ 6427 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6428 if (err) 6429 goto fail; 6430 } 6431 d_instantiate(dentry, inode); 6432 btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); 6433 } 6434 6435 fail: 6436 if (trans) 6437 btrfs_end_transaction(trans); 6438 if (drop_inode) { 6439 inode_dec_link_count(inode); 6440 iput(inode); 6441 } 6442 btrfs_btree_balance_dirty(fs_info); 6443 return err; 6444 } 6445 6446 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 6447 { 6448 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6449 struct inode *inode = NULL; 6450 struct btrfs_trans_handle *trans; 6451 struct btrfs_root *root = BTRFS_I(dir)->root; 6452 int err = 0; 6453 u64 objectid = 0; 6454 u64 index = 0; 6455 6456 /* 6457 * 2 items for inode and ref 6458 * 2 items for dir items 6459 * 1 for xattr if selinux is on 6460 */ 6461 trans = btrfs_start_transaction(root, 5); 6462 if (IS_ERR(trans)) 6463 return PTR_ERR(trans); 6464 6465 err = btrfs_find_free_ino(root, &objectid); 6466 if (err) 6467 goto out_fail; 6468 6469 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6470 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6471 S_IFDIR | mode, &index); 6472 if (IS_ERR(inode)) { 6473 err = PTR_ERR(inode); 6474 inode = NULL; 6475 goto out_fail; 6476 } 6477 6478 /* these must be set before we unlock the inode */ 6479 inode->i_op = &btrfs_dir_inode_operations; 6480 inode->i_fop = &btrfs_dir_file_operations; 6481 6482 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6483 if (err) 6484 goto out_fail; 6485 6486 btrfs_i_size_write(BTRFS_I(inode), 0); 6487 err = btrfs_update_inode(trans, root, inode); 6488 if (err) 6489 goto out_fail; 6490 6491 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6492 dentry->d_name.name, 6493 dentry->d_name.len, 0, index); 6494 if (err) 6495 goto out_fail; 6496 6497 d_instantiate_new(dentry, inode); 6498 6499 out_fail: 6500 btrfs_end_transaction(trans); 6501 if (err && inode) { 6502 inode_dec_link_count(inode); 6503 discard_new_inode(inode); 6504 } 6505 btrfs_btree_balance_dirty(fs_info); 6506 return err; 6507 } 6508 6509 static noinline int uncompress_inline(struct btrfs_path *path, 6510 struct page *page, 6511 size_t pg_offset, u64 extent_offset, 6512 struct btrfs_file_extent_item *item) 6513 { 6514 int ret; 6515 struct extent_buffer *leaf = path->nodes[0]; 6516 char *tmp; 6517 size_t max_size; 6518 unsigned long inline_size; 6519 unsigned long ptr; 6520 int compress_type; 6521 6522 WARN_ON(pg_offset != 0); 6523 compress_type = btrfs_file_extent_compression(leaf, item); 6524 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6525 inline_size = btrfs_file_extent_inline_item_len(leaf, 6526 btrfs_item_nr(path->slots[0])); 6527 tmp = kmalloc(inline_size, GFP_NOFS); 6528 if (!tmp) 6529 return -ENOMEM; 6530 ptr = btrfs_file_extent_inline_start(item); 6531 6532 read_extent_buffer(leaf, tmp, ptr, inline_size); 6533 6534 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6535 ret = btrfs_decompress(compress_type, tmp, page, 6536 extent_offset, inline_size, max_size); 6537 6538 /* 6539 * decompression code contains a memset to fill in any space between the end 6540 * of the uncompressed data and the end of max_size in case the decompressed 6541 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6542 * the end of an inline extent and the beginning of the next block, so we 6543 * cover that region here. 
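 *
 * A concrete example with made-up numbers: on a 4K page, an inline
 * extent with ram_bytes == 500 gives max_size == 500, so the
 * decompression code only guarantees zeroes up to byte 500; the memset
 * below then clears bytes 500..4095 so no stale page contents leak.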
6544 */ 6545 6546 if (max_size + pg_offset < PAGE_SIZE) { 6547 char *map = kmap(page); 6548 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); 6549 kunmap(page); 6550 } 6551 kfree(tmp); 6552 return ret; 6553 } 6554 6555 /** 6556 * btrfs_get_extent - Lookup the first extent overlapping a range in a file. 6557 * @inode: file to search in 6558 * @page: page to read extent data into if the extent is inline 6559 * @pg_offset: offset into @page to copy to 6560 * @start: file offset 6561 * @len: length of range starting at @start 6562 * 6563 * This returns the first &struct extent_map which overlaps with the given 6564 * range, reading it from the B-tree and caching it if necessary. Note that 6565 * there may be more extents which overlap the given range after the returned 6566 * extent_map. 6567 * 6568 * If @page is not NULL and the extent is inline, this also reads the extent 6569 * data directly into the page and marks the extent up to date in the io_tree. 6570 * 6571 * Return: ERR_PTR on error, non-NULL extent_map on success. 6572 */ 6573 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6574 struct page *page, size_t pg_offset, 6575 u64 start, u64 len) 6576 { 6577 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6578 int ret = 0; 6579 u64 extent_start = 0; 6580 u64 extent_end = 0; 6581 u64 objectid = btrfs_ino(inode); 6582 int extent_type = -1; 6583 struct btrfs_path *path = NULL; 6584 struct btrfs_root *root = inode->root; 6585 struct btrfs_file_extent_item *item; 6586 struct extent_buffer *leaf; 6587 struct btrfs_key found_key; 6588 struct extent_map *em = NULL; 6589 struct extent_map_tree *em_tree = &inode->extent_tree; 6590 struct extent_io_tree *io_tree = &inode->io_tree; 6591 6592 read_lock(&em_tree->lock); 6593 em = lookup_extent_mapping(em_tree, start, len); 6594 read_unlock(&em_tree->lock); 6595 6596 if (em) { 6597 if (em->start > start || em->start + em->len <= start) 6598 free_extent_map(em); 6599 else if (em->block_start == EXTENT_MAP_INLINE && page) 6600 free_extent_map(em); 6601 else 6602 goto out; 6603 } 6604 em = alloc_extent_map(); 6605 if (!em) { 6606 ret = -ENOMEM; 6607 goto out; 6608 } 6609 em->start = EXTENT_MAP_HOLE; 6610 em->orig_start = EXTENT_MAP_HOLE; 6611 em->len = (u64)-1; 6612 em->block_len = (u64)-1; 6613 6614 path = btrfs_alloc_path(); 6615 if (!path) { 6616 ret = -ENOMEM; 6617 goto out; 6618 } 6619 6620 /* Chances are we'll be called again, so go ahead and do readahead */ 6621 path->reada = READA_FORWARD; 6622 6623 /* 6624 * Unless we're going to uncompress the inline extent, no sleep would 6625 * happen. 6626 */ 6627 path->leave_spinning = 1; 6628 6629 path->recurse = btrfs_is_free_space_inode(inode); 6630 6631 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6632 if (ret < 0) { 6633 goto out; 6634 } else if (ret > 0) { 6635 if (path->slots[0] == 0) 6636 goto not_found; 6637 path->slots[0]--; 6638 ret = 0; 6639 } 6640 6641 leaf = path->nodes[0]; 6642 item = btrfs_item_ptr(leaf, path->slots[0], 6643 struct btrfs_file_extent_item); 6644 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6645 if (found_key.objectid != objectid || 6646 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6647 /* 6648 * If we backup past the first extent we want to move forward 6649 * and see if there is an extent in front of us, otherwise we'll 6650 * say there is a hole for our whole search range which can 6651 * cause problems. 
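 *
 * (Illustration: say we looked up file offset 0 but the file's first
 * extent item starts at offset 4K. The slots[0]-- above can land on an
 * unrelated item, so we set extent_end = start and step forward in the
 * loop below, ending up with a hole em covering just [0, 4K) rather
 * than the entire search range.)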
6652 */ 6653 extent_end = start; 6654 goto next; 6655 } 6656 6657 extent_type = btrfs_file_extent_type(leaf, item); 6658 extent_start = found_key.offset; 6659 extent_end = btrfs_file_extent_end(path); 6660 if (extent_type == BTRFS_FILE_EXTENT_REG || 6661 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6662 /* Only regular file could have regular/prealloc extent */ 6663 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6664 ret = -EUCLEAN; 6665 btrfs_crit(fs_info, 6666 "regular/prealloc extent found for non-regular inode %llu", 6667 btrfs_ino(inode)); 6668 goto out; 6669 } 6670 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6671 extent_start); 6672 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6673 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6674 path->slots[0], 6675 extent_start); 6676 } 6677 next: 6678 if (start >= extent_end) { 6679 path->slots[0]++; 6680 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6681 ret = btrfs_next_leaf(root, path); 6682 if (ret < 0) 6683 goto out; 6684 else if (ret > 0) 6685 goto not_found; 6686 6687 leaf = path->nodes[0]; 6688 } 6689 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6690 if (found_key.objectid != objectid || 6691 found_key.type != BTRFS_EXTENT_DATA_KEY) 6692 goto not_found; 6693 if (start + len <= found_key.offset) 6694 goto not_found; 6695 if (start > found_key.offset) 6696 goto next; 6697 6698 /* New extent overlaps with existing one */ 6699 em->start = start; 6700 em->orig_start = start; 6701 em->len = found_key.offset - start; 6702 em->block_start = EXTENT_MAP_HOLE; 6703 goto insert; 6704 } 6705 6706 btrfs_extent_item_to_extent_map(inode, path, item, !page, em); 6707 6708 if (extent_type == BTRFS_FILE_EXTENT_REG || 6709 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6710 goto insert; 6711 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6712 unsigned long ptr; 6713 char *map; 6714 size_t size; 6715 size_t extent_offset; 6716 size_t copy_size; 6717 6718 if (!page) 6719 goto out; 6720 6721 size = btrfs_file_extent_ram_bytes(leaf, item); 6722 extent_offset = page_offset(page) + pg_offset - extent_start; 6723 copy_size = min_t(u64, PAGE_SIZE - pg_offset, 6724 size - extent_offset); 6725 em->start = extent_start + extent_offset; 6726 em->len = ALIGN(copy_size, fs_info->sectorsize); 6727 em->orig_block_len = em->len; 6728 em->orig_start = em->start; 6729 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 6730 6731 btrfs_set_path_blocking(path); 6732 if (!PageUptodate(page)) { 6733 if (btrfs_file_extent_compression(leaf, item) != 6734 BTRFS_COMPRESS_NONE) { 6735 ret = uncompress_inline(path, page, pg_offset, 6736 extent_offset, item); 6737 if (ret) 6738 goto out; 6739 } else { 6740 map = kmap(page); 6741 read_extent_buffer(leaf, map + pg_offset, ptr, 6742 copy_size); 6743 if (pg_offset + copy_size < PAGE_SIZE) { 6744 memset(map + pg_offset + copy_size, 0, 6745 PAGE_SIZE - pg_offset - 6746 copy_size); 6747 } 6748 kunmap(page); 6749 } 6750 flush_dcache_page(page); 6751 } 6752 set_extent_uptodate(io_tree, em->start, 6753 extent_map_end(em) - 1, NULL, GFP_NOFS); 6754 goto insert; 6755 } 6756 not_found: 6757 em->start = start; 6758 em->orig_start = start; 6759 em->len = len; 6760 em->block_start = EXTENT_MAP_HOLE; 6761 insert: 6762 ret = 0; 6763 btrfs_release_path(path); 6764 if (em->start > start || extent_map_end(em) <= start) { 6765 btrfs_err(fs_info, 6766 "bad extent! 
em: [%llu %llu] passed [%llu %llu]", 6767 em->start, em->len, start, len); 6768 ret = -EIO; 6769 goto out; 6770 } 6771 6772 write_lock(&em_tree->lock); 6773 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 6774 write_unlock(&em_tree->lock); 6775 out: 6776 btrfs_free_path(path); 6777 6778 trace_btrfs_get_extent(root, inode, em); 6779 6780 if (ret) { 6781 free_extent_map(em); 6782 return ERR_PTR(ret); 6783 } 6784 return em; 6785 } 6786 6787 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, 6788 u64 start, u64 len) 6789 { 6790 struct extent_map *em; 6791 struct extent_map *hole_em = NULL; 6792 u64 delalloc_start = start; 6793 u64 end; 6794 u64 delalloc_len; 6795 u64 delalloc_end; 6796 int err = 0; 6797 6798 em = btrfs_get_extent(inode, NULL, 0, start, len); 6799 if (IS_ERR(em)) 6800 return em; 6801 /* 6802 * If our em maps to: 6803 * - a hole or 6804 * - a pre-alloc extent, 6805 * there might actually be delalloc bytes behind it. 6806 */ 6807 if (em->block_start != EXTENT_MAP_HOLE && 6808 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 6809 return em; 6810 else 6811 hole_em = em; 6812 6813 /* check to see if we've wrapped (len == -1 or similar) */ 6814 end = start + len; 6815 if (end < start) 6816 end = (u64)-1; 6817 else 6818 end -= 1; 6819 6820 em = NULL; 6821 6822 /* ok, we didn't find anything, let's look for delalloc */ 6823 delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start, 6824 end, len, EXTENT_DELALLOC, 1); 6825 delalloc_end = delalloc_start + delalloc_len; 6826 if (delalloc_end < delalloc_start) 6827 delalloc_end = (u64)-1; 6828 6829 /* 6830 * We didn't find anything useful, return the original results from 6831 * get_extent() 6832 */ 6833 if (delalloc_start > end || delalloc_end <= start) { 6834 em = hole_em; 6835 hole_em = NULL; 6836 goto out; 6837 } 6838 6839 /* 6840 * Adjust the delalloc_start to make sure it doesn't go backwards from 6841 * the start the caller passed in 6842 */ 6843 delalloc_start = max(start, delalloc_start); 6844 delalloc_len = delalloc_end - delalloc_start; 6845 6846 if (delalloc_len > 0) { 6847 u64 hole_start; 6848 u64 hole_len; 6849 const u64 hole_end = extent_map_end(hole_em); 6850 6851 em = alloc_extent_map(); 6852 if (!em) { 6853 err = -ENOMEM; 6854 goto out; 6855 } 6856 6857 ASSERT(hole_em); 6858 /* 6859 * When btrfs_get_extent can't find anything it returns one 6860 * huge hole 6861 * 6862 * Make sure what it found really fits our range, and adjust to 6863 * make sure it is based on the start from the caller 6864 */ 6865 if (hole_end <= start || hole_em->start > end) { 6866 free_extent_map(hole_em); 6867 hole_em = NULL; 6868 } else { 6869 hole_start = max(hole_em->start, start); 6870 hole_len = hole_end - hole_start; 6871 } 6872 6873 if (hole_em && delalloc_start > hole_start) { 6874 /* 6875 * Our hole starts before our delalloc, so we have to 6876 * return just the parts of the hole that go until the 6877 * delalloc starts 6878 */ 6879 em->len = min(hole_len, delalloc_start - hole_start); 6880 em->start = hole_start; 6881 em->orig_start = hole_start; 6882 /* 6883 * Don't adjust block start at all, it is fixed at 6884 * EXTENT_MAP_HOLE 6885 */ 6886 em->block_start = hole_em->block_start; 6887 em->block_len = hole_len; 6888 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 6889 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 6890 } else { 6891 /* 6892 * The hole is outside the passed range or it starts 6893 * after the delalloc range 6894 */ 6895 em->start = delalloc_start; 6896 em->len = delalloc_len; 6897 em->orig_start =
delalloc_start; 6898 em->block_start = EXTENT_MAP_DELALLOC; 6899 em->block_len = delalloc_len; 6900 } 6901 } else { 6902 return hole_em; 6903 } 6904 out: 6905 6906 free_extent_map(hole_em); 6907 if (err) { 6908 free_extent_map(em); 6909 return ERR_PTR(err); 6910 } 6911 return em; 6912 } 6913 6914 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 6915 const u64 start, 6916 const u64 len, 6917 const u64 orig_start, 6918 const u64 block_start, 6919 const u64 block_len, 6920 const u64 orig_block_len, 6921 const u64 ram_bytes, 6922 const int type) 6923 { 6924 struct extent_map *em = NULL; 6925 int ret; 6926 6927 if (type != BTRFS_ORDERED_NOCOW) { 6928 em = create_io_em(inode, start, len, orig_start, block_start, 6929 block_len, orig_block_len, ram_bytes, 6930 BTRFS_COMPRESS_NONE, /* compress_type */ 6931 type); 6932 if (IS_ERR(em)) 6933 goto out; 6934 } 6935 ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len, 6936 block_len, type); 6937 if (ret) { 6938 if (em) { 6939 free_extent_map(em); 6940 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 6941 } 6942 em = ERR_PTR(ret); 6943 } 6944 out: 6945 6946 return em; 6947 } 6948 6949 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 6950 u64 start, u64 len) 6951 { 6952 struct btrfs_root *root = inode->root; 6953 struct btrfs_fs_info *fs_info = root->fs_info; 6954 struct extent_map *em; 6955 struct btrfs_key ins; 6956 u64 alloc_hint; 6957 int ret; 6958 6959 alloc_hint = get_extent_allocation_hint(inode, start, len); 6960 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 6961 0, alloc_hint, &ins, 1, 1); 6962 if (ret) 6963 return ERR_PTR(ret); 6964 6965 em = btrfs_create_dio_extent(inode, start, ins.offset, start, 6966 ins.objectid, ins.offset, ins.offset, 6967 ins.offset, BTRFS_ORDERED_REGULAR); 6968 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 6969 if (IS_ERR(em)) 6970 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 6971 1); 6972 6973 return em; 6974 } 6975 6976 /* 6977 * Check if we can do a nocow write into the range [@offset, @offset + @len) 6978 * 6979 * @offset: File offset 6980 * @len: The length to write, will be updated to the nocow writeable 6981 * range 6982 * @orig_start: (optional) Return the original file offset of the file extent 6983 * @orig_block_len: (optional) Return the original on-disk length of the file extent 6984 * @ram_bytes: (optional) Return the ram_bytes of the file extent 6985 * @strict: if true, omit optimizations that might force us into unnecessary 6986 * cow. e.g., don't trust generation number. 6987 * 6988 * This function will flush ordered extents in the range to ensure proper 6989 * nocow checks for the (nowait == false) case. 6990 * 6991 * Return: 6992 * >0 and update @len if we can do a nocow write 6993 * 0 if we can't do a nocow write 6994 * <0 if an error happened 6995 * 6996 * NOTE: This only checks the file extents; the caller is responsible for 6997 * waiting for any ordered extents.
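 *
 * A worked example (hypothetical values): for a NODATACOW inode with a
 * regular extent covering file range [0, 1M), calling this with
 * @offset == 4K and *@len == 2M trims *@len to 1M - 4K and returns 1 if
 * all the checks below pass, and the caller may then overwrite those
 * bytes in place.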
6998 */ 6999 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7000 u64 *orig_start, u64 *orig_block_len, 7001 u64 *ram_bytes, bool strict) 7002 { 7003 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7004 struct btrfs_path *path; 7005 int ret; 7006 struct extent_buffer *leaf; 7007 struct btrfs_root *root = BTRFS_I(inode)->root; 7008 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7009 struct btrfs_file_extent_item *fi; 7010 struct btrfs_key key; 7011 u64 disk_bytenr; 7012 u64 backref_offset; 7013 u64 extent_end; 7014 u64 num_bytes; 7015 int slot; 7016 int found_type; 7017 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); 7018 7019 path = btrfs_alloc_path(); 7020 if (!path) 7021 return -ENOMEM; 7022 7023 ret = btrfs_lookup_file_extent(NULL, root, path, 7024 btrfs_ino(BTRFS_I(inode)), offset, 0); 7025 if (ret < 0) 7026 goto out; 7027 7028 slot = path->slots[0]; 7029 if (ret == 1) { 7030 if (slot == 0) { 7031 /* can't find the item, must cow */ 7032 ret = 0; 7033 goto out; 7034 } 7035 slot--; 7036 } 7037 ret = 0; 7038 leaf = path->nodes[0]; 7039 btrfs_item_key_to_cpu(leaf, &key, slot); 7040 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7041 key.type != BTRFS_EXTENT_DATA_KEY) { 7042 /* not our file or wrong item type, must cow */ 7043 goto out; 7044 } 7045 7046 if (key.offset > offset) { 7047 /* Wrong offset, must cow */ 7048 goto out; 7049 } 7050 7051 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 7052 found_type = btrfs_file_extent_type(leaf, fi); 7053 if (found_type != BTRFS_FILE_EXTENT_REG && 7054 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 7055 /* not a regular extent, must cow */ 7056 goto out; 7057 } 7058 7059 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) 7060 goto out; 7061 7062 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 7063 if (extent_end <= offset) 7064 goto out; 7065 7066 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7067 if (disk_bytenr == 0) 7068 goto out; 7069 7070 if (btrfs_file_extent_compression(leaf, fi) || 7071 btrfs_file_extent_encryption(leaf, fi) || 7072 btrfs_file_extent_other_encoding(leaf, fi)) 7073 goto out; 7074 7075 /* 7076 * Do the same check as in btrfs_cross_ref_exist but without the 7077 * unnecessary search. 
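 *
 * (That check, spelled out: an extent whose generation is not newer than
 * the root's last snapshot may still be referenced by that snapshot, so
 * in the non-strict case we just assume it is shared and cow. With
 * @strict set we skip this shortcut and rely on the full backref check
 * done by btrfs_cross_ref_exist() further down.)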
7078 */ 7079 if (!strict && 7080 (btrfs_file_extent_generation(leaf, fi) <= 7081 btrfs_root_last_snapshot(&root->root_item))) 7082 goto out; 7083 7084 backref_offset = btrfs_file_extent_offset(leaf, fi); 7085 7086 if (orig_start) { 7087 *orig_start = key.offset - backref_offset; 7088 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 7089 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7090 } 7091 7092 if (btrfs_extent_readonly(fs_info, disk_bytenr)) 7093 goto out; 7094 7095 num_bytes = min(offset + *len, extent_end) - offset; 7096 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7097 u64 range_end; 7098 7099 range_end = round_up(offset + num_bytes, 7100 root->fs_info->sectorsize) - 1; 7101 ret = test_range_bit(io_tree, offset, range_end, 7102 EXTENT_DELALLOC, 0, NULL); 7103 if (ret) { 7104 ret = -EAGAIN; 7105 goto out; 7106 } 7107 } 7108 7109 btrfs_release_path(path); 7110 7111 /* 7112 * look for other files referencing this extent, if we 7113 * find any we must cow 7114 */ 7115 7116 ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), 7117 key.offset - backref_offset, disk_bytenr, 7118 strict); 7119 if (ret) { 7120 ret = 0; 7121 goto out; 7122 } 7123 7124 /* 7125 * adjust disk_bytenr and num_bytes to cover just the bytes 7126 * in this extent we are about to write. If there 7127 * are any csums in that range we have to cow in order 7128 * to keep the csums correct 7129 */ 7130 disk_bytenr += backref_offset; 7131 disk_bytenr += offset - key.offset; 7132 if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes)) 7133 goto out; 7134 /* 7135 * all of the above have passed, it is safe to overwrite this extent 7136 * without cow 7137 */ 7138 *len = num_bytes; 7139 ret = 1; 7140 out: 7141 btrfs_free_path(path); 7142 return ret; 7143 } 7144 7145 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7146 struct extent_state **cached_state, bool writing) 7147 { 7148 struct btrfs_ordered_extent *ordered; 7149 int ret = 0; 7150 7151 while (1) { 7152 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7153 cached_state); 7154 /* 7155 * We're concerned with the entire range that we're going to be 7156 * doing DIO to, so we need to make sure there's no ordered 7157 * extents in this range. 7158 */ 7159 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7160 lockend - lockstart + 1); 7161 7162 /* 7163 * We need to make sure there are no buffered pages in this 7164 * range either, we could have raced between the invalidate in 7165 * generic_file_direct_write and locking the extent. The 7166 * invalidate needs to happen so that reads after a write do not 7167 * get stale data. 7168 */ 7169 if (!ordered && 7170 (!writing || !filemap_range_has_page(inode->i_mapping, 7171 lockstart, lockend))) 7172 break; 7173 7174 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7175 cached_state); 7176 7177 if (ordered) { 7178 /* 7179 * If we are doing a DIO read and the ordered extent we 7180 * found is for a buffered write, we can not wait for it 7181 * to complete and retry, because if we do so we can 7182 * deadlock with concurrent buffered writes on page 7183 * locks. 
This happens only if our DIO read covers more 7184 * than one extent map, if at this point we have already 7185 * created an ordered extent for a previous extent map 7186 * and locked its range in the inode's io tree, and a 7187 * concurrent write against that previous extent map's 7188 * range and this range has started (we unlock the ranges 7189 * in the io tree only when the bios complete and 7190 * buffered writes always lock pages before attempting 7191 * to lock range in the io tree). 7192 */ 7193 if (writing || 7194 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7195 btrfs_start_ordered_extent(ordered, 1); 7196 else 7197 ret = -ENOTBLK; 7198 btrfs_put_ordered_extent(ordered); 7199 } else { 7200 /* 7201 * We could trigger writeback for this range (and wait 7202 * for it to complete) and then invalidate the pages for 7203 * this range (through invalidate_inode_pages2_range()), 7204 * but that can lead us to a deadlock with a concurrent 7205 * call to readahead (a buffered read or a defrag call 7206 * triggered a readahead) on a page lock due to an 7207 * ordered dio extent we created before but did not yet 7208 * have a corresponding bio submitted (whence it can not 7209 * complete), which makes readahead wait for that 7210 * ordered extent to complete while holding a lock on 7211 * that page. 7212 */ 7213 ret = -ENOTBLK; 7214 } 7215 7216 if (ret) 7217 break; 7218 7219 cond_resched(); 7220 } 7221 7222 return ret; 7223 } 7224 7225 /* The callers of this must take lock_extent() */ 7226 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 7227 u64 len, u64 orig_start, u64 block_start, 7228 u64 block_len, u64 orig_block_len, 7229 u64 ram_bytes, int compress_type, 7230 int type) 7231 { 7232 struct extent_map_tree *em_tree; 7233 struct extent_map *em; 7234 int ret; 7235 7236 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7237 type == BTRFS_ORDERED_COMPRESSED || 7238 type == BTRFS_ORDERED_NOCOW || 7239 type == BTRFS_ORDERED_REGULAR); 7240 7241 em_tree = &inode->extent_tree; 7242 em = alloc_extent_map(); 7243 if (!em) 7244 return ERR_PTR(-ENOMEM); 7245 7246 em->start = start; 7247 em->orig_start = orig_start; 7248 em->len = len; 7249 em->block_len = block_len; 7250 em->block_start = block_start; 7251 em->orig_block_len = orig_block_len; 7252 em->ram_bytes = ram_bytes; 7253 em->generation = -1; 7254 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7255 if (type == BTRFS_ORDERED_PREALLOC) { 7256 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7257 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7258 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7259 em->compress_type = compress_type; 7260 } 7261 7262 do { 7263 btrfs_drop_extent_cache(inode, em->start, 7264 em->start + em->len - 1, 0); 7265 write_lock(&em_tree->lock); 7266 ret = add_extent_mapping(em_tree, em, 1); 7267 write_unlock(&em_tree->lock); 7268 /* 7269 * The caller has taken lock_extent(), so nothing should be 7270 * racing with us to add an overlapping em. 7271 */ 7272 } while (ret == -EEXIST); 7273 7274 if (ret) { 7275 free_extent_map(em); 7276 return ERR_PTR(ret); 7277 } 7278 7279 /* The em has 2 refs now, the caller needs to do free_extent_map once.
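 * (One reference is held by the extent map tree, taken by
 * add_extent_mapping() above, the other is the allocation reference
 * from alloc_extent_map() that we hand to the caller.)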
*/ 7280 return em; 7281 } 7282 7283 7284 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7285 struct inode *inode, 7286 struct btrfs_dio_data *dio_data, 7287 u64 start, u64 len) 7288 { 7289 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7290 struct extent_map *em = *map; 7291 int ret = 0; 7292 7293 /* 7294 * We don't allocate a new extent in the following cases 7295 * 7296 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7297 * existing extent. 7298 * 2) The extent is marked as PREALLOC. We're good to go here and can 7299 * just use the extent. 7300 * 7301 */ 7302 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7303 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7304 em->block_start != EXTENT_MAP_HOLE)) { 7305 int type; 7306 u64 block_start, orig_start, orig_block_len, ram_bytes; 7307 7308 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7309 type = BTRFS_ORDERED_PREALLOC; 7310 else 7311 type = BTRFS_ORDERED_NOCOW; 7312 len = min(len, em->len - (start - em->start)); 7313 block_start = em->block_start + (start - em->start); 7314 7315 if (can_nocow_extent(inode, start, &len, &orig_start, 7316 &orig_block_len, &ram_bytes, false) == 1 && 7317 btrfs_inc_nocow_writers(fs_info, block_start)) { 7318 struct extent_map *em2; 7319 7320 em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, 7321 orig_start, block_start, 7322 len, orig_block_len, 7323 ram_bytes, type); 7324 btrfs_dec_nocow_writers(fs_info, block_start); 7325 if (type == BTRFS_ORDERED_PREALLOC) { 7326 free_extent_map(em); 7327 *map = em = em2; 7328 } 7329 7330 if (em2 && IS_ERR(em2)) { 7331 ret = PTR_ERR(em2); 7332 goto out; 7333 } 7334 /* 7335 * For inode marked NODATACOW or extent marked PREALLOC, 7336 * use the existing or preallocated extent, so does not 7337 * need to adjust btrfs_space_info's bytes_may_use. 7338 */ 7339 btrfs_free_reserved_data_space_noquota(fs_info, len); 7340 goto skip_cow; 7341 } 7342 } 7343 7344 /* this will cow the extent */ 7345 free_extent_map(em); 7346 *map = em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); 7347 if (IS_ERR(em)) { 7348 ret = PTR_ERR(em); 7349 goto out; 7350 } 7351 7352 len = min(len, em->len - (start - em->start)); 7353 7354 skip_cow: 7355 /* 7356 * Need to update the i_size under the extent lock so buffered 7357 * readers will get the updated i_size when we unlock. 7358 */ 7359 if (start + len > i_size_read(inode)) 7360 i_size_write(inode, start + len); 7361 7362 dio_data->reserve -= len; 7363 out: 7364 return ret; 7365 } 7366 7367 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, 7368 loff_t length, unsigned int flags, struct iomap *iomap, 7369 struct iomap *srcmap) 7370 { 7371 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7372 struct extent_map *em; 7373 struct extent_state *cached_state = NULL; 7374 struct btrfs_dio_data *dio_data = NULL; 7375 u64 lockstart, lockend; 7376 const bool write = !!(flags & IOMAP_WRITE); 7377 int ret = 0; 7378 u64 len = length; 7379 bool unlock_extents = false; 7380 bool sync = (current->journal_info == BTRFS_DIO_SYNC_STUB); 7381 7382 /* 7383 * We used current->journal_info here to see if we were sync, but 7384 * there's a lot of tests in the enospc machinery to not do flushing if 7385 * we have a journal_info set, so we need to clear this out and re-set 7386 * it in iomap_end. 
7387 */ 7388 ASSERT(current->journal_info == NULL || 7389 current->journal_info == BTRFS_DIO_SYNC_STUB); 7390 current->journal_info = NULL; 7391 7392 if (!write) 7393 len = min_t(u64, len, fs_info->sectorsize); 7394 7395 lockstart = start; 7396 lockend = start + len - 1; 7397 7398 /* 7399 * The generic stuff only does filemap_write_and_wait_range, which 7400 * isn't enough if we've written compressed pages to this area, so we 7401 * need to flush the dirty pages again to make absolutely sure that any 7402 * outstanding dirty pages are on disk. 7403 */ 7404 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7405 &BTRFS_I(inode)->runtime_flags)) { 7406 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7407 start + length - 1); 7408 if (ret) 7409 return ret; 7410 } 7411 7412 dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS); 7413 if (!dio_data) 7414 return -ENOMEM; 7415 7416 dio_data->sync = sync; 7417 dio_data->length = length; 7418 if (write) { 7419 dio_data->reserve = round_up(length, fs_info->sectorsize); 7420 ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), 7421 &dio_data->data_reserved, 7422 start, dio_data->reserve); 7423 if (ret) { 7424 extent_changeset_free(dio_data->data_reserved); 7425 kfree(dio_data); 7426 return ret; 7427 } 7428 } 7429 iomap->private = dio_data; 7430 7431 7432 /* 7433 * If this errors out it's because we couldn't invalidate pagecache for 7434 * this range and we need to fall back to buffered. 7435 */ 7436 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) { 7437 ret = -ENOTBLK; 7438 goto err; 7439 } 7440 7441 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7442 if (IS_ERR(em)) { 7443 ret = PTR_ERR(em); 7444 goto unlock_err; 7445 } 7446 7447 /* 7448 * OK, for INLINE and COMPRESSED extents we need to fall back on 7449 * buffered io. INLINE is special, and we could probably kludge it in 7450 * here, but it's still buffered so for safety let's just fall back to 7451 * the generic buffered path. 7452 * 7453 * For COMPRESSED we _have_ to read the entire extent in so we can 7454 * decompress it, so there will be buffering required no matter what we 7455 * do, so go ahead and fall back to buffered. 7456 * 7457 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7458 * to buffered IO. Don't blame me, this is the price we pay for using 7459 * the generic code. 7460 */ 7461 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7462 em->block_start == EXTENT_MAP_INLINE) { 7463 free_extent_map(em); 7464 ret = -ENOTBLK; 7465 goto unlock_err; 7466 } 7467 7468 len = min(len, em->len - (start - em->start)); 7469 if (write) { 7470 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, 7471 start, len); 7472 if (ret < 0) 7473 goto unlock_err; 7474 unlock_extents = true; 7475 /* Recalc len in case the new em is smaller than requested */ 7476 len = min(len, em->len - (start - em->start)); 7477 } else { 7478 /* 7479 * We need to unlock only the end area that we aren't using. 7480 * The rest is going to be unlocked by the endio routine. 7481 */ 7482 lockstart = start + len; 7483 if (lockstart < lockend) 7484 unlock_extents = true; 7485 } 7486 7487 if (unlock_extents) 7488 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 7489 lockstart, lockend, &cached_state); 7490 else 7491 free_extent_state(cached_state); 7492 7493 /* 7494 * Translate extent map information to iomap. 7495 * We trim the extents (and move the addr) even though iomap code does 7496 * that, since we have locked only the parts we are performing I/O in.
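 *
 * Example with made-up numbers: an em for file range [0, 1M) whose
 * block_start is 16M, with I/O starting at start == 4K, yields
 * iomap->addr == 16M + 4K, iomap->offset == 4K and iomap->length == len,
 * i.e. exactly the locked sub-range rather than the whole extent.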
7497 */ 7498 if ((em->block_start == EXTENT_MAP_HOLE) || 7499 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7500 iomap->addr = IOMAP_NULL_ADDR; 7501 iomap->type = IOMAP_HOLE; 7502 } else { 7503 iomap->addr = em->block_start + (start - em->start); 7504 iomap->type = IOMAP_MAPPED; 7505 } 7506 iomap->offset = start; 7507 iomap->bdev = fs_info->fs_devices->latest_bdev; 7508 iomap->length = len; 7509 7510 free_extent_map(em); 7511 7512 return 0; 7513 7514 unlock_err: 7515 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7516 &cached_state); 7517 err: 7518 if (dio_data) { 7519 btrfs_delalloc_release_space(BTRFS_I(inode), 7520 dio_data->data_reserved, start, 7521 dio_data->reserve, true); 7522 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve); 7523 extent_changeset_free(dio_data->data_reserved); 7524 kfree(dio_data); 7525 } 7526 return ret; 7527 } 7528 7529 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7530 ssize_t written, unsigned int flags, struct iomap *iomap) 7531 { 7532 int ret = 0; 7533 struct btrfs_dio_data *dio_data = iomap->private; 7534 size_t submitted = dio_data->submitted; 7535 const bool write = !!(flags & IOMAP_WRITE); 7536 7537 if (!write && (iomap->type == IOMAP_HOLE)) { 7538 /* If reading from a hole, unlock and return */ 7539 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); 7540 goto out; 7541 } 7542 7543 if (submitted < length) { 7544 pos += submitted; 7545 length -= submitted; 7546 if (write) 7547 __endio_write_update_ordered(BTRFS_I(inode), pos, 7548 length, false); 7549 else 7550 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7551 pos + length - 1); 7552 ret = -ENOTBLK; 7553 } 7554 7555 if (write) { 7556 if (dio_data->reserve) 7557 btrfs_delalloc_release_space(BTRFS_I(inode), 7558 dio_data->data_reserved, pos, 7559 dio_data->reserve, true); 7560 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length); 7561 extent_changeset_free(dio_data->data_reserved); 7562 } 7563 out: 7564 /* 7565 * We're all done, we can re-set the current->journal_info now safely 7566 * for our endio. 7567 */ 7568 if (dio_data->sync) { 7569 ASSERT(current->journal_info == NULL); 7570 current->journal_info = BTRFS_DIO_SYNC_STUB; 7571 } 7572 kfree(dio_data); 7573 iomap->private = NULL; 7574 7575 return ret; 7576 } 7577 7578 static void btrfs_dio_private_put(struct btrfs_dio_private *dip) 7579 { 7580 /* 7581 * This implies a barrier so that stores to dio_bio->bi_status before 7582 * this and loads of dio_bio->bi_status after this are fully ordered. 
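 * (refcount_dec_and_test() has release semantics on the decrement and acquire semantics when it observes zero, so whichever task drops the last reference sees every bi_status store made by earlier completions.)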
*/ 7584 if (!refcount_dec_and_test(&dip->refs)) 7585 return; 7586 7587 if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { 7588 __endio_write_update_ordered(BTRFS_I(dip->inode), 7589 dip->logical_offset, 7590 dip->bytes, 7591 !dip->dio_bio->bi_status); 7592 } else { 7593 unlock_extent(&BTRFS_I(dip->inode)->io_tree, 7594 dip->logical_offset, 7595 dip->logical_offset + dip->bytes - 1); 7596 } 7597 7598 bio_endio(dip->dio_bio); 7599 kfree(dip); 7600 } 7601 7602 static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7603 int mirror_num, 7604 unsigned long bio_flags) 7605 { 7606 struct btrfs_dio_private *dip = bio->bi_private; 7607 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7608 blk_status_t ret; 7609 7610 BUG_ON(bio_op(bio) == REQ_OP_WRITE); 7611 7612 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 7613 if (ret) 7614 return ret; 7615 7616 refcount_inc(&dip->refs); 7617 ret = btrfs_map_bio(fs_info, bio, mirror_num); 7618 if (ret) 7619 refcount_dec(&dip->refs); 7620 return ret; 7621 } 7622 7623 static blk_status_t btrfs_check_read_dio_bio(struct inode *inode, 7624 struct btrfs_io_bio *io_bio, 7625 const bool uptodate) 7626 { 7627 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 7628 const u32 sectorsize = fs_info->sectorsize; 7629 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 7630 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7631 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7632 struct bio_vec bvec; 7633 struct bvec_iter iter; 7634 u64 start = io_bio->logical; 7635 int icsum = 0; 7636 blk_status_t err = BLK_STS_OK; 7637 7638 __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) { 7639 unsigned int i, nr_sectors, pgoff; 7640 7641 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 7642 pgoff = bvec.bv_offset; 7643 for (i = 0; i < nr_sectors; i++) { 7644 ASSERT(pgoff < PAGE_SIZE); 7645 if (uptodate && 7646 (!csum || !check_data_csum(inode, io_bio, icsum, 7647 bvec.bv_page, pgoff, 7648 start, sectorsize))) { 7649 clean_io_failure(fs_info, failure_tree, io_tree, 7650 start, bvec.bv_page, 7651 btrfs_ino(BTRFS_I(inode)), 7652 pgoff); 7653 } else { 7654 blk_status_t status; 7655 7656 status = btrfs_submit_read_repair(inode, 7657 &io_bio->bio, 7658 start - io_bio->logical, 7659 bvec.bv_page, pgoff, 7660 start, 7661 start + sectorsize - 1, 7662 io_bio->mirror_num, 7663 submit_dio_repair_bio); 7664 if (status) 7665 err = status; 7666 } 7667 start += sectorsize; 7668 icsum++; 7669 pgoff += sectorsize; 7670 } 7671 } 7672 return err; 7673 } 7674 7675 static void __endio_write_update_ordered(struct btrfs_inode *inode, 7676 const u64 offset, const u64 bytes, 7677 const bool uptodate) 7678 { 7679 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7680 struct btrfs_ordered_extent *ordered = NULL; 7681 struct btrfs_workqueue *wq; 7682 u64 ordered_offset = offset; 7683 u64 ordered_bytes = bytes; 7684 u64 last_offset; 7685 7686 if (btrfs_is_free_space_inode(inode)) 7687 wq = fs_info->endio_freespace_worker; 7688 else 7689 wq = fs_info->endio_write_workers; 7690 7691 while (ordered_offset < offset + bytes) { 7692 last_offset = ordered_offset; 7693 if (btrfs_dec_test_first_ordered_pending(inode, &ordered, 7694 &ordered_offset, 7695 ordered_bytes, 7696 uptodate)) { 7697 btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, 7698 NULL); 7699 btrfs_queue_work(wq, &ordered->work); 7700 } 7701 /* 7702 * If btrfs_dec_test_first_ordered_pending() does not find any ordered 7703 * extent in
the range, we can exit. 7704 */ 7705 if (ordered_offset == last_offset) 7706 return; 7707 /* 7708 * Our bio might span multiple ordered extents. In this case 7709 * we keep going until we have accounted the whole dio. 7710 */ 7711 if (ordered_offset < offset + bytes) { 7712 ordered_bytes = offset + bytes - ordered_offset; 7713 ordered = NULL; 7714 } 7715 } 7716 } 7717 7718 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, 7719 struct bio *bio, u64 offset) 7720 { 7721 struct inode *inode = private_data; 7722 7723 return btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1); 7724 } 7725 7726 static void btrfs_end_dio_bio(struct bio *bio) 7727 { 7728 struct btrfs_dio_private *dip = bio->bi_private; 7729 blk_status_t err = bio->bi_status; 7730 7731 if (err) 7732 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 7733 "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", 7734 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), 7735 bio->bi_opf, 7736 (unsigned long long)bio->bi_iter.bi_sector, 7737 bio->bi_iter.bi_size, err); 7738 7739 if (bio_op(bio) == REQ_OP_READ) { 7740 err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio), 7741 !err); 7742 } 7743 7744 if (err) 7745 dip->dio_bio->bi_status = err; 7746 7747 bio_put(bio); 7748 btrfs_dio_private_put(dip); 7749 } 7750 7751 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, 7752 struct inode *inode, u64 file_offset, int async_submit) 7753 { 7754 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7755 struct btrfs_dio_private *dip = bio->bi_private; 7756 bool write = bio_op(bio) == REQ_OP_WRITE; 7757 blk_status_t ret; 7758 7759 /* Check btrfs_submit_bio_hook() for rules about async submit. */ 7760 if (async_submit) 7761 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 7762 7763 if (!write) { 7764 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 7765 if (ret) 7766 goto err; 7767 } 7768 7769 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 7770 goto map; 7771 7772 if (write && async_submit) { 7773 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0, 7774 file_offset, inode, 7775 btrfs_submit_bio_start_direct_io); 7776 goto err; 7777 } else if (write) { 7778 /* 7779 * If we aren't doing async submit, calculate the csum of the 7780 * bio now. 7781 */ 7782 ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, 1); 7783 if (ret) 7784 goto err; 7785 } else { 7786 u64 csum_offset; 7787 7788 csum_offset = file_offset - dip->logical_offset; 7789 csum_offset >>= inode->i_sb->s_blocksize_bits; 7790 csum_offset *= btrfs_super_csum_size(fs_info->super_copy); 7791 btrfs_io_bio(bio)->csum = dip->csums + csum_offset; 7792 } 7793 map: 7794 ret = btrfs_map_bio(fs_info, bio, 0); 7795 err: 7796 return ret; 7797 } 7798 7799 /* 7800 * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked 7801 * or ordered extents whether or not we submit any bios. 
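 * In other words, once the dip exists, errors are handled by setting dio_bio->bi_status and dropping the final reference via btrfs_dio_private_put(), which unlocks the extent range for reads or finishes the ordered extents for writes.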
*/ 7803 static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, 7804 struct inode *inode, 7805 loff_t file_offset) 7806 { 7807 const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 7808 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7809 size_t dip_size; 7810 struct btrfs_dio_private *dip; 7811 7812 dip_size = sizeof(*dip); 7813 if (!write && csum) { 7814 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7815 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 7816 size_t nblocks; 7817 7818 nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; 7819 dip_size += csum_size * nblocks; 7820 } 7821 7822 dip = kzalloc(dip_size, GFP_NOFS); 7823 if (!dip) 7824 return NULL; 7825 7826 dip->inode = inode; 7827 dip->logical_offset = file_offset; 7828 dip->bytes = dio_bio->bi_iter.bi_size; 7829 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 7830 dip->dio_bio = dio_bio; 7831 refcount_set(&dip->refs, 1); 7832 return dip; 7833 } 7834 7835 static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap, 7836 struct bio *dio_bio, loff_t file_offset) 7837 { 7838 const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 7839 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7840 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7841 const bool raid56 = (btrfs_data_alloc_profile(fs_info) & 7842 BTRFS_BLOCK_GROUP_RAID56_MASK); 7843 struct btrfs_dio_private *dip; 7844 struct bio *bio; 7845 u64 start_sector; 7846 int async_submit = 0; 7847 u64 submit_len; 7848 int clone_offset = 0; 7849 int clone_len; 7850 int ret; 7851 blk_status_t status; 7852 struct btrfs_io_geometry geom; 7853 struct btrfs_dio_data *dio_data = iomap->private; 7854 7855 dip = btrfs_create_dio_private(dio_bio, inode, file_offset); 7856 if (!dip) { 7857 if (!write) { 7858 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 7859 file_offset + dio_bio->bi_iter.bi_size - 1); 7860 } 7861 dio_bio->bi_status = BLK_STS_RESOURCE; 7862 bio_endio(dio_bio); 7863 return BLK_QC_T_NONE; 7864 } 7865 7866 if (!write && csum) { 7867 /* 7868 * Load the csums up front to reduce csum tree searches and 7869 * contention when submitting bios. 7870 */ 7871 status = btrfs_lookup_bio_sums(inode, dio_bio, file_offset, 7872 dip->csums); 7873 if (status != BLK_STS_OK) 7874 goto out_err; 7875 } 7876 7877 start_sector = dio_bio->bi_iter.bi_sector; 7878 submit_len = dio_bio->bi_iter.bi_size; 7879 7880 do { 7881 ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio), 7882 start_sector << 9, submit_len, 7883 &geom); 7884 if (ret) { 7885 status = errno_to_blk_status(ret); 7886 goto out_err; 7887 } 7888 ASSERT(geom.len <= INT_MAX); 7889 7890 clone_len = min_t(int, submit_len, geom.len); 7891 7892 /* 7893 * This will never fail as it's passing GFP_NOFS and 7894 * the allocation is backed by btrfs_bioset. 7895 */ 7896 bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len); 7897 bio->bi_private = dip; 7898 bio->bi_end_io = btrfs_end_dio_bio; 7899 btrfs_io_bio(bio)->logical = file_offset; 7900 7901 ASSERT(submit_len >= clone_len); 7902 submit_len -= clone_len; 7903 7904 /* 7905 * Increase the count before we submit the bio so we know 7906 * the end IO handler won't run and drop the last reference 7907 * before we're done setting the dip up. Otherwise, the dip 7908 * might get freed too early. 7909 * 7910 * We transfer the initial reference to the last bio, so we 7911 * don't need to increment the reference count for the last one.
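 * E.g. if the dio_bio is split into three clones, the refcount goes 1 -> 2 -> 3 across the first two submissions, and the third clone inherits the initial reference taken in btrfs_create_dio_private().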
*/ 7913 if (submit_len > 0) { 7914 refcount_inc(&dip->refs); 7915 /* 7916 * If we are submitting more than one bio, submit them 7917 * all asynchronously. The exception is RAID 5 or 6, as 7918 * asynchronous checksums make it difficult to collect 7919 * full stripe writes. 7920 */ 7921 if (!raid56) 7922 async_submit = 1; 7923 } 7924 7925 status = btrfs_submit_dio_bio(bio, inode, file_offset, 7926 async_submit); 7927 if (status) { 7928 bio_put(bio); 7929 if (submit_len > 0) 7930 refcount_dec(&dip->refs); 7931 goto out_err; 7932 } 7933 7934 dio_data->submitted += clone_len; 7935 clone_offset += clone_len; 7936 start_sector += clone_len >> 9; 7937 file_offset += clone_len; 7938 } while (submit_len > 0); 7939 return BLK_QC_T_NONE; 7940 7941 out_err: 7942 dip->dio_bio->bi_status = status; 7943 btrfs_dio_private_put(dip); 7944 return BLK_QC_T_NONE; 7945 } 7946 7947 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, 7948 const struct iov_iter *iter, loff_t offset) 7949 { 7950 int seg; 7951 int i; 7952 unsigned int blocksize_mask = fs_info->sectorsize - 1; 7953 ssize_t retval = -EINVAL; 7954 7955 if (offset & blocksize_mask) 7956 goto out; 7957 7958 if (iov_iter_alignment(iter) & blocksize_mask) 7959 goto out; 7960 7961 /* If this is a write we don't need to check anymore */ 7962 if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) 7963 return 0; 7964 /* 7965 * Check to make sure we don't have duplicate iov_base's in this 7966 * iovec; if we do, return -EINVAL, otherwise we'd get csum errors 7967 * when reading back. 7968 */ 7969 for (seg = 0; seg < iter->nr_segs; seg++) { 7970 for (i = seg + 1; i < iter->nr_segs; i++) { 7971 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 7972 goto out; 7973 } 7974 } 7975 retval = 0; 7976 out: 7977 return retval; 7978 } 7979 7980 static inline int btrfs_maybe_fsync_end_io(struct kiocb *iocb, ssize_t size, 7981 int error, unsigned flags) 7982 { 7983 /* 7984 * Now if we're still in the context of our submitter we know we can't 7985 * safely run generic_write_sync(), so clear our flag here so that the 7986 * caller knows to follow up with a sync. 7987 */ 7988 if (current->journal_info == BTRFS_DIO_SYNC_STUB) { 7989 current->journal_info = NULL; 7990 return error; 7991 } 7992 7993 if (error) 7994 return error; 7995 7996 if (size) { 7997 iocb->ki_flags |= IOCB_DSYNC; 7998 return generic_write_sync(iocb, size); 7999 } 8000 8001 return 0; 8002 } 8003 8004 static const struct iomap_ops btrfs_dio_iomap_ops = { 8005 .iomap_begin = btrfs_dio_iomap_begin, 8006 .iomap_end = btrfs_dio_iomap_end, 8007 }; 8008 8009 static const struct iomap_dio_ops btrfs_dio_ops = { 8010 .submit_io = btrfs_submit_direct, 8011 }; 8012 8013 static const struct iomap_dio_ops btrfs_sync_dops = { 8014 .submit_io = btrfs_submit_direct, 8015 .end_io = btrfs_maybe_fsync_end_io, 8016 }; 8017 8018 ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 8019 { 8020 struct file *file = iocb->ki_filp; 8021 struct inode *inode = file->f_mapping->host; 8022 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8023 struct extent_changeset *data_reserved = NULL; 8024 loff_t offset = iocb->ki_pos; 8025 size_t count = 0; 8026 bool relock = false; 8027 ssize_t ret; 8028 8029 if (check_direct_IO(fs_info, iter, offset)) 8030 return 0; 8031 8032 count = iov_iter_count(iter); 8033 if (iov_iter_rw(iter) == WRITE) { 8034 /* 8035 * If the write DIO is beyond the EOF, we need to update 8036 * the isize, which is protected by i_mutex, so we cannot 8037 * unlock the i_mutex in that case.
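 * If the write is instead entirely within i_size, we drop the inode lock below so other direct writers can make progress, relying on dio_sem and the extent locks for exclusion, and re-take it before returning.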
*/ 8039 if (offset + count <= inode->i_size) { 8040 inode_unlock(inode); 8041 relock = true; 8042 } 8043 down_read(&BTRFS_I(inode)->dio_sem); 8044 } 8045 8046 /* 8047 * If we are actually a sync iocb, we need our fancy endio to know 8048 * if we need to sync. 8049 */ 8050 if (current->journal_info) 8051 ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, 8052 &btrfs_sync_dops, is_sync_kiocb(iocb)); 8053 else 8054 ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, 8055 &btrfs_dio_ops, is_sync_kiocb(iocb)); 8056 8057 if (ret == -ENOTBLK) 8058 ret = 0; 8059 8060 if (iov_iter_rw(iter) == WRITE) 8061 up_read(&BTRFS_I(inode)->dio_sem); 8062 8063 if (relock) 8064 inode_lock(inode); 8065 8066 extent_changeset_free(data_reserved); 8067 return ret; 8068 } 8069 8070 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8071 u64 start, u64 len) 8072 { 8073 int ret; 8074 8075 ret = fiemap_prep(inode, fieinfo, start, &len, 0); 8076 if (ret) 8077 return ret; 8078 8079 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); 8080 } 8081 8082 int btrfs_readpage(struct file *file, struct page *page) 8083 { 8084 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); 8085 u64 start = page_offset(page); 8086 u64 end = start + PAGE_SIZE - 1; 8087 unsigned long bio_flags = 0; 8088 struct bio *bio = NULL; 8089 int ret; 8090 8091 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); 8092 8093 ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL); 8094 if (bio) 8095 ret = submit_one_bio(bio, 0, bio_flags); 8096 return ret; 8097 } 8098 8099 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8100 { 8101 struct inode *inode = page->mapping->host; 8102 int ret; 8103 8104 if (current->flags & PF_MEMALLOC) { 8105 redirty_page_for_writepage(wbc, page); 8106 unlock_page(page); 8107 return 0; 8108 } 8109 8110 /* 8111 * If we are under memory pressure we will call this directly from the 8112 * VM, so we need to make sure we have the inode referenced for the 8113 * ordered extent. If not, just return as if we didn't do anything.
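 * Redirtying the page and returning AOP_WRITEPAGE_ACTIVATE tells the VM to keep the page dirty and move on, so nothing is lost.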
8114 */ 8115 if (!igrab(inode)) { 8116 redirty_page_for_writepage(wbc, page); 8117 return AOP_WRITEPAGE_ACTIVATE; 8118 } 8119 ret = extent_write_full_page(page, wbc); 8120 btrfs_add_delayed_iput(inode); 8121 return ret; 8122 } 8123 8124 static int btrfs_writepages(struct address_space *mapping, 8125 struct writeback_control *wbc) 8126 { 8127 return extent_writepages(mapping, wbc); 8128 } 8129 8130 static void btrfs_readahead(struct readahead_control *rac) 8131 { 8132 extent_readahead(rac); 8133 } 8134 8135 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8136 { 8137 int ret = try_release_extent_mapping(page, gfp_flags); 8138 if (ret == 1) 8139 detach_page_private(page); 8140 return ret; 8141 } 8142 8143 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8144 { 8145 if (PageWriteback(page) || PageDirty(page)) 8146 return 0; 8147 return __btrfs_releasepage(page, gfp_flags); 8148 } 8149 8150 #ifdef CONFIG_MIGRATION 8151 static int btrfs_migratepage(struct address_space *mapping, 8152 struct page *newpage, struct page *page, 8153 enum migrate_mode mode) 8154 { 8155 int ret; 8156 8157 ret = migrate_page_move_mapping(mapping, newpage, page, 0); 8158 if (ret != MIGRATEPAGE_SUCCESS) 8159 return ret; 8160 8161 if (page_has_private(page)) 8162 attach_page_private(newpage, detach_page_private(page)); 8163 8164 if (PagePrivate2(page)) { 8165 ClearPagePrivate2(page); 8166 SetPagePrivate2(newpage); 8167 } 8168 8169 if (mode != MIGRATE_SYNC_NO_COPY) 8170 migrate_page_copy(newpage, page); 8171 else 8172 migrate_page_states(newpage, page); 8173 return MIGRATEPAGE_SUCCESS; 8174 } 8175 #endif 8176 8177 static void btrfs_invalidatepage(struct page *page, unsigned int offset, 8178 unsigned int length) 8179 { 8180 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); 8181 struct extent_io_tree *tree = &inode->io_tree; 8182 struct btrfs_ordered_extent *ordered; 8183 struct extent_state *cached_state = NULL; 8184 u64 page_start = page_offset(page); 8185 u64 page_end = page_start + PAGE_SIZE - 1; 8186 u64 start; 8187 u64 end; 8188 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 8189 8190 /* 8191 * we have the page locked, so new writeback can't start, 8192 * and the dirty bit won't be cleared while we are here. 
* 8194 * Wait for IO on this page so that we can safely clear 8195 * the PagePrivate2 bit and do ordered accounting 8196 */ 8197 wait_on_page_writeback(page); 8198 8199 if (offset) { 8200 btrfs_releasepage(page, GFP_NOFS); 8201 return; 8202 } 8203 8204 if (!inode_evicting) 8205 lock_extent_bits(tree, page_start, page_end, &cached_state); 8206 again: 8207 start = page_start; 8208 ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1); 8209 if (ordered) { 8210 end = min(page_end, 8211 ordered->file_offset + ordered->num_bytes - 1); 8212 /* 8213 * IO on this page will never be started, so we need 8214 * to account for any ordered extents now 8215 */ 8216 if (!inode_evicting) 8217 clear_extent_bit(tree, start, end, 8218 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 8219 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8220 EXTENT_DEFRAG, 1, 0, &cached_state); 8221 /* 8222 * whoever cleared the private bit is responsible 8223 * for the finish_ordered_io 8224 */ 8225 if (TestClearPagePrivate2(page)) { 8226 struct btrfs_ordered_inode_tree *tree; 8227 u64 new_len; 8228 8229 tree = &inode->ordered_tree; 8230 8231 spin_lock_irq(&tree->lock); 8232 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8233 new_len = start - ordered->file_offset; 8234 if (new_len < ordered->truncated_len) 8235 ordered->truncated_len = new_len; 8236 spin_unlock_irq(&tree->lock); 8237 8238 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8239 start, 8240 end - start + 1, 1)) 8241 btrfs_finish_ordered_io(ordered); 8242 } 8243 btrfs_put_ordered_extent(ordered); 8244 if (!inode_evicting) { 8245 cached_state = NULL; 8246 lock_extent_bits(tree, start, end, 8247 &cached_state); 8248 } 8249 8250 start = end + 1; 8251 if (start < page_end) 8252 goto again; 8253 } 8254 8255 /* 8256 * Qgroup reserved space handler 8257 * The page here will be in one of two states: 8258 * 1) Already written to disk or the ordered extent already submitted: 8259 * then its QGROUP_RESERVED bit in the io_tree has already been cleared, 8260 * qgroup will be handled by its qgroup_record, and the 8261 * btrfs_qgroup_free_data() call will do nothing here. 8262 * 8263 * 2) Not written to disk yet: 8264 * then the btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED 8265 * bit of its io_tree and free the qgroup reserved data space, 8266 * since the IO will never happen for this page. 8267 */ 8268 btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); 8269 if (!inode_evicting) { 8270 clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | 8271 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 8272 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, 8273 &cached_state); 8274 8275 __btrfs_releasepage(page, GFP_NOFS); 8276 } 8277 8278 ClearPageChecked(page); 8279 detach_page_private(page); 8280 } 8281 8282 /* 8283 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8284 * called from a page fault handler when a page is first dirtied. Hence we must 8285 * be careful to check for EOF conditions here. We set the page up correctly 8286 * for a written page which means we get ENOSPC checking when writing into 8287 * holes and correct delalloc and unwritten extent mapping on filesystems that 8288 * support these features. 8289 * 8290 * We are not allowed to take the i_mutex here so we have to play games to 8291 * protect against truncate races as the page could now be beyond EOF. Because 8292 * truncate_setsize() writes the inode size before removing pages, once we have 8293 * the page lock we can determine safely if the page is beyond EOF.
If it is not 8294 * beyond EOF, then the page is guaranteed safe against truncation until we 8295 * unlock the page. 8296 */ 8297 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8298 { 8299 struct page *page = vmf->page; 8300 struct inode *inode = file_inode(vmf->vma->vm_file); 8301 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8302 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8303 struct btrfs_ordered_extent *ordered; 8304 struct extent_state *cached_state = NULL; 8305 struct extent_changeset *data_reserved = NULL; 8306 char *kaddr; 8307 unsigned long zero_start; 8308 loff_t size; 8309 vm_fault_t ret; 8310 int ret2; 8311 int reserved = 0; 8312 u64 reserved_space; 8313 u64 page_start; 8314 u64 page_end; 8315 u64 end; 8316 8317 reserved_space = PAGE_SIZE; 8318 8319 sb_start_pagefault(inode->i_sb); 8320 page_start = page_offset(page); 8321 page_end = page_start + PAGE_SIZE - 1; 8322 end = page_end; 8323 8324 /* 8325 * Reserving delalloc space after obtaining the page lock can lead to 8326 * deadlock. For example, if a dirty page is locked by this function 8327 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8328 * dirty page write out, then the btrfs_writepage() function could 8329 * end up waiting indefinitely to get a lock on the page currently 8330 * being processed by btrfs_page_mkwrite() function. 8331 */ 8332 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 8333 page_start, reserved_space); 8334 if (!ret2) { 8335 ret2 = file_update_time(vmf->vma->vm_file); 8336 reserved = 1; 8337 } 8338 if (ret2) { 8339 ret = vmf_error(ret2); 8340 if (reserved) 8341 goto out; 8342 goto out_noreserve; 8343 } 8344 8345 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8346 again: 8347 lock_page(page); 8348 size = i_size_read(inode); 8349 8350 if ((page->mapping != inode->i_mapping) || 8351 (page_start >= size)) { 8352 /* page got truncated out from underneath us */ 8353 goto out_unlock; 8354 } 8355 wait_on_page_writeback(page); 8356 8357 lock_extent_bits(io_tree, page_start, page_end, &cached_state); 8358 set_page_extent_mapped(page); 8359 8360 /* 8361 * we can't set the delalloc bits if there are pending ordered 8362 * extents. Drop our locks and wait for them to finish 8363 */ 8364 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8365 PAGE_SIZE); 8366 if (ordered) { 8367 unlock_extent_cached(io_tree, page_start, page_end, 8368 &cached_state); 8369 unlock_page(page); 8370 btrfs_start_ordered_extent(ordered, 1); 8371 btrfs_put_ordered_extent(ordered); 8372 goto again; 8373 } 8374 8375 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 8376 reserved_space = round_up(size - page_start, 8377 fs_info->sectorsize); 8378 if (reserved_space < PAGE_SIZE) { 8379 end = page_start + reserved_space - 1; 8380 btrfs_delalloc_release_space(BTRFS_I(inode), 8381 data_reserved, page_start, 8382 PAGE_SIZE - reserved_space, true); 8383 } 8384 } 8385 8386 /* 8387 * page_mkwrite gets called when the page is first dirtied after it's 8388 * faulted in, but write(2) could also dirty a page and set delalloc 8389 * bits, so in that case, for space accounting reasons, we still need to 8390 * clear any delalloc bits within this page range, since we had to 8391 * reserve data & metadata space before lock_page() (see above comments).
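 * Clearing the bits here keeps the outstanding extent accounting balanced when btrfs_set_extent_delalloc() below re-sets delalloc for the whole reserved range.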
*/ 8393 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 8394 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8395 EXTENT_DEFRAG, 0, 0, &cached_state); 8396 8397 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, 8398 &cached_state); 8399 if (ret2) { 8400 unlock_extent_cached(io_tree, page_start, page_end, 8401 &cached_state); 8402 ret = VM_FAULT_SIGBUS; 8403 goto out_unlock; 8404 } 8405 8406 /* page is wholly or partially inside EOF */ 8407 if (page_start + PAGE_SIZE > size) 8408 zero_start = offset_in_page(size); 8409 else 8410 zero_start = PAGE_SIZE; 8411 8412 if (zero_start != PAGE_SIZE) { 8413 kaddr = kmap(page); 8414 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); 8415 flush_dcache_page(page); 8416 kunmap(page); 8417 } 8418 ClearPageChecked(page); 8419 set_page_dirty(page); 8420 SetPageUptodate(page); 8421 8422 BTRFS_I(inode)->last_trans = fs_info->generation; 8423 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 8424 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; 8425 8426 unlock_extent_cached(io_tree, page_start, page_end, &cached_state); 8427 8428 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8429 sb_end_pagefault(inode->i_sb); 8430 extent_changeset_free(data_reserved); 8431 return VM_FAULT_LOCKED; 8432 8433 out_unlock: 8434 unlock_page(page); 8435 out: 8436 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8437 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, 8438 reserved_space, (ret != 0)); 8439 out_noreserve: 8440 sb_end_pagefault(inode->i_sb); 8441 extent_changeset_free(data_reserved); 8442 return ret; 8443 } 8444 8445 static int btrfs_truncate(struct inode *inode, bool skip_writeback) 8446 { 8447 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8448 struct btrfs_root *root = BTRFS_I(inode)->root; 8449 struct btrfs_block_rsv *rsv; 8450 int ret; 8451 struct btrfs_trans_handle *trans; 8452 u64 mask = fs_info->sectorsize - 1; 8453 u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 8454 8455 if (!skip_writeback) { 8456 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 8457 (u64)-1); 8458 if (ret) 8459 return ret; 8460 } 8461 8462 /* 8463 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 8464 * things going on here: 8465 * 8466 * 1) We need to reserve space to update our inode. 8467 * 8468 * 2) We need to have something to cache all the space that is going to 8469 * be free'd up by the truncate operation, but also have some slack 8470 * space reserved in case it uses space during the truncate (thank you 8471 * very much snapshotting). 8472 * 8473 * And we need these to be separate. The fact is we can use a lot of 8474 * space doing the truncate, and we have no earthly idea how much space 8475 * we will use, so we need the truncate reservation to be separate so it 8476 * doesn't end up using space reserved for updating the inode. We also 8477 * need to be able to stop the transaction and start a new one, which 8478 * means we need to be able to update the inode several times, and we 8479 * have no way of knowing how many times that will be, so we can't just 8480 * reserve 1 item for the entirety of the operation, so that has to be 8481 * done separately as well. 8482 * 8483 * So that leaves us with 8484 * 8485 * 1) rsv - for the truncate reservation, which we will steal from the 8486 * transaction reservation. 8487 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for 8488 * updating the inode.
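 * Each pass of the loop below refills rsv from trans_block_rsv with btrfs_block_rsv_migrate() after restarting the transaction, so the truncate always has min_size bytes to work with.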
*/ 8490 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 8491 if (!rsv) 8492 return -ENOMEM; 8493 rsv->size = min_size; 8494 rsv->failfast = 1; 8495 8496 /* 8497 * 1 for the truncate slack space 8498 * 1 for updating the inode. 8499 */ 8500 trans = btrfs_start_transaction(root, 2); 8501 if (IS_ERR(trans)) { 8502 ret = PTR_ERR(trans); 8503 goto out; 8504 } 8505 8506 /* Migrate the slack space for the truncate to our reserve */ 8507 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 8508 min_size, false); 8509 BUG_ON(ret); 8510 8511 /* 8512 * So if we truncate and then write and fsync we normally would just 8513 * write the extents that changed, which is a problem if we need to 8514 * first truncate that entire inode. So set this flag so we write out 8515 * all of the extents in the inode to the sync log so we're completely 8516 * safe. 8517 */ 8518 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 8519 trans->block_rsv = rsv; 8520 8521 while (1) { 8522 ret = btrfs_truncate_inode_items(trans, root, inode, 8523 inode->i_size, 8524 BTRFS_EXTENT_DATA_KEY); 8525 trans->block_rsv = &fs_info->trans_block_rsv; 8526 if (ret != -ENOSPC && ret != -EAGAIN) 8527 break; 8528 8529 ret = btrfs_update_inode(trans, root, inode); 8530 if (ret) 8531 break; 8532 8533 btrfs_end_transaction(trans); 8534 btrfs_btree_balance_dirty(fs_info); 8535 8536 trans = btrfs_start_transaction(root, 2); 8537 if (IS_ERR(trans)) { 8538 ret = PTR_ERR(trans); 8539 trans = NULL; 8540 break; 8541 } 8542 8543 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 8544 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 8545 rsv, min_size, false); 8546 BUG_ON(ret); /* shouldn't happen */ 8547 trans->block_rsv = rsv; 8548 } 8549 8550 /* 8551 * We can't call btrfs_truncate_block inside a trans handle as we could 8552 * deadlock with freeze. If we got NEED_TRUNCATE_BLOCK then we know 8553 * we've truncated everything except the last little bit, so we can do 8554 * btrfs_truncate_block and then update the disk_i_size. 8555 */ 8556 if (ret == NEED_TRUNCATE_BLOCK) { 8557 btrfs_end_transaction(trans); 8558 btrfs_btree_balance_dirty(fs_info); 8559 8560 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0); 8561 if (ret) 8562 goto out; 8563 trans = btrfs_start_transaction(root, 1); 8564 if (IS_ERR(trans)) { 8565 ret = PTR_ERR(trans); 8566 goto out; 8567 } 8568 btrfs_inode_safe_disk_i_size_write(inode, 0); 8569 } 8570 8571 if (trans) { 8572 int ret2; 8573 8574 trans->block_rsv = &fs_info->trans_block_rsv; 8575 ret2 = btrfs_update_inode(trans, root, inode); 8576 if (ret2 && !ret) 8577 ret = ret2; 8578 8579 ret2 = btrfs_end_transaction(trans); 8580 if (ret2 && !ret) 8581 ret = ret2; 8582 btrfs_btree_balance_dirty(fs_info); 8583 } 8584 out: 8585 btrfs_free_block_rsv(fs_info, rsv); 8586 8587 return ret; 8588 } 8589 8590 /* 8591 * create a new subvolume directory/inode (helper for the ioctl).
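 * The new root gets an empty directory inode whose ".." entry refers to itself (objectid new_dirid), with nlink 1 and i_size 0; the caller is then expected to link the new subvolume into the parent directory.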
8592 */ 8593 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 8594 struct btrfs_root *new_root, 8595 struct btrfs_root *parent_root, 8596 u64 new_dirid) 8597 { 8598 struct inode *inode; 8599 int err; 8600 u64 index = 0; 8601 8602 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 8603 new_dirid, new_dirid, 8604 S_IFDIR | (~current_umask() & S_IRWXUGO), 8605 &index); 8606 if (IS_ERR(inode)) 8607 return PTR_ERR(inode); 8608 inode->i_op = &btrfs_dir_inode_operations; 8609 inode->i_fop = &btrfs_dir_file_operations; 8610 8611 set_nlink(inode, 1); 8612 btrfs_i_size_write(BTRFS_I(inode), 0); 8613 unlock_new_inode(inode); 8614 8615 err = btrfs_subvol_inherit_props(trans, new_root, parent_root); 8616 if (err) 8617 btrfs_err(new_root->fs_info, 8618 "error inheriting subvolume %llu properties: %d", 8619 new_root->root_key.objectid, err); 8620 8621 err = btrfs_update_inode(trans, new_root, inode); 8622 8623 iput(inode); 8624 return err; 8625 } 8626 8627 struct inode *btrfs_alloc_inode(struct super_block *sb) 8628 { 8629 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8630 struct btrfs_inode *ei; 8631 struct inode *inode; 8632 8633 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL); 8634 if (!ei) 8635 return NULL; 8636 8637 ei->root = NULL; 8638 ei->generation = 0; 8639 ei->last_trans = 0; 8640 ei->last_sub_trans = 0; 8641 ei->logged_trans = 0; 8642 ei->delalloc_bytes = 0; 8643 ei->new_delalloc_bytes = 0; 8644 ei->defrag_bytes = 0; 8645 ei->disk_i_size = 0; 8646 ei->flags = 0; 8647 ei->csum_bytes = 0; 8648 ei->index_cnt = (u64)-1; 8649 ei->dir_index = 0; 8650 ei->last_unlink_trans = 0; 8651 ei->last_reflink_trans = 0; 8652 ei->last_log_commit = 0; 8653 8654 spin_lock_init(&ei->lock); 8655 ei->outstanding_extents = 0; 8656 if (sb->s_magic != BTRFS_TEST_MAGIC) 8657 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8658 BTRFS_BLOCK_RSV_DELALLOC); 8659 ei->runtime_flags = 0; 8660 ei->prop_compress = BTRFS_COMPRESS_NONE; 8661 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8662 8663 ei->delayed_node = NULL; 8664 8665 ei->i_otime.tv_sec = 0; 8666 ei->i_otime.tv_nsec = 0; 8667 8668 inode = &ei->vfs_inode; 8669 extent_map_tree_init(&ei->extent_tree); 8670 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode); 8671 extent_io_tree_init(fs_info, &ei->io_failure_tree, 8672 IO_TREE_INODE_IO_FAILURE, inode); 8673 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8674 IO_TREE_INODE_FILE_EXTENT, inode); 8675 ei->io_tree.track_uptodate = true; 8676 ei->io_failure_tree.track_uptodate = true; 8677 atomic_set(&ei->sync_writers, 0); 8678 mutex_init(&ei->log_mutex); 8679 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8680 INIT_LIST_HEAD(&ei->delalloc_inodes); 8681 INIT_LIST_HEAD(&ei->delayed_iput); 8682 RB_CLEAR_NODE(&ei->rb_node); 8683 init_rwsem(&ei->dio_sem); 8684 8685 return inode; 8686 } 8687 8688 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8689 void btrfs_test_destroy_inode(struct inode *inode) 8690 { 8691 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); 8692 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8693 } 8694 #endif 8695 8696 void btrfs_free_inode(struct inode *inode) 8697 { 8698 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8699 } 8700 8701 void btrfs_destroy_inode(struct inode *vfs_inode) 8702 { 8703 struct btrfs_ordered_extent *ordered; 8704 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8705 struct btrfs_root *root = inode->root; 8706 8707 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8708 WARN_ON(vfs_inode->i_data.nrpages); 8709 
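/* All reservations and accounting tied to this inode must be balanced by now; any non-zero value below indicates a leak. */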
WARN_ON(inode->block_rsv.reserved); 8710 WARN_ON(inode->block_rsv.size); 8711 WARN_ON(inode->outstanding_extents); 8712 WARN_ON(inode->delalloc_bytes); 8713 WARN_ON(inode->new_delalloc_bytes); 8714 WARN_ON(inode->csum_bytes); 8715 WARN_ON(inode->defrag_bytes); 8716 8717 /* 8718 * This can happen when we create an inode, but somebody else also 8719 * created the same inode and we need to destroy the one we already 8720 * created. 8721 */ 8722 if (!root) 8723 return; 8724 8725 while (1) { 8726 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8727 if (!ordered) 8728 break; 8729 else { 8730 btrfs_err(root->fs_info, 8731 "found ordered extent %llu %llu on inode cleanup", 8732 ordered->file_offset, ordered->num_bytes); 8733 btrfs_remove_ordered_extent(inode, ordered); /* One put for our lookup reference, one for the base reference. */ 8734 btrfs_put_ordered_extent(ordered); 8735 btrfs_put_ordered_extent(ordered); 8736 } 8737 } 8738 btrfs_qgroup_check_reserved_leak(inode); 8739 inode_tree_del(inode); 8740 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 8741 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 8742 btrfs_put_root(inode->root); 8743 } 8744 8745 int btrfs_drop_inode(struct inode *inode) 8746 { 8747 struct btrfs_root *root = BTRFS_I(inode)->root; 8748 8749 if (root == NULL) 8750 return 1; 8751 8752 /* the snap/subvol tree is being deleted */ 8753 if (btrfs_root_refs(&root->root_item) == 0) 8754 return 1; 8755 else 8756 return generic_drop_inode(inode); 8757 } 8758 8759 static void init_once(void *foo) 8760 { 8761 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 8762 8763 inode_init_once(&ei->vfs_inode); 8764 } 8765 8766 void __cold btrfs_destroy_cachep(void) 8767 { 8768 /* 8769 * Make sure all delayed rcu free inodes are flushed before we 8770 * destroy cache. 8771 */ 8772 rcu_barrier(); 8773 kmem_cache_destroy(btrfs_inode_cachep); 8774 kmem_cache_destroy(btrfs_trans_handle_cachep); 8775 kmem_cache_destroy(btrfs_path_cachep); 8776 kmem_cache_destroy(btrfs_free_space_cachep); 8777 kmem_cache_destroy(btrfs_free_space_bitmap_cachep); 8778 } 8779 8780 int __init btrfs_init_cachep(void) 8781 { 8782 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 8783 sizeof(struct btrfs_inode), 0, 8784 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, 8785 init_once); 8786 if (!btrfs_inode_cachep) 8787 goto fail; 8788 8789 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 8790 sizeof(struct btrfs_trans_handle), 0, 8791 SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); 8792 if (!btrfs_trans_handle_cachep) 8793 goto fail; 8794 8795 btrfs_path_cachep = kmem_cache_create("btrfs_path", 8796 sizeof(struct btrfs_path), 0, 8797 SLAB_MEM_SPREAD, NULL); 8798 if (!btrfs_path_cachep) 8799 goto fail; 8800 8801 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 8802 sizeof(struct btrfs_free_space), 0, 8803 SLAB_MEM_SPREAD, NULL); 8804 if (!btrfs_free_space_cachep) 8805 goto fail; 8806 8807 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", 8808 PAGE_SIZE, PAGE_SIZE, 8809 SLAB_RED_ZONE, NULL); 8810 if (!btrfs_free_space_bitmap_cachep) 8811 goto fail; 8812 8813 return 0; 8814 fail: 8815 btrfs_destroy_cachep(); 8816 return -ENOMEM; 8817 } 8818 8819 static int btrfs_getattr(const struct path *path, struct kstat *stat, 8820 u32 request_mask, unsigned int flags) 8821 { 8822 u64 delalloc_bytes; 8823 struct inode *inode = d_inode(path->dentry); 8824 u32 blocksize = inode->i_sb->s_blocksize; 8825 u32 bi_flags = BTRFS_I(inode)->flags; 8826 8827 stat->result_mask |= STATX_BTIME; 8828 stat->btime.tv_sec =
BTRFS_I(inode)->i_otime.tv_sec; 8829 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; 8830 if (bi_flags & BTRFS_INODE_APPEND) 8831 stat->attributes |= STATX_ATTR_APPEND; 8832 if (bi_flags & BTRFS_INODE_COMPRESS) 8833 stat->attributes |= STATX_ATTR_COMPRESSED; 8834 if (bi_flags & BTRFS_INODE_IMMUTABLE) 8835 stat->attributes |= STATX_ATTR_IMMUTABLE; 8836 if (bi_flags & BTRFS_INODE_NODUMP) 8837 stat->attributes |= STATX_ATTR_NODUMP; 8838 8839 stat->attributes_mask |= (STATX_ATTR_APPEND | 8840 STATX_ATTR_COMPRESSED | 8841 STATX_ATTR_IMMUTABLE | 8842 STATX_ATTR_NODUMP); 8843 8844 generic_fillattr(inode, stat); 8845 stat->dev = BTRFS_I(inode)->root->anon_dev; 8846 8847 spin_lock(&BTRFS_I(inode)->lock); 8848 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 8849 spin_unlock(&BTRFS_I(inode)->lock); 8850 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 8851 ALIGN(delalloc_bytes, blocksize)) >> 9; 8852 return 0; 8853 } 8854 8855 static int btrfs_rename_exchange(struct inode *old_dir, 8856 struct dentry *old_dentry, 8857 struct inode *new_dir, 8858 struct dentry *new_dentry) 8859 { 8860 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8861 struct btrfs_trans_handle *trans; 8862 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8863 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8864 struct inode *new_inode = new_dentry->d_inode; 8865 struct inode *old_inode = old_dentry->d_inode; 8866 struct timespec64 ctime = current_time(old_inode); 8867 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8868 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 8869 u64 old_idx = 0; 8870 u64 new_idx = 0; 8871 int ret; 8872 int ret2; 8873 bool root_log_pinned = false; 8874 bool dest_log_pinned = false; 8875 8876 /* we only allow renaming subvolume links between subvolumes */ 8877 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8878 return -EXDEV; 8879 8880 /* close the race window with snapshot create/destroy ioctl */ 8881 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 8882 new_ino == BTRFS_FIRST_FREE_OBJECTID) 8883 down_read(&fs_info->subvol_sem); 8884 8885 /* 8886 * We want to reserve the absolute worst case amount of items. So if 8887 * both inodes are subvols and we need to unlink them then that would 8888 * require 4 item modifications, but if they are both normal inodes it 8889 * would require 5 item modifications, so we'll assume they're normal 8890 * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items 8891 * should cover the worst case number of items we'll modify. 8892 */ 8893 trans = btrfs_start_transaction(root, 12); 8894 if (IS_ERR(trans)) { 8895 ret = PTR_ERR(trans); 8896 goto out_notrans; 8897 } 8898 8899 if (dest != root) 8900 btrfs_record_root_in_trans(trans, dest); 8901 8902 /* 8903 * We need to find a free sequence number both in the source and 8904 * in the destination directory for the exchange. 8905 */ 8906 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8907 if (ret) 8908 goto out_fail; 8909 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8910 if (ret) 8911 goto out_fail; 8912 8913 BTRFS_I(old_inode)->dir_index = 0ULL; 8914 BTRFS_I(new_inode)->dir_index = 0ULL; 8915 8916 /* Reference for the source. */ 8917 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8918 /* force full log commit if subvolume involved.
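 * (The tree log cannot represent changes to subvolume roots, so the only way to make such a rename durable is a full transaction commit.)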
*/ 8919 btrfs_set_log_full_commit(trans); 8920 } else { 8921 btrfs_pin_log_trans(root); 8922 root_log_pinned = true; 8923 ret = btrfs_insert_inode_ref(trans, dest, 8924 new_dentry->d_name.name, 8925 new_dentry->d_name.len, 8926 old_ino, 8927 btrfs_ino(BTRFS_I(new_dir)), 8928 old_idx); 8929 if (ret) 8930 goto out_fail; 8931 } 8932 8933 /* And now for the dest. */ 8934 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8935 /* force full log commit if subvolume involved. */ 8936 btrfs_set_log_full_commit(trans); 8937 } else { 8938 btrfs_pin_log_trans(dest); 8939 dest_log_pinned = true; 8940 ret = btrfs_insert_inode_ref(trans, root, 8941 old_dentry->d_name.name, 8942 old_dentry->d_name.len, 8943 new_ino, 8944 btrfs_ino(BTRFS_I(old_dir)), 8945 new_idx); 8946 if (ret) 8947 goto out_fail; 8948 } 8949 8950 /* Update inode version and ctime/mtime. */ 8951 inode_inc_iversion(old_dir); 8952 inode_inc_iversion(new_dir); 8953 inode_inc_iversion(old_inode); 8954 inode_inc_iversion(new_inode); 8955 old_dir->i_ctime = old_dir->i_mtime = ctime; 8956 new_dir->i_ctime = new_dir->i_mtime = ctime; 8957 old_inode->i_ctime = ctime; 8958 new_inode->i_ctime = ctime; 8959 8960 if (old_dentry->d_parent != new_dentry->d_parent) { 8961 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8962 BTRFS_I(old_inode), 1); 8963 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8964 BTRFS_I(new_inode), 1); 8965 } 8966 8967 /* src is a subvolume */ 8968 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8969 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); 8970 } else { /* src is an inode */ 8971 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), 8972 BTRFS_I(old_dentry->d_inode), 8973 old_dentry->d_name.name, 8974 old_dentry->d_name.len); 8975 if (!ret) 8976 ret = btrfs_update_inode(trans, root, old_inode); 8977 } 8978 if (ret) { 8979 btrfs_abort_transaction(trans, ret); 8980 goto out_fail; 8981 } 8982 8983 /* dest is a subvolume */ 8984 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8985 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); 8986 } else { /* dest is an inode */ 8987 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), 8988 BTRFS_I(new_dentry->d_inode), 8989 new_dentry->d_name.name, 8990 new_dentry->d_name.len); 8991 if (!ret) 8992 ret = btrfs_update_inode(trans, dest, new_inode); 8993 } 8994 if (ret) { 8995 btrfs_abort_transaction(trans, ret); 8996 goto out_fail; 8997 } 8998 8999 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9000 new_dentry->d_name.name, 9001 new_dentry->d_name.len, 0, old_idx); 9002 if (ret) { 9003 btrfs_abort_transaction(trans, ret); 9004 goto out_fail; 9005 } 9006 9007 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 9008 old_dentry->d_name.name, 9009 old_dentry->d_name.len, 0, new_idx); 9010 if (ret) { 9011 btrfs_abort_transaction(trans, ret); 9012 goto out_fail; 9013 } 9014 9015 if (old_inode->i_nlink == 1) 9016 BTRFS_I(old_inode)->dir_index = old_idx; 9017 if (new_inode->i_nlink == 1) 9018 BTRFS_I(new_inode)->dir_index = new_idx; 9019 9020 if (root_log_pinned) { 9021 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9022 new_dentry->d_parent); 9023 btrfs_end_log_trans(root); 9024 root_log_pinned = false; 9025 } 9026 if (dest_log_pinned) { 9027 btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), 9028 old_dentry->d_parent); 9029 btrfs_end_log_trans(dest); 9030 dest_log_pinned = false; 9031 } 9032 out_fail: 9033 /* 9034 * If we have pinned a log and an error happened, we unpin tasks 9035 * trying to sync the log and force them to 
fall back to a transaction 9036 * commit if the log currently contains any of the inodes involved in 9037 * this rename operation (to ensure we do not persist a log with an 9038 * inconsistent state for any of these inodes or lead to any 9039 * inconsistencies when replayed). If the transaction was aborted, the 9040 * abort reason is propagated to userspace when attempting to commit 9041 * the transaction. If the log does not contain any of these inodes, we 9042 * allow the tasks to sync it. 9043 */ 9044 if (ret && (root_log_pinned || dest_log_pinned)) { 9045 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || 9046 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || 9047 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || 9048 (new_inode && 9049 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) 9050 btrfs_set_log_full_commit(trans); 9051 9052 if (root_log_pinned) { 9053 btrfs_end_log_trans(root); 9054 root_log_pinned = false; 9055 } 9056 if (dest_log_pinned) { 9057 btrfs_end_log_trans(dest); 9058 dest_log_pinned = false; 9059 } 9060 } 9061 ret2 = btrfs_end_transaction(trans); 9062 ret = ret ? ret : ret2; 9063 out_notrans: 9064 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 9065 old_ino == BTRFS_FIRST_FREE_OBJECTID) 9066 up_read(&fs_info->subvol_sem); 9067 9068 return ret; 9069 } 9070 9071 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, 9072 struct btrfs_root *root, 9073 struct inode *dir, 9074 struct dentry *dentry) 9075 { 9076 int ret; 9077 struct inode *inode; 9078 u64 objectid; 9079 u64 index; 9080 9081 ret = btrfs_find_free_ino(root, &objectid); 9082 if (ret) 9083 return ret; 9084 9085 inode = btrfs_new_inode(trans, root, dir, 9086 dentry->d_name.name, 9087 dentry->d_name.len, 9088 btrfs_ino(BTRFS_I(dir)), 9089 objectid, 9090 S_IFCHR | WHITEOUT_MODE, 9091 &index); 9092 9093 if (IS_ERR(inode)) { 9094 ret = PTR_ERR(inode); 9095 return ret; 9096 } 9097 9098 inode->i_op = &btrfs_special_inode_operations; 9099 init_special_inode(inode, inode->i_mode, 9100 WHITEOUT_DEV); 9101 9102 ret = btrfs_init_inode_security(trans, inode, dir, 9103 &dentry->d_name); 9104 if (ret) 9105 goto out; 9106 9107 ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, 9108 BTRFS_I(inode), 0, index); 9109 if (ret) 9110 goto out; 9111 9112 ret = btrfs_update_inode(trans, root, inode); 9113 out: 9114 unlock_new_inode(inode); 9115 if (ret) 9116 inode_dec_link_count(inode); 9117 iput(inode); 9118 9119 return ret; 9120 } 9121 9122 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 9123 struct inode *new_dir, struct dentry *new_dentry, 9124 unsigned int flags) 9125 { 9126 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9127 struct btrfs_trans_handle *trans; 9128 unsigned int trans_num_items; 9129 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9130 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9131 struct inode *new_inode = d_inode(new_dentry); 9132 struct inode *old_inode = d_inode(old_dentry); 9133 u64 index = 0; 9134 int ret; 9135 int ret2; 9136 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9137 bool log_pinned = false; 9138 9139 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9140 return -EPERM; 9141 9142 /* we only allow renaming subvolume links between subvolumes */ 9143 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9144 return -EXDEV; 9145 9146 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9147 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 9148 return
-ENOTEMPTY; 9149 9150 if (S_ISDIR(old_inode->i_mode) && new_inode && 9151 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9152 return -ENOTEMPTY; 9153 9154 9155 /* check for collisions, even if the name isn't there */ 9156 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9157 new_dentry->d_name.name, 9158 new_dentry->d_name.len); 9159 9160 if (ret) { 9161 if (ret == -EEXIST) { 9162 /* we shouldn't get 9163 * eexist without a new_inode */ 9164 if (WARN_ON(!new_inode)) { 9165 return ret; 9166 } 9167 } else { 9168 /* maybe -EOVERFLOW */ 9169 return ret; 9170 } 9171 } 9172 ret = 0; 9173 9174 /* 9175 * we're using rename to replace one file with another. Start IO on it 9176 * now so we don't add too much work to the end of the transaction 9177 */ 9178 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9179 filemap_flush(old_inode->i_mapping); 9180 9181 /* close the race window with snapshot create/destroy ioctl */ 9182 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9183 down_read(&fs_info->subvol_sem); 9184 /* 9185 * We want to reserve the absolute worst case amount of items. So if 9186 * both inodes are subvols and we need to unlink them then that would 9187 * require 4 item modifications, but if they are both normal inodes it 9188 * would require 5 item modifications, so we'll assume they are normal 9189 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 9190 * should cover the worst case number of items we'll modify. 9191 * If our rename has the whiteout flag, we need 5 more units for the 9192 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item 9193 * when selinux is enabled). 9194 */ 9195 trans_num_items = 11; 9196 if (flags & RENAME_WHITEOUT) 9197 trans_num_items += 5; 9198 trans = btrfs_start_transaction(root, trans_num_items); 9199 if (IS_ERR(trans)) { 9200 ret = PTR_ERR(trans); 9201 goto out_notrans; 9202 } 9203 9204 if (dest != root) 9205 btrfs_record_root_in_trans(trans, dest); 9206 9207 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9208 if (ret) 9209 goto out_fail; 9210 9211 BTRFS_I(old_inode)->dir_index = 0ULL; 9212 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9213 /* force full log commit if subvolume involved.
*/ 9214 btrfs_set_log_full_commit(trans); 9215 } else { 9216 btrfs_pin_log_trans(root); 9217 log_pinned = true; 9218 ret = btrfs_insert_inode_ref(trans, dest, 9219 new_dentry->d_name.name, 9220 new_dentry->d_name.len, 9221 old_ino, 9222 btrfs_ino(BTRFS_I(new_dir)), index); 9223 if (ret) 9224 goto out_fail; 9225 } 9226 9227 inode_inc_iversion(old_dir); 9228 inode_inc_iversion(new_dir); 9229 inode_inc_iversion(old_inode); 9230 old_dir->i_ctime = old_dir->i_mtime = 9231 new_dir->i_ctime = new_dir->i_mtime = 9232 old_inode->i_ctime = current_time(old_dir); 9233 9234 if (old_dentry->d_parent != new_dentry->d_parent) 9235 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9236 BTRFS_I(old_inode), 1); 9237 9238 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9239 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); 9240 } else { 9241 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), 9242 BTRFS_I(d_inode(old_dentry)), 9243 old_dentry->d_name.name, 9244 old_dentry->d_name.len); 9245 if (!ret) 9246 ret = btrfs_update_inode(trans, root, old_inode); 9247 } 9248 if (ret) { 9249 btrfs_abort_transaction(trans, ret); 9250 goto out_fail; 9251 } 9252 9253 if (new_inode) { 9254 inode_inc_iversion(new_inode); 9255 new_inode->i_ctime = current_time(new_inode); 9256 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9257 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9258 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); 9259 BUG_ON(new_inode->i_nlink == 0); 9260 } else { 9261 ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), 9262 BTRFS_I(d_inode(new_dentry)), 9263 new_dentry->d_name.name, 9264 new_dentry->d_name.len); 9265 } 9266 if (!ret && new_inode->i_nlink == 0) 9267 ret = btrfs_orphan_add(trans, 9268 BTRFS_I(d_inode(new_dentry))); 9269 if (ret) { 9270 btrfs_abort_transaction(trans, ret); 9271 goto out_fail; 9272 } 9273 } 9274 9275 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9276 new_dentry->d_name.name, 9277 new_dentry->d_name.len, 0, index); 9278 if (ret) { 9279 btrfs_abort_transaction(trans, ret); 9280 goto out_fail; 9281 } 9282 9283 if (old_inode->i_nlink == 1) 9284 BTRFS_I(old_inode)->dir_index = index; 9285 9286 if (log_pinned) { 9287 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9288 new_dentry->d_parent); 9289 btrfs_end_log_trans(root); 9290 log_pinned = false; 9291 } 9292 9293 if (flags & RENAME_WHITEOUT) { 9294 ret = btrfs_whiteout_for_rename(trans, root, old_dir, 9295 old_dentry); 9296 9297 if (ret) { 9298 btrfs_abort_transaction(trans, ret); 9299 goto out_fail; 9300 } 9301 } 9302 out_fail: 9303 /* 9304 * If we have pinned the log and an error happened, we unpin tasks 9305 * trying to sync the log and force them to fall back to a transaction 9306 * commit if the log currently contains any of the inodes involved in 9307 * this rename operation (to ensure we do not persist a log with an 9308 * inconsistent state for any of these inodes or lead to any 9309 * inconsistencies when replayed). If the transaction was aborted, the 9310 * abort reason is propagated to userspace when attempting to commit 9311 * the transaction. If the log does not contain any of these inodes, we 9312 * allow the tasks to sync it.
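 * btrfs_end_log_trans() below drops the count taken with btrfs_pin_log_trans(), so any tasks blocked on syncing the log can proceed (and will honor the full-commit flag if we set it above).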
9313 */ 9314 if (ret && log_pinned) { 9315 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || 9316 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || 9317 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || 9318 (new_inode && 9319 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) 9320 btrfs_set_log_full_commit(trans); 9321 9322 btrfs_end_log_trans(root); 9323 log_pinned = false; 9324 } 9325 ret2 = btrfs_end_transaction(trans); 9326 ret = ret ? ret : ret2; 9327 out_notrans: 9328 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9329 up_read(&fs_info->subvol_sem); 9330 9331 return ret; 9332 } 9333 9334 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, 9335 struct inode *new_dir, struct dentry *new_dentry, 9336 unsigned int flags) 9337 { 9338 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9339 return -EINVAL; 9340 9341 if (flags & RENAME_EXCHANGE) 9342 return btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9343 new_dentry); 9344 9345 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); 9346 } 9347 9348 struct btrfs_delalloc_work { 9349 struct inode *inode; 9350 struct completion completion; 9351 struct list_head list; 9352 struct btrfs_work work; 9353 }; 9354 9355 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9356 { 9357 struct btrfs_delalloc_work *delalloc_work; 9358 struct inode *inode; 9359 9360 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9361 work); 9362 inode = delalloc_work->inode; 9363 filemap_flush(inode->i_mapping); 9364 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9365 &BTRFS_I(inode)->runtime_flags)) 9366 filemap_flush(inode->i_mapping); 9367 9368 iput(inode); 9369 complete(&delalloc_work->completion); 9370 } 9371 9372 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9373 { 9374 struct btrfs_delalloc_work *work; 9375 9376 work = kmalloc(sizeof(*work), GFP_NOFS); 9377 if (!work) 9378 return NULL; 9379 9380 init_completion(&work->completion); 9381 INIT_LIST_HEAD(&work->list); 9382 work->inode = inode; 9383 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9384 9385 return work; 9386 } 9387 9388 /* 9389 * some fairly slow code that needs optimization. This walks the list 9390 * of all the inodes with pending delalloc and forces them to disk. 
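 * Each inode gets its own btrfs_delalloc_work queued on the flush_workers workqueue, and we wait for all of the works to complete before returning, so the inodes are flushed in parallel.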

struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}

static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);

	return work;
}

/*
 * Some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		work = btrfs_alloc_delalloc_work(inode);
		if (!work) {
			iput(inode);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &work->work);
		if (*nr != U64_MAX) {
			(*nr)--;
			if (*nr == 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}

int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 nr = U64_MAX;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	return start_delalloc_inodes(root, &nr, true);
}

int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &nr, false);
		btrfs_put_root(root);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
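
/*
 * Usage sketch (an assumed caller, e.g. snapshot creation): flush delalloc
 * first and then wait for the resulting ordered extents so the snapshot
 * sees stable file contents:
 *
 *	ret = btrfs_start_delalloc_snapshot(root);
 *	if (!ret)
 *		btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 */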

static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for updating parent inode item
	 * 1 item for the inline extent item
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 7);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
				objectid, S_IFLNK | S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		inode = NULL;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
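
	/*
	 * Illustrative note on the layout just written (a sketch, not a
	 * normative description): the link target now lives in a single
	 * uncompressed inline file extent item keyed as
	 * (objectid == btrfs_ino(inode), type == BTRFS_EXTENT_DATA_KEY,
	 * offset == 0), so readlink() can be served from the leaf with no
	 * separate data extent on disk.
	 */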
9611 */ 9612 if (!err) 9613 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, 9614 BTRFS_I(inode), 0, index); 9615 if (err) 9616 goto out_unlock; 9617 9618 d_instantiate_new(dentry, inode); 9619 9620 out_unlock: 9621 btrfs_end_transaction(trans); 9622 if (err && inode) { 9623 inode_dec_link_count(inode); 9624 discard_new_inode(inode); 9625 } 9626 btrfs_btree_balance_dirty(fs_info); 9627 return err; 9628 } 9629 9630 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9631 struct btrfs_trans_handle *trans_in, 9632 struct inode *inode, struct btrfs_key *ins, 9633 u64 file_offset) 9634 { 9635 struct btrfs_file_extent_item stack_fi; 9636 struct btrfs_replace_extent_info extent_info; 9637 struct btrfs_trans_handle *trans = trans_in; 9638 struct btrfs_path *path; 9639 u64 start = ins->objectid; 9640 u64 len = ins->offset; 9641 int ret; 9642 9643 memset(&stack_fi, 0, sizeof(stack_fi)); 9644 9645 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9646 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9647 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9648 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9649 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9650 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9651 /* Encryption and other encoding is reserved and all 0 */ 9652 9653 ret = btrfs_qgroup_release_data(BTRFS_I(inode), file_offset, len); 9654 if (ret < 0) 9655 return ERR_PTR(ret); 9656 9657 if (trans) { 9658 ret = insert_reserved_file_extent(trans, BTRFS_I(inode), 9659 file_offset, &stack_fi, ret); 9660 if (ret) 9661 return ERR_PTR(ret); 9662 return trans; 9663 } 9664 9665 extent_info.disk_offset = start; 9666 extent_info.disk_len = len; 9667 extent_info.data_offset = 0; 9668 extent_info.data_len = len; 9669 extent_info.file_offset = file_offset; 9670 extent_info.extent_buf = (char *)&stack_fi; 9671 extent_info.is_new_extent = true; 9672 extent_info.qgroup_reserved = ret; 9673 extent_info.insertions = 0; 9674 9675 path = btrfs_alloc_path(); 9676 if (!path) 9677 return ERR_PTR(-ENOMEM); 9678 9679 ret = btrfs_replace_file_extents(inode, path, file_offset, 9680 file_offset + len - 1, &extent_info, 9681 &trans); 9682 btrfs_free_path(path); 9683 if (ret) 9684 return ERR_PTR(ret); 9685 9686 return trans; 9687 } 9688 9689 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9690 u64 start, u64 num_bytes, u64 min_size, 9691 loff_t actual_len, u64 *alloc_hint, 9692 struct btrfs_trans_handle *trans) 9693 { 9694 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9695 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 9696 struct extent_map *em; 9697 struct btrfs_root *root = BTRFS_I(inode)->root; 9698 struct btrfs_key ins; 9699 u64 cur_offset = start; 9700 u64 clear_offset = start; 9701 u64 i_size; 9702 u64 cur_bytes; 9703 u64 last_alloc = (u64)-1; 9704 int ret = 0; 9705 bool own_trans = true; 9706 u64 end = start + num_bytes - 1; 9707 9708 if (trans) 9709 own_trans = false; 9710 while (num_bytes > 0) { 9711 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9712 cur_bytes = max(cur_bytes, min_size); 9713 /* 9714 * If we are severely fragmented we could end up with really 9715 * small allocations, so if the allocator is returning small 9716 * chunks lets make its job easier by only searching for those 9717 * sized chunks. 
9718 */ 9719 cur_bytes = min(cur_bytes, last_alloc); 9720 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9721 min_size, 0, *alloc_hint, &ins, 1, 0); 9722 if (ret) 9723 break; 9724 9725 /* 9726 * We've reserved this space, and thus converted it from 9727 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9728 * from here on out we will only need to clear our reservation 9729 * for the remaining unreserved area, so advance our 9730 * clear_offset by our extent size. 9731 */ 9732 clear_offset += ins.offset; 9733 9734 last_alloc = ins.offset; 9735 trans = insert_prealloc_file_extent(trans, inode, &ins, cur_offset); 9736 /* 9737 * Now that we inserted the prealloc extent we can finally 9738 * decrement the number of reservations in the block group. 9739 * If we did it before, we could race with relocation and have 9740 * relocation miss the reserved extent, making it fail later. 9741 */ 9742 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9743 if (IS_ERR(trans)) { 9744 ret = PTR_ERR(trans); 9745 btrfs_free_reserved_extent(fs_info, ins.objectid, 9746 ins.offset, 0); 9747 break; 9748 } 9749 9750 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 9751 cur_offset + ins.offset -1, 0); 9752 9753 em = alloc_extent_map(); 9754 if (!em) { 9755 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 9756 &BTRFS_I(inode)->runtime_flags); 9757 goto next; 9758 } 9759 9760 em->start = cur_offset; 9761 em->orig_start = cur_offset; 9762 em->len = ins.offset; 9763 em->block_start = ins.objectid; 9764 em->block_len = ins.offset; 9765 em->orig_block_len = ins.offset; 9766 em->ram_bytes = ins.offset; 9767 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9768 em->generation = trans->transid; 9769 9770 while (1) { 9771 write_lock(&em_tree->lock); 9772 ret = add_extent_mapping(em_tree, em, 1); 9773 write_unlock(&em_tree->lock); 9774 if (ret != -EEXIST) 9775 break; 9776 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 9777 cur_offset + ins.offset - 1, 9778 0); 9779 } 9780 free_extent_map(em); 9781 next: 9782 num_bytes -= ins.offset; 9783 cur_offset += ins.offset; 9784 *alloc_hint = ins.objectid + ins.offset; 9785 9786 inode_inc_iversion(inode); 9787 inode->i_ctime = current_time(inode); 9788 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9789 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9790 (actual_len > inode->i_size) && 9791 (cur_offset > inode->i_size)) { 9792 if (cur_offset > actual_len) 9793 i_size = actual_len; 9794 else 9795 i_size = cur_offset; 9796 i_size_write(inode, i_size); 9797 btrfs_inode_safe_disk_i_size_write(inode, 0); 9798 } 9799 9800 ret = btrfs_update_inode(trans, root, inode); 9801 9802 if (ret) { 9803 btrfs_abort_transaction(trans, ret); 9804 if (own_trans) 9805 btrfs_end_transaction(trans); 9806 break; 9807 } 9808 9809 if (own_trans) { 9810 btrfs_end_transaction(trans); 9811 trans = NULL; 9812 } 9813 } 9814 if (clear_offset < end) 9815 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9816 end - clear_offset + 1); 9817 return ret; 9818 } 9819 9820 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9821 u64 start, u64 num_bytes, u64 min_size, 9822 loff_t actual_len, u64 *alloc_hint) 9823 { 9824 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9825 min_size, actual_len, alloc_hint, 9826 NULL); 9827 } 9828 9829 int btrfs_prealloc_file_range_trans(struct inode *inode, 9830 struct btrfs_trans_handle *trans, int mode, 9831 u64 start, u64 num_bytes, u64 min_size, 9832 loff_t actual_len, u64 *alloc_hint) 9833 { 9834 return __btrfs_prealloc_file_range(inode, 

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}

static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	u64 objectid;
	u64 index;
	int ret = 0;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
				btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	inode->i_mapping->a_ops = &btrfs_aops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto out;
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *	d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	d_tmpfile(dentry, inode);
	unlock_new_inode(inode);
	mark_inode_dirty(inode);
out:
	btrfs_end_transaction(trans);
	if (ret && inode)
		discard_new_inode(inode);
	btrfs_btree_balance_dirty(fs_info);
	return ret;
}
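
/*
 * Illustrative lifetime of the O_TMPFILE inode above (a sketch of the
 * expected flow, not a guarantee): it starts with an orphan item and no
 * directory entry, so a crash simply lets orphan cleanup delete it on the
 * next mount; if userspace later links it in with
 * linkat(..., AT_EMPTY_PATH), it gains a name and the orphan item is
 * removed again.
 */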
9943 */ 9944 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 9945 bool is_block_group) 9946 { 9947 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9948 struct btrfs_swapfile_pin *sp, *entry; 9949 struct rb_node **p; 9950 struct rb_node *parent = NULL; 9951 9952 sp = kmalloc(sizeof(*sp), GFP_NOFS); 9953 if (!sp) 9954 return -ENOMEM; 9955 sp->ptr = ptr; 9956 sp->inode = inode; 9957 sp->is_block_group = is_block_group; 9958 9959 spin_lock(&fs_info->swapfile_pins_lock); 9960 p = &fs_info->swapfile_pins.rb_node; 9961 while (*p) { 9962 parent = *p; 9963 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 9964 if (sp->ptr < entry->ptr || 9965 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 9966 p = &(*p)->rb_left; 9967 } else if (sp->ptr > entry->ptr || 9968 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 9969 p = &(*p)->rb_right; 9970 } else { 9971 spin_unlock(&fs_info->swapfile_pins_lock); 9972 kfree(sp); 9973 return 1; 9974 } 9975 } 9976 rb_link_node(&sp->node, parent, p); 9977 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 9978 spin_unlock(&fs_info->swapfile_pins_lock); 9979 return 0; 9980 } 9981 9982 /* Free all of the entries pinned by this swapfile. */ 9983 static void btrfs_free_swapfile_pins(struct inode *inode) 9984 { 9985 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9986 struct btrfs_swapfile_pin *sp; 9987 struct rb_node *node, *next; 9988 9989 spin_lock(&fs_info->swapfile_pins_lock); 9990 node = rb_first(&fs_info->swapfile_pins); 9991 while (node) { 9992 next = rb_next(node); 9993 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 9994 if (sp->inode == inode) { 9995 rb_erase(&sp->node, &fs_info->swapfile_pins); 9996 if (sp->is_block_group) 9997 btrfs_put_block_group(sp->ptr); 9998 kfree(sp); 9999 } 10000 node = next; 10001 } 10002 spin_unlock(&fs_info->swapfile_pins_lock); 10003 } 10004 10005 struct btrfs_swap_info { 10006 u64 start; 10007 u64 block_start; 10008 u64 block_len; 10009 u64 lowest_ppage; 10010 u64 highest_ppage; 10011 unsigned long nr_pages; 10012 int nr_extents; 10013 }; 10014 10015 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10016 struct btrfs_swap_info *bsi) 10017 { 10018 unsigned long nr_pages; 10019 u64 first_ppage, first_ppage_reported, next_ppage; 10020 int ret; 10021 10022 first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; 10023 next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, 10024 PAGE_SIZE) >> PAGE_SHIFT; 10025 10026 if (first_ppage >= next_ppage) 10027 return 0; 10028 nr_pages = next_ppage - first_ppage; 10029 10030 first_ppage_reported = first_ppage; 10031 if (bsi->start == 0) 10032 first_ppage_reported++; 10033 if (bsi->lowest_ppage > first_ppage_reported) 10034 bsi->lowest_ppage = first_ppage_reported; 10035 if (bsi->highest_ppage < (next_ppage - 1)) 10036 bsi->highest_ppage = next_ppage - 1; 10037 10038 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10039 if (ret < 0) 10040 return ret; 10041 bsi->nr_extents += ret; 10042 bsi->nr_pages += nr_pages; 10043 return 0; 10044 } 10045 10046 static void btrfs_swap_deactivate(struct file *file) 10047 { 10048 struct inode *inode = file_inode(file); 10049 10050 btrfs_free_swapfile_pins(inode); 10051 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10052 } 10053 10054 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10055 sector_t *span) 10056 { 10057 struct inode *inode = file_inode(file); 10058 struct btrfs_fs_info *fs_info = 

static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If
	 * the file changes again after this, the user is doing something
	 * stupid and we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check
	 * them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 */
	atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but
			 * in case something changes in the future, let's
			 * catch it here rather than later.
			 */
10144 */ 10145 btrfs_warn(fs_info, "swapfile must not be inline"); 10146 ret = -EINVAL; 10147 goto out; 10148 } 10149 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10150 btrfs_warn(fs_info, "swapfile must not be compressed"); 10151 ret = -EINVAL; 10152 goto out; 10153 } 10154 10155 logical_block_start = em->block_start + (start - em->start); 10156 len = min(len, em->len - (start - em->start)); 10157 free_extent_map(em); 10158 em = NULL; 10159 10160 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true); 10161 if (ret < 0) { 10162 goto out; 10163 } else if (ret) { 10164 ret = 0; 10165 } else { 10166 btrfs_warn(fs_info, 10167 "swapfile must not be copy-on-write"); 10168 ret = -EINVAL; 10169 goto out; 10170 } 10171 10172 em = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10173 if (IS_ERR(em)) { 10174 ret = PTR_ERR(em); 10175 goto out; 10176 } 10177 10178 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10179 btrfs_warn(fs_info, 10180 "swapfile must have single data profile"); 10181 ret = -EINVAL; 10182 goto out; 10183 } 10184 10185 if (device == NULL) { 10186 device = em->map_lookup->stripes[0].dev; 10187 ret = btrfs_add_swapfile_pin(inode, device, false); 10188 if (ret == 1) 10189 ret = 0; 10190 else if (ret) 10191 goto out; 10192 } else if (device != em->map_lookup->stripes[0].dev) { 10193 btrfs_warn(fs_info, "swapfile must be on one device"); 10194 ret = -EINVAL; 10195 goto out; 10196 } 10197 10198 physical_block_start = (em->map_lookup->stripes[0].physical + 10199 (logical_block_start - em->start)); 10200 len = min(len, em->len - (logical_block_start - em->start)); 10201 free_extent_map(em); 10202 em = NULL; 10203 10204 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10205 if (!bg) { 10206 btrfs_warn(fs_info, 10207 "could not find block group containing swapfile"); 10208 ret = -EINVAL; 10209 goto out; 10210 } 10211 10212 ret = btrfs_add_swapfile_pin(inode, bg, true); 10213 if (ret) { 10214 btrfs_put_block_group(bg); 10215 if (ret == 1) 10216 ret = 0; 10217 else 10218 goto out; 10219 } 10220 10221 if (bsi.block_len && 10222 bsi.block_start + bsi.block_len == physical_block_start) { 10223 bsi.block_len += len; 10224 } else { 10225 if (bsi.block_len) { 10226 ret = btrfs_add_swap_extent(sis, &bsi); 10227 if (ret) 10228 goto out; 10229 } 10230 bsi.start = start; 10231 bsi.block_start = physical_block_start; 10232 bsi.block_len = len; 10233 } 10234 10235 start += len; 10236 } 10237 10238 if (bsi.block_len) 10239 ret = btrfs_add_swap_extent(sis, &bsi); 10240 10241 out: 10242 if (!IS_ERR_OR_NULL(em)) 10243 free_extent_map(em); 10244 10245 unlock_extent_cached(io_tree, 0, isize - 1, &cached_state); 10246 10247 if (ret) 10248 btrfs_swap_deactivate(file); 10249 10250 btrfs_exclop_finish(fs_info); 10251 10252 if (ret) 10253 return ret; 10254 10255 if (device) 10256 sis->bdev = device->bdev; 10257 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10258 sis->max = bsi.nr_pages; 10259 sis->pages = bsi.nr_pages - 1; 10260 sis->highest_bit = bsi.nr_pages - 1; 10261 return bsi.nr_extents; 10262 } 10263 #else 10264 static void btrfs_swap_deactivate(struct file *file) 10265 { 10266 } 10267 10268 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10269 sector_t *span) 10270 { 10271 return -EOPNOTSUPP; 10272 } 10273 #endif 10274 10275 static const struct inode_operations btrfs_dir_inode_operations = { 10276 .getattr = btrfs_getattr, 10277 .lookup = btrfs_lookup, 10278 .create = btrfs_create, 10279 .unlink = btrfs_unlink, 10280 

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO and that also change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btrfs_migratepage,
#endif
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};