// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	struct inode *inode;

	/*
	 * Since DIO can use anonymous pages, we cannot use page_offset() to
	 * grab the file offset, thus need a dedicated member for file offset.
	 */
	u64 file_offset;
	/* Used for bio::bi_size */
	u32 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* Array of checksums */
	u8 *csums;

	/* This must be last */
	struct bio bio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 *  BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 *  BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first
 *		      attempt return -EAGAIN
 *  BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&BTRFS_I(inode)->i_mmap_lock);
	return 0;
}

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&BTRFS_I(inode)->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
}
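/*
 * Editorial note, not part of the original source: an illustrative caller of
 * the two helpers above, showing the non-blocking shared-lock pattern.  The
 * same flag set must be passed to both calls so the unlock matches the
 * shared/exclusive mode chosen at lock time:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	// -EAGAIN, the lock was contended
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */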
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;

	struct page *page;

	while (index <= end_index) {
		/*
		 * For the locked page, we will call end_extent_writepage() on
		 * it in run_delalloc_range() for the error handling.  That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then __endio_write_update_ordered() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	/* The locked page covers the full range, nothing needs to be done */
	if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
		return;
	/*
	 * In case this page belongs to the delalloc range being instantiated
	 * then skip it, since the first page of a range is going to be
	 * properly cleaned up by the caller of run_delalloc_range
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
		offset = page_offset(locked_page) + PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}


/*
 * Conditionally insert an inline extent into the file.  This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space.  An inlined extent does not
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time it is always aligned to page size, so just free one
	 * page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
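/*
 * Editorial example, not part of the original source: on a 4K sectorsize
 * filesystem with the default max_inline mount value (2048 bytes at the time
 * of writing), a 1000 byte file written at offset 0 passes all the checks
 * above (it ends at i_size, is smaller than a sector, and fits within
 * max_inline and the leaf), so its data is stored inline in the file extent
 * item.  A 5000 byte file exceeds the sectorsize and therefore always gets a
 * regular data extent instead.
 */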
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *	    \- A	    \- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), so whichever finishes later will find the page
	 * already unlocked, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be used if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page will be locked until the full compression
	 * finishes, delaying the write of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges first
	 * to prevent any submitted async extent from unlocking the full page.
	 * By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(end + 1, PAGE_SIZE))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}
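/*
 * Editorial example, not part of the original source: with 64K pages and a 4K
 * sectorsize, a delalloc range [16K, 48K) is sector aligned but not page
 * aligned, so inode_need_compress() above refuses it and the range goes
 * through the regular COW path.  A range covering the full [0, 64K) page is
 * accepted for compression.
 */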
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * We create compressed extents in two phases.  The first phase compresses a
 * range of pages that have already been locked (both pages and state bits are
 * locked).
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered
 * work queue takes care of making sure that happens in the same order things
 * were put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the end of the
	 * file is before the start of our current requested range of bytes, we
	 * bail out to the uncompressed cleanup code that can deal with all of
	 * this.
	 *
	 * It isn't really the fastest way to fix things, but this is a very
	 * uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that isn't an
	 * inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(round_up(actual_end, blocksize), PAGE_SIZE))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * We need to call clear_page_dirty_for_io on each page in the
		 * range.  Otherwise applications with the file mmap'd can
		 * wander in and change the page contents while we are
		 * compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create an inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * Inline extent creation worked or returned an error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * We aren't doing an inline extent.  Round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * One last check to make sure the compression is really a win,
		 * compare the page count read with the blocks on disk,
		 * compression must free at least one sector size.
		 */
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * The compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array.
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    page_offset(async_chunk->locked_page) >= start &&
	    page_offset(async_chunk->locked_page) <= end) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}
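/*
 * Editorial note, not part of the original source: compression above works on
 * at most BTRFS_MAX_UNCOMPRESSED bytes of input per pass (128K at the time of
 * writing), so a larger dirty range is handled by looping back to the "again"
 * label, each pass producing one async_extent that is later written out by
 * submit_compressed_extents().
 */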
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to the NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock_page == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		if (locked_page)
			unlock_page(locked_page);
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}

static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end);

	/* We have fallen back to uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to the
		 * non-compressed path for ENOSPC.  But if we can't reserve
		 * space even for the compressed size, how could it work for
		 * the uncompressed size which requires a larger size?  So here
		 * we directly go down the error path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_cache(inode, start, end, 0);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags,
			    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}
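/*
 * Editorial summary, not part of the original source: submit_one_async_extent()
 * performs, in order: reserve a data extent, insert an extent map via
 * create_io_em(), create the ordered extent, clear delalloc and start
 * writeback on the pages, and finally hand the compressed pages to
 * btrfs_submit_compressed_write().  Any failure after the reservation unwinds
 * through out_free_reserve/out_free, which release the reserved extent and
 * clean up the delalloc range.
 */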
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
			    inode->root->root_key.objectid,
			    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file, the callbacks
 * end up in this code.  The basic idea is to allocate extents on disk for the
 * range, and create ordered data structs in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use it to
 * make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with IO when
 * we return.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of the page, which means data
	 * writeback is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger an inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it, as
			 * it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents.  Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents.  However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop the cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the metadata of this
			 * ordered extent, as its metadata should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly set up for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error: since start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount.  We must make
	 * sure extent_clear_unlock_delalloc() does not try to decrement again
	 * the data space_info's bytes_may_use counter, therefore we do not pass
	 * it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}
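/*
 * Editorial example, not part of the original source: for a 1MiB delalloc
 * range on a 4K sectorsize filesystem, each pass of the loop in
 * cow_file_range() asks btrfs_reserve_extent() for the whole remaining range
 * but accepts anything down to min_alloc_size.  If the allocator returns,
 * say, a 512K extent, an ordered extent is created for the first 512K, the
 * pages for that subrange get the Ordered bit set (and are unlocked when
 * @unlock is set), and the loop repeats for the remaining 512K with an
 * updated allocation hint.
 */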
/*
 * Work queue callback to start compression on a file and pages.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * Work queue callback to submit previously compressed pages.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_cow_start has failed to compress, in
	 * which case we don't have anything to submit, yet we need to always
	 * adjust ->async_delalloc_pages as it's paired with the init happening
	 * in cow_file_range_async
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

static int cow_file_range_async(struct btrfs_inode *inode,
				struct writeback_control *wbc,
				struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 cur_end;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	bool should_compress;
	unsigned nofs_flag;
	const unsigned int write_flags = wbc_to_write_flags(wbc);

	unlock_extent(&inode->io_tree, start, end);

	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
		num_chunks = 1;
		should_compress = false;
	} else {
		should_compress = true;
	}

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!ctx) {
		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
			EXTENT_DO_ACCOUNTING;
		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;

		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     clear_bits, page_ops);
		return -ENOMEM;
	}

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		if (should_compress)
			cur_end = min(end, start + SZ_512K - 1);
		else
			cur_end = end;

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	int ret;

	ret = cow_file_range(inode, locked_page, start, end, page_started,
			     nr_written, 0);
	if (ret)
		return ret;

	if (*page_started)
		return 0;

	__set_page_dirty_nobuffers(locked_page);
	account_page_redirty(locked_page);
	extent_write_locked_range(&inode->vfs_inode, start, end);
	*page_started = 1;

	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we did
	 * not reserve data space for it, since we thought we could do NOCOW for
	 * the respective file range (either there is a prealloc extent or the
	 * inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode by
	 * a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range.  We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason.  Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 0, 0, NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1);
}
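/*
 * Editorial example, not part of the original source: suppose a buffered
 * write over a preallocated extent could not reserve data space, so the range
 * was marked EXTENT_NORESERVE.  If scrub later turns the extent's block group
 * read-only, writeback must COW after all; fallback_to_cow() then bumps
 * bytes_may_use by the NORESERVE byte count (or by the whole range for free
 * space cache / relocation inodes) before calling cow_file_range(), so the
 * COW allocation's space accounting balances out.
 */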
struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	/* Can't access these fields unless we know it's not an inline extent. */
	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (!args->strict &&
	    btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (args->disk_bytenr == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
				    key->offset - args->extent_offset,
				    args->disk_bytenr, false, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * csum_exist_in_range() call below we will end up allocating
		 * another path.  So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->disk_bytenr += args->extent_offset;
	args->disk_bytenr += args->start - key->offset;
	args->num_bytes = min(args->end + 1, extent_end) - args->start;

	/*
	 * Force COW if csums exist in the range.  This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}
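/*
 * Editorial summary, not part of the original source: can_nocow_file_extent()
 * returns 0 (forcing COW) for inline extents, explicit holes,
 * compressed/encrypted/other-encoded extents, regular extents on inodes
 * without NODATACOW, extents that predate the last snapshot (unless
 * args->strict), extents with other references per btrfs_cross_ref_exist(),
 * ranges that already have csums, and when pending snapshots exist on the
 * writeback path.
 */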
1786 * 1787 * If no cow copies or snapshots exist, we write directly to the existing 1788 * blocks on disk 1789 */ 1790 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1791 struct page *locked_page, 1792 const u64 start, const u64 end, 1793 int *page_started, 1794 unsigned long *nr_written) 1795 { 1796 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1797 struct btrfs_root *root = inode->root; 1798 struct btrfs_path *path; 1799 u64 cow_start = (u64)-1; 1800 u64 cur_offset = start; 1801 int ret; 1802 bool check_prev = true; 1803 u64 ino = btrfs_ino(inode); 1804 struct btrfs_block_group *bg; 1805 bool nocow = false; 1806 struct can_nocow_file_extent_args nocow_args = { 0 }; 1807 1808 path = btrfs_alloc_path(); 1809 if (!path) { 1810 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1811 EXTENT_LOCKED | EXTENT_DELALLOC | 1812 EXTENT_DO_ACCOUNTING | 1813 EXTENT_DEFRAG, PAGE_UNLOCK | 1814 PAGE_START_WRITEBACK | 1815 PAGE_END_WRITEBACK); 1816 return -ENOMEM; 1817 } 1818 1819 nocow_args.end = end; 1820 nocow_args.writeback_path = true; 1821 1822 while (1) { 1823 struct btrfs_key found_key; 1824 struct btrfs_file_extent_item *fi; 1825 struct extent_buffer *leaf; 1826 u64 extent_end; 1827 u64 ram_bytes; 1828 u64 nocow_end; 1829 int extent_type; 1830 1831 nocow = false; 1832 1833 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 1834 cur_offset, 0); 1835 if (ret < 0) 1836 goto error; 1837 1838 /* 1839 * If there is no extent for our range when doing the initial 1840 * search, then go back to the previous slot as it will be the 1841 * one containing the search offset 1842 */ 1843 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1844 leaf = path->nodes[0]; 1845 btrfs_item_key_to_cpu(leaf, &found_key, 1846 path->slots[0] - 1); 1847 if (found_key.objectid == ino && 1848 found_key.type == BTRFS_EXTENT_DATA_KEY) 1849 path->slots[0]--; 1850 } 1851 check_prev = false; 1852 next_slot: 1853 /* Go to next leaf if we have exhausted the current one */ 1854 leaf = path->nodes[0]; 1855 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1856 ret = btrfs_next_leaf(root, path); 1857 if (ret < 0) { 1858 if (cow_start != (u64)-1) 1859 cur_offset = cow_start; 1860 goto error; 1861 } 1862 if (ret > 0) 1863 break; 1864 leaf = path->nodes[0]; 1865 } 1866 1867 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1868 1869 /* Didn't find anything for our INO */ 1870 if (found_key.objectid > ino) 1871 break; 1872 /* 1873 * Keep searching until we find an EXTENT_ITEM or there are no 1874 * more extents for this inode 1875 */ 1876 if (WARN_ON_ONCE(found_key.objectid < ino) || 1877 found_key.type < BTRFS_EXTENT_DATA_KEY) { 1878 path->slots[0]++; 1879 goto next_slot; 1880 } 1881 1882 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 1883 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 1884 found_key.offset > end) 1885 break; 1886 1887 /* 1888 * If the found extent starts after requested offset, then 1889 * adjust extent_end to be right before this extent begins 1890 */ 1891 if (found_key.offset > cur_offset) { 1892 extent_end = found_key.offset; 1893 extent_type = 0; 1894 goto out_check; 1895 } 1896 1897 /* 1898 * Found extent which begins before our range and potentially 1899 * intersect it 1900 */ 1901 fi = btrfs_item_ptr(leaf, path->slots[0], 1902 struct btrfs_file_extent_item); 1903 extent_type = btrfs_file_extent_type(leaf, fi); 1904 /* If this is triggered then we have a memory corruption. 
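 * An extent type outside of BTRFS_NR_FILE_EXTENT_TYPES cannot come from
 * a valid leaf, so besides the ASSERT for debug builds we also bail out
 * with -EUCLEAN below instead of trusting the item.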
*/ 1905 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 1906 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 1907 ret = -EUCLEAN; 1908 goto error; 1909 } 1910 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1911 extent_end = btrfs_file_extent_end(path); 1912 1913 /* 1914 * If the extent we got ends before our current offset, skip to 1915 * the next extent. 1916 */ 1917 if (extent_end <= cur_offset) { 1918 path->slots[0]++; 1919 goto next_slot; 1920 } 1921 1922 nocow_args.start = cur_offset; 1923 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 1924 if (ret < 0) { 1925 if (cow_start != (u64)-1) 1926 cur_offset = cow_start; 1927 goto error; 1928 } else if (ret == 0) { 1929 goto out_check; 1930 } 1931 1932 ret = 0; 1933 bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 1934 if (bg) 1935 nocow = true; 1936 out_check: 1937 /* 1938 * If nocow is false then record the beginning of the range 1939 * that needs to be COWed 1940 */ 1941 if (!nocow) { 1942 if (cow_start == (u64)-1) 1943 cow_start = cur_offset; 1944 cur_offset = extent_end; 1945 if (cur_offset > end) 1946 break; 1947 if (!path->nodes[0]) 1948 continue; 1949 path->slots[0]++; 1950 goto next_slot; 1951 } 1952 1953 /* 1954 * COW range from cow_start to found_key.offset - 1. As the key 1955 * will contain the beginning of the first extent that can be 1956 * NOCOW, following one which needs to be COW'ed 1957 */ 1958 if (cow_start != (u64)-1) { 1959 ret = fallback_to_cow(inode, locked_page, 1960 cow_start, found_key.offset - 1, 1961 page_started, nr_written); 1962 if (ret) 1963 goto error; 1964 cow_start = (u64)-1; 1965 } 1966 1967 nocow_end = cur_offset + nocow_args.num_bytes - 1; 1968 1969 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1970 u64 orig_start = found_key.offset - nocow_args.extent_offset; 1971 struct extent_map *em; 1972 1973 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 1974 orig_start, 1975 nocow_args.disk_bytenr, /* block_start */ 1976 nocow_args.num_bytes, /* block_len */ 1977 nocow_args.disk_num_bytes, /* orig_block_len */ 1978 ram_bytes, BTRFS_COMPRESS_NONE, 1979 BTRFS_ORDERED_PREALLOC); 1980 if (IS_ERR(em)) { 1981 ret = PTR_ERR(em); 1982 goto error; 1983 } 1984 free_extent_map(em); 1985 ret = btrfs_add_ordered_extent(inode, 1986 cur_offset, nocow_args.num_bytes, 1987 nocow_args.num_bytes, 1988 nocow_args.disk_bytenr, 1989 nocow_args.num_bytes, 0, 1990 1 << BTRFS_ORDERED_PREALLOC, 1991 BTRFS_COMPRESS_NONE); 1992 if (ret) { 1993 btrfs_drop_extent_cache(inode, cur_offset, 1994 nocow_end, 0); 1995 goto error; 1996 } 1997 } else { 1998 ret = btrfs_add_ordered_extent(inode, cur_offset, 1999 nocow_args.num_bytes, 2000 nocow_args.num_bytes, 2001 nocow_args.disk_bytenr, 2002 nocow_args.num_bytes, 2003 0, 2004 1 << BTRFS_ORDERED_NOCOW, 2005 BTRFS_COMPRESS_NONE); 2006 if (ret) 2007 goto error; 2008 } 2009 2010 if (nocow) { 2011 btrfs_dec_nocow_writers(bg); 2012 nocow = false; 2013 } 2014 2015 if (btrfs_is_data_reloc_root(root)) 2016 /* 2017 * Error handled later, as we must prevent 2018 * extent_clear_unlock_delalloc() in error handler 2019 * from freeing metadata of created ordered extent. 
2020 */ 2021 ret = btrfs_reloc_clone_csums(inode, cur_offset, 2022 nocow_args.num_bytes); 2023 2024 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2025 locked_page, EXTENT_LOCKED | 2026 EXTENT_DELALLOC | 2027 EXTENT_CLEAR_DATA_RESV, 2028 PAGE_UNLOCK | PAGE_SET_ORDERED); 2029 2030 cur_offset = extent_end; 2031 2032 /* 2033 * btrfs_reloc_clone_csums() error, now we're OK to call error 2034 * handler, as metadata for created ordered extent will only 2035 * be freed by btrfs_finish_ordered_io(). 2036 */ 2037 if (ret) 2038 goto error; 2039 if (cur_offset > end) 2040 break; 2041 } 2042 btrfs_release_path(path); 2043 2044 if (cur_offset <= end && cow_start == (u64)-1) 2045 cow_start = cur_offset; 2046 2047 if (cow_start != (u64)-1) { 2048 cur_offset = end; 2049 ret = fallback_to_cow(inode, locked_page, cow_start, end, 2050 page_started, nr_written); 2051 if (ret) 2052 goto error; 2053 } 2054 2055 error: 2056 if (nocow) 2057 btrfs_dec_nocow_writers(bg); 2058 2059 if (ret && cur_offset < end) 2060 extent_clear_unlock_delalloc(inode, cur_offset, end, 2061 locked_page, EXTENT_LOCKED | 2062 EXTENT_DELALLOC | EXTENT_DEFRAG | 2063 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2064 PAGE_START_WRITEBACK | 2065 PAGE_END_WRITEBACK); 2066 btrfs_free_path(path); 2067 return ret; 2068 } 2069 2070 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2071 { 2072 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2073 if (inode->defrag_bytes && 2074 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 2075 0, NULL)) 2076 return false; 2077 return true; 2078 } 2079 return false; 2080 } 2081 2082 /* 2083 * Function to process delayed allocation (create CoW) for ranges which are 2084 * being touched for the first time. 2085 */ 2086 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2087 u64 start, u64 end, int *page_started, unsigned long *nr_written, 2088 struct writeback_control *wbc) 2089 { 2090 int ret; 2091 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2092 2093 /* 2094 * The range must cover part of the @locked_page, or the returned 2095 * @page_started can confuse the caller. 2096 */ 2097 ASSERT(!(end <= page_offset(locked_page) || 2098 start >= page_offset(locked_page) + PAGE_SIZE)); 2099 2100 if (should_nocow(inode, start, end)) { 2101 /* 2102 * Normally on a zoned device we're only doing COW writes, but 2103 * in case of relocation on a zoned filesystem we have taken 2104 * precaution, that we're only writing sequentially. It's safe 2105 * to use run_delalloc_nocow() here, like for regular 2106 * preallocated inodes. 
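 * The ASSERT below encodes exactly that: on a zoned filesystem the only
 * inode expected to take the NOCOW path here belongs to the data
 * relocation root.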
2107 */ 2108 ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root)); 2109 ret = run_delalloc_nocow(inode, locked_page, start, end, 2110 page_started, nr_written); 2111 } else if (!btrfs_inode_can_compress(inode) || 2112 !inode_need_compress(inode, start, end)) { 2113 if (zoned) 2114 ret = run_delalloc_zoned(inode, locked_page, start, end, 2115 page_started, nr_written); 2116 else 2117 ret = cow_file_range(inode, locked_page, start, end, 2118 page_started, nr_written, 1); 2119 } else { 2120 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 2121 ret = cow_file_range_async(inode, wbc, locked_page, start, end, 2122 page_started, nr_written); 2123 } 2124 ASSERT(ret <= 0); 2125 if (ret) 2126 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2127 end - start + 1); 2128 return ret; 2129 } 2130 2131 void btrfs_split_delalloc_extent(struct inode *inode, 2132 struct extent_state *orig, u64 split) 2133 { 2134 u64 size; 2135 2136 /* not delalloc, ignore it */ 2137 if (!(orig->state & EXTENT_DELALLOC)) 2138 return; 2139 2140 size = orig->end - orig->start + 1; 2141 if (size > BTRFS_MAX_EXTENT_SIZE) { 2142 u32 num_extents; 2143 u64 new_size; 2144 2145 /* 2146 * See the explanation in btrfs_merge_delalloc_extent, the same 2147 * applies here, just in reverse. 2148 */ 2149 new_size = orig->end - split + 1; 2150 num_extents = count_max_extents(new_size); 2151 new_size = split - orig->start; 2152 num_extents += count_max_extents(new_size); 2153 if (count_max_extents(size) >= num_extents) 2154 return; 2155 } 2156 2157 spin_lock(&BTRFS_I(inode)->lock); 2158 btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); 2159 spin_unlock(&BTRFS_I(inode)->lock); 2160 } 2161 2162 /* 2163 * Handle merged delayed allocation extents so we can keep track of new extents 2164 * that are just merged onto old extents, such as when we are doing sequential 2165 * writes, so we can properly account for the metadata space we'll need. 2166 */ 2167 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new, 2168 struct extent_state *other) 2169 { 2170 u64 new_size, old_size; 2171 u32 num_extents; 2172 2173 /* not delalloc, ignore it */ 2174 if (!(other->state & EXTENT_DELALLOC)) 2175 return; 2176 2177 if (new->start > other->start) 2178 new_size = new->end - other->start + 1; 2179 else 2180 new_size = other->end - new->start + 1; 2181 2182 /* we're not bigger than the max, unreserve the space and go */ 2183 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 2184 spin_lock(&BTRFS_I(inode)->lock); 2185 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 2186 spin_unlock(&BTRFS_I(inode)->lock); 2187 return; 2188 } 2189 2190 /* 2191 * We have to add up either side to figure out how many extents were 2192 * accounted for before we merged into one big extent. If the number of 2193 * extents we accounted for is <= the amount we need for the new range 2194 * then we can return, otherwise drop. Think of it like this 2195 * 2196 * [ 4k][MAX_SIZE] 2197 * 2198 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2199 * need 2 outstanding extents, on one side we have 1 and the other side 2200 * we have 1 so they are == and we can return. But in this case 2201 * 2202 * [MAX_SIZE+4k][MAX_SIZE+4k] 2203 * 2204 * Each range on their own accounts for 2 extents, but merged together 2205 * they are only 3 extents worth of accounting, so we need to drop in 2206 * this case. 
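 *
 * In count_max_extents() terms (illustrative numbers only, assuming the
 * usual 128M BTRFS_MAX_EXTENT_SIZE): each [MAX_SIZE+4k] range rounds up
 * to 2 outstanding extents, 4 in total, while the merged 2*MAX_SIZE+8k
 * range rounds up to only 3, so one outstanding extent is dropped below.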
2207 */ 2208 old_size = other->end - other->start + 1; 2209 num_extents = count_max_extents(old_size); 2210 old_size = new->end - new->start + 1; 2211 num_extents += count_max_extents(old_size); 2212 if (count_max_extents(new_size) >= num_extents) 2213 return; 2214 2215 spin_lock(&BTRFS_I(inode)->lock); 2216 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 2217 spin_unlock(&BTRFS_I(inode)->lock); 2218 } 2219 2220 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 2221 struct inode *inode) 2222 { 2223 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2224 2225 spin_lock(&root->delalloc_lock); 2226 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 2227 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 2228 &root->delalloc_inodes); 2229 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2230 &BTRFS_I(inode)->runtime_flags); 2231 root->nr_delalloc_inodes++; 2232 if (root->nr_delalloc_inodes == 1) { 2233 spin_lock(&fs_info->delalloc_root_lock); 2234 BUG_ON(!list_empty(&root->delalloc_root)); 2235 list_add_tail(&root->delalloc_root, 2236 &fs_info->delalloc_roots); 2237 spin_unlock(&fs_info->delalloc_root_lock); 2238 } 2239 } 2240 spin_unlock(&root->delalloc_lock); 2241 } 2242 2243 2244 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 2245 struct btrfs_inode *inode) 2246 { 2247 struct btrfs_fs_info *fs_info = root->fs_info; 2248 2249 if (!list_empty(&inode->delalloc_inodes)) { 2250 list_del_init(&inode->delalloc_inodes); 2251 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2252 &inode->runtime_flags); 2253 root->nr_delalloc_inodes--; 2254 if (!root->nr_delalloc_inodes) { 2255 ASSERT(list_empty(&root->delalloc_inodes)); 2256 spin_lock(&fs_info->delalloc_root_lock); 2257 BUG_ON(list_empty(&root->delalloc_root)); 2258 list_del_init(&root->delalloc_root); 2259 spin_unlock(&fs_info->delalloc_root_lock); 2260 } 2261 } 2262 } 2263 2264 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 2265 struct btrfs_inode *inode) 2266 { 2267 spin_lock(&root->delalloc_lock); 2268 __btrfs_del_delalloc_inode(root, inode); 2269 spin_unlock(&root->delalloc_lock); 2270 } 2271 2272 /* 2273 * Properly track delayed allocation bytes in the inode and to maintain the 2274 * list of inodes that have pending delalloc work to be done. 
2275 */ 2276 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, 2277 unsigned *bits) 2278 { 2279 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2280 2281 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) 2282 WARN_ON(1); 2283 /* 2284 * set_bit and clear bit hooks normally require _irqsave/restore 2285 * but in this case, we are only testing for the DELALLOC 2286 * bit, which is only set or cleared with irqs on 2287 */ 2288 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 2289 struct btrfs_root *root = BTRFS_I(inode)->root; 2290 u64 len = state->end + 1 - state->start; 2291 u32 num_extents = count_max_extents(len); 2292 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); 2293 2294 spin_lock(&BTRFS_I(inode)->lock); 2295 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents); 2296 spin_unlock(&BTRFS_I(inode)->lock); 2297 2298 /* For sanity tests */ 2299 if (btrfs_is_testing(fs_info)) 2300 return; 2301 2302 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2303 fs_info->delalloc_batch); 2304 spin_lock(&BTRFS_I(inode)->lock); 2305 BTRFS_I(inode)->delalloc_bytes += len; 2306 if (*bits & EXTENT_DEFRAG) 2307 BTRFS_I(inode)->defrag_bytes += len; 2308 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2309 &BTRFS_I(inode)->runtime_flags)) 2310 btrfs_add_delalloc_inodes(root, inode); 2311 spin_unlock(&BTRFS_I(inode)->lock); 2312 } 2313 2314 if (!(state->state & EXTENT_DELALLOC_NEW) && 2315 (*bits & EXTENT_DELALLOC_NEW)) { 2316 spin_lock(&BTRFS_I(inode)->lock); 2317 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - 2318 state->start; 2319 spin_unlock(&BTRFS_I(inode)->lock); 2320 } 2321 } 2322 2323 /* 2324 * Once a range is no longer delalloc this function ensures that proper 2325 * accounting happens. 2326 */ 2327 void btrfs_clear_delalloc_extent(struct inode *vfs_inode, 2328 struct extent_state *state, unsigned *bits) 2329 { 2330 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 2331 struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb); 2332 u64 len = state->end + 1 - state->start; 2333 u32 num_extents = count_max_extents(len); 2334 2335 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { 2336 spin_lock(&inode->lock); 2337 inode->defrag_bytes -= len; 2338 spin_unlock(&inode->lock); 2339 } 2340 2341 /* 2342 * set_bit and clear bit hooks normally require _irqsave/restore 2343 * but in this case, we are only testing for the DELALLOC 2344 * bit, which is only set or cleared with irqs on 2345 */ 2346 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 2347 struct btrfs_root *root = inode->root; 2348 bool do_list = !btrfs_is_free_space_inode(inode); 2349 2350 spin_lock(&inode->lock); 2351 btrfs_mod_outstanding_extents(inode, -num_extents); 2352 spin_unlock(&inode->lock); 2353 2354 /* 2355 * We don't reserve metadata space for space cache inodes so we 2356 * don't need to call delalloc_release_metadata if there is an 2357 * error. 2358 */ 2359 if (*bits & EXTENT_CLEAR_META_RESV && 2360 root != fs_info->tree_root) 2361 btrfs_delalloc_release_metadata(inode, len, false); 2362 2363 /* For sanity tests. 
*/ 2364 if (btrfs_is_testing(fs_info)) 2365 return; 2366 2367 if (!btrfs_is_data_reloc_root(root) && 2368 do_list && !(state->state & EXTENT_NORESERVE) && 2369 (*bits & EXTENT_CLEAR_DATA_RESV)) 2370 btrfs_free_reserved_data_space_noquota(fs_info, len); 2371 2372 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2373 fs_info->delalloc_batch); 2374 spin_lock(&inode->lock); 2375 inode->delalloc_bytes -= len; 2376 if (do_list && inode->delalloc_bytes == 0 && 2377 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2378 &inode->runtime_flags)) 2379 btrfs_del_delalloc_inode(root, inode); 2380 spin_unlock(&inode->lock); 2381 } 2382 2383 if ((state->state & EXTENT_DELALLOC_NEW) && 2384 (*bits & EXTENT_DELALLOC_NEW)) { 2385 spin_lock(&inode->lock); 2386 ASSERT(inode->new_delalloc_bytes >= len); 2387 inode->new_delalloc_bytes -= len; 2388 if (*bits & EXTENT_ADD_INODE_BYTES) 2389 inode_add_bytes(&inode->vfs_inode, len); 2390 spin_unlock(&inode->lock); 2391 } 2392 } 2393 2394 /* 2395 * in order to insert checksums into the metadata in large chunks, 2396 * we wait until bio submission time. All the pages in the bio are 2397 * checksummed and sums are attached onto the ordered extent record. 2398 * 2399 * At IO completion time the cums attached on the ordered extent record 2400 * are inserted into the btree 2401 */ 2402 static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio, 2403 u64 dio_file_offset) 2404 { 2405 return btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false); 2406 } 2407 2408 /* 2409 * Split an extent_map at [start, start + len] 2410 * 2411 * This function is intended to be used only for extract_ordered_extent(). 2412 */ 2413 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len, 2414 u64 pre, u64 post) 2415 { 2416 struct extent_map_tree *em_tree = &inode->extent_tree; 2417 struct extent_map *em; 2418 struct extent_map *split_pre = NULL; 2419 struct extent_map *split_mid = NULL; 2420 struct extent_map *split_post = NULL; 2421 int ret = 0; 2422 unsigned long flags; 2423 2424 /* Sanity check */ 2425 if (pre == 0 && post == 0) 2426 return 0; 2427 2428 split_pre = alloc_extent_map(); 2429 if (pre) 2430 split_mid = alloc_extent_map(); 2431 if (post) 2432 split_post = alloc_extent_map(); 2433 if (!split_pre || (pre && !split_mid) || (post && !split_post)) { 2434 ret = -ENOMEM; 2435 goto out; 2436 } 2437 2438 ASSERT(pre + post < len); 2439 2440 lock_extent(&inode->io_tree, start, start + len - 1); 2441 write_lock(&em_tree->lock); 2442 em = lookup_extent_mapping(em_tree, start, len); 2443 if (!em) { 2444 ret = -EIO; 2445 goto out_unlock; 2446 } 2447 2448 ASSERT(em->len == len); 2449 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); 2450 ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); 2451 ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags)); 2452 ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags)); 2453 ASSERT(!list_empty(&em->list)); 2454 2455 flags = em->flags; 2456 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 2457 2458 /* First, replace the em with a new extent_map starting from * em->start */ 2459 split_pre->start = em->start; 2460 split_pre->len = (pre ? 
pre : em->len - post); 2461 split_pre->orig_start = split_pre->start; 2462 split_pre->block_start = em->block_start; 2463 split_pre->block_len = split_pre->len; 2464 split_pre->orig_block_len = split_pre->block_len; 2465 split_pre->ram_bytes = split_pre->len; 2466 split_pre->flags = flags; 2467 split_pre->compress_type = em->compress_type; 2468 split_pre->generation = em->generation; 2469 2470 replace_extent_mapping(em_tree, em, split_pre, 1); 2471 2472 /* 2473 * Now we only have an extent_map at: 2474 * [em->start, em->start + pre] if pre != 0 2475 * [em->start, em->start + em->len - post] if pre == 0 2476 */ 2477 2478 if (pre) { 2479 /* Insert the middle extent_map */ 2480 split_mid->start = em->start + pre; 2481 split_mid->len = em->len - pre - post; 2482 split_mid->orig_start = split_mid->start; 2483 split_mid->block_start = em->block_start + pre; 2484 split_mid->block_len = split_mid->len; 2485 split_mid->orig_block_len = split_mid->block_len; 2486 split_mid->ram_bytes = split_mid->len; 2487 split_mid->flags = flags; 2488 split_mid->compress_type = em->compress_type; 2489 split_mid->generation = em->generation; 2490 add_extent_mapping(em_tree, split_mid, 1); 2491 } 2492 2493 if (post) { 2494 split_post->start = em->start + em->len - post; 2495 split_post->len = post; 2496 split_post->orig_start = split_post->start; 2497 split_post->block_start = em->block_start + em->len - post; 2498 split_post->block_len = split_post->len; 2499 split_post->orig_block_len = split_post->block_len; 2500 split_post->ram_bytes = split_post->len; 2501 split_post->flags = flags; 2502 split_post->compress_type = em->compress_type; 2503 split_post->generation = em->generation; 2504 add_extent_mapping(em_tree, split_post, 1); 2505 } 2506 2507 /* Once for us */ 2508 free_extent_map(em); 2509 /* Once for the tree */ 2510 free_extent_map(em); 2511 2512 out_unlock: 2513 write_unlock(&em_tree->lock); 2514 unlock_extent(&inode->io_tree, start, start + len - 1); 2515 out: 2516 free_extent_map(split_pre); 2517 free_extent_map(split_mid); 2518 free_extent_map(split_post); 2519 2520 return ret; 2521 } 2522 2523 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, 2524 struct bio *bio, loff_t file_offset) 2525 { 2526 struct btrfs_ordered_extent *ordered; 2527 u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT; 2528 u64 file_len; 2529 u64 len = bio->bi_iter.bi_size; 2530 u64 end = start + len; 2531 u64 ordered_end; 2532 u64 pre, post; 2533 int ret = 0; 2534 2535 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 2536 if (WARN_ON_ONCE(!ordered)) 2537 return BLK_STS_IOERR; 2538 2539 /* No need to split */ 2540 if (ordered->disk_num_bytes == len) 2541 goto out; 2542 2543 /* We cannot split once end_bio'd ordered extent */ 2544 if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) { 2545 ret = -EINVAL; 2546 goto out; 2547 } 2548 2549 /* We cannot split a compressed ordered extent */ 2550 if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) { 2551 ret = -EINVAL; 2552 goto out; 2553 } 2554 2555 ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes; 2556 /* bio must be in one ordered extent */ 2557 if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) { 2558 ret = -EINVAL; 2559 goto out; 2560 } 2561 2562 /* Checksum list should be empty */ 2563 if (WARN_ON_ONCE(!list_empty(&ordered->list))) { 2564 ret = -EINVAL; 2565 goto out; 2566 } 2567 2568 file_len = ordered->num_bytes; 2569 pre = start - ordered->disk_bytenr; 2570 post = ordered_end - end; 2571 2572 
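	/*
	 * Split the ordered extent, and below its pinned extent map, so that
	 * the part covered by this zone append bio becomes a standalone piece.
	 * pre and post are the byte counts before and after the bio inside
	 * the original ordered extent.
	 */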
ret = btrfs_split_ordered_extent(ordered, pre, post); 2573 if (ret) 2574 goto out; 2575 ret = split_zoned_em(inode, file_offset, file_len, pre, post); 2576 2577 out: 2578 btrfs_put_ordered_extent(ordered); 2579 2580 return errno_to_blk_status(ret); 2581 } 2582 2583 /* 2584 * extent_io.c submission hook. This does the right thing for csum calculation 2585 * on write, or reading the csums from the tree before a read. 2586 * 2587 * Rules about async/sync submit, 2588 * a) read: sync submit 2589 * 2590 * b) write without checksum: sync submit 2591 * 2592 * c) write with checksum: 2593 * c-1) if bio is issued by fsync: sync submit 2594 * (sync_writers != 0) 2595 * 2596 * c-2) if root is reloc root: sync submit 2597 * (only in case of buffered IO) 2598 * 2599 * c-3) otherwise: async submit 2600 */ 2601 void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, 2602 int mirror_num, enum btrfs_compression_type compress_type) 2603 { 2604 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2605 struct btrfs_root *root = BTRFS_I(inode)->root; 2606 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; 2607 blk_status_t ret = 0; 2608 int skip_sum; 2609 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 2610 2611 skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) || 2612 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state); 2613 2614 if (btrfs_is_free_space_inode(BTRFS_I(inode))) 2615 metadata = BTRFS_WQ_ENDIO_FREE_SPACE; 2616 2617 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 2618 struct page *page = bio_first_bvec_all(bio)->bv_page; 2619 loff_t file_offset = page_offset(page); 2620 2621 ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset); 2622 if (ret) 2623 goto out; 2624 } 2625 2626 if (btrfs_op(bio) != BTRFS_MAP_WRITE) { 2627 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata); 2628 if (ret) 2629 goto out; 2630 2631 if (compress_type != BTRFS_COMPRESS_NONE) { 2632 /* 2633 * btrfs_submit_compressed_read will handle completing 2634 * the bio if there were any errors, so just return 2635 * here. 2636 */ 2637 btrfs_submit_compressed_read(inode, bio, mirror_num); 2638 return; 2639 } else { 2640 /* 2641 * Lookup bio sums does extra checks around whether we 2642 * need to csum or not, which is why we ignore skip_sum 2643 * here. 2644 */ 2645 ret = btrfs_lookup_bio_sums(inode, bio, NULL); 2646 if (ret) 2647 goto out; 2648 } 2649 goto mapit; 2650 } else if (async && !skip_sum) { 2651 /* csum items have already been cloned */ 2652 if (btrfs_is_data_reloc_root(root)) 2653 goto mapit; 2654 /* we're doing a write, do the async checksumming */ 2655 ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 2656 0, btrfs_submit_bio_start); 2657 goto out; 2658 } else if (!skip_sum) { 2659 ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false); 2660 if (ret) 2661 goto out; 2662 } 2663 2664 mapit: 2665 ret = btrfs_map_bio(fs_info, bio, mirror_num); 2666 2667 out: 2668 if (ret) { 2669 bio->bi_status = ret; 2670 bio_endio(bio); 2671 } 2672 } 2673 2674 /* 2675 * given a list of ordered sums record them in the inode. This happens 2676 * at IO completion time based on sums calculated at bio submission time. 
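 * Each btrfs_ordered_sum is inserted into the csum tree that owns its
 * bytenr, looked up via btrfs_csum_root() below.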
2677 */ 2678 static int add_pending_csums(struct btrfs_trans_handle *trans, 2679 struct list_head *list) 2680 { 2681 struct btrfs_ordered_sum *sum; 2682 struct btrfs_root *csum_root = NULL; 2683 int ret; 2684 2685 list_for_each_entry(sum, list, list) { 2686 trans->adding_csums = true; 2687 if (!csum_root) 2688 csum_root = btrfs_csum_root(trans->fs_info, 2689 sum->bytenr); 2690 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2691 trans->adding_csums = false; 2692 if (ret) 2693 return ret; 2694 } 2695 return 0; 2696 } 2697 2698 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2699 const u64 start, 2700 const u64 len, 2701 struct extent_state **cached_state) 2702 { 2703 u64 search_start = start; 2704 const u64 end = start + len - 1; 2705 2706 while (search_start < end) { 2707 const u64 search_len = end - search_start + 1; 2708 struct extent_map *em; 2709 u64 em_len; 2710 int ret = 0; 2711 2712 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2713 if (IS_ERR(em)) 2714 return PTR_ERR(em); 2715 2716 if (em->block_start != EXTENT_MAP_HOLE) 2717 goto next; 2718 2719 em_len = em->len; 2720 if (em->start < search_start) 2721 em_len -= search_start - em->start; 2722 if (em_len > search_len) 2723 em_len = search_len; 2724 2725 ret = set_extent_bit(&inode->io_tree, search_start, 2726 search_start + em_len - 1, 2727 EXTENT_DELALLOC_NEW, 0, NULL, cached_state, 2728 GFP_NOFS, NULL); 2729 next: 2730 search_start = extent_map_end(em); 2731 free_extent_map(em); 2732 if (ret) 2733 return ret; 2734 } 2735 return 0; 2736 } 2737 2738 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2739 unsigned int extra_bits, 2740 struct extent_state **cached_state) 2741 { 2742 WARN_ON(PAGE_ALIGNED(end)); 2743 2744 if (start >= i_size_read(&inode->vfs_inode) && 2745 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2746 /* 2747 * There can't be any extents following eof in this case so just 2748 * set the delalloc new bit for the range directly. 2749 */ 2750 extra_bits |= EXTENT_DELALLOC_NEW; 2751 } else { 2752 int ret; 2753 2754 ret = btrfs_find_new_delalloc_bytes(inode, start, 2755 end + 1 - start, 2756 cached_state); 2757 if (ret) 2758 return ret; 2759 } 2760 2761 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, 2762 cached_state); 2763 } 2764 2765 /* see btrfs_writepage_start_hook for details on why this is required */ 2766 struct btrfs_writepage_fixup { 2767 struct page *page; 2768 struct inode *inode; 2769 struct btrfs_work work; 2770 }; 2771 2772 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2773 { 2774 struct btrfs_writepage_fixup *fixup; 2775 struct btrfs_ordered_extent *ordered; 2776 struct extent_state *cached_state = NULL; 2777 struct extent_changeset *data_reserved = NULL; 2778 struct page *page; 2779 struct btrfs_inode *inode; 2780 u64 page_start; 2781 u64 page_end; 2782 int ret = 0; 2783 bool free_delalloc_space = true; 2784 2785 fixup = container_of(work, struct btrfs_writepage_fixup, work); 2786 page = fixup->page; 2787 inode = BTRFS_I(fixup->inode); 2788 page_start = page_offset(page); 2789 page_end = page_offset(page) + PAGE_SIZE - 1; 2790 2791 /* 2792 * This is similar to page_mkwrite, we need to reserve the space before 2793 * we take the page lock. 2794 */ 2795 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2796 PAGE_SIZE); 2797 again: 2798 lock_page(page); 2799 2800 /* 2801 * Before we queued this fixup, we took a reference on the page. 
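 * (That is the get_page() done in btrfs_writepage_cow_fixup() before the
 * work was queued.)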
2802 * page->mapping may go NULL, but it shouldn't be moved to a different 2803 * address space. 2804 */ 2805 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2806 /* 2807 * Unfortunately this is a little tricky, either 2808 * 2809 * 1) We got here and our page had already been dealt with and 2810 * we reserved our space, thus ret == 0, so we need to just 2811 * drop our space reservation and bail. This can happen the 2812 * first time we come into the fixup worker, or could happen 2813 * while waiting for the ordered extent. 2814 * 2) Our page was already dealt with, but we happened to get an 2815 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2816 * this case we obviously don't have anything to release, but 2817 * because the page was already dealt with we don't want to 2818 * mark the page with an error, so make sure we're resetting 2819 * ret to 0. This is why we have this check _before_ the ret 2820 * check, because we do not want to have a surprise ENOSPC 2821 * when the page was already properly dealt with. 2822 */ 2823 if (!ret) { 2824 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2825 btrfs_delalloc_release_space(inode, data_reserved, 2826 page_start, PAGE_SIZE, 2827 true); 2828 } 2829 ret = 0; 2830 goto out_page; 2831 } 2832 2833 /* 2834 * We can't mess with the page state unless it is locked, so now that 2835 * it is locked bail if we failed to make our space reservation. 2836 */ 2837 if (ret) 2838 goto out_page; 2839 2840 lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state); 2841 2842 /* already ordered? We're done */ 2843 if (PageOrdered(page)) 2844 goto out_reserved; 2845 2846 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2847 if (ordered) { 2848 unlock_extent_cached(&inode->io_tree, page_start, page_end, 2849 &cached_state); 2850 unlock_page(page); 2851 btrfs_start_ordered_extent(ordered, 1); 2852 btrfs_put_ordered_extent(ordered); 2853 goto again; 2854 } 2855 2856 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2857 &cached_state); 2858 if (ret) 2859 goto out_reserved; 2860 2861 /* 2862 * Everything went as planned, we're now the owner of a dirty page with 2863 * delayed allocation bits set and space reserved for our COW 2864 * destination. 2865 * 2866 * The page was dirty when we started, nothing should have cleaned it. 2867 */ 2868 BUG_ON(!PageDirty(page)); 2869 free_delalloc_space = false; 2870 out_reserved: 2871 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2872 if (free_delalloc_space) 2873 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2874 PAGE_SIZE, true); 2875 unlock_extent_cached(&inode->io_tree, page_start, page_end, 2876 &cached_state); 2877 out_page: 2878 if (ret) { 2879 /* 2880 * We hit ENOSPC or other errors. Update the mapping and page 2881 * to reflect the errors and clean the page. 2882 */ 2883 mapping_set_error(page->mapping, ret); 2884 end_extent_writepage(page, ret, page_start, page_end); 2885 clear_page_dirty_for_io(page); 2886 SetPageError(page); 2887 } 2888 btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE); 2889 unlock_page(page); 2890 put_page(page); 2891 kfree(fixup); 2892 extent_changeset_free(data_reserved); 2893 /* 2894 * As a precaution, do a delayed iput in case it would be the last iput 2895 * that could need flushing space. Recursing back to fixup worker would 2896 * deadlock. 
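 * btrfs_add_delayed_iput() only defers the final iput; if this is not
 * the last reference it simply drops i_count.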
2897 */ 2898 btrfs_add_delayed_iput(&inode->vfs_inode); 2899 } 2900 2901 /* 2902 * There are a few paths in the higher layers of the kernel that directly 2903 * set the page dirty bit without asking the filesystem if it is a 2904 * good idea. This causes problems because we want to make sure COW 2905 * properly happens and the data=ordered rules are followed. 2906 * 2907 * In our case any range that doesn't have the ORDERED bit set 2908 * hasn't been properly setup for IO. We kick off an async process 2909 * to fix it up. The async helper will wait for ordered extents, set 2910 * the delalloc bit and make it safe to write the page. 2911 */ 2912 int btrfs_writepage_cow_fixup(struct page *page) 2913 { 2914 struct inode *inode = page->mapping->host; 2915 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2916 struct btrfs_writepage_fixup *fixup; 2917 2918 /* This page has ordered extent covering it already */ 2919 if (PageOrdered(page)) 2920 return 0; 2921 2922 /* 2923 * PageChecked is set below when we create a fixup worker for this page, 2924 * don't try to create another one if we're already PageChecked() 2925 * 2926 * The extent_io writepage code will redirty the page if we send back 2927 * EAGAIN. 2928 */ 2929 if (PageChecked(page)) 2930 return -EAGAIN; 2931 2932 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2933 if (!fixup) 2934 return -EAGAIN; 2935 2936 /* 2937 * We are already holding a reference to this inode from 2938 * write_cache_pages. We need to hold it because the space reservation 2939 * takes place outside of the page lock, and we can't trust 2940 * page->mapping outside of the page lock. 2941 */ 2942 ihold(inode); 2943 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE); 2944 get_page(page); 2945 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 2946 fixup->page = page; 2947 fixup->inode = inode; 2948 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2949 2950 return -EAGAIN; 2951 } 2952 2953 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2954 struct btrfs_inode *inode, u64 file_pos, 2955 struct btrfs_file_extent_item *stack_fi, 2956 const bool update_inode_bytes, 2957 u64 qgroup_reserved) 2958 { 2959 struct btrfs_root *root = inode->root; 2960 const u64 sectorsize = root->fs_info->sectorsize; 2961 struct btrfs_path *path; 2962 struct extent_buffer *leaf; 2963 struct btrfs_key ins; 2964 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2965 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2966 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2967 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2968 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2969 struct btrfs_drop_extents_args drop_args = { 0 }; 2970 int ret; 2971 2972 path = btrfs_alloc_path(); 2973 if (!path) 2974 return -ENOMEM; 2975 2976 /* 2977 * we may be replacing one extent in the tree with another. 2978 * The new extent is pinned in the extent map, and we don't want 2979 * to drop it from the cache until it is completely in the btree. 2980 * 2981 * So, tell btrfs_drop_extents to leave this extent in the cache. 2982 * the caller is expected to unpin it and allow it to be merged 2983 * with the others. 
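 * With replace_extent set, btrfs_drop_extents() may also leave an empty
 * item of extent_item_size in place for us; drop_args.extent_inserted
 * tells us below whether btrfs_insert_empty_item() is still needed.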
2984 */ 2985 drop_args.path = path; 2986 drop_args.start = file_pos; 2987 drop_args.end = file_pos + num_bytes; 2988 drop_args.replace_extent = true; 2989 drop_args.extent_item_size = sizeof(*stack_fi); 2990 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2991 if (ret) 2992 goto out; 2993 2994 if (!drop_args.extent_inserted) { 2995 ins.objectid = btrfs_ino(inode); 2996 ins.offset = file_pos; 2997 ins.type = BTRFS_EXTENT_DATA_KEY; 2998 2999 ret = btrfs_insert_empty_item(trans, root, path, &ins, 3000 sizeof(*stack_fi)); 3001 if (ret) 3002 goto out; 3003 } 3004 leaf = path->nodes[0]; 3005 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 3006 write_extent_buffer(leaf, stack_fi, 3007 btrfs_item_ptr_offset(leaf, path->slots[0]), 3008 sizeof(struct btrfs_file_extent_item)); 3009 3010 btrfs_mark_buffer_dirty(leaf); 3011 btrfs_release_path(path); 3012 3013 /* 3014 * If we dropped an inline extent here, we know the range where it is 3015 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the 3016 * number of bytes only for that range containing the inline extent. 3017 * The remaining of the range will be processed when clearning the 3018 * EXTENT_DELALLOC_BIT bit through the ordered extent completion. 3019 */ 3020 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 3021 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 3022 3023 inline_size = drop_args.bytes_found - inline_size; 3024 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 3025 drop_args.bytes_found -= inline_size; 3026 num_bytes -= sectorsize; 3027 } 3028 3029 if (update_inode_bytes) 3030 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 3031 3032 ins.objectid = disk_bytenr; 3033 ins.offset = disk_num_bytes; 3034 ins.type = BTRFS_EXTENT_ITEM_KEY; 3035 3036 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 3037 if (ret) 3038 goto out; 3039 3040 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 3041 file_pos - offset, 3042 qgroup_reserved, &ins); 3043 out: 3044 btrfs_free_path(path); 3045 3046 return ret; 3047 } 3048 3049 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 3050 u64 start, u64 len) 3051 { 3052 struct btrfs_block_group *cache; 3053 3054 cache = btrfs_lookup_block_group(fs_info, start); 3055 ASSERT(cache); 3056 3057 spin_lock(&cache->lock); 3058 cache->delalloc_bytes -= len; 3059 spin_unlock(&cache->lock); 3060 3061 btrfs_put_block_group(cache); 3062 } 3063 3064 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 3065 struct btrfs_ordered_extent *oe) 3066 { 3067 struct btrfs_file_extent_item stack_fi; 3068 bool update_inode_bytes; 3069 u64 num_bytes = oe->num_bytes; 3070 u64 ram_bytes = oe->ram_bytes; 3071 3072 memset(&stack_fi, 0, sizeof(stack_fi)); 3073 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 3074 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 3075 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 3076 oe->disk_num_bytes); 3077 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 3078 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) 3079 num_bytes = ram_bytes = oe->truncated_len; 3080 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 3081 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 3082 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 3083 /* Encryption and other encoding is reserved and all 0 */ 3084 3085 /* 3086 * For delalloc, 
when completing an ordered extent we update the inode's 3087 * bytes when clearing the range in the inode's io tree, so pass false 3088 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3089 * except if the ordered extent was truncated. 3090 */ 3091 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3092 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3093 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3094 3095 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3096 oe->file_offset, &stack_fi, 3097 update_inode_bytes, oe->qgroup_rsv); 3098 } 3099 3100 /* 3101 * As ordered data IO finishes, this gets called so we can finish 3102 * an ordered extent if the range of bytes in the file it covers are 3103 * fully written. 3104 */ 3105 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 3106 { 3107 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3108 struct btrfs_root *root = inode->root; 3109 struct btrfs_fs_info *fs_info = root->fs_info; 3110 struct btrfs_trans_handle *trans = NULL; 3111 struct extent_io_tree *io_tree = &inode->io_tree; 3112 struct extent_state *cached_state = NULL; 3113 u64 start, end; 3114 int compress_type = 0; 3115 int ret = 0; 3116 u64 logical_len = ordered_extent->num_bytes; 3117 bool freespace_inode; 3118 bool truncated = false; 3119 bool clear_reserved_extent = true; 3120 unsigned int clear_bits = EXTENT_DEFRAG; 3121 3122 start = ordered_extent->file_offset; 3123 end = start + ordered_extent->num_bytes - 1; 3124 3125 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3126 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3127 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3128 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3129 clear_bits |= EXTENT_DELALLOC_NEW; 3130 3131 freespace_inode = btrfs_is_free_space_inode(inode); 3132 3133 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3134 ret = -EIO; 3135 goto out; 3136 } 3137 3138 /* A valid bdev implies a write on a sequential zone */ 3139 if (ordered_extent->bdev) { 3140 btrfs_rewrite_logical_zoned(ordered_extent); 3141 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3142 ordered_extent->disk_num_bytes); 3143 } 3144 3145 btrfs_free_io_failure_record(inode, start, end); 3146 3147 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3148 truncated = true; 3149 logical_len = ordered_extent->truncated_len; 3150 /* Truncated the entire extent, don't bother adding */ 3151 if (!logical_len) 3152 goto out; 3153 } 3154 3155 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3156 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3157 3158 btrfs_inode_safe_disk_i_size_write(inode, 0); 3159 if (freespace_inode) 3160 trans = btrfs_join_transaction_spacecache(root); 3161 else 3162 trans = btrfs_join_transaction(root); 3163 if (IS_ERR(trans)) { 3164 ret = PTR_ERR(trans); 3165 trans = NULL; 3166 goto out; 3167 } 3168 trans->block_rsv = &inode->block_rsv; 3169 ret = btrfs_update_inode_fallback(trans, root, inode); 3170 if (ret) /* -ENOMEM or corruption */ 3171 btrfs_abort_transaction(trans, ret); 3172 goto out; 3173 } 3174 3175 clear_bits |= EXTENT_LOCKED; 3176 lock_extent_bits(io_tree, start, end, &cached_state); 3177 3178 if (freespace_inode) 3179 trans = btrfs_join_transaction_spacecache(root); 3180 else 3181 trans = btrfs_join_transaction(root); 3182 if (IS_ERR(trans)) { 3183 ret = PTR_ERR(trans); 3184 trans = NULL; 3185 goto out; 3186 } 
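	/*
	 * Use the metadata space reserved at delalloc time (the inode's block
	 * reserve) for the btree updates below.
	 */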
3187 3188 trans->block_rsv = &inode->block_rsv; 3189 3190 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3191 compress_type = ordered_extent->compress_type; 3192 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3193 BUG_ON(compress_type); 3194 ret = btrfs_mark_extent_written(trans, inode, 3195 ordered_extent->file_offset, 3196 ordered_extent->file_offset + 3197 logical_len); 3198 } else { 3199 BUG_ON(root == fs_info->tree_root); 3200 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3201 if (!ret) { 3202 clear_reserved_extent = false; 3203 btrfs_release_delalloc_bytes(fs_info, 3204 ordered_extent->disk_bytenr, 3205 ordered_extent->disk_num_bytes); 3206 } 3207 } 3208 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, 3209 ordered_extent->num_bytes, trans->transid); 3210 if (ret < 0) { 3211 btrfs_abort_transaction(trans, ret); 3212 goto out; 3213 } 3214 3215 ret = add_pending_csums(trans, &ordered_extent->list); 3216 if (ret) { 3217 btrfs_abort_transaction(trans, ret); 3218 goto out; 3219 } 3220 3221 /* 3222 * If this is a new delalloc range, clear its new delalloc flag to 3223 * update the inode's number of bytes. This needs to be done first 3224 * before updating the inode item. 3225 */ 3226 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3227 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3228 clear_extent_bit(&inode->io_tree, start, end, 3229 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3230 0, 0, &cached_state); 3231 3232 btrfs_inode_safe_disk_i_size_write(inode, 0); 3233 ret = btrfs_update_inode_fallback(trans, root, inode); 3234 if (ret) { /* -ENOMEM or corruption */ 3235 btrfs_abort_transaction(trans, ret); 3236 goto out; 3237 } 3238 ret = 0; 3239 out: 3240 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3241 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0, 3242 &cached_state); 3243 3244 if (trans) 3245 btrfs_end_transaction(trans); 3246 3247 if (ret || truncated) { 3248 u64 unwritten_start = start; 3249 3250 /* 3251 * If we failed to finish this ordered extent for any reason we 3252 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3253 * extent, and mark the inode with the error if it wasn't 3254 * already set. Any error during writeback would have already 3255 * set the mapping error, so we need to set it if we're the ones 3256 * marking this ordered extent as failed. 3257 */ 3258 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, 3259 &ordered_extent->flags)) 3260 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); 3261 3262 if (truncated) 3263 unwritten_start += logical_len; 3264 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3265 3266 /* Drop the cache for the part of the extent we didn't write. */ 3267 btrfs_drop_extent_cache(inode, unwritten_start, end, 0); 3268 3269 /* 3270 * If the ordered extent had an IOERR or something else went 3271 * wrong we need to return the space for this ordered extent 3272 * back to the allocator. We only free the extent in the 3273 * truncated case if we didn't write out the extent at all. 3274 * 3275 * If we made it past insert_reserved_file_extent before we 3276 * errored out then we don't need to do this as the accounting 3277 * has already been done. 
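 * That is what clear_reserved_extent tracks, and NOCOW/PREALLOC ordered
 * extents never allocated a new extent here in the first place, hence
 * the extra checks below.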
3278 */ 3279 if ((ret || !logical_len) && 3280 clear_reserved_extent && 3281 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3282 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3283 /* 3284 * Discard the range before returning it back to the 3285 * free space pool 3286 */ 3287 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3288 btrfs_discard_extent(fs_info, 3289 ordered_extent->disk_bytenr, 3290 ordered_extent->disk_num_bytes, 3291 NULL); 3292 btrfs_free_reserved_extent(fs_info, 3293 ordered_extent->disk_bytenr, 3294 ordered_extent->disk_num_bytes, 1); 3295 } 3296 } 3297 3298 /* 3299 * This needs to be done to make sure anybody waiting knows we are done 3300 * updating everything for this ordered extent. 3301 */ 3302 btrfs_remove_ordered_extent(inode, ordered_extent); 3303 3304 /* once for us */ 3305 btrfs_put_ordered_extent(ordered_extent); 3306 /* once for the tree */ 3307 btrfs_put_ordered_extent(ordered_extent); 3308 3309 return ret; 3310 } 3311 3312 static void finish_ordered_fn(struct btrfs_work *work) 3313 { 3314 struct btrfs_ordered_extent *ordered_extent; 3315 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 3316 btrfs_finish_ordered_io(ordered_extent); 3317 } 3318 3319 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode, 3320 struct page *page, u64 start, 3321 u64 end, bool uptodate) 3322 { 3323 trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate); 3324 3325 btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, 3326 finish_ordered_fn, uptodate); 3327 } 3328 3329 /* 3330 * check_data_csum - verify checksum of one sector of uncompressed data 3331 * @inode: inode 3332 * @io_bio: btrfs_io_bio which contains the csum 3333 * @bio_offset: offset to the beginning of the bio (in bytes) 3334 * @page: page where is the data to be verified 3335 * @pgoff: offset inside the page 3336 * @start: logical offset in the file 3337 * 3338 * The length of such check is always one sector size. 3339 */ 3340 static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio, 3341 u32 bio_offset, struct page *page, u32 pgoff, 3342 u64 start) 3343 { 3344 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3345 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3346 char *kaddr; 3347 u32 len = fs_info->sectorsize; 3348 const u32 csum_size = fs_info->csum_size; 3349 unsigned int offset_sectors; 3350 u8 *csum_expected; 3351 u8 csum[BTRFS_CSUM_SIZE]; 3352 3353 ASSERT(pgoff + len <= PAGE_SIZE); 3354 3355 offset_sectors = bio_offset >> fs_info->sectorsize_bits; 3356 csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size; 3357 3358 kaddr = kmap_atomic(page); 3359 shash->tfm = fs_info->csum_shash; 3360 3361 crypto_shash_digest(shash, kaddr + pgoff, len, csum); 3362 kunmap_atomic(kaddr); 3363 3364 if (memcmp(csum, csum_expected, csum_size)) 3365 goto zeroit; 3366 3367 return 0; 3368 zeroit: 3369 btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, 3370 bbio->mirror_num); 3371 if (bbio->device) 3372 btrfs_dev_stat_inc_and_print(bbio->device, 3373 BTRFS_DEV_STAT_CORRUPTION_ERRS); 3374 memzero_page(page, pgoff, len); 3375 return -EIO; 3376 } 3377 3378 /* 3379 * When reads are done, we need to check csums to verify the data is correct. 3380 * if there's a match, we allow the bio to finish. If not, the code in 3381 * extent_io.c will try to find good copies for us. 
3382 * 3383 * @bio_offset: offset to the beginning of the bio (in bytes) 3384 * @start: file offset of the range start 3385 * @end: file offset of the range end (inclusive) 3386 * 3387 * Return a bitmap where bit set means a csum mismatch, and bit not set means 3388 * csum match. 3389 */ 3390 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio, 3391 u32 bio_offset, struct page *page, 3392 u64 start, u64 end) 3393 { 3394 struct inode *inode = page->mapping->host; 3395 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3396 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3397 struct btrfs_root *root = BTRFS_I(inode)->root; 3398 const u32 sectorsize = root->fs_info->sectorsize; 3399 u32 pg_off; 3400 unsigned int result = 0; 3401 3402 if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) { 3403 btrfs_page_clear_checked(fs_info, page, start, end + 1 - start); 3404 return 0; 3405 } 3406 3407 /* 3408 * This only happens for NODATASUM or compressed read. 3409 * Normally this should be covered by above check for compressed read 3410 * or the next check for NODATASUM. Just do a quicker exit here. 3411 */ 3412 if (bbio->csum == NULL) 3413 return 0; 3414 3415 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 3416 return 0; 3417 3418 if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))) 3419 return 0; 3420 3421 ASSERT(page_offset(page) <= start && 3422 end <= page_offset(page) + PAGE_SIZE - 1); 3423 for (pg_off = offset_in_page(start); 3424 pg_off < offset_in_page(end); 3425 pg_off += sectorsize, bio_offset += sectorsize) { 3426 u64 file_offset = pg_off + page_offset(page); 3427 int ret; 3428 3429 if (btrfs_is_data_reloc_root(root) && 3430 test_range_bit(io_tree, file_offset, 3431 file_offset + sectorsize - 1, 3432 EXTENT_NODATASUM, 1, NULL)) { 3433 /* Skip the range without csum for data reloc inode */ 3434 clear_extent_bits(io_tree, file_offset, 3435 file_offset + sectorsize - 1, 3436 EXTENT_NODATASUM); 3437 continue; 3438 } 3439 ret = check_data_csum(inode, bbio, bio_offset, page, pg_off, 3440 page_offset(page) + pg_off); 3441 if (ret < 0) { 3442 const int nr_bit = (pg_off - offset_in_page(start)) >> 3443 root->fs_info->sectorsize_bits; 3444 3445 result |= (1U << nr_bit); 3446 } 3447 } 3448 return result; 3449 } 3450 3451 /* 3452 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3453 * 3454 * @inode: The inode we want to perform iput on 3455 * 3456 * This function uses the generic vfs_inode::i_count to track whether we should 3457 * just decrement it (in case it's > 1) or if this is the last iput then link 3458 * the inode to the delayed iput machinery. Delayed iputs are processed at 3459 * transaction commit time/superblock commit/cleaner kthread. 
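 * The atomic_add_unless() below drops i_count only when it is not the
 * last reference; otherwise the inode is queued on
 * fs_info->delayed_iputs and the cleaner kthread is woken to do the
 * final iput.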
3460 */ 3461 void btrfs_add_delayed_iput(struct inode *inode) 3462 { 3463 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3464 struct btrfs_inode *binode = BTRFS_I(inode); 3465 3466 if (atomic_add_unless(&inode->i_count, -1, 1)) 3467 return; 3468 3469 atomic_inc(&fs_info->nr_delayed_iputs); 3470 spin_lock(&fs_info->delayed_iput_lock); 3471 ASSERT(list_empty(&binode->delayed_iput)); 3472 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3473 spin_unlock(&fs_info->delayed_iput_lock); 3474 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3475 wake_up_process(fs_info->cleaner_kthread); 3476 } 3477 3478 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3479 struct btrfs_inode *inode) 3480 { 3481 list_del_init(&inode->delayed_iput); 3482 spin_unlock(&fs_info->delayed_iput_lock); 3483 iput(&inode->vfs_inode); 3484 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3485 wake_up(&fs_info->delayed_iputs_wait); 3486 spin_lock(&fs_info->delayed_iput_lock); 3487 } 3488 3489 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3490 struct btrfs_inode *inode) 3491 { 3492 if (!list_empty(&inode->delayed_iput)) { 3493 spin_lock(&fs_info->delayed_iput_lock); 3494 if (!list_empty(&inode->delayed_iput)) 3495 run_delayed_iput_locked(fs_info, inode); 3496 spin_unlock(&fs_info->delayed_iput_lock); 3497 } 3498 } 3499 3500 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3501 { 3502 3503 spin_lock(&fs_info->delayed_iput_lock); 3504 while (!list_empty(&fs_info->delayed_iputs)) { 3505 struct btrfs_inode *inode; 3506 3507 inode = list_first_entry(&fs_info->delayed_iputs, 3508 struct btrfs_inode, delayed_iput); 3509 run_delayed_iput_locked(fs_info, inode); 3510 cond_resched_lock(&fs_info->delayed_iput_lock); 3511 } 3512 spin_unlock(&fs_info->delayed_iput_lock); 3513 } 3514 3515 /** 3516 * Wait for flushing all delayed iputs 3517 * 3518 * @fs_info: the filesystem 3519 * 3520 * This will wait on any delayed iputs that are currently running with KILLABLE 3521 * set. Once they are all done running we will return, unless we are killed in 3522 * which case we return EINTR. This helps in user operations like fallocate etc 3523 * that might get blocked on the iputs. 3524 * 3525 * Return EINTR if we were killed, 0 if nothing's pending 3526 */ 3527 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3528 { 3529 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3530 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3531 if (ret) 3532 return -EINTR; 3533 return 0; 3534 } 3535 3536 /* 3537 * This creates an orphan entry for the given inode in case something goes wrong 3538 * in the middle of an unlink. 3539 */ 3540 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3541 struct btrfs_inode *inode) 3542 { 3543 int ret; 3544 3545 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3546 if (ret && ret != -EEXIST) { 3547 btrfs_abort_transaction(trans, ret); 3548 return ret; 3549 } 3550 3551 return 0; 3552 } 3553 3554 /* 3555 * We have done the delete so we can go ahead and remove the orphan item for 3556 * this particular inode. 3557 */ 3558 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3559 struct btrfs_inode *inode) 3560 { 3561 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3562 } 3563 3564 /* 3565 * this cleans up any orphans that may be left on the list from the last use 3566 * of this root. 
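 * Orphan items are keyed as (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY,
 * inode number); for each one we either let iput() finish dropping the
 * unlinked inode or simply delete the stale item.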
3567 */ 3568 int btrfs_orphan_cleanup(struct btrfs_root *root) 3569 { 3570 struct btrfs_fs_info *fs_info = root->fs_info; 3571 struct btrfs_path *path; 3572 struct extent_buffer *leaf; 3573 struct btrfs_key key, found_key; 3574 struct btrfs_trans_handle *trans; 3575 struct inode *inode; 3576 u64 last_objectid = 0; 3577 int ret = 0, nr_unlink = 0; 3578 3579 /* Bail out if the cleanup is already running. */ 3580 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3581 return 0; 3582 3583 path = btrfs_alloc_path(); 3584 if (!path) { 3585 ret = -ENOMEM; 3586 goto out; 3587 } 3588 path->reada = READA_BACK; 3589 3590 key.objectid = BTRFS_ORPHAN_OBJECTID; 3591 key.type = BTRFS_ORPHAN_ITEM_KEY; 3592 key.offset = (u64)-1; 3593 3594 while (1) { 3595 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3596 if (ret < 0) 3597 goto out; 3598 3599 /* 3600 * if ret == 0 means we found what we were searching for, which 3601 * is weird, but possible, so only screw with path if we didn't 3602 * find the key and see if we have stuff that matches 3603 */ 3604 if (ret > 0) { 3605 ret = 0; 3606 if (path->slots[0] == 0) 3607 break; 3608 path->slots[0]--; 3609 } 3610 3611 /* pull out the item */ 3612 leaf = path->nodes[0]; 3613 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3614 3615 /* make sure the item matches what we want */ 3616 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3617 break; 3618 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3619 break; 3620 3621 /* release the path since we're done with it */ 3622 btrfs_release_path(path); 3623 3624 /* 3625 * this is where we are basically btrfs_lookup, without the 3626 * crossing root thing. we store the inode number in the 3627 * offset of the orphan item. 3628 */ 3629 3630 if (found_key.offset == last_objectid) { 3631 btrfs_err(fs_info, 3632 "Error removing orphan entry, stopping orphan cleanup"); 3633 ret = -EINVAL; 3634 goto out; 3635 } 3636 3637 last_objectid = found_key.offset; 3638 3639 found_key.objectid = found_key.offset; 3640 found_key.type = BTRFS_INODE_ITEM_KEY; 3641 found_key.offset = 0; 3642 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3643 ret = PTR_ERR_OR_ZERO(inode); 3644 if (ret && ret != -ENOENT) 3645 goto out; 3646 3647 if (ret == -ENOENT && root == fs_info->tree_root) { 3648 struct btrfs_root *dead_root; 3649 int is_dead_root = 0; 3650 3651 /* 3652 * This is an orphan in the tree root. Currently these 3653 * could come from 2 sources: 3654 * a) a root (snapshot/subvolume) deletion in progress 3655 * b) a free space cache inode 3656 * We need to distinguish those two, as the orphan item 3657 * for a root must not get deleted before the deletion 3658 * of the snapshot/subvolume's tree completes. 3659 * 3660 * btrfs_find_orphan_roots() ran before us, which has 3661 * found all deleted roots and loaded them into 3662 * fs_info->fs_roots. So here we can find if an 3663 * orphan item corresponds to a deleted root by looking 3664 * up the root from that xarray. 3665 */ 3666 3667 spin_lock(&fs_info->fs_roots_lock); 3668 dead_root = xa_load(&fs_info->fs_roots, 3669 (unsigned long)found_key.objectid); 3670 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3671 is_dead_root = 1; 3672 spin_unlock(&fs_info->fs_roots_lock); 3673 3674 if (is_dead_root) { 3675 /* prevent this orphan from being found again */ 3676 key.offset = found_key.objectid - 1; 3677 continue; 3678 } 3679 3680 } 3681 3682 /* 3683 * If we have an inode with links, there are a couple of 3684 * possibilities: 3685 * 3686 * 1. 
We were halfway through creating fsverity metadata for the 3687 * file. In that case, the orphan item represents incomplete 3688 * fsverity metadata which must be cleaned up with 3689 * btrfs_drop_verity_items and deleting the orphan item. 3690 3691 * 2. Old kernels (before v3.12) used to create an 3692 * orphan item for truncate indicating that there were possibly 3693 * extent items past i_size that needed to be deleted. In v3.12, 3694 * truncate was changed to update i_size in sync with the extent 3695 * items, but the (useless) orphan item was still created. Since 3696 * v4.18, we don't create the orphan item for truncate at all. 3697 * 3698 * So, this item could mean that we need to do a truncate, but 3699 * only if this filesystem was last used on a pre-v3.12 kernel 3700 * and was not cleanly unmounted. The odds of that are quite 3701 * slim, and it's a pain to do the truncate now, so just delete 3702 * the orphan item. 3703 * 3704 * It's also possible that this orphan item was supposed to be 3705 * deleted but wasn't. The inode number may have been reused, 3706 * but either way, we can delete the orphan item. 3707 */ 3708 if (ret == -ENOENT || inode->i_nlink) { 3709 if (!ret) { 3710 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3711 iput(inode); 3712 if (ret) 3713 goto out; 3714 } 3715 trans = btrfs_start_transaction(root, 1); 3716 if (IS_ERR(trans)) { 3717 ret = PTR_ERR(trans); 3718 goto out; 3719 } 3720 btrfs_debug(fs_info, "auto deleting %Lu", 3721 found_key.objectid); 3722 ret = btrfs_del_orphan_item(trans, root, 3723 found_key.objectid); 3724 btrfs_end_transaction(trans); 3725 if (ret) 3726 goto out; 3727 continue; 3728 } 3729 3730 nr_unlink++; 3731 3732 /* this will do delete_inode and everything for us */ 3733 iput(inode); 3734 } 3735 /* release the path since we're done with it */ 3736 btrfs_release_path(path); 3737 3738 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3739 trans = btrfs_join_transaction(root); 3740 if (!IS_ERR(trans)) 3741 btrfs_end_transaction(trans); 3742 } 3743 3744 if (nr_unlink) 3745 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3746 3747 out: 3748 if (ret) 3749 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3750 btrfs_free_path(path); 3751 return ret; 3752 } 3753 3754 /* 3755 * very simple check to peek ahead in the leaf looking for xattrs. If we 3756 * don't find any xattrs, we know there can't be any acls. 
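 *
 * Returns 0 if the inode definitely has no ACLs, 1 if it may have them and
 * the caller still needs to look them up.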
3757 * 3758 * slot is the slot the inode is in, objectid is the objectid of the inode 3759 */ 3760 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3761 int slot, u64 objectid, 3762 int *first_xattr_slot) 3763 { 3764 u32 nritems = btrfs_header_nritems(leaf); 3765 struct btrfs_key found_key; 3766 static u64 xattr_access = 0; 3767 static u64 xattr_default = 0; 3768 int scanned = 0; 3769 3770 if (!xattr_access) { 3771 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3772 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3773 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3774 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3775 } 3776 3777 slot++; 3778 *first_xattr_slot = -1; 3779 while (slot < nritems) { 3780 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3781 3782 /* we found a different objectid, there must not be acls */ 3783 if (found_key.objectid != objectid) 3784 return 0; 3785 3786 /* we found an xattr, assume we've got an acl */ 3787 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3788 if (*first_xattr_slot == -1) 3789 *first_xattr_slot = slot; 3790 if (found_key.offset == xattr_access || 3791 found_key.offset == xattr_default) 3792 return 1; 3793 } 3794 3795 /* 3796 * we found a key greater than an xattr key, there can't 3797 * be any acls later on 3798 */ 3799 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3800 return 0; 3801 3802 slot++; 3803 scanned++; 3804 3805 /* 3806 * it goes inode, inode backrefs, xattrs, extents, 3807 * so if there are a ton of hard links to an inode there can 3808 * be a lot of backrefs. Don't waste time searching too hard, 3809 * this is just an optimization 3810 */ 3811 if (scanned >= 8) 3812 break; 3813 } 3814 /* we hit the end of the leaf before we found an xattr or 3815 * something larger than an xattr. 
We have to assume the inode 3816 * has acls 3817 */ 3818 if (*first_xattr_slot == -1) 3819 *first_xattr_slot = slot; 3820 return 1; 3821 } 3822 3823 /* 3824 * read an inode from the btree into the in-memory inode 3825 */ 3826 static int btrfs_read_locked_inode(struct inode *inode, 3827 struct btrfs_path *in_path) 3828 { 3829 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3830 struct btrfs_path *path = in_path; 3831 struct extent_buffer *leaf; 3832 struct btrfs_inode_item *inode_item; 3833 struct btrfs_root *root = BTRFS_I(inode)->root; 3834 struct btrfs_key location; 3835 unsigned long ptr; 3836 int maybe_acls; 3837 u32 rdev; 3838 int ret; 3839 bool filled = false; 3840 int first_xattr_slot; 3841 3842 ret = btrfs_fill_inode(inode, &rdev); 3843 if (!ret) 3844 filled = true; 3845 3846 if (!path) { 3847 path = btrfs_alloc_path(); 3848 if (!path) 3849 return -ENOMEM; 3850 } 3851 3852 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3853 3854 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3855 if (ret) { 3856 if (path != in_path) 3857 btrfs_free_path(path); 3858 return ret; 3859 } 3860 3861 leaf = path->nodes[0]; 3862 3863 if (filled) 3864 goto cache_index; 3865 3866 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3867 struct btrfs_inode_item); 3868 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3869 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3870 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3871 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3872 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3873 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3874 round_up(i_size_read(inode), fs_info->sectorsize)); 3875 3876 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3877 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3878 3879 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3880 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3881 3882 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3883 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3884 3885 BTRFS_I(inode)->i_otime.tv_sec = 3886 btrfs_timespec_sec(leaf, &inode_item->otime); 3887 BTRFS_I(inode)->i_otime.tv_nsec = 3888 btrfs_timespec_nsec(leaf, &inode_item->otime); 3889 3890 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3891 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3892 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3893 3894 inode_set_iversion_queried(inode, 3895 btrfs_inode_sequence(leaf, inode_item)); 3896 inode->i_generation = BTRFS_I(inode)->generation; 3897 inode->i_rdev = 0; 3898 rdev = btrfs_inode_rdev(leaf, inode_item); 3899 3900 BTRFS_I(inode)->index_cnt = (u64)-1; 3901 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3902 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3903 3904 cache_index: 3905 /* 3906 * If we were modified in the current generation and evicted from memory 3907 * and then re-read we need to do a full sync since we don't have any 3908 * idea about which extents were modified before we were evicted from 3909 * cache. 3910 * 3911 * This is required for both inode re-read from disk and delayed inode 3912 * in the delayed_nodes xarray. 
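 *
 * With the full sync flag set, the next fsync of this inode logs it in full
 * instead of incrementally, which is the safe choice when the modification
 * history is unknown.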
3913 */ 3914 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3915 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3916 &BTRFS_I(inode)->runtime_flags); 3917 3918 /* 3919 * We don't persist the id of the transaction where an unlink operation 3920 * against the inode was last made. So here we assume the inode might 3921 * have been evicted, and therefore the exact value of last_unlink_trans 3922 * lost, and set it to last_trans to avoid metadata inconsistencies 3923 * between the inode and its parent if the inode is fsync'ed and the log 3924 * replayed. For example, in the scenario: 3925 * 3926 * touch mydir/foo 3927 * ln mydir/foo mydir/bar 3928 * sync 3929 * unlink mydir/bar 3930 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3931 * xfs_io -c fsync mydir/foo 3932 * <power failure> 3933 * mount fs, triggers fsync log replay 3934 * 3935 * We must make sure that when we fsync our inode foo we also log its 3936 * parent inode, otherwise after log replay the parent still has the 3937 * dentry with the "bar" name but our inode foo has a link count of 1 3938 * and doesn't have an inode ref with the name "bar" anymore. 3939 * 3940 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3941 * but it guarantees correctness at the expense of occasional full 3942 * transaction commits on fsync if our inode is a directory, or if our 3943 * inode is not a directory, logging its parent unnecessarily. 3944 */ 3945 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3946 3947 /* 3948 * Same logic as for last_unlink_trans. We don't persist the generation 3949 * of the last transaction where this inode was used for a reflink 3950 * operation, so after eviction and reloading the inode we must be 3951 * pessimistic and assume the last transaction that modified the inode. 
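 *
 * As with last_unlink_trans, overestimating this value only makes a future
 * fsync do more work than strictly necessary; it never affects correctness.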
3952 */ 3953 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3954 3955 path->slots[0]++; 3956 if (inode->i_nlink != 1 || 3957 path->slots[0] >= btrfs_header_nritems(leaf)) 3958 goto cache_acl; 3959 3960 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3961 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3962 goto cache_acl; 3963 3964 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3965 if (location.type == BTRFS_INODE_REF_KEY) { 3966 struct btrfs_inode_ref *ref; 3967 3968 ref = (struct btrfs_inode_ref *)ptr; 3969 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3970 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3971 struct btrfs_inode_extref *extref; 3972 3973 extref = (struct btrfs_inode_extref *)ptr; 3974 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3975 extref); 3976 } 3977 cache_acl: 3978 /* 3979 * try to precache a NULL acl entry for files that don't have 3980 * any xattrs or acls 3981 */ 3982 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3983 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3984 if (first_xattr_slot != -1) { 3985 path->slots[0] = first_xattr_slot; 3986 ret = btrfs_load_inode_props(inode, path); 3987 if (ret) 3988 btrfs_err(fs_info, 3989 "error loading props for ino %llu (root %llu): %d", 3990 btrfs_ino(BTRFS_I(inode)), 3991 root->root_key.objectid, ret); 3992 } 3993 if (path != in_path) 3994 btrfs_free_path(path); 3995 3996 if (!maybe_acls) 3997 cache_no_acl(inode); 3998 3999 switch (inode->i_mode & S_IFMT) { 4000 case S_IFREG: 4001 inode->i_mapping->a_ops = &btrfs_aops; 4002 inode->i_fop = &btrfs_file_operations; 4003 inode->i_op = &btrfs_file_inode_operations; 4004 break; 4005 case S_IFDIR: 4006 inode->i_fop = &btrfs_dir_file_operations; 4007 inode->i_op = &btrfs_dir_inode_operations; 4008 break; 4009 case S_IFLNK: 4010 inode->i_op = &btrfs_symlink_inode_operations; 4011 inode_nohighmem(inode); 4012 inode->i_mapping->a_ops = &btrfs_aops; 4013 break; 4014 default: 4015 inode->i_op = &btrfs_special_inode_operations; 4016 init_special_inode(inode, inode->i_mode, rdev); 4017 break; 4018 } 4019 4020 btrfs_sync_inode_flags_to_i_flags(inode); 4021 return 0; 4022 } 4023 4024 /* 4025 * given a leaf and an inode, copy the inode fields into the leaf 4026 */ 4027 static void fill_inode_item(struct btrfs_trans_handle *trans, 4028 struct extent_buffer *leaf, 4029 struct btrfs_inode_item *item, 4030 struct inode *inode) 4031 { 4032 struct btrfs_map_token token; 4033 u64 flags; 4034 4035 btrfs_init_map_token(&token, leaf); 4036 4037 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4038 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4039 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4040 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4041 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4042 4043 btrfs_set_token_timespec_sec(&token, &item->atime, 4044 inode->i_atime.tv_sec); 4045 btrfs_set_token_timespec_nsec(&token, &item->atime, 4046 inode->i_atime.tv_nsec); 4047 4048 btrfs_set_token_timespec_sec(&token, &item->mtime, 4049 inode->i_mtime.tv_sec); 4050 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4051 inode->i_mtime.tv_nsec); 4052 4053 btrfs_set_token_timespec_sec(&token, &item->ctime, 4054 inode->i_ctime.tv_sec); 4055 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4056 inode->i_ctime.tv_nsec); 4057 4058 btrfs_set_token_timespec_sec(&token, &item->otime, 4059 BTRFS_I(inode)->i_otime.tv_sec); 4060 
btrfs_set_token_timespec_nsec(&token, &item->otime, 4061 BTRFS_I(inode)->i_otime.tv_nsec); 4062 4063 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4064 btrfs_set_token_inode_generation(&token, item, 4065 BTRFS_I(inode)->generation); 4066 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4067 btrfs_set_token_inode_transid(&token, item, trans->transid); 4068 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4069 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4070 BTRFS_I(inode)->ro_flags); 4071 btrfs_set_token_inode_flags(&token, item, flags); 4072 btrfs_set_token_inode_block_group(&token, item, 0); 4073 } 4074 4075 /* 4076 * copy everything in the in-memory inode into the btree. 4077 */ 4078 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4079 struct btrfs_root *root, 4080 struct btrfs_inode *inode) 4081 { 4082 struct btrfs_inode_item *inode_item; 4083 struct btrfs_path *path; 4084 struct extent_buffer *leaf; 4085 int ret; 4086 4087 path = btrfs_alloc_path(); 4088 if (!path) 4089 return -ENOMEM; 4090 4091 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 4092 if (ret) { 4093 if (ret > 0) 4094 ret = -ENOENT; 4095 goto failed; 4096 } 4097 4098 leaf = path->nodes[0]; 4099 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4100 struct btrfs_inode_item); 4101 4102 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4103 btrfs_mark_buffer_dirty(leaf); 4104 btrfs_set_inode_last_trans(trans, inode); 4105 ret = 0; 4106 failed: 4107 btrfs_free_path(path); 4108 return ret; 4109 } 4110 4111 /* 4112 * copy everything in the in-memory inode into the btree. 4113 */ 4114 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 4115 struct btrfs_root *root, 4116 struct btrfs_inode *inode) 4117 { 4118 struct btrfs_fs_info *fs_info = root->fs_info; 4119 int ret; 4120 4121 /* 4122 * If the inode is a free space inode, we can deadlock during commit 4123 * if we put it into the delayed code. 4124 * 4125 * The data relocation inode should also be directly updated 4126 * without delay 4127 */ 4128 if (!btrfs_is_free_space_inode(inode) 4129 && !btrfs_is_data_reloc_root(root) 4130 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4131 btrfs_update_root_times(trans, root); 4132 4133 ret = btrfs_delayed_update_inode(trans, root, inode); 4134 if (!ret) 4135 btrfs_set_inode_last_trans(trans, inode); 4136 return ret; 4137 } 4138 4139 return btrfs_update_inode_item(trans, root, inode); 4140 } 4141 4142 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4143 struct btrfs_root *root, struct btrfs_inode *inode) 4144 { 4145 int ret; 4146 4147 ret = btrfs_update_inode(trans, root, inode); 4148 if (ret == -ENOSPC) 4149 return btrfs_update_inode_item(trans, root, inode); 4150 return ret; 4151 } 4152 4153 /* 4154 * unlink helper that gets used here in inode.c and in the tree logging 4155 * recovery code. 
It remove a link in a directory with a given name, and 4156 * also drops the back refs in the inode to the directory 4157 */ 4158 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4159 struct btrfs_inode *dir, 4160 struct btrfs_inode *inode, 4161 const char *name, int name_len, 4162 struct btrfs_rename_ctx *rename_ctx) 4163 { 4164 struct btrfs_root *root = dir->root; 4165 struct btrfs_fs_info *fs_info = root->fs_info; 4166 struct btrfs_path *path; 4167 int ret = 0; 4168 struct btrfs_dir_item *di; 4169 u64 index; 4170 u64 ino = btrfs_ino(inode); 4171 u64 dir_ino = btrfs_ino(dir); 4172 4173 path = btrfs_alloc_path(); 4174 if (!path) { 4175 ret = -ENOMEM; 4176 goto out; 4177 } 4178 4179 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4180 name, name_len, -1); 4181 if (IS_ERR_OR_NULL(di)) { 4182 ret = di ? PTR_ERR(di) : -ENOENT; 4183 goto err; 4184 } 4185 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4186 if (ret) 4187 goto err; 4188 btrfs_release_path(path); 4189 4190 /* 4191 * If we don't have dir index, we have to get it by looking up 4192 * the inode ref, since we get the inode ref, remove it directly, 4193 * it is unnecessary to do delayed deletion. 4194 * 4195 * But if we have dir index, needn't search inode ref to get it. 4196 * Since the inode ref is close to the inode item, it is better 4197 * that we delay to delete it, and just do this deletion when 4198 * we update the inode item. 4199 */ 4200 if (inode->dir_index) { 4201 ret = btrfs_delayed_delete_inode_ref(inode); 4202 if (!ret) { 4203 index = inode->dir_index; 4204 goto skip_backref; 4205 } 4206 } 4207 4208 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 4209 dir_ino, &index); 4210 if (ret) { 4211 btrfs_info(fs_info, 4212 "failed to delete reference to %.*s, inode %llu parent %llu", 4213 name_len, name, ino, dir_ino); 4214 btrfs_abort_transaction(trans, ret); 4215 goto err; 4216 } 4217 skip_backref: 4218 if (rename_ctx) 4219 rename_ctx->index = index; 4220 4221 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4222 if (ret) { 4223 btrfs_abort_transaction(trans, ret); 4224 goto err; 4225 } 4226 4227 /* 4228 * If we are in a rename context, we don't need to update anything in the 4229 * log. That will be done later during the rename by btrfs_log_new_name(). 4230 * Besides that, doing it here would only cause extra unncessary btree 4231 * operations on the log tree, increasing latency for applications. 4232 */ 4233 if (!rename_ctx) { 4234 btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, 4235 dir_ino); 4236 btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, 4237 index); 4238 } 4239 4240 /* 4241 * If we have a pending delayed iput we could end up with the final iput 4242 * being run in btrfs-cleaner context. If we have enough of these built 4243 * up we can end up burning a lot of time in btrfs-cleaner without any 4244 * way to throttle the unlinks. Since we're currently holding a ref on 4245 * the inode we can run the delayed iput here without any issues as the 4246 * final iput won't be done until after we drop the ref we're currently 4247 * holding. 
4248 */ 4249 btrfs_run_delayed_iput(fs_info, inode); 4250 err: 4251 btrfs_free_path(path); 4252 if (ret) 4253 goto out; 4254 4255 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); 4256 inode_inc_iversion(&inode->vfs_inode); 4257 inode_inc_iversion(&dir->vfs_inode); 4258 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = 4259 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4260 ret = btrfs_update_inode(trans, root, dir); 4261 out: 4262 return ret; 4263 } 4264 4265 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4266 struct btrfs_inode *dir, struct btrfs_inode *inode, 4267 const char *name, int name_len) 4268 { 4269 int ret; 4270 ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL); 4271 if (!ret) { 4272 drop_nlink(&inode->vfs_inode); 4273 ret = btrfs_update_inode(trans, inode->root, inode); 4274 } 4275 return ret; 4276 } 4277 4278 /* 4279 * helper to start transaction for unlink and rmdir. 4280 * 4281 * unlink and rmdir are special in btrfs, they do not always free space, so 4282 * if we cannot make our reservations the normal way try and see if there is 4283 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4284 * allow the unlink to occur. 4285 */ 4286 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4287 { 4288 struct btrfs_root *root = BTRFS_I(dir)->root; 4289 4290 /* 4291 * 1 for the possible orphan item 4292 * 1 for the dir item 4293 * 1 for the dir index 4294 * 1 for the inode ref 4295 * 1 for the inode 4296 * 1 for the parent inode 4297 */ 4298 return btrfs_start_transaction_fallback_global_rsv(root, 6); 4299 } 4300 4301 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4302 { 4303 struct btrfs_trans_handle *trans; 4304 struct inode *inode = d_inode(dentry); 4305 int ret; 4306 4307 trans = __unlink_start_trans(dir); 4308 if (IS_ERR(trans)) 4309 return PTR_ERR(trans); 4310 4311 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4312 0); 4313 4314 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), 4315 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 4316 dentry->d_name.len); 4317 if (ret) 4318 goto out; 4319 4320 if (inode->i_nlink == 0) { 4321 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4322 if (ret) 4323 goto out; 4324 } 4325 4326 out: 4327 btrfs_end_transaction(trans); 4328 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4329 return ret; 4330 } 4331 4332 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4333 struct inode *dir, struct dentry *dentry) 4334 { 4335 struct btrfs_root *root = BTRFS_I(dir)->root; 4336 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4337 struct btrfs_path *path; 4338 struct extent_buffer *leaf; 4339 struct btrfs_dir_item *di; 4340 struct btrfs_key key; 4341 const char *name = dentry->d_name.name; 4342 int name_len = dentry->d_name.len; 4343 u64 index; 4344 int ret; 4345 u64 objectid; 4346 u64 dir_ino = btrfs_ino(BTRFS_I(dir)); 4347 4348 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4349 objectid = inode->root->root_key.objectid; 4350 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4351 objectid = inode->location.objectid; 4352 } else { 4353 WARN_ON(1); 4354 return -EINVAL; 4355 } 4356 4357 path = btrfs_alloc_path(); 4358 if (!path) 4359 return -ENOMEM; 4360 4361 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4362 name, name_len, -1); 4363 if (IS_ERR_OR_NULL(di)) { 4364 ret = di ? 
PTR_ERR(di) : -ENOENT; 4365 goto out; 4366 } 4367 4368 leaf = path->nodes[0]; 4369 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4370 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4371 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4372 if (ret) { 4373 btrfs_abort_transaction(trans, ret); 4374 goto out; 4375 } 4376 btrfs_release_path(path); 4377 4378 /* 4379 * This is a placeholder inode for a subvolume we didn't have a 4380 * reference to at the time of the snapshot creation. In the meantime 4381 * we could have renamed the real subvol link into our snapshot, so 4382 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4383 * Instead simply lookup the dir_index_item for this entry so we can 4384 * remove it. Otherwise we know we have a ref to the root and we can 4385 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4386 */ 4387 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4388 di = btrfs_search_dir_index_item(root, path, dir_ino, 4389 name, name_len); 4390 if (IS_ERR_OR_NULL(di)) { 4391 if (!di) 4392 ret = -ENOENT; 4393 else 4394 ret = PTR_ERR(di); 4395 btrfs_abort_transaction(trans, ret); 4396 goto out; 4397 } 4398 4399 leaf = path->nodes[0]; 4400 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4401 index = key.offset; 4402 btrfs_release_path(path); 4403 } else { 4404 ret = btrfs_del_root_ref(trans, objectid, 4405 root->root_key.objectid, dir_ino, 4406 &index, name, name_len); 4407 if (ret) { 4408 btrfs_abort_transaction(trans, ret); 4409 goto out; 4410 } 4411 } 4412 4413 ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index); 4414 if (ret) { 4415 btrfs_abort_transaction(trans, ret); 4416 goto out; 4417 } 4418 4419 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); 4420 inode_inc_iversion(dir); 4421 dir->i_mtime = dir->i_ctime = current_time(dir); 4422 ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir)); 4423 if (ret) 4424 btrfs_abort_transaction(trans, ret); 4425 out: 4426 btrfs_free_path(path); 4427 return ret; 4428 } 4429 4430 /* 4431 * Helper to check if the subvolume references other subvolumes or if it's 4432 * default. 
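 *
 * Returns 0 when the subvolume can be deleted, -EPERM when it is the default
 * subvolume, -ENOTEMPTY when it still references other subvolumes, or another
 * negative errno on lookup failure.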
4433 */ 4434 static noinline int may_destroy_subvol(struct btrfs_root *root) 4435 { 4436 struct btrfs_fs_info *fs_info = root->fs_info; 4437 struct btrfs_path *path; 4438 struct btrfs_dir_item *di; 4439 struct btrfs_key key; 4440 u64 dir_id; 4441 int ret; 4442 4443 path = btrfs_alloc_path(); 4444 if (!path) 4445 return -ENOMEM; 4446 4447 /* Make sure this root isn't set as the default subvol */ 4448 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4449 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4450 dir_id, "default", 7, 0); 4451 if (di && !IS_ERR(di)) { 4452 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4453 if (key.objectid == root->root_key.objectid) { 4454 ret = -EPERM; 4455 btrfs_err(fs_info, 4456 "deleting default subvolume %llu is not allowed", 4457 key.objectid); 4458 goto out; 4459 } 4460 btrfs_release_path(path); 4461 } 4462 4463 key.objectid = root->root_key.objectid; 4464 key.type = BTRFS_ROOT_REF_KEY; 4465 key.offset = (u64)-1; 4466 4467 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4468 if (ret < 0) 4469 goto out; 4470 BUG_ON(ret == 0); 4471 4472 ret = 0; 4473 if (path->slots[0] > 0) { 4474 path->slots[0]--; 4475 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4476 if (key.objectid == root->root_key.objectid && 4477 key.type == BTRFS_ROOT_REF_KEY) 4478 ret = -ENOTEMPTY; 4479 } 4480 out: 4481 btrfs_free_path(path); 4482 return ret; 4483 } 4484 4485 /* Delete all dentries for inodes belonging to the root */ 4486 static void btrfs_prune_dentries(struct btrfs_root *root) 4487 { 4488 struct btrfs_fs_info *fs_info = root->fs_info; 4489 struct rb_node *node; 4490 struct rb_node *prev; 4491 struct btrfs_inode *entry; 4492 struct inode *inode; 4493 u64 objectid = 0; 4494 4495 if (!BTRFS_FS_ERROR(fs_info)) 4496 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4497 4498 spin_lock(&root->inode_lock); 4499 again: 4500 node = root->inode_tree.rb_node; 4501 prev = NULL; 4502 while (node) { 4503 prev = node; 4504 entry = rb_entry(node, struct btrfs_inode, rb_node); 4505 4506 if (objectid < btrfs_ino(entry)) 4507 node = node->rb_left; 4508 else if (objectid > btrfs_ino(entry)) 4509 node = node->rb_right; 4510 else 4511 break; 4512 } 4513 if (!node) { 4514 while (prev) { 4515 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4516 if (objectid <= btrfs_ino(entry)) { 4517 node = prev; 4518 break; 4519 } 4520 prev = rb_next(prev); 4521 } 4522 } 4523 while (node) { 4524 entry = rb_entry(node, struct btrfs_inode, rb_node); 4525 objectid = btrfs_ino(entry) + 1; 4526 inode = igrab(&entry->vfs_inode); 4527 if (inode) { 4528 spin_unlock(&root->inode_lock); 4529 if (atomic_read(&inode->i_count) > 1) 4530 d_prune_aliases(inode); 4531 /* 4532 * btrfs_drop_inode will have it removed from the inode 4533 * cache when its usage count hits zero. 
4534 */ 4535 iput(inode); 4536 cond_resched(); 4537 spin_lock(&root->inode_lock); 4538 goto again; 4539 } 4540 4541 if (cond_resched_lock(&root->inode_lock)) 4542 goto again; 4543 4544 node = rb_next(node); 4545 } 4546 spin_unlock(&root->inode_lock); 4547 } 4548 4549 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) 4550 { 4551 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4552 struct btrfs_root *root = BTRFS_I(dir)->root; 4553 struct inode *inode = d_inode(dentry); 4554 struct btrfs_root *dest = BTRFS_I(inode)->root; 4555 struct btrfs_trans_handle *trans; 4556 struct btrfs_block_rsv block_rsv; 4557 u64 root_flags; 4558 int ret; 4559 4560 /* 4561 * Don't allow to delete a subvolume with send in progress. This is 4562 * inside the inode lock so the error handling that has to drop the bit 4563 * again is not run concurrently. 4564 */ 4565 spin_lock(&dest->root_item_lock); 4566 if (dest->send_in_progress) { 4567 spin_unlock(&dest->root_item_lock); 4568 btrfs_warn(fs_info, 4569 "attempt to delete subvolume %llu during send", 4570 dest->root_key.objectid); 4571 return -EPERM; 4572 } 4573 if (atomic_read(&dest->nr_swapfiles)) { 4574 spin_unlock(&dest->root_item_lock); 4575 btrfs_warn(fs_info, 4576 "attempt to delete subvolume %llu with active swapfile", 4577 root->root_key.objectid); 4578 return -EPERM; 4579 } 4580 root_flags = btrfs_root_flags(&dest->root_item); 4581 btrfs_set_root_flags(&dest->root_item, 4582 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4583 spin_unlock(&dest->root_item_lock); 4584 4585 down_write(&fs_info->subvol_sem); 4586 4587 ret = may_destroy_subvol(dest); 4588 if (ret) 4589 goto out_up_write; 4590 4591 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4592 /* 4593 * One for dir inode, 4594 * two for dir entries, 4595 * two for root ref/backref. 
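 * In total five items, matching the units passed to
 * btrfs_subvolume_reserve_metadata() below.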
4596 */ 4597 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4598 if (ret) 4599 goto out_up_write; 4600 4601 trans = btrfs_start_transaction(root, 0); 4602 if (IS_ERR(trans)) { 4603 ret = PTR_ERR(trans); 4604 goto out_release; 4605 } 4606 trans->block_rsv = &block_rsv; 4607 trans->bytes_reserved = block_rsv.size; 4608 4609 btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); 4610 4611 ret = btrfs_unlink_subvol(trans, dir, dentry); 4612 if (ret) { 4613 btrfs_abort_transaction(trans, ret); 4614 goto out_end_trans; 4615 } 4616 4617 ret = btrfs_record_root_in_trans(trans, dest); 4618 if (ret) { 4619 btrfs_abort_transaction(trans, ret); 4620 goto out_end_trans; 4621 } 4622 4623 memset(&dest->root_item.drop_progress, 0, 4624 sizeof(dest->root_item.drop_progress)); 4625 btrfs_set_root_drop_level(&dest->root_item, 0); 4626 btrfs_set_root_refs(&dest->root_item, 0); 4627 4628 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4629 ret = btrfs_insert_orphan_item(trans, 4630 fs_info->tree_root, 4631 dest->root_key.objectid); 4632 if (ret) { 4633 btrfs_abort_transaction(trans, ret); 4634 goto out_end_trans; 4635 } 4636 } 4637 4638 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4639 BTRFS_UUID_KEY_SUBVOL, 4640 dest->root_key.objectid); 4641 if (ret && ret != -ENOENT) { 4642 btrfs_abort_transaction(trans, ret); 4643 goto out_end_trans; 4644 } 4645 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4646 ret = btrfs_uuid_tree_remove(trans, 4647 dest->root_item.received_uuid, 4648 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4649 dest->root_key.objectid); 4650 if (ret && ret != -ENOENT) { 4651 btrfs_abort_transaction(trans, ret); 4652 goto out_end_trans; 4653 } 4654 } 4655 4656 free_anon_bdev(dest->anon_dev); 4657 dest->anon_dev = 0; 4658 out_end_trans: 4659 trans->block_rsv = NULL; 4660 trans->bytes_reserved = 0; 4661 ret = btrfs_end_transaction(trans); 4662 inode->i_flags |= S_DEAD; 4663 out_release: 4664 btrfs_subvolume_release_metadata(root, &block_rsv); 4665 out_up_write: 4666 up_write(&fs_info->subvol_sem); 4667 if (ret) { 4668 spin_lock(&dest->root_item_lock); 4669 root_flags = btrfs_root_flags(&dest->root_item); 4670 btrfs_set_root_flags(&dest->root_item, 4671 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4672 spin_unlock(&dest->root_item_lock); 4673 } else { 4674 d_invalidate(dentry); 4675 btrfs_prune_dentries(dest); 4676 ASSERT(dest->send_in_progress == 0); 4677 } 4678 4679 return ret; 4680 } 4681 4682 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4683 { 4684 struct inode *inode = d_inode(dentry); 4685 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4686 int err = 0; 4687 struct btrfs_trans_handle *trans; 4688 u64 last_unlink_trans; 4689 4690 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4691 return -ENOTEMPTY; 4692 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4693 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4694 btrfs_err(fs_info, 4695 "extent tree v2 doesn't support snapshot deletion yet"); 4696 return -EOPNOTSUPP; 4697 } 4698 return btrfs_delete_subvolume(dir, dentry); 4699 } 4700 4701 trans = __unlink_start_trans(dir); 4702 if (IS_ERR(trans)) 4703 return PTR_ERR(trans); 4704 4705 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4706 err = btrfs_unlink_subvol(trans, dir, dentry); 4707 goto out; 4708 } 4709 4710 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4711 if (err) 4712 goto out; 4713 4714 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4715 4716 /* now 
the directory is empty */ 4717 err = btrfs_unlink_inode(trans, BTRFS_I(dir), 4718 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 4719 dentry->d_name.len); 4720 if (!err) { 4721 btrfs_i_size_write(BTRFS_I(inode), 0); 4722 /* 4723 * Propagate the last_unlink_trans value of the deleted dir to 4724 * its parent directory. This is to prevent an unrecoverable 4725 * log tree in the case we do something like this: 4726 * 1) create dir foo 4727 * 2) create snapshot under dir foo 4728 * 3) delete the snapshot 4729 * 4) rmdir foo 4730 * 5) mkdir foo 4731 * 6) fsync foo or some file inside foo 4732 */ 4733 if (last_unlink_trans >= trans->transid) 4734 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4735 } 4736 out: 4737 btrfs_end_transaction(trans); 4738 btrfs_btree_balance_dirty(fs_info); 4739 4740 return err; 4741 } 4742 4743 /* 4744 * btrfs_truncate_block - read, zero a chunk and write a block 4745 * @inode - inode that we're zeroing 4746 * @from - the offset to start zeroing 4747 * @len - the length to zero, 0 to zero the entire range respective to the 4748 * offset 4749 * @front - zero up to the offset instead of from the offset on 4750 * 4751 * This will find the block for the "from" offset and cow the block and zero the 4752 * part we want to zero. This is used with truncate and hole punching. 4753 */ 4754 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4755 int front) 4756 { 4757 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4758 struct address_space *mapping = inode->vfs_inode.i_mapping; 4759 struct extent_io_tree *io_tree = &inode->io_tree; 4760 struct btrfs_ordered_extent *ordered; 4761 struct extent_state *cached_state = NULL; 4762 struct extent_changeset *data_reserved = NULL; 4763 bool only_release_metadata = false; 4764 u32 blocksize = fs_info->sectorsize; 4765 pgoff_t index = from >> PAGE_SHIFT; 4766 unsigned offset = from & (blocksize - 1); 4767 struct page *page; 4768 gfp_t mask = btrfs_alloc_write_mask(mapping); 4769 size_t write_bytes = blocksize; 4770 int ret = 0; 4771 u64 block_start; 4772 u64 block_end; 4773 4774 if (IS_ALIGNED(offset, blocksize) && 4775 (!len || IS_ALIGNED(len, blocksize))) 4776 goto out; 4777 4778 block_start = round_down(from, blocksize); 4779 block_end = block_start + blocksize - 1; 4780 4781 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4782 blocksize); 4783 if (ret < 0) { 4784 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) { 4785 /* For nocow case, no need to reserve data space */ 4786 only_release_metadata = true; 4787 } else { 4788 goto out; 4789 } 4790 } 4791 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4792 if (ret < 0) { 4793 if (!only_release_metadata) 4794 btrfs_free_reserved_data_space(inode, data_reserved, 4795 block_start, blocksize); 4796 goto out; 4797 } 4798 again: 4799 page = find_or_create_page(mapping, index, mask); 4800 if (!page) { 4801 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4802 blocksize, true); 4803 btrfs_delalloc_release_extents(inode, blocksize); 4804 ret = -ENOMEM; 4805 goto out; 4806 } 4807 ret = set_page_extent_mapped(page); 4808 if (ret < 0) 4809 goto out_unlock; 4810 4811 if (!PageUptodate(page)) { 4812 ret = btrfs_read_folio(NULL, page_folio(page)); 4813 lock_page(page); 4814 if (page->mapping != mapping) { 4815 unlock_page(page); 4816 put_page(page); 4817 goto again; 4818 } 4819 if (!PageUptodate(page)) { 4820 ret = -EIO; 4821 goto out_unlock; 4822 } 4823 } 4824 
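	/*
	 * The page is uptodate now. Wait for any writeback of it to finish
	 * before we lock the extent range and zero the block.
	 */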
wait_on_page_writeback(page); 4825 4826 lock_extent_bits(io_tree, block_start, block_end, &cached_state); 4827 4828 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4829 if (ordered) { 4830 unlock_extent_cached(io_tree, block_start, block_end, 4831 &cached_state); 4832 unlock_page(page); 4833 put_page(page); 4834 btrfs_start_ordered_extent(ordered, 1); 4835 btrfs_put_ordered_extent(ordered); 4836 goto again; 4837 } 4838 4839 clear_extent_bit(&inode->io_tree, block_start, block_end, 4840 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4841 0, 0, &cached_state); 4842 4843 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4844 &cached_state); 4845 if (ret) { 4846 unlock_extent_cached(io_tree, block_start, block_end, 4847 &cached_state); 4848 goto out_unlock; 4849 } 4850 4851 if (offset != blocksize) { 4852 if (!len) 4853 len = blocksize - offset; 4854 if (front) 4855 memzero_page(page, (block_start - page_offset(page)), 4856 offset); 4857 else 4858 memzero_page(page, (block_start - page_offset(page)) + offset, 4859 len); 4860 flush_dcache_page(page); 4861 } 4862 btrfs_page_clear_checked(fs_info, page, block_start, 4863 block_end + 1 - block_start); 4864 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 4865 unlock_extent_cached(io_tree, block_start, block_end, &cached_state); 4866 4867 if (only_release_metadata) 4868 set_extent_bit(&inode->io_tree, block_start, block_end, 4869 EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL); 4870 4871 out_unlock: 4872 if (ret) { 4873 if (only_release_metadata) 4874 btrfs_delalloc_release_metadata(inode, blocksize, true); 4875 else 4876 btrfs_delalloc_release_space(inode, data_reserved, 4877 block_start, blocksize, true); 4878 } 4879 btrfs_delalloc_release_extents(inode, blocksize); 4880 unlock_page(page); 4881 put_page(page); 4882 out: 4883 if (only_release_metadata) 4884 btrfs_check_nocow_unlock(inode); 4885 extent_changeset_free(data_reserved); 4886 return ret; 4887 } 4888 4889 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 4890 u64 offset, u64 len) 4891 { 4892 struct btrfs_fs_info *fs_info = root->fs_info; 4893 struct btrfs_trans_handle *trans; 4894 struct btrfs_drop_extents_args drop_args = { 0 }; 4895 int ret; 4896 4897 /* 4898 * If NO_HOLES is enabled, we don't need to do anything. 4899 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4900 * or btrfs_update_inode() will be called, which guarantee that the next 4901 * fsync will know this inode was changed and needs to be logged. 4902 */ 4903 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4904 return 0; 4905 4906 /* 4907 * 1 - for the one we're dropping 4908 * 1 - for the one we're adding 4909 * 1 - for updating the inode. 
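 * These three items match the 3 units requested from
 * btrfs_start_transaction() below.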
4910 */ 4911 trans = btrfs_start_transaction(root, 3); 4912 if (IS_ERR(trans)) 4913 return PTR_ERR(trans); 4914 4915 drop_args.start = offset; 4916 drop_args.end = offset + len; 4917 drop_args.drop_cache = true; 4918 4919 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4920 if (ret) { 4921 btrfs_abort_transaction(trans, ret); 4922 btrfs_end_transaction(trans); 4923 return ret; 4924 } 4925 4926 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), 4927 offset, 0, 0, len, 0, len, 0, 0, 0); 4928 if (ret) { 4929 btrfs_abort_transaction(trans, ret); 4930 } else { 4931 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4932 btrfs_update_inode(trans, root, inode); 4933 } 4934 btrfs_end_transaction(trans); 4935 return ret; 4936 } 4937 4938 /* 4939 * This function puts in dummy file extents for the area we're creating a hole 4940 * for. So if we are truncating this file to a larger size we need to insert 4941 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4942 * the range between oldsize and size 4943 */ 4944 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4945 { 4946 struct btrfs_root *root = inode->root; 4947 struct btrfs_fs_info *fs_info = root->fs_info; 4948 struct extent_io_tree *io_tree = &inode->io_tree; 4949 struct extent_map *em = NULL; 4950 struct extent_state *cached_state = NULL; 4951 struct extent_map_tree *em_tree = &inode->extent_tree; 4952 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4953 u64 block_end = ALIGN(size, fs_info->sectorsize); 4954 u64 last_byte; 4955 u64 cur_offset; 4956 u64 hole_size; 4957 int err = 0; 4958 4959 /* 4960 * If our size started in the middle of a block we need to zero out the 4961 * rest of the block before we expand the i_size, otherwise we could 4962 * expose stale data. 
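 *
 * For example, with a 4K sector size and an old size of 5000 bytes, the
 * btrfs_truncate_block() call below zeroes bytes 5000-8191, and hole
 * extents are only inserted from offset 8192 (hole_start) onwards.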
4963 */ 4964 err = btrfs_truncate_block(inode, oldsize, 0, 0); 4965 if (err) 4966 return err; 4967 4968 if (size <= hole_start) 4969 return 0; 4970 4971 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4972 &cached_state); 4973 cur_offset = hole_start; 4974 while (1) { 4975 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4976 block_end - cur_offset); 4977 if (IS_ERR(em)) { 4978 err = PTR_ERR(em); 4979 em = NULL; 4980 break; 4981 } 4982 last_byte = min(extent_map_end(em), block_end); 4983 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4984 hole_size = last_byte - cur_offset; 4985 4986 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4987 struct extent_map *hole_em; 4988 4989 err = maybe_insert_hole(root, inode, cur_offset, 4990 hole_size); 4991 if (err) 4992 break; 4993 4994 err = btrfs_inode_set_file_extent_range(inode, 4995 cur_offset, hole_size); 4996 if (err) 4997 break; 4998 4999 btrfs_drop_extent_cache(inode, cur_offset, 5000 cur_offset + hole_size - 1, 0); 5001 hole_em = alloc_extent_map(); 5002 if (!hole_em) { 5003 btrfs_set_inode_full_sync(inode); 5004 goto next; 5005 } 5006 hole_em->start = cur_offset; 5007 hole_em->len = hole_size; 5008 hole_em->orig_start = cur_offset; 5009 5010 hole_em->block_start = EXTENT_MAP_HOLE; 5011 hole_em->block_len = 0; 5012 hole_em->orig_block_len = 0; 5013 hole_em->ram_bytes = hole_size; 5014 hole_em->compress_type = BTRFS_COMPRESS_NONE; 5015 hole_em->generation = fs_info->generation; 5016 5017 while (1) { 5018 write_lock(&em_tree->lock); 5019 err = add_extent_mapping(em_tree, hole_em, 1); 5020 write_unlock(&em_tree->lock); 5021 if (err != -EEXIST) 5022 break; 5023 btrfs_drop_extent_cache(inode, cur_offset, 5024 cur_offset + 5025 hole_size - 1, 0); 5026 } 5027 free_extent_map(hole_em); 5028 } else { 5029 err = btrfs_inode_set_file_extent_range(inode, 5030 cur_offset, hole_size); 5031 if (err) 5032 break; 5033 } 5034 next: 5035 free_extent_map(em); 5036 em = NULL; 5037 cur_offset = last_byte; 5038 if (cur_offset >= block_end) 5039 break; 5040 } 5041 free_extent_map(em); 5042 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state); 5043 return err; 5044 } 5045 5046 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5047 { 5048 struct btrfs_root *root = BTRFS_I(inode)->root; 5049 struct btrfs_trans_handle *trans; 5050 loff_t oldsize = i_size_read(inode); 5051 loff_t newsize = attr->ia_size; 5052 int mask = attr->ia_valid; 5053 int ret; 5054 5055 /* 5056 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5057 * special case where we need to update the times despite not having 5058 * these flags set. For all other operations the VFS set these flags 5059 * explicitly if it wants a timestamp update. 5060 */ 5061 if (newsize != oldsize) { 5062 inode_inc_iversion(inode); 5063 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) 5064 inode->i_ctime = inode->i_mtime = 5065 current_time(inode); 5066 } 5067 5068 if (newsize > oldsize) { 5069 /* 5070 * Don't do an expanding truncate while snapshotting is ongoing. 5071 * This is to ensure the snapshot captures a fully consistent 5072 * state of this file - if the snapshot captures this expanding 5073 * truncation, it must capture all writes that happened before 5074 * this truncation. 
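 *
 * The write side of root->snapshot_lock taken below provides that
 * exclusion between snapshot creation and the expanding truncate.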
5075 */ 5076 btrfs_drew_write_lock(&root->snapshot_lock); 5077 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5078 if (ret) { 5079 btrfs_drew_write_unlock(&root->snapshot_lock); 5080 return ret; 5081 } 5082 5083 trans = btrfs_start_transaction(root, 1); 5084 if (IS_ERR(trans)) { 5085 btrfs_drew_write_unlock(&root->snapshot_lock); 5086 return PTR_ERR(trans); 5087 } 5088 5089 i_size_write(inode, newsize); 5090 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5091 pagecache_isize_extended(inode, oldsize, newsize); 5092 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 5093 btrfs_drew_write_unlock(&root->snapshot_lock); 5094 btrfs_end_transaction(trans); 5095 } else { 5096 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5097 5098 if (btrfs_is_zoned(fs_info)) { 5099 ret = btrfs_wait_ordered_range(inode, 5100 ALIGN(newsize, fs_info->sectorsize), 5101 (u64)-1); 5102 if (ret) 5103 return ret; 5104 } 5105 5106 /* 5107 * We're truncating a file that used to have good data down to 5108 * zero. Make sure any new writes to the file get on disk 5109 * on close. 5110 */ 5111 if (newsize == 0) 5112 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5113 &BTRFS_I(inode)->runtime_flags); 5114 5115 truncate_setsize(inode, newsize); 5116 5117 inode_dio_wait(inode); 5118 5119 ret = btrfs_truncate(inode, newsize == oldsize); 5120 if (ret && inode->i_nlink) { 5121 int err; 5122 5123 /* 5124 * Truncate failed, so fix up the in-memory size. We 5125 * adjusted disk_i_size down as we removed extents, so 5126 * wait for disk_i_size to be stable and then update the 5127 * in-memory size to match. 5128 */ 5129 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5130 if (err) 5131 return err; 5132 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5133 } 5134 } 5135 5136 return ret; 5137 } 5138 5139 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 5140 struct iattr *attr) 5141 { 5142 struct inode *inode = d_inode(dentry); 5143 struct btrfs_root *root = BTRFS_I(inode)->root; 5144 int err; 5145 5146 if (btrfs_root_readonly(root)) 5147 return -EROFS; 5148 5149 err = setattr_prepare(mnt_userns, dentry, attr); 5150 if (err) 5151 return err; 5152 5153 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5154 err = btrfs_setsize(inode, attr); 5155 if (err) 5156 return err; 5157 } 5158 5159 if (attr->ia_valid) { 5160 setattr_copy(mnt_userns, inode, attr); 5161 inode_inc_iversion(inode); 5162 err = btrfs_dirty_inode(inode); 5163 5164 if (!err && attr->ia_valid & ATTR_MODE) 5165 err = posix_acl_chmod(mnt_userns, inode, inode->i_mode); 5166 } 5167 5168 return err; 5169 } 5170 5171 /* 5172 * While truncating the inode pages during eviction, we get the VFS 5173 * calling btrfs_invalidate_folio() against each folio of the inode. This 5174 * is slow because the calls to btrfs_invalidate_folio() result in a 5175 * huge amount of calls to lock_extent_bits() and clear_extent_bit(), 5176 * which keep merging and splitting extent_state structures over and over, 5177 * wasting lots of time. 5178 * 5179 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5180 * skip all those expensive operations on a per folio basis and do only 5181 * the ordered io finishing, while we release here the extent_map and 5182 * extent_state structures, without the excessive merging and splitting. 
5183 */ 5184 static void evict_inode_truncate_pages(struct inode *inode) 5185 { 5186 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5187 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; 5188 struct rb_node *node; 5189 5190 ASSERT(inode->i_state & I_FREEING); 5191 truncate_inode_pages_final(&inode->i_data); 5192 5193 write_lock(&map_tree->lock); 5194 while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) { 5195 struct extent_map *em; 5196 5197 node = rb_first_cached(&map_tree->map); 5198 em = rb_entry(node, struct extent_map, rb_node); 5199 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 5200 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 5201 remove_extent_mapping(map_tree, em); 5202 free_extent_map(em); 5203 if (need_resched()) { 5204 write_unlock(&map_tree->lock); 5205 cond_resched(); 5206 write_lock(&map_tree->lock); 5207 } 5208 } 5209 write_unlock(&map_tree->lock); 5210 5211 /* 5212 * Keep looping until we have no more ranges in the io tree. 5213 * We can have ongoing bios started by readahead that have 5214 * their endio callback (extent_io.c:end_bio_extent_readpage) 5215 * still in progress (unlocked the pages in the bio but did not yet 5216 * unlocked the ranges in the io tree). Therefore this means some 5217 * ranges can still be locked and eviction started because before 5218 * submitting those bios, which are executed by a separate task (work 5219 * queue kthread), inode references (inode->i_count) were not taken 5220 * (which would be dropped in the end io callback of each bio). 5221 * Therefore here we effectively end up waiting for those bios and 5222 * anyone else holding locked ranges without having bumped the inode's 5223 * reference count - if we don't do it, when they access the inode's 5224 * io_tree to unlock a range it may be too late, leading to an 5225 * use-after-free issue. 5226 */ 5227 spin_lock(&io_tree->lock); 5228 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5229 struct extent_state *state; 5230 struct extent_state *cached_state = NULL; 5231 u64 start; 5232 u64 end; 5233 unsigned state_flags; 5234 5235 node = rb_first(&io_tree->state); 5236 state = rb_entry(node, struct extent_state, rb_node); 5237 start = state->start; 5238 end = state->end; 5239 state_flags = state->state; 5240 spin_unlock(&io_tree->lock); 5241 5242 lock_extent_bits(io_tree, start, end, &cached_state); 5243 5244 /* 5245 * If still has DELALLOC flag, the extent didn't reach disk, 5246 * and its reserved space won't be freed by delayed_ref. 5247 * So we need to free its reserved space here. 5248 * (Refer to comment in btrfs_invalidate_folio, case 2) 5249 * 5250 * Note, end is the bytenr of last byte, so we need + 1 here. 5251 */ 5252 if (state_flags & EXTENT_DELALLOC) 5253 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5254 end - start + 1); 5255 5256 clear_extent_bit(io_tree, start, end, 5257 EXTENT_LOCKED | EXTENT_DELALLOC | 5258 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, 5259 &cached_state); 5260 5261 cond_resched(); 5262 spin_lock(&io_tree->lock); 5263 } 5264 spin_unlock(&io_tree->lock); 5265 } 5266 5267 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5268 struct btrfs_block_rsv *rsv) 5269 { 5270 struct btrfs_fs_info *fs_info = root->fs_info; 5271 struct btrfs_trans_handle *trans; 5272 u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); 5273 int ret; 5274 5275 /* 5276 * Eviction should be taking place at some place safe because of our 5277 * delayed iputs. 
However the normal flushing code will run delayed 5278 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5279 * 5280 * We reserve the delayed_refs_extra here again because we can't use 5281 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5282 * above. We reserve our extra bit here because we generate a ton of 5283 * delayed refs activity by truncating. 5284 * 5285 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5286 * if we fail to make this reservation we can re-try without the 5287 * delayed_refs_extra so we can make some forward progress. 5288 */ 5289 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5290 BTRFS_RESERVE_FLUSH_EVICT); 5291 if (ret) { 5292 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5293 BTRFS_RESERVE_FLUSH_EVICT); 5294 if (ret) { 5295 btrfs_warn(fs_info, 5296 "could not allocate space for delete; will truncate on mount"); 5297 return ERR_PTR(-ENOSPC); 5298 } 5299 delayed_refs_extra = 0; 5300 } 5301 5302 trans = btrfs_join_transaction(root); 5303 if (IS_ERR(trans)) 5304 return trans; 5305 5306 if (delayed_refs_extra) { 5307 trans->block_rsv = &fs_info->trans_block_rsv; 5308 trans->bytes_reserved = delayed_refs_extra; 5309 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5310 delayed_refs_extra, 1); 5311 } 5312 return trans; 5313 } 5314 5315 void btrfs_evict_inode(struct inode *inode) 5316 { 5317 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5318 struct btrfs_trans_handle *trans; 5319 struct btrfs_root *root = BTRFS_I(inode)->root; 5320 struct btrfs_block_rsv *rsv; 5321 int ret; 5322 5323 trace_btrfs_inode_evict(inode); 5324 5325 if (!root) { 5326 fsverity_cleanup_inode(inode); 5327 clear_inode(inode); 5328 return; 5329 } 5330 5331 evict_inode_truncate_pages(inode); 5332 5333 if (inode->i_nlink && 5334 ((btrfs_root_refs(&root->root_item) != 0 && 5335 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5336 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5337 goto no_delete; 5338 5339 if (is_bad_inode(inode)) 5340 goto no_delete; 5341 5342 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); 5343 5344 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5345 goto no_delete; 5346 5347 if (inode->i_nlink > 0) { 5348 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5349 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5350 goto no_delete; 5351 } 5352 5353 /* 5354 * This makes sure the inode item in tree is uptodate and the space for 5355 * the inode update is released. 5356 */ 5357 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5358 if (ret) 5359 goto no_delete; 5360 5361 /* 5362 * This drops any pending insert or delete operations we have for this 5363 * inode. We could have a delayed dir index deletion queued up, but 5364 * we're removing the inode completely so that'll be taken care of in 5365 * the truncate. 
5366 */ 5367 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5368 5369 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5370 if (!rsv) 5371 goto no_delete; 5372 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5373 rsv->failfast = 1; 5374 5375 btrfs_i_size_write(BTRFS_I(inode), 0); 5376 5377 while (1) { 5378 struct btrfs_truncate_control control = { 5379 .inode = BTRFS_I(inode), 5380 .ino = btrfs_ino(BTRFS_I(inode)), 5381 .new_size = 0, 5382 .min_type = 0, 5383 }; 5384 5385 trans = evict_refill_and_join(root, rsv); 5386 if (IS_ERR(trans)) 5387 goto free_rsv; 5388 5389 trans->block_rsv = rsv; 5390 5391 ret = btrfs_truncate_inode_items(trans, root, &control); 5392 trans->block_rsv = &fs_info->trans_block_rsv; 5393 btrfs_end_transaction(trans); 5394 btrfs_btree_balance_dirty(fs_info); 5395 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5396 goto free_rsv; 5397 else if (!ret) 5398 break; 5399 } 5400 5401 /* 5402 * Errors here aren't a big deal, it just means we leave orphan items in 5403 * the tree. They will be cleaned up on the next mount. If the inode 5404 * number gets reused, cleanup deletes the orphan item without doing 5405 * anything, and unlink reuses the existing orphan item. 5406 * 5407 * If it turns out that we are dropping too many of these, we might want 5408 * to add a mechanism for retrying these after a commit. 5409 */ 5410 trans = evict_refill_and_join(root, rsv); 5411 if (!IS_ERR(trans)) { 5412 trans->block_rsv = rsv; 5413 btrfs_orphan_del(trans, BTRFS_I(inode)); 5414 trans->block_rsv = &fs_info->trans_block_rsv; 5415 btrfs_end_transaction(trans); 5416 } 5417 5418 free_rsv: 5419 btrfs_free_block_rsv(fs_info, rsv); 5420 no_delete: 5421 /* 5422 * If we didn't successfully delete, the orphan item will still be in 5423 * the tree and we'll retry on the next mount. Again, we might also want 5424 * to retry these periodically in the future. 5425 */ 5426 btrfs_remove_delayed_node(BTRFS_I(inode)); 5427 fsverity_cleanup_inode(inode); 5428 clear_inode(inode); 5429 } 5430 5431 /* 5432 * Return the key found in the dir entry in the location pointer, fill @type 5433 * with BTRFS_FT_*, and return 0. 5434 * 5435 * If no dir entries were found, returns -ENOENT. 5436 * If found a corrupted location in dir entry, returns -EUCLEAN. 5437 */ 5438 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5439 struct btrfs_key *location, u8 *type) 5440 { 5441 const char *name = dentry->d_name.name; 5442 int namelen = dentry->d_name.len; 5443 struct btrfs_dir_item *di; 5444 struct btrfs_path *path; 5445 struct btrfs_root *root = BTRFS_I(dir)->root; 5446 int ret = 0; 5447 5448 path = btrfs_alloc_path(); 5449 if (!path) 5450 return -ENOMEM; 5451 5452 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), 5453 name, namelen, 0); 5454 if (IS_ERR_OR_NULL(di)) { 5455 ret = di ? 
PTR_ERR(di) : -ENOENT; 5456 goto out; 5457 } 5458 5459 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5460 if (location->type != BTRFS_INODE_ITEM_KEY && 5461 location->type != BTRFS_ROOT_ITEM_KEY) { 5462 ret = -EUCLEAN; 5463 btrfs_warn(root->fs_info, 5464 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5465 __func__, name, btrfs_ino(BTRFS_I(dir)), 5466 location->objectid, location->type, location->offset); 5467 } 5468 if (!ret) 5469 *type = btrfs_dir_type(path->nodes[0], di); 5470 out: 5471 btrfs_free_path(path); 5472 return ret; 5473 } 5474 5475 /* 5476 * when we hit a tree root in a directory, the btrfs part of the inode 5477 * needs to be changed to reflect the root directory of the tree root. This 5478 * is kind of like crossing a mount point. 5479 */ 5480 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5481 struct inode *dir, 5482 struct dentry *dentry, 5483 struct btrfs_key *location, 5484 struct btrfs_root **sub_root) 5485 { 5486 struct btrfs_path *path; 5487 struct btrfs_root *new_root; 5488 struct btrfs_root_ref *ref; 5489 struct extent_buffer *leaf; 5490 struct btrfs_key key; 5491 int ret; 5492 int err = 0; 5493 5494 path = btrfs_alloc_path(); 5495 if (!path) { 5496 err = -ENOMEM; 5497 goto out; 5498 } 5499 5500 err = -ENOENT; 5501 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5502 key.type = BTRFS_ROOT_REF_KEY; 5503 key.offset = location->objectid; 5504 5505 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5506 if (ret) { 5507 if (ret < 0) 5508 err = ret; 5509 goto out; 5510 } 5511 5512 leaf = path->nodes[0]; 5513 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5514 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || 5515 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5516 goto out; 5517 5518 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5519 (unsigned long)(ref + 1), 5520 dentry->d_name.len); 5521 if (ret) 5522 goto out; 5523 5524 btrfs_release_path(path); 5525 5526 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5527 if (IS_ERR(new_root)) { 5528 err = PTR_ERR(new_root); 5529 goto out; 5530 } 5531 5532 *sub_root = new_root; 5533 location->objectid = btrfs_root_dirid(&new_root->root_item); 5534 location->type = BTRFS_INODE_ITEM_KEY; 5535 location->offset = 0; 5536 err = 0; 5537 out: 5538 btrfs_free_path(path); 5539 return err; 5540 } 5541 5542 static void inode_tree_add(struct inode *inode) 5543 { 5544 struct btrfs_root *root = BTRFS_I(inode)->root; 5545 struct btrfs_inode *entry; 5546 struct rb_node **p; 5547 struct rb_node *parent; 5548 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5549 u64 ino = btrfs_ino(BTRFS_I(inode)); 5550 5551 if (inode_unhashed(inode)) 5552 return; 5553 parent = NULL; 5554 spin_lock(&root->inode_lock); 5555 p = &root->inode_tree.rb_node; 5556 while (*p) { 5557 parent = *p; 5558 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5559 5560 if (ino < btrfs_ino(entry)) 5561 p = &parent->rb_left; 5562 else if (ino > btrfs_ino(entry)) 5563 p = &parent->rb_right; 5564 else { 5565 WARN_ON(!(entry->vfs_inode.i_state & 5566 (I_WILL_FREE | I_FREEING))); 5567 rb_replace_node(parent, new, &root->inode_tree); 5568 RB_CLEAR_NODE(parent); 5569 spin_unlock(&root->inode_lock); 5570 return; 5571 } 5572 } 5573 rb_link_node(new, parent, p); 5574 rb_insert_color(new, &root->inode_tree); 5575 spin_unlock(&root->inode_lock); 5576 } 5577 5578 static void inode_tree_del(struct btrfs_inode *inode) 5579 { 
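/*
 * Remove the inode from the root's red-black tree of in-memory inodes.
 * If that leaves the tree empty and the root itself has no more
 * references (e.g. a deleted subvolume), add the root to the dead roots
 * list so it gets cleaned up.
 */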
5580 struct btrfs_root *root = inode->root; 5581 int empty = 0; 5582 5583 spin_lock(&root->inode_lock); 5584 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5585 rb_erase(&inode->rb_node, &root->inode_tree); 5586 RB_CLEAR_NODE(&inode->rb_node); 5587 empty = RB_EMPTY_ROOT(&root->inode_tree); 5588 } 5589 spin_unlock(&root->inode_lock); 5590 5591 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5592 spin_lock(&root->inode_lock); 5593 empty = RB_EMPTY_ROOT(&root->inode_tree); 5594 spin_unlock(&root->inode_lock); 5595 if (empty) 5596 btrfs_add_dead_root(root); 5597 } 5598 } 5599 5600 5601 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5602 { 5603 struct btrfs_iget_args *args = p; 5604 5605 inode->i_ino = args->ino; 5606 BTRFS_I(inode)->location.objectid = args->ino; 5607 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5608 BTRFS_I(inode)->location.offset = 0; 5609 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5610 BUG_ON(args->root && !BTRFS_I(inode)->root); 5611 return 0; 5612 } 5613 5614 static int btrfs_find_actor(struct inode *inode, void *opaque) 5615 { 5616 struct btrfs_iget_args *args = opaque; 5617 5618 return args->ino == BTRFS_I(inode)->location.objectid && 5619 args->root == BTRFS_I(inode)->root; 5620 } 5621 5622 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5623 struct btrfs_root *root) 5624 { 5625 struct inode *inode; 5626 struct btrfs_iget_args args; 5627 unsigned long hashval = btrfs_inode_hash(ino, root); 5628 5629 args.ino = ino; 5630 args.root = root; 5631 5632 inode = iget5_locked(s, hashval, btrfs_find_actor, 5633 btrfs_init_locked_inode, 5634 (void *)&args); 5635 return inode; 5636 } 5637 5638 /* 5639 * Get an inode object given its inode number and corresponding root. 5640 * Path can be preallocated to prevent recursing back to iget through 5641 * allocator. NULL is also valid but may require an additional allocation 5642 * later. 5643 */ 5644 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5645 struct btrfs_root *root, struct btrfs_path *path) 5646 { 5647 struct inode *inode; 5648 5649 inode = btrfs_iget_locked(s, ino, root); 5650 if (!inode) 5651 return ERR_PTR(-ENOMEM); 5652 5653 if (inode->i_state & I_NEW) { 5654 int ret; 5655 5656 ret = btrfs_read_locked_inode(inode, path); 5657 if (!ret) { 5658 inode_tree_add(inode); 5659 unlock_new_inode(inode); 5660 } else { 5661 iget_failed(inode); 5662 /* 5663 * ret > 0 can come from btrfs_search_slot called by 5664 * btrfs_read_locked_inode, this means the inode item 5665 * was not found. 
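 * Translate that to -ENOENT so callers see an ordinary lookup failure.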
5666 */ 5667 if (ret > 0) 5668 ret = -ENOENT; 5669 inode = ERR_PTR(ret); 5670 } 5671 } 5672 5673 return inode; 5674 } 5675 5676 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5677 { 5678 return btrfs_iget_path(s, ino, root, NULL); 5679 } 5680 5681 static struct inode *new_simple_dir(struct super_block *s, 5682 struct btrfs_key *key, 5683 struct btrfs_root *root) 5684 { 5685 struct inode *inode = new_inode(s); 5686 5687 if (!inode) 5688 return ERR_PTR(-ENOMEM); 5689 5690 BTRFS_I(inode)->root = btrfs_grab_root(root); 5691 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5692 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5693 5694 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5695 /* 5696 * We only need lookup, the rest is read-only and there's no inode 5697 * associated with the dentry 5698 */ 5699 inode->i_op = &simple_dir_inode_operations; 5700 inode->i_opflags &= ~IOP_XATTR; 5701 inode->i_fop = &simple_dir_operations; 5702 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5703 inode->i_mtime = current_time(inode); 5704 inode->i_atime = inode->i_mtime; 5705 inode->i_ctime = inode->i_mtime; 5706 BTRFS_I(inode)->i_otime = inode->i_mtime; 5707 5708 return inode; 5709 } 5710 5711 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5712 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5713 static_assert(BTRFS_FT_DIR == FT_DIR); 5714 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5715 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5716 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5717 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5718 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5719 5720 static inline u8 btrfs_inode_type(struct inode *inode) 5721 { 5722 return fs_umode_to_ftype(inode->i_mode); 5723 } 5724 5725 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5726 { 5727 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5728 struct inode *inode; 5729 struct btrfs_root *root = BTRFS_I(dir)->root; 5730 struct btrfs_root *sub_root = root; 5731 struct btrfs_key location; 5732 u8 di_type = 0; 5733 int ret = 0; 5734 5735 if (dentry->d_name.len > BTRFS_NAME_LEN) 5736 return ERR_PTR(-ENAMETOOLONG); 5737 5738 ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); 5739 if (ret < 0) 5740 return ERR_PTR(ret); 5741 5742 if (location.type == BTRFS_INODE_ITEM_KEY) { 5743 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5744 if (IS_ERR(inode)) 5745 return inode; 5746 5747 /* Do extra check against inode mode with di_type */ 5748 if (btrfs_inode_type(inode) != di_type) { 5749 btrfs_crit(fs_info, 5750 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5751 inode->i_mode, btrfs_inode_type(inode), 5752 di_type); 5753 iput(inode); 5754 return ERR_PTR(-EUCLEAN); 5755 } 5756 return inode; 5757 } 5758 5759 ret = fixup_tree_root_location(fs_info, dir, dentry, 5760 &location, &sub_root); 5761 if (ret < 0) { 5762 if (ret != -ENOENT) 5763 inode = ERR_PTR(ret); 5764 else 5765 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5766 } else { 5767 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5768 } 5769 if (root != sub_root) 5770 btrfs_put_root(sub_root); 5771 5772 if (!IS_ERR(inode) && root != sub_root) { 5773 down_read(&fs_info->cleanup_work_sem); 5774 if (!sb_rdonly(inode->i_sb)) 5775 ret = btrfs_orphan_cleanup(sub_root); 5776 up_read(&fs_info->cleanup_work_sem); 5777 if (ret) { 5778 iput(inode); 5779 inode = ERR_PTR(ret); 5780 } 5781 } 5782 5783 return inode; 5784 } 5785 5786 static int 
btrfs_dentry_delete(const struct dentry *dentry) 5787 { 5788 struct btrfs_root *root; 5789 struct inode *inode = d_inode(dentry); 5790 5791 if (!inode && !IS_ROOT(dentry)) 5792 inode = d_inode(dentry->d_parent); 5793 5794 if (inode) { 5795 root = BTRFS_I(inode)->root; 5796 if (btrfs_root_refs(&root->root_item) == 0) 5797 return 1; 5798 5799 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5800 return 1; 5801 } 5802 return 0; 5803 } 5804 5805 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5806 unsigned int flags) 5807 { 5808 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5809 5810 if (inode == ERR_PTR(-ENOENT)) 5811 inode = NULL; 5812 return d_splice_alias(inode, dentry); 5813 } 5814 5815 /* 5816 * All this infrastructure exists because dir_emit can fault, and we are holding 5817 * the tree lock when doing readdir. For now just allocate a buffer and copy 5818 * our information into that, and then dir_emit from the buffer. This is 5819 * similar to what NFS does, only we don't keep the buffer around in pagecache 5820 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5821 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5822 * tree lock. 5823 */ 5824 static int btrfs_opendir(struct inode *inode, struct file *file) 5825 { 5826 struct btrfs_file_private *private; 5827 5828 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5829 if (!private) 5830 return -ENOMEM; 5831 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5832 if (!private->filldir_buf) { 5833 kfree(private); 5834 return -ENOMEM; 5835 } 5836 file->private_data = private; 5837 return 0; 5838 } 5839 5840 struct dir_entry { 5841 u64 ino; 5842 u64 offset; 5843 unsigned type; 5844 int name_len; 5845 }; 5846 5847 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5848 { 5849 while (entries--) { 5850 struct dir_entry *entry = addr; 5851 char *name = (char *)(entry + 1); 5852 5853 ctx->pos = get_unaligned(&entry->offset); 5854 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5855 get_unaligned(&entry->ino), 5856 get_unaligned(&entry->type))) 5857 return 1; 5858 addr += sizeof(struct dir_entry) + 5859 get_unaligned(&entry->name_len); 5860 ctx->pos++; 5861 } 5862 return 0; 5863 } 5864 5865 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5866 { 5867 struct inode *inode = file_inode(file); 5868 struct btrfs_root *root = BTRFS_I(inode)->root; 5869 struct btrfs_file_private *private = file->private_data; 5870 struct btrfs_dir_item *di; 5871 struct btrfs_key key; 5872 struct btrfs_key found_key; 5873 struct btrfs_path *path; 5874 void *addr; 5875 struct list_head ins_list; 5876 struct list_head del_list; 5877 int ret; 5878 char *name_ptr; 5879 int name_len; 5880 int entries = 0; 5881 int total_len = 0; 5882 bool put = false; 5883 struct btrfs_key location; 5884 5885 if (!dir_emit_dots(file, ctx)) 5886 return 0; 5887 5888 path = btrfs_alloc_path(); 5889 if (!path) 5890 return -ENOMEM; 5891 5892 addr = private->filldir_buf; 5893 path->reada = READA_FORWARD; 5894 5895 INIT_LIST_HEAD(&ins_list); 5896 INIT_LIST_HEAD(&del_list); 5897 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 5898 5899 again: 5900 key.type = BTRFS_DIR_INDEX_KEY; 5901 key.offset = ctx->pos; 5902 key.objectid = btrfs_ino(BTRFS_I(inode)); 5903 5904 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5905 struct dir_entry *entry; 5906 struct extent_buffer *leaf = 
path->nodes[0]; 5907 5908 if (found_key.objectid != key.objectid) 5909 break; 5910 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5911 break; 5912 if (found_key.offset < ctx->pos) 5913 continue; 5914 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5915 continue; 5916 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5917 name_len = btrfs_dir_name_len(leaf, di); 5918 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5919 PAGE_SIZE) { 5920 btrfs_release_path(path); 5921 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5922 if (ret) 5923 goto nopos; 5924 addr = private->filldir_buf; 5925 entries = 0; 5926 total_len = 0; 5927 goto again; 5928 } 5929 5930 entry = addr; 5931 put_unaligned(name_len, &entry->name_len); 5932 name_ptr = (char *)(entry + 1); 5933 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), 5934 name_len); 5935 put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)), 5936 &entry->type); 5937 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5938 put_unaligned(location.objectid, &entry->ino); 5939 put_unaligned(found_key.offset, &entry->offset); 5940 entries++; 5941 addr += sizeof(struct dir_entry) + name_len; 5942 total_len += sizeof(struct dir_entry) + name_len; 5943 } 5944 /* Catch error encountered during iteration */ 5945 if (ret < 0) 5946 goto err; 5947 5948 btrfs_release_path(path); 5949 5950 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5951 if (ret) 5952 goto nopos; 5953 5954 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5955 if (ret) 5956 goto nopos; 5957 5958 /* 5959 * Stop new entries from being returned after we return the last 5960 * entry. 5961 * 5962 * New directory entries are assigned a strictly increasing 5963 * offset. This means that new entries created during readdir 5964 * are *guaranteed* to be seen in the future by that readdir. 5965 * This has broken buggy programs which operate on names as 5966 * they're returned by readdir. Until we re-use freed offsets 5967 * we have this hack to stop new entries from being returned 5968 * under the assumption that they'll never reach this huge 5969 * offset. 5970 * 5971 * This is being careful not to overflow 32bit loff_t unless the 5972 * last entry requires it because doing so has broken 32bit apps 5973 * in the past. 5974 */ 5975 if (ctx->pos >= INT_MAX) 5976 ctx->pos = LLONG_MAX; 5977 else 5978 ctx->pos = INT_MAX; 5979 nopos: 5980 ret = 0; 5981 err: 5982 if (put) 5983 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 5984 btrfs_free_path(path); 5985 return ret; 5986 } 5987 5988 /* 5989 * This is somewhat expensive, updating the tree every time the 5990 * inode changes. But, it is most likely to find the inode in cache. 5991 * FIXME, needs more benchmarking...there are no reasons other than performance 5992 * to keep or drop this code. 
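 * We first try to join the current transaction; only if updating the
 * inode fails with ENOSPC or EDQUOT do we fall back to starting a new
 * transaction with space reserved for the inode update.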
5993 */ 5994 static int btrfs_dirty_inode(struct inode *inode) 5995 { 5996 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5997 struct btrfs_root *root = BTRFS_I(inode)->root; 5998 struct btrfs_trans_handle *trans; 5999 int ret; 6000 6001 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 6002 return 0; 6003 6004 trans = btrfs_join_transaction(root); 6005 if (IS_ERR(trans)) 6006 return PTR_ERR(trans); 6007 6008 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6009 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { 6010 /* whoops, lets try again with the full transaction */ 6011 btrfs_end_transaction(trans); 6012 trans = btrfs_start_transaction(root, 1); 6013 if (IS_ERR(trans)) 6014 return PTR_ERR(trans); 6015 6016 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6017 } 6018 btrfs_end_transaction(trans); 6019 if (BTRFS_I(inode)->delayed_node) 6020 btrfs_balance_delayed_items(fs_info); 6021 6022 return ret; 6023 } 6024 6025 /* 6026 * This is a copy of file_update_time. We need this so we can return error on 6027 * ENOSPC for updating the inode in the case of file write and mmap writes. 6028 */ 6029 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6030 int flags) 6031 { 6032 struct btrfs_root *root = BTRFS_I(inode)->root; 6033 bool dirty = flags & ~S_VERSION; 6034 6035 if (btrfs_root_readonly(root)) 6036 return -EROFS; 6037 6038 if (flags & S_VERSION) 6039 dirty |= inode_maybe_inc_iversion(inode, dirty); 6040 if (flags & S_CTIME) 6041 inode->i_ctime = *now; 6042 if (flags & S_MTIME) 6043 inode->i_mtime = *now; 6044 if (flags & S_ATIME) 6045 inode->i_atime = *now; 6046 return dirty ? btrfs_dirty_inode(inode) : 0; 6047 } 6048 6049 /* 6050 * find the highest existing sequence number in a directory 6051 * and then set the in-memory index_cnt variable to reflect 6052 * free sequence numbers 6053 */ 6054 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6055 { 6056 struct btrfs_root *root = inode->root; 6057 struct btrfs_key key, found_key; 6058 struct btrfs_path *path; 6059 struct extent_buffer *leaf; 6060 int ret; 6061 6062 key.objectid = btrfs_ino(inode); 6063 key.type = BTRFS_DIR_INDEX_KEY; 6064 key.offset = (u64)-1; 6065 6066 path = btrfs_alloc_path(); 6067 if (!path) 6068 return -ENOMEM; 6069 6070 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6071 if (ret < 0) 6072 goto out; 6073 /* FIXME: we should be able to handle this */ 6074 if (ret == 0) 6075 goto out; 6076 ret = 0; 6077 6078 if (path->slots[0] == 0) { 6079 inode->index_cnt = BTRFS_DIR_START_INDEX; 6080 goto out; 6081 } 6082 6083 path->slots[0]--; 6084 6085 leaf = path->nodes[0]; 6086 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6087 6088 if (found_key.objectid != btrfs_ino(inode) || 6089 found_key.type != BTRFS_DIR_INDEX_KEY) { 6090 inode->index_cnt = BTRFS_DIR_START_INDEX; 6091 goto out; 6092 } 6093 6094 inode->index_cnt = found_key.offset + 1; 6095 out: 6096 btrfs_free_path(path); 6097 return ret; 6098 } 6099 6100 /* 6101 * helper to find a free sequence number in a given directory. 
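 * The next free index is cached in dir->index_cnt; when the cache is not
 * initialized it is rebuilt from the delayed items or, failing that, from
 * the highest DIR_INDEX key found on disk.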
This current 6102 * code is very simple, later versions will do smarter things in the btree 6103 */ 6104 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6105 { 6106 int ret = 0; 6107 6108 if (dir->index_cnt == (u64)-1) { 6109 ret = btrfs_inode_delayed_dir_index_count(dir); 6110 if (ret) { 6111 ret = btrfs_set_inode_index_count(dir); 6112 if (ret) 6113 return ret; 6114 } 6115 } 6116 6117 *index = dir->index_cnt; 6118 dir->index_cnt++; 6119 6120 return ret; 6121 } 6122 6123 static int btrfs_insert_inode_locked(struct inode *inode) 6124 { 6125 struct btrfs_iget_args args; 6126 6127 args.ino = BTRFS_I(inode)->location.objectid; 6128 args.root = BTRFS_I(inode)->root; 6129 6130 return insert_inode_locked4(inode, 6131 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6132 btrfs_find_actor, &args); 6133 } 6134 6135 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6136 unsigned int *trans_num_items) 6137 { 6138 struct inode *dir = args->dir; 6139 struct inode *inode = args->inode; 6140 int ret; 6141 6142 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6143 if (ret) 6144 return ret; 6145 6146 /* 1 to add inode item */ 6147 *trans_num_items = 1; 6148 /* 1 to add compression property */ 6149 if (BTRFS_I(dir)->prop_compress) 6150 (*trans_num_items)++; 6151 /* 1 to add default ACL xattr */ 6152 if (args->default_acl) 6153 (*trans_num_items)++; 6154 /* 1 to add access ACL xattr */ 6155 if (args->acl) 6156 (*trans_num_items)++; 6157 #ifdef CONFIG_SECURITY 6158 /* 1 to add LSM xattr */ 6159 if (dir->i_security) 6160 (*trans_num_items)++; 6161 #endif 6162 if (args->orphan) { 6163 /* 1 to add orphan item */ 6164 (*trans_num_items)++; 6165 } else { 6166 /* 6167 * 1 to add dir item 6168 * 1 to add dir index 6169 * 1 to update parent inode item 6170 * 6171 * No need for 1 unit for the inode ref item because it is 6172 * inserted in a batch together with the inode item at 6173 * btrfs_create_new_inode(). 6174 */ 6175 *trans_num_items += 3; 6176 } 6177 return 0; 6178 } 6179 6180 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6181 { 6182 posix_acl_release(args->acl); 6183 posix_acl_release(args->default_acl); 6184 } 6185 6186 /* 6187 * Inherit flags from the parent inode. 6188 * 6189 * Currently only the compression flags and the cow flags are inherited. 6190 */ 6191 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) 6192 { 6193 unsigned int flags; 6194 6195 flags = BTRFS_I(dir)->flags; 6196 6197 if (flags & BTRFS_INODE_NOCOMPRESS) { 6198 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; 6199 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 6200 } else if (flags & BTRFS_INODE_COMPRESS) { 6201 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; 6202 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; 6203 } 6204 6205 if (flags & BTRFS_INODE_NODATACOW) { 6206 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; 6207 if (S_ISREG(inode->i_mode)) 6208 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6209 } 6210 6211 btrfs_sync_inode_flags_to_i_flags(inode); 6212 } 6213 6214 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6215 struct btrfs_new_inode_args *args) 6216 { 6217 struct inode *dir = args->dir; 6218 struct inode *inode = args->inode; 6219 const char *name = args->orphan ? NULL : args->dentry->d_name.name; 6220 int name_len = args->orphan ? 
0 : args->dentry->d_name.len; 6221 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6222 struct btrfs_root *root; 6223 struct btrfs_inode_item *inode_item; 6224 struct btrfs_key *location; 6225 struct btrfs_path *path; 6226 u64 objectid; 6227 struct btrfs_inode_ref *ref; 6228 struct btrfs_key key[2]; 6229 u32 sizes[2]; 6230 struct btrfs_item_batch batch; 6231 unsigned long ptr; 6232 int ret; 6233 6234 path = btrfs_alloc_path(); 6235 if (!path) 6236 return -ENOMEM; 6237 6238 if (!args->subvol) 6239 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6240 root = BTRFS_I(inode)->root; 6241 6242 ret = btrfs_get_free_objectid(root, &objectid); 6243 if (ret) 6244 goto out; 6245 inode->i_ino = objectid; 6246 6247 if (args->orphan) { 6248 /* 6249 * O_TMPFILE, set link count to 0, so that after this point, we 6250 * fill in an inode item with the correct link count. 6251 */ 6252 set_nlink(inode, 0); 6253 } else { 6254 trace_btrfs_inode_request(dir); 6255 6256 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6257 if (ret) 6258 goto out; 6259 } 6260 /* index_cnt is ignored for everything but a dir. */ 6261 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6262 BTRFS_I(inode)->generation = trans->transid; 6263 inode->i_generation = BTRFS_I(inode)->generation; 6264 6265 /* 6266 * Subvolumes don't inherit flags from their parent directory. 6267 * Originally this was probably by accident, but we probably can't 6268 * change it now without compatibility issues. 6269 */ 6270 if (!args->subvol) 6271 btrfs_inherit_iflags(inode, dir); 6272 6273 if (S_ISREG(inode->i_mode)) { 6274 if (btrfs_test_opt(fs_info, NODATASUM)) 6275 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6276 if (btrfs_test_opt(fs_info, NODATACOW)) 6277 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6278 BTRFS_INODE_NODATASUM; 6279 } 6280 6281 location = &BTRFS_I(inode)->location; 6282 location->objectid = objectid; 6283 location->offset = 0; 6284 location->type = BTRFS_INODE_ITEM_KEY; 6285 6286 ret = btrfs_insert_inode_locked(inode); 6287 if (ret < 0) { 6288 if (!args->orphan) 6289 BTRFS_I(dir)->index_cnt--; 6290 goto out; 6291 } 6292 6293 /* 6294 * We could have gotten an inode number from somebody who was fsynced 6295 * and then removed in this same transaction, so let's just set full 6296 * sync since it will be a full sync anyway and this will blow away the 6297 * old info in the log. 6298 */ 6299 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6300 6301 key[0].objectid = objectid; 6302 key[0].type = BTRFS_INODE_ITEM_KEY; 6303 key[0].offset = 0; 6304 6305 sizes[0] = sizeof(struct btrfs_inode_item); 6306 6307 if (!args->orphan) { 6308 /* 6309 * Start new inodes with an inode_ref. This is slightly more 6310 * efficient for small numbers of hard links since they will 6311 * be packed into one item. Extended refs will kick in if we 6312 * add more hard links than can fit in the ref item. 6313 */ 6314 key[1].objectid = objectid; 6315 key[1].type = BTRFS_INODE_REF_KEY; 6316 if (args->subvol) { 6317 key[1].offset = objectid; 6318 sizes[1] = 2 + sizeof(*ref); 6319 } else { 6320 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6321 sizes[1] = name_len + sizeof(*ref); 6322 } 6323 } 6324 6325 batch.keys = &key[0]; 6326 batch.data_sizes = &sizes[0]; 6327 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6328 batch.nr = args->orphan ? 
1 : 2; 6329 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6330 if (ret != 0) { 6331 btrfs_abort_transaction(trans, ret); 6332 goto discard; 6333 } 6334 6335 inode->i_mtime = current_time(inode); 6336 inode->i_atime = inode->i_mtime; 6337 inode->i_ctime = inode->i_mtime; 6338 BTRFS_I(inode)->i_otime = inode->i_mtime; 6339 6340 /* 6341 * We're going to fill the inode item now, so at this point the inode 6342 * must be fully initialized. 6343 */ 6344 6345 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6346 struct btrfs_inode_item); 6347 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6348 sizeof(*inode_item)); 6349 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6350 6351 if (!args->orphan) { 6352 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6353 struct btrfs_inode_ref); 6354 ptr = (unsigned long)(ref + 1); 6355 if (args->subvol) { 6356 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6357 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6358 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6359 } else { 6360 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6361 btrfs_set_inode_ref_index(path->nodes[0], ref, 6362 BTRFS_I(inode)->dir_index); 6363 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6364 } 6365 } 6366 6367 btrfs_mark_buffer_dirty(path->nodes[0]); 6368 btrfs_release_path(path); 6369 6370 if (args->subvol) { 6371 struct inode *parent; 6372 6373 /* 6374 * Subvolumes inherit properties from their parent subvolume, 6375 * not the directory they were created in. 6376 */ 6377 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6378 BTRFS_I(dir)->root); 6379 if (IS_ERR(parent)) { 6380 ret = PTR_ERR(parent); 6381 } else { 6382 ret = btrfs_inode_inherit_props(trans, inode, parent); 6383 iput(parent); 6384 } 6385 } else { 6386 ret = btrfs_inode_inherit_props(trans, inode, dir); 6387 } 6388 if (ret) { 6389 btrfs_err(fs_info, 6390 "error inheriting props for ino %llu (root %llu): %d", 6391 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6392 ret); 6393 } 6394 6395 /* 6396 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6397 * probably a bug. 6398 */ 6399 if (!args->subvol) { 6400 ret = btrfs_init_inode_security(trans, args); 6401 if (ret) { 6402 btrfs_abort_transaction(trans, ret); 6403 goto discard; 6404 } 6405 } 6406 6407 inode_tree_add(inode); 6408 6409 trace_btrfs_inode_new(inode); 6410 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6411 6412 btrfs_update_root_times(trans, root); 6413 6414 if (args->orphan) { 6415 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6416 } else { 6417 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6418 name_len, 0, BTRFS_I(inode)->dir_index); 6419 } 6420 if (ret) { 6421 btrfs_abort_transaction(trans, ret); 6422 goto discard; 6423 } 6424 6425 ret = 0; 6426 goto out; 6427 6428 discard: 6429 /* 6430 * discard_new_inode() calls iput(), but the caller owns the reference 6431 * to the inode. 6432 */ 6433 ihold(inode); 6434 discard_new_inode(inode); 6435 out: 6436 btrfs_free_path(path); 6437 return ret; 6438 } 6439 6440 /* 6441 * utility function to add 'inode' into 'parent_inode' with 6442 * a give name and a given sequence number. 6443 * if 'add_backref' is true, also insert a backref from the 6444 * inode to the parent directory. 
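 * If inserting the dir item afterwards fails with -EEXIST or -EOVERFLOW,
 * the ref that was just inserted is removed again so no stale reference
 * is left behind, and the original error is returned.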
6445 */ 6446 int btrfs_add_link(struct btrfs_trans_handle *trans, 6447 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6448 const char *name, int name_len, int add_backref, u64 index) 6449 { 6450 int ret = 0; 6451 struct btrfs_key key; 6452 struct btrfs_root *root = parent_inode->root; 6453 u64 ino = btrfs_ino(inode); 6454 u64 parent_ino = btrfs_ino(parent_inode); 6455 6456 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6457 memcpy(&key, &inode->root->root_key, sizeof(key)); 6458 } else { 6459 key.objectid = ino; 6460 key.type = BTRFS_INODE_ITEM_KEY; 6461 key.offset = 0; 6462 } 6463 6464 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6465 ret = btrfs_add_root_ref(trans, key.objectid, 6466 root->root_key.objectid, parent_ino, 6467 index, name, name_len); 6468 } else if (add_backref) { 6469 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6470 parent_ino, index); 6471 } 6472 6473 /* Nothing to clean up yet */ 6474 if (ret) 6475 return ret; 6476 6477 ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key, 6478 btrfs_inode_type(&inode->vfs_inode), index); 6479 if (ret == -EEXIST || ret == -EOVERFLOW) 6480 goto fail_dir_item; 6481 else if (ret) { 6482 btrfs_abort_transaction(trans, ret); 6483 return ret; 6484 } 6485 6486 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6487 name_len * 2); 6488 inode_inc_iversion(&parent_inode->vfs_inode); 6489 /* 6490 * If we are replaying a log tree, we do not want to update the mtime 6491 * and ctime of the parent directory with the current time, since the 6492 * log replay procedure is responsible for setting them to their correct 6493 * values (the ones it had when the fsync was done). 6494 */ 6495 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6496 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6497 6498 parent_inode->vfs_inode.i_mtime = now; 6499 parent_inode->vfs_inode.i_ctime = now; 6500 } 6501 ret = btrfs_update_inode(trans, root, parent_inode); 6502 if (ret) 6503 btrfs_abort_transaction(trans, ret); 6504 return ret; 6505 6506 fail_dir_item: 6507 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6508 u64 local_index; 6509 int err; 6510 err = btrfs_del_root_ref(trans, key.objectid, 6511 root->root_key.objectid, parent_ino, 6512 &local_index, name, name_len); 6513 if (err) 6514 btrfs_abort_transaction(trans, err); 6515 } else if (add_backref) { 6516 u64 local_index; 6517 int err; 6518 6519 err = btrfs_del_inode_ref(trans, root, name, name_len, 6520 ino, parent_ino, &local_index); 6521 if (err) 6522 btrfs_abort_transaction(trans, err); 6523 } 6524 6525 /* Return the original error code */ 6526 return ret; 6527 } 6528 6529 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6530 struct inode *inode) 6531 { 6532 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6533 struct btrfs_root *root = BTRFS_I(dir)->root; 6534 struct btrfs_new_inode_args new_inode_args = { 6535 .dir = dir, 6536 .dentry = dentry, 6537 .inode = inode, 6538 }; 6539 unsigned int trans_num_items; 6540 struct btrfs_trans_handle *trans; 6541 int err; 6542 6543 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6544 if (err) 6545 goto out_inode; 6546 6547 trans = btrfs_start_transaction(root, trans_num_items); 6548 if (IS_ERR(trans)) { 6549 err = PTR_ERR(trans); 6550 goto out_new_inode_args; 6551 } 6552 6553 err = btrfs_create_new_inode(trans, &new_inode_args); 6554 if (!err) 6555 d_instantiate_new(dentry, inode); 6556 6557 btrfs_end_transaction(trans); 
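/* Throttle on dirty btree pages so metadata writeback can keep up. */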
6558 btrfs_btree_balance_dirty(fs_info); 6559 out_new_inode_args: 6560 btrfs_new_inode_args_destroy(&new_inode_args); 6561 out_inode: 6562 if (err) 6563 iput(inode); 6564 return err; 6565 } 6566 6567 static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, 6568 struct dentry *dentry, umode_t mode, dev_t rdev) 6569 { 6570 struct inode *inode; 6571 6572 inode = new_inode(dir->i_sb); 6573 if (!inode) 6574 return -ENOMEM; 6575 inode_init_owner(mnt_userns, inode, dir, mode); 6576 inode->i_op = &btrfs_special_inode_operations; 6577 init_special_inode(inode, inode->i_mode, rdev); 6578 return btrfs_create_common(dir, dentry, inode); 6579 } 6580 6581 static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir, 6582 struct dentry *dentry, umode_t mode, bool excl) 6583 { 6584 struct inode *inode; 6585 6586 inode = new_inode(dir->i_sb); 6587 if (!inode) 6588 return -ENOMEM; 6589 inode_init_owner(mnt_userns, inode, dir, mode); 6590 inode->i_fop = &btrfs_file_operations; 6591 inode->i_op = &btrfs_file_inode_operations; 6592 inode->i_mapping->a_ops = &btrfs_aops; 6593 return btrfs_create_common(dir, dentry, inode); 6594 } 6595 6596 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6597 struct dentry *dentry) 6598 { 6599 struct btrfs_trans_handle *trans = NULL; 6600 struct btrfs_root *root = BTRFS_I(dir)->root; 6601 struct inode *inode = d_inode(old_dentry); 6602 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6603 u64 index; 6604 int err; 6605 int drop_inode = 0; 6606 6607 /* do not allow sys_link's with other subvols of the same device */ 6608 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6609 return -EXDEV; 6610 6611 if (inode->i_nlink >= BTRFS_LINK_MAX) 6612 return -EMLINK; 6613 6614 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6615 if (err) 6616 goto fail; 6617 6618 /* 6619 * 2 items for inode and inode ref 6620 * 2 items for dir items 6621 * 1 item for parent inode 6622 * 1 item for orphan item deletion if O_TMPFILE 6623 */ 6624 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6625 if (IS_ERR(trans)) { 6626 err = PTR_ERR(trans); 6627 trans = NULL; 6628 goto fail; 6629 } 6630 6631 /* There are several dir indexes for this inode, clear the cache. */ 6632 BTRFS_I(inode)->dir_index = 0ULL; 6633 inc_nlink(inode); 6634 inode_inc_iversion(inode); 6635 inode->i_ctime = current_time(inode); 6636 ihold(inode); 6637 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6638 6639 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6640 dentry->d_name.name, dentry->d_name.len, 1, index); 6641 6642 if (err) { 6643 drop_inode = 1; 6644 } else { 6645 struct dentry *parent = dentry->d_parent; 6646 6647 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6648 if (err) 6649 goto fail; 6650 if (inode->i_nlink == 1) { 6651 /* 6652 * If new hard link count is 1, it's a file created 6653 * with open(2) O_TMPFILE flag. 
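 * Such an inode got an orphan item when it was created, and now that it
 * has a name we must delete that orphan item.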
6654 */ 6655 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6656 if (err) 6657 goto fail; 6658 } 6659 d_instantiate(dentry, inode); 6660 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6661 } 6662 6663 fail: 6664 if (trans) 6665 btrfs_end_transaction(trans); 6666 if (drop_inode) { 6667 inode_dec_link_count(inode); 6668 iput(inode); 6669 } 6670 btrfs_btree_balance_dirty(fs_info); 6671 return err; 6672 } 6673 6674 static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 6675 struct dentry *dentry, umode_t mode) 6676 { 6677 struct inode *inode; 6678 6679 inode = new_inode(dir->i_sb); 6680 if (!inode) 6681 return -ENOMEM; 6682 inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode); 6683 inode->i_op = &btrfs_dir_inode_operations; 6684 inode->i_fop = &btrfs_dir_file_operations; 6685 return btrfs_create_common(dir, dentry, inode); 6686 } 6687 6688 static noinline int uncompress_inline(struct btrfs_path *path, 6689 struct page *page, 6690 size_t pg_offset, u64 extent_offset, 6691 struct btrfs_file_extent_item *item) 6692 { 6693 int ret; 6694 struct extent_buffer *leaf = path->nodes[0]; 6695 char *tmp; 6696 size_t max_size; 6697 unsigned long inline_size; 6698 unsigned long ptr; 6699 int compress_type; 6700 6701 WARN_ON(pg_offset != 0); 6702 compress_type = btrfs_file_extent_compression(leaf, item); 6703 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6704 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6705 tmp = kmalloc(inline_size, GFP_NOFS); 6706 if (!tmp) 6707 return -ENOMEM; 6708 ptr = btrfs_file_extent_inline_start(item); 6709 6710 read_extent_buffer(leaf, tmp, ptr, inline_size); 6711 6712 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6713 ret = btrfs_decompress(compress_type, tmp, page, 6714 extent_offset, inline_size, max_size); 6715 6716 /* 6717 * decompression code contains a memset to fill in any space between the end 6718 * of the uncompressed data and the end of max_size in case the decompressed 6719 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6720 * the end of an inline extent and the beginning of the next block, so we 6721 * cover that region here. 6722 */ 6723 6724 if (max_size + pg_offset < PAGE_SIZE) 6725 memzero_page(page, pg_offset + max_size, 6726 PAGE_SIZE - max_size - pg_offset); 6727 kfree(tmp); 6728 return ret; 6729 } 6730 6731 /** 6732 * btrfs_get_extent - Lookup the first extent overlapping a range in a file. 6733 * @inode: file to search in 6734 * @page: page to read extent data into if the extent is inline 6735 * @pg_offset: offset into @page to copy to 6736 * @start: file offset 6737 * @len: length of range starting at @start 6738 * 6739 * This returns the first &struct extent_map which overlaps with the given 6740 * range, reading it from the B-tree and caching it if necessary. Note that 6741 * there may be more extents which overlap the given range after the returned 6742 * extent_map. 6743 * 6744 * If @page is not NULL and the extent is inline, this also reads the extent 6745 * data directly into the page and marks the extent up to date in the io_tree. 6746 * 6747 * Return: ERR_PTR on error, non-NULL extent_map on success. 
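 * The returned extent_map is reference counted, so the caller must release
 * it with free_extent_map() once done with it.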
6748 */ 6749 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6750 struct page *page, size_t pg_offset, 6751 u64 start, u64 len) 6752 { 6753 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6754 int ret = 0; 6755 u64 extent_start = 0; 6756 u64 extent_end = 0; 6757 u64 objectid = btrfs_ino(inode); 6758 int extent_type = -1; 6759 struct btrfs_path *path = NULL; 6760 struct btrfs_root *root = inode->root; 6761 struct btrfs_file_extent_item *item; 6762 struct extent_buffer *leaf; 6763 struct btrfs_key found_key; 6764 struct extent_map *em = NULL; 6765 struct extent_map_tree *em_tree = &inode->extent_tree; 6766 struct extent_io_tree *io_tree = &inode->io_tree; 6767 6768 read_lock(&em_tree->lock); 6769 em = lookup_extent_mapping(em_tree, start, len); 6770 read_unlock(&em_tree->lock); 6771 6772 if (em) { 6773 if (em->start > start || em->start + em->len <= start) 6774 free_extent_map(em); 6775 else if (em->block_start == EXTENT_MAP_INLINE && page) 6776 free_extent_map(em); 6777 else 6778 goto out; 6779 } 6780 em = alloc_extent_map(); 6781 if (!em) { 6782 ret = -ENOMEM; 6783 goto out; 6784 } 6785 em->start = EXTENT_MAP_HOLE; 6786 em->orig_start = EXTENT_MAP_HOLE; 6787 em->len = (u64)-1; 6788 em->block_len = (u64)-1; 6789 6790 path = btrfs_alloc_path(); 6791 if (!path) { 6792 ret = -ENOMEM; 6793 goto out; 6794 } 6795 6796 /* Chances are we'll be called again, so go ahead and do readahead */ 6797 path->reada = READA_FORWARD; 6798 6799 /* 6800 * The same explanation in load_free_space_cache applies here as well, 6801 * we only read when we're loading the free space cache, and at that 6802 * point the commit_root has everything we need. 6803 */ 6804 if (btrfs_is_free_space_inode(inode)) { 6805 path->search_commit_root = 1; 6806 path->skip_locking = 1; 6807 } 6808 6809 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6810 if (ret < 0) { 6811 goto out; 6812 } else if (ret > 0) { 6813 if (path->slots[0] == 0) 6814 goto not_found; 6815 path->slots[0]--; 6816 ret = 0; 6817 } 6818 6819 leaf = path->nodes[0]; 6820 item = btrfs_item_ptr(leaf, path->slots[0], 6821 struct btrfs_file_extent_item); 6822 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6823 if (found_key.objectid != objectid || 6824 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6825 /* 6826 * If we backup past the first extent we want to move forward 6827 * and see if there is an extent in front of us, otherwise we'll 6828 * say there is a hole for our whole search range which can 6829 * cause problems. 
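 * In that case pretend the extent we found ends at @start, so the code
 * below moves on to the next leaf item.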
6830 */ 6831 extent_end = start; 6832 goto next; 6833 } 6834 6835 extent_type = btrfs_file_extent_type(leaf, item); 6836 extent_start = found_key.offset; 6837 extent_end = btrfs_file_extent_end(path); 6838 if (extent_type == BTRFS_FILE_EXTENT_REG || 6839 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6840 /* Only regular file could have regular/prealloc extent */ 6841 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6842 ret = -EUCLEAN; 6843 btrfs_crit(fs_info, 6844 "regular/prealloc extent found for non-regular inode %llu", 6845 btrfs_ino(inode)); 6846 goto out; 6847 } 6848 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6849 extent_start); 6850 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6851 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6852 path->slots[0], 6853 extent_start); 6854 } 6855 next: 6856 if (start >= extent_end) { 6857 path->slots[0]++; 6858 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6859 ret = btrfs_next_leaf(root, path); 6860 if (ret < 0) 6861 goto out; 6862 else if (ret > 0) 6863 goto not_found; 6864 6865 leaf = path->nodes[0]; 6866 } 6867 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6868 if (found_key.objectid != objectid || 6869 found_key.type != BTRFS_EXTENT_DATA_KEY) 6870 goto not_found; 6871 if (start + len <= found_key.offset) 6872 goto not_found; 6873 if (start > found_key.offset) 6874 goto next; 6875 6876 /* New extent overlaps with existing one */ 6877 em->start = start; 6878 em->orig_start = start; 6879 em->len = found_key.offset - start; 6880 em->block_start = EXTENT_MAP_HOLE; 6881 goto insert; 6882 } 6883 6884 btrfs_extent_item_to_extent_map(inode, path, item, !page, em); 6885 6886 if (extent_type == BTRFS_FILE_EXTENT_REG || 6887 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6888 goto insert; 6889 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6890 unsigned long ptr; 6891 char *map; 6892 size_t size; 6893 size_t extent_offset; 6894 size_t copy_size; 6895 6896 if (!page) 6897 goto out; 6898 6899 size = btrfs_file_extent_ram_bytes(leaf, item); 6900 extent_offset = page_offset(page) + pg_offset - extent_start; 6901 copy_size = min_t(u64, PAGE_SIZE - pg_offset, 6902 size - extent_offset); 6903 em->start = extent_start + extent_offset; 6904 em->len = ALIGN(copy_size, fs_info->sectorsize); 6905 em->orig_block_len = em->len; 6906 em->orig_start = em->start; 6907 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 6908 6909 if (!PageUptodate(page)) { 6910 if (btrfs_file_extent_compression(leaf, item) != 6911 BTRFS_COMPRESS_NONE) { 6912 ret = uncompress_inline(path, page, pg_offset, 6913 extent_offset, item); 6914 if (ret) 6915 goto out; 6916 } else { 6917 map = kmap_local_page(page); 6918 read_extent_buffer(leaf, map + pg_offset, ptr, 6919 copy_size); 6920 if (pg_offset + copy_size < PAGE_SIZE) { 6921 memset(map + pg_offset + copy_size, 0, 6922 PAGE_SIZE - pg_offset - 6923 copy_size); 6924 } 6925 kunmap_local(map); 6926 } 6927 flush_dcache_page(page); 6928 } 6929 set_extent_uptodate(io_tree, em->start, 6930 extent_map_end(em) - 1, NULL, GFP_NOFS); 6931 goto insert; 6932 } 6933 not_found: 6934 em->start = start; 6935 em->orig_start = start; 6936 em->len = len; 6937 em->block_start = EXTENT_MAP_HOLE; 6938 insert: 6939 ret = 0; 6940 btrfs_release_path(path); 6941 if (em->start > start || extent_map_end(em) <= start) { 6942 btrfs_err(fs_info, 6943 "bad extent! 
em: [%llu %llu] passed [%llu %llu]", 6944 em->start, em->len, start, len); 6945 ret = -EIO; 6946 goto out; 6947 } 6948 6949 write_lock(&em_tree->lock); 6950 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 6951 write_unlock(&em_tree->lock); 6952 out: 6953 btrfs_free_path(path); 6954 6955 trace_btrfs_get_extent(root, inode, em); 6956 6957 if (ret) { 6958 free_extent_map(em); 6959 return ERR_PTR(ret); 6960 } 6961 return em; 6962 } 6963 6964 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, 6965 u64 start, u64 len) 6966 { 6967 struct extent_map *em; 6968 struct extent_map *hole_em = NULL; 6969 u64 delalloc_start = start; 6970 u64 end; 6971 u64 delalloc_len; 6972 u64 delalloc_end; 6973 int err = 0; 6974 6975 em = btrfs_get_extent(inode, NULL, 0, start, len); 6976 if (IS_ERR(em)) 6977 return em; 6978 /* 6979 * If our em maps to: 6980 * - a hole or 6981 * - a pre-alloc extent, 6982 * there might actually be delalloc bytes behind it. 6983 */ 6984 if (em->block_start != EXTENT_MAP_HOLE && 6985 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 6986 return em; 6987 else 6988 hole_em = em; 6989 6990 /* check to see if we've wrapped (len == -1 or similar) */ 6991 end = start + len; 6992 if (end < start) 6993 end = (u64)-1; 6994 else 6995 end -= 1; 6996 6997 em = NULL; 6998 6999 /* ok, we didn't find anything, lets look for delalloc */ 7000 delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start, 7001 end, len, EXTENT_DELALLOC, 1); 7002 delalloc_end = delalloc_start + delalloc_len; 7003 if (delalloc_end < delalloc_start) 7004 delalloc_end = (u64)-1; 7005 7006 /* 7007 * We didn't find anything useful, return the original results from 7008 * get_extent() 7009 */ 7010 if (delalloc_start > end || delalloc_end <= start) { 7011 em = hole_em; 7012 hole_em = NULL; 7013 goto out; 7014 } 7015 7016 /* 7017 * Adjust the delalloc_start to make sure it doesn't go backwards from 7018 * the start they passed in 7019 */ 7020 delalloc_start = max(start, delalloc_start); 7021 delalloc_len = delalloc_end - delalloc_start; 7022 7023 if (delalloc_len > 0) { 7024 u64 hole_start; 7025 u64 hole_len; 7026 const u64 hole_end = extent_map_end(hole_em); 7027 7028 em = alloc_extent_map(); 7029 if (!em) { 7030 err = -ENOMEM; 7031 goto out; 7032 } 7033 7034 ASSERT(hole_em); 7035 /* 7036 * When btrfs_get_extent can't find anything it returns one 7037 * huge hole 7038 * 7039 * Make sure what it found really fits our range, and adjust to 7040 * make sure it is based on the start from the caller 7041 */ 7042 if (hole_end <= start || hole_em->start > end) { 7043 free_extent_map(hole_em); 7044 hole_em = NULL; 7045 } else { 7046 hole_start = max(hole_em->start, start); 7047 hole_len = hole_end - hole_start; 7048 } 7049 7050 if (hole_em && delalloc_start > hole_start) { 7051 /* 7052 * Our hole starts before our delalloc, so we have to 7053 * return just the parts of the hole that go until the 7054 * delalloc starts 7055 */ 7056 em->len = min(hole_len, delalloc_start - hole_start); 7057 em->start = hole_start; 7058 em->orig_start = hole_start; 7059 /* 7060 * Don't adjust block start at all, it is fixed at 7061 * EXTENT_MAP_HOLE 7062 */ 7063 em->block_start = hole_em->block_start; 7064 em->block_len = hole_len; 7065 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 7066 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 7067 } else { 7068 /* 7069 * Hole is out of passed range or it starts after 7070 * delalloc range 7071 */ 7072 em->start = delalloc_start; 7073 em->len = delalloc_len; 7074 em->orig_start = 
delalloc_start; 7075 em->block_start = EXTENT_MAP_DELALLOC; 7076 em->block_len = delalloc_len; 7077 } 7078 } else { 7079 return hole_em; 7080 } 7081 out: 7082 7083 free_extent_map(hole_em); 7084 if (err) { 7085 free_extent_map(em); 7086 return ERR_PTR(err); 7087 } 7088 return em; 7089 } 7090 7091 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 7092 const u64 start, 7093 const u64 len, 7094 const u64 orig_start, 7095 const u64 block_start, 7096 const u64 block_len, 7097 const u64 orig_block_len, 7098 const u64 ram_bytes, 7099 const int type) 7100 { 7101 struct extent_map *em = NULL; 7102 int ret; 7103 7104 if (type != BTRFS_ORDERED_NOCOW) { 7105 em = create_io_em(inode, start, len, orig_start, block_start, 7106 block_len, orig_block_len, ram_bytes, 7107 BTRFS_COMPRESS_NONE, /* compress_type */ 7108 type); 7109 if (IS_ERR(em)) 7110 goto out; 7111 } 7112 ret = btrfs_add_ordered_extent(inode, start, len, len, block_start, 7113 block_len, 0, 7114 (1 << type) | 7115 (1 << BTRFS_ORDERED_DIRECT), 7116 BTRFS_COMPRESS_NONE); 7117 if (ret) { 7118 if (em) { 7119 free_extent_map(em); 7120 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 7121 } 7122 em = ERR_PTR(ret); 7123 } 7124 out: 7125 7126 return em; 7127 } 7128 7129 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 7130 u64 start, u64 len) 7131 { 7132 struct btrfs_root *root = inode->root; 7133 struct btrfs_fs_info *fs_info = root->fs_info; 7134 struct extent_map *em; 7135 struct btrfs_key ins; 7136 u64 alloc_hint; 7137 int ret; 7138 7139 alloc_hint = get_extent_allocation_hint(inode, start, len); 7140 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7141 0, alloc_hint, &ins, 1, 1); 7142 if (ret) 7143 return ERR_PTR(ret); 7144 7145 em = btrfs_create_dio_extent(inode, start, ins.offset, start, 7146 ins.objectid, ins.offset, ins.offset, 7147 ins.offset, BTRFS_ORDERED_REGULAR); 7148 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7149 if (IS_ERR(em)) 7150 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 7151 1); 7152 7153 return em; 7154 } 7155 7156 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7157 { 7158 struct btrfs_block_group *block_group; 7159 bool readonly = false; 7160 7161 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7162 if (!block_group || block_group->ro) 7163 readonly = true; 7164 if (block_group) 7165 btrfs_put_block_group(block_group); 7166 return readonly; 7167 } 7168 7169 /* 7170 * Check if we can do nocow write into the range [@offset, @offset + @len) 7171 * 7172 * @offset: File offset 7173 * @len: The length to write, will be updated to the nocow writeable 7174 * range 7175 * @orig_start: (optional) Return the original file offset of the file extent 7176 * @orig_len: (optional) Return the original on-disk length of the file extent 7177 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7178 * @strict: if true, omit optimizations that might force us into unnecessary 7179 * cow. e.g., don't trust generation number. 7180 * 7181 * Return: 7182 * >0 and update @len if we can do nocow write 7183 * 0 if we can't do nocow write 7184 * <0 if error happened 7185 * 7186 * NOTE: This only checks the file extents, caller is responsible to wait for 7187 * any ordered extents. 
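 * The detailed checks on the file extent item that is found are delegated
 * to can_nocow_file_extent().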
7188 */ 7189 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7190 u64 *orig_start, u64 *orig_block_len, 7191 u64 *ram_bytes, bool strict) 7192 { 7193 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7194 struct can_nocow_file_extent_args nocow_args = { 0 }; 7195 struct btrfs_path *path; 7196 int ret; 7197 struct extent_buffer *leaf; 7198 struct btrfs_root *root = BTRFS_I(inode)->root; 7199 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7200 struct btrfs_file_extent_item *fi; 7201 struct btrfs_key key; 7202 int found_type; 7203 7204 path = btrfs_alloc_path(); 7205 if (!path) 7206 return -ENOMEM; 7207 7208 ret = btrfs_lookup_file_extent(NULL, root, path, 7209 btrfs_ino(BTRFS_I(inode)), offset, 0); 7210 if (ret < 0) 7211 goto out; 7212 7213 if (ret == 1) { 7214 if (path->slots[0] == 0) { 7215 /* can't find the item, must cow */ 7216 ret = 0; 7217 goto out; 7218 } 7219 path->slots[0]--; 7220 } 7221 ret = 0; 7222 leaf = path->nodes[0]; 7223 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7224 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7225 key.type != BTRFS_EXTENT_DATA_KEY) { 7226 /* not our file or wrong item type, must cow */ 7227 goto out; 7228 } 7229 7230 if (key.offset > offset) { 7231 /* Wrong offset, must cow */ 7232 goto out; 7233 } 7234 7235 if (btrfs_file_extent_end(path) <= offset) 7236 goto out; 7237 7238 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7239 found_type = btrfs_file_extent_type(leaf, fi); 7240 if (ram_bytes) 7241 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7242 7243 nocow_args.start = offset; 7244 nocow_args.end = offset + *len - 1; 7245 nocow_args.strict = strict; 7246 nocow_args.free_path = true; 7247 7248 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7249 /* can_nocow_file_extent() has freed the path. */ 7250 path = NULL; 7251 7252 if (ret != 1) { 7253 /* Treat errors as not being able to NOCOW. */ 7254 ret = 0; 7255 goto out; 7256 } 7257 7258 ret = 0; 7259 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7260 goto out; 7261 7262 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7263 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7264 u64 range_end; 7265 7266 range_end = round_up(offset + nocow_args.num_bytes, 7267 root->fs_info->sectorsize) - 1; 7268 ret = test_range_bit(io_tree, offset, range_end, 7269 EXTENT_DELALLOC, 0, NULL); 7270 if (ret) { 7271 ret = -EAGAIN; 7272 goto out; 7273 } 7274 } 7275 7276 if (orig_start) 7277 *orig_start = key.offset - nocow_args.extent_offset; 7278 if (orig_block_len) 7279 *orig_block_len = nocow_args.disk_num_bytes; 7280 7281 *len = nocow_args.num_bytes; 7282 ret = 1; 7283 out: 7284 btrfs_free_path(path); 7285 return ret; 7286 } 7287 7288 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7289 struct extent_state **cached_state, 7290 unsigned int iomap_flags) 7291 { 7292 const bool writing = (iomap_flags & IOMAP_WRITE); 7293 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7294 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7295 struct btrfs_ordered_extent *ordered; 7296 int ret = 0; 7297 7298 while (1) { 7299 if (nowait) { 7300 if (!try_lock_extent(io_tree, lockstart, lockend)) 7301 return -EAGAIN; 7302 } else { 7303 lock_extent_bits(io_tree, lockstart, lockend, cached_state); 7304 } 7305 /* 7306 * We're concerned with the entire range that we're going to be 7307 * doing DIO to, so we need to make sure there's no ordered 7308 * extents in this range. 
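 * If one is found we either wait for it to complete, or bail out with
 * -EAGAIN (nowait) or -ENOTBLK (cases where waiting could deadlock).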
7309 */ 7310 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7311 lockend - lockstart + 1); 7312 7313 /* 7314 * We need to make sure there are no buffered pages in this 7315 * range either, we could have raced between the invalidate in 7316 * generic_file_direct_write and locking the extent. The 7317 * invalidate needs to happen so that reads after a write do not 7318 * get stale data. 7319 */ 7320 if (!ordered && 7321 (!writing || !filemap_range_has_page(inode->i_mapping, 7322 lockstart, lockend))) 7323 break; 7324 7325 unlock_extent_cached(io_tree, lockstart, lockend, cached_state); 7326 7327 if (ordered) { 7328 if (nowait) { 7329 btrfs_put_ordered_extent(ordered); 7330 ret = -EAGAIN; 7331 break; 7332 } 7333 /* 7334 * If we are doing a DIO read and the ordered extent we 7335 * found is for a buffered write, we can not wait for it 7336 * to complete and retry, because if we do so we can 7337 * deadlock with concurrent buffered writes on page 7338 * locks. This happens only if our DIO read covers more 7339 * than one extent map, if at this point has already 7340 * created an ordered extent for a previous extent map 7341 * and locked its range in the inode's io tree, and a 7342 * concurrent write against that previous extent map's 7343 * range and this range started (we unlock the ranges 7344 * in the io tree only when the bios complete and 7345 * buffered writes always lock pages before attempting 7346 * to lock range in the io tree). 7347 */ 7348 if (writing || 7349 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7350 btrfs_start_ordered_extent(ordered, 1); 7351 else 7352 ret = nowait ? -EAGAIN : -ENOTBLK; 7353 btrfs_put_ordered_extent(ordered); 7354 } else { 7355 /* 7356 * We could trigger writeback for this range (and wait 7357 * for it to complete) and then invalidate the pages for 7358 * this range (through invalidate_inode_pages2_range()), 7359 * but that can lead us to a deadlock with a concurrent 7360 * call to readahead (a buffered read or a defrag call 7361 * triggered a readahead) on a page lock due to an 7362 * ordered dio extent we created before but did not have 7363 * yet a corresponding bio submitted (whence it can not 7364 * complete), which makes readahead wait for that 7365 * ordered extent to complete while holding a lock on 7366 * that page. 7367 */ 7368 ret = nowait ? 
-EAGAIN : -ENOTBLK; 7369 } 7370 7371 if (ret) 7372 break; 7373 7374 cond_resched(); 7375 } 7376 7377 return ret; 7378 } 7379 7380 /* The callers of this must take lock_extent() */ 7381 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 7382 u64 len, u64 orig_start, u64 block_start, 7383 u64 block_len, u64 orig_block_len, 7384 u64 ram_bytes, int compress_type, 7385 int type) 7386 { 7387 struct extent_map_tree *em_tree; 7388 struct extent_map *em; 7389 int ret; 7390 7391 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7392 type == BTRFS_ORDERED_COMPRESSED || 7393 type == BTRFS_ORDERED_NOCOW || 7394 type == BTRFS_ORDERED_REGULAR); 7395 7396 em_tree = &inode->extent_tree; 7397 em = alloc_extent_map(); 7398 if (!em) 7399 return ERR_PTR(-ENOMEM); 7400 7401 em->start = start; 7402 em->orig_start = orig_start; 7403 em->len = len; 7404 em->block_len = block_len; 7405 em->block_start = block_start; 7406 em->orig_block_len = orig_block_len; 7407 em->ram_bytes = ram_bytes; 7408 em->generation = -1; 7409 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7410 if (type == BTRFS_ORDERED_PREALLOC) { 7411 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7412 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7413 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7414 em->compress_type = compress_type; 7415 } 7416 7417 do { 7418 btrfs_drop_extent_cache(inode, em->start, 7419 em->start + em->len - 1, 0); 7420 write_lock(&em_tree->lock); 7421 ret = add_extent_mapping(em_tree, em, 1); 7422 write_unlock(&em_tree->lock); 7423 /* 7424 * The caller has taken lock_extent(), so nothing else should be able 7425 * to race with us and add an overlapping extent map; retry if it somehow happens. 7426 */ 7427 } while (ret == -EEXIST); 7428 7429 if (ret) { 7430 free_extent_map(em); 7431 return ERR_PTR(ret); 7432 } 7433 7434 /* The em now holds 2 refs, the caller needs to call free_extent_map() once. */ 7435 return em; 7436 } 7437 7438 7439 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7440 struct inode *inode, 7441 struct btrfs_dio_data *dio_data, 7442 u64 start, u64 len, 7443 unsigned int iomap_flags) 7444 { 7445 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7446 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7447 struct extent_map *em = *map; 7448 int type; 7449 u64 block_start, orig_start, orig_block_len, ram_bytes; 7450 struct btrfs_block_group *bg; 7451 bool can_nocow = false; 7452 bool space_reserved = false; 7453 u64 prev_len; 7454 int ret = 0; 7455 7456 /* 7457 * We don't allocate a new extent in the following cases 7458 * 7459 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7460 * existing extent. 7461 * 2) The extent is marked as PREALLOC. We're good to go here and can 7462 * just use the extent. 7463 * 7464 */ 7465 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7466 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7467 em->block_start != EXTENT_MAP_HOLE)) { 7468 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7469 type = BTRFS_ORDERED_PREALLOC; 7470 else 7471 type = BTRFS_ORDERED_NOCOW; 7472 len = min(len, em->len - (start - em->start)); 7473 block_start = em->block_start + (start - em->start); 7474 7475 if (can_nocow_extent(inode, start, &len, &orig_start, 7476 &orig_block_len, &ram_bytes, false) == 1) { 7477 bg = btrfs_inc_nocow_writers(fs_info, block_start); 7478 if (bg) 7479 can_nocow = true; 7480 } 7481 } 7482 7483 prev_len = len; 7484 if (can_nocow) { 7485 struct extent_map *em2; 7486 7487 /* We can NOCOW, so only need to reserve metadata space.
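The data blocks are already allocated on disk, so this write consumes no new data space.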
*/ 7488 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7489 nowait); 7490 if (ret < 0) { 7491 /* Our caller expects us to free the input extent map. */ 7492 free_extent_map(em); 7493 *map = NULL; 7494 btrfs_dec_nocow_writers(bg); 7495 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) 7496 ret = -EAGAIN; 7497 goto out; 7498 } 7499 space_reserved = true; 7500 7501 em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len, 7502 orig_start, block_start, 7503 len, orig_block_len, 7504 ram_bytes, type); 7505 btrfs_dec_nocow_writers(bg); 7506 if (type == BTRFS_ORDERED_PREALLOC) { 7507 free_extent_map(em); 7508 *map = em = em2; 7509 } 7510 7511 if (IS_ERR(em2)) { 7512 ret = PTR_ERR(em2); 7513 goto out; 7514 } 7515 7516 dio_data->nocow_done = true; 7517 } else { 7518 /* Our caller expects us to free the input extent map. */ 7519 free_extent_map(em); 7520 *map = NULL; 7521 7522 if (nowait) 7523 return -EAGAIN; 7524 7525 /* 7526 * If we could not allocate data space before locking the file 7527 * range and we can't do a NOCOW write, then we have to fail. 7528 */ 7529 if (!dio_data->data_space_reserved) 7530 return -ENOSPC; 7531 7532 /* 7533 * We have to COW and we have already reserved data space before, 7534 * so now we reserve only metadata. 7535 */ 7536 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7537 false); 7538 if (ret < 0) 7539 goto out; 7540 space_reserved = true; 7541 7542 em = btrfs_new_extent_direct(BTRFS_I(inode), start, len); 7543 if (IS_ERR(em)) { 7544 ret = PTR_ERR(em); 7545 goto out; 7546 } 7547 *map = em; 7548 len = min(len, em->len - (start - em->start)); 7549 if (len < prev_len) 7550 btrfs_delalloc_release_metadata(BTRFS_I(inode), 7551 prev_len - len, true); 7552 } 7553 7554 /* 7555 * We have created our ordered extent, so we can now release our reservation 7556 * for an outstanding extent. 7557 */ 7558 btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); 7559 7560 /* 7561 * Need to update the i_size under the extent lock so buffered 7562 * readers will get the updated i_size when we unlock. 
7563 */ 7564 if (start + len > i_size_read(inode)) 7565 i_size_write(inode, start + len); 7566 out: 7567 if (ret && space_reserved) { 7568 btrfs_delalloc_release_extents(BTRFS_I(inode), len); 7569 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true); 7570 } 7571 return ret; 7572 } 7573 7574 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, 7575 loff_t length, unsigned int flags, struct iomap *iomap, 7576 struct iomap *srcmap) 7577 { 7578 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7579 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7580 struct extent_map *em; 7581 struct extent_state *cached_state = NULL; 7582 struct btrfs_dio_data *dio_data = iter->private; 7583 u64 lockstart, lockend; 7584 const bool write = !!(flags & IOMAP_WRITE); 7585 int ret = 0; 7586 u64 len = length; 7587 const u64 data_alloc_len = length; 7588 bool unlock_extents = false; 7589 7590 if (!write) 7591 len = min_t(u64, len, fs_info->sectorsize); 7592 7593 lockstart = start; 7594 lockend = start + len - 1; 7595 7596 /* 7597 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't 7598 * enough if we've written compressed pages to this area, so we need to 7599 * flush the dirty pages again to make absolutely sure that any 7600 * outstanding dirty pages are on disk - the first flush only starts 7601 * compression on the data, while keeping the pages locked, so by the 7602 * time the second flush returns we know bios for the compressed pages 7603 * were submitted and finished, and the pages no longer under writeback. 7604 * 7605 * If we have a NOWAIT request and we have any pages in the range that 7606 * are locked, likely due to compression still in progress, we don't want 7607 * to block on page locks. We also don't want to block on pages marked as 7608 * dirty or under writeback (same as for the non-compression case). 7609 * iomap_dio_rw() did the same check, but after that and before we got 7610 * here, mmap'ed writes may have happened or buffered reads started 7611 * (readpage() and readahead(), which lock pages), as we haven't locked 7612 * the file range yet. 7613 */ 7614 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7615 &BTRFS_I(inode)->runtime_flags)) { 7616 if (flags & IOMAP_NOWAIT) { 7617 if (filemap_range_needs_writeback(inode->i_mapping, 7618 lockstart, lockend)) 7619 return -EAGAIN; 7620 } else { 7621 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7622 start + length - 1); 7623 if (ret) 7624 return ret; 7625 } 7626 } 7627 7628 memset(dio_data, 0, sizeof(*dio_data)); 7629 7630 /* 7631 * We always try to allocate data space and must do it before locking 7632 * the file range, to avoid deadlocks with concurrent writes to the same 7633 * range if the range has several extents and the writes don't expand the 7634 * current i_size (the inode lock is taken in shared mode). If we fail to 7635 * allocate data space here we continue and later, after locking the 7636 * file range, we fail with ENOSPC only if we figure out we can not do a 7637 * NOCOW write. 
7638 */ 7639 if (write && !(flags & IOMAP_NOWAIT)) { 7640 ret = btrfs_check_data_free_space(BTRFS_I(inode), 7641 &dio_data->data_reserved, 7642 start, data_alloc_len); 7643 if (!ret) 7644 dio_data->data_space_reserved = true; 7645 else if (ret && !(BTRFS_I(inode)->flags & 7646 (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) 7647 goto err; 7648 } 7649 7650 /* 7651 * If this errors out it's because we couldn't invalidate pagecache for 7652 * this range and we need to fall back to buffered IO, or we are doing a 7653 * NOWAIT read/write and we need to block. 7654 */ 7655 ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); 7656 if (ret < 0) 7657 goto err; 7658 7659 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7660 if (IS_ERR(em)) { 7661 ret = PTR_ERR(em); 7662 goto unlock_err; 7663 } 7664 7665 /* 7666 * Ok for INLINE and COMPRESSED extents we need to fall back to buffered 7667 * IO. INLINE is special, and we could probably kludge it in here, but 7668 * it's still buffered so for safety let's just fall back to the generic 7669 * buffered path. 7670 * 7671 * For COMPRESSED we _have_ to read the entire extent in so we can 7672 * decompress it, so there will be buffering required no matter what we 7673 * do, so go ahead and fall back to buffered. 7674 * 7675 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7676 * to buffered IO. Don't blame me, this is the price we pay for using 7677 * the generic code. 7678 */ 7679 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7680 em->block_start == EXTENT_MAP_INLINE) { 7681 free_extent_map(em); 7682 ret = -ENOTBLK; 7683 goto unlock_err; 7684 } 7685 7686 len = min(len, em->len - (start - em->start)); 7687 7688 /* 7689 * If we have a NOWAIT request and the range contains multiple extents 7690 * (or a mix of extents and holes), then we return -EAGAIN to make the 7691 * caller fall back to a context where it can do a blocking (without 7692 * NOWAIT) request. This way we avoid doing partial IO and returning 7693 * success to the caller, which is not optimal for writes and for reads 7694 * it can result in unexpected behaviour for an application. 7695 * 7696 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling 7697 * iomap_dio_rw(), we can end up returning less data than what the caller 7698 * asked for, resulting in an unexpected, and incorrect, short read. 7699 * That is, the caller asked to read N bytes and we return less than that, 7700 * which is wrong unless we are crossing EOF. This happens if we get a 7701 * page fault error when trying to fault in pages for the buffer that is 7702 * associated to the struct iov_iter passed to iomap_dio_rw(), and we 7703 * have previously submitted bios for other extents in the range, in 7704 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of 7705 * those bios have completed by the time we get the page fault error, 7706 * which we return back to our caller - we should only return EIOCBQUEUED 7707 * after we have submitted bios for all the extents in the range.
7708 */ 7709 if ((flags & IOMAP_NOWAIT) && len < length) { 7710 free_extent_map(em); 7711 ret = -EAGAIN; 7712 goto unlock_err; 7713 } 7714 7715 if (write) { 7716 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, 7717 start, len, flags); 7718 if (ret < 0) 7719 goto unlock_err; 7720 unlock_extents = true; 7721 /* Recalc len in case the new em is smaller than requested */ 7722 len = min(len, em->len - (start - em->start)); 7723 if (dio_data->data_space_reserved) { 7724 u64 release_offset; 7725 u64 release_len = 0; 7726 7727 if (dio_data->nocow_done) { 7728 release_offset = start; 7729 release_len = data_alloc_len; 7730 } else if (len < data_alloc_len) { 7731 release_offset = start + len; 7732 release_len = data_alloc_len - len; 7733 } 7734 7735 if (release_len > 0) 7736 btrfs_free_reserved_data_space(BTRFS_I(inode), 7737 dio_data->data_reserved, 7738 release_offset, 7739 release_len); 7740 } 7741 } else { 7742 /* 7743 * We need to unlock only the end area that we aren't using. 7744 * The rest is going to be unlocked by the endio routine. 7745 */ 7746 lockstart = start + len; 7747 if (lockstart < lockend) 7748 unlock_extents = true; 7749 } 7750 7751 if (unlock_extents) 7752 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 7753 lockstart, lockend, &cached_state); 7754 else 7755 free_extent_state(cached_state); 7756 7757 /* 7758 * Translate extent map information to iomap. 7759 * We trim the extents (and move the addr) even though iomap code does 7760 * that, since we have locked only the parts we are performing I/O in. 7761 */ 7762 if ((em->block_start == EXTENT_MAP_HOLE) || 7763 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7764 iomap->addr = IOMAP_NULL_ADDR; 7765 iomap->type = IOMAP_HOLE; 7766 } else { 7767 iomap->addr = em->block_start + (start - em->start); 7768 iomap->type = IOMAP_MAPPED; 7769 } 7770 iomap->offset = start; 7771 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; 7772 iomap->length = len; 7773 7774 if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start)) 7775 iomap->flags |= IOMAP_F_ZONE_APPEND; 7776 7777 free_extent_map(em); 7778 7779 return 0; 7780 7781 unlock_err: 7782 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7783 &cached_state); 7784 err: 7785 if (dio_data->data_space_reserved) { 7786 btrfs_free_reserved_data_space(BTRFS_I(inode), 7787 dio_data->data_reserved, 7788 start, data_alloc_len); 7789 extent_changeset_free(dio_data->data_reserved); 7790 } 7791 7792 return ret; 7793 } 7794 7795 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7796 ssize_t written, unsigned int flags, struct iomap *iomap) 7797 { 7798 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7799 struct btrfs_dio_data *dio_data = iter->private; 7800 size_t submitted = dio_data->submitted; 7801 const bool write = !!(flags & IOMAP_WRITE); 7802 int ret = 0; 7803 7804 if (!write && (iomap->type == IOMAP_HOLE)) { 7805 /* If reading from a hole, unlock and return */ 7806 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); 7807 return 0; 7808 } 7809 7810 if (submitted < length) { 7811 pos += submitted; 7812 length -= submitted; 7813 if (write) 7814 __endio_write_update_ordered(BTRFS_I(inode), pos, 7815 length, false); 7816 else 7817 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7818 pos + length - 1); 7819 ret = -ENOTBLK; 7820 } 7821 7822 if (write) 7823 extent_changeset_free(dio_data->data_reserved); 7824 return ret; 7825 } 7826 7827 static void btrfs_dio_private_put(struct btrfs_dio_private 
*dip) 7828 { 7829 /* 7830 * This implies a barrier so that stores to dio_bio->bi_status before 7831 * this and loads of dio_bio->bi_status after this are fully ordered. 7832 */ 7833 if (!refcount_dec_and_test(&dip->refs)) 7834 return; 7835 7836 if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) { 7837 __endio_write_update_ordered(BTRFS_I(dip->inode), 7838 dip->file_offset, 7839 dip->bytes, 7840 !dip->bio.bi_status); 7841 } else { 7842 unlock_extent(&BTRFS_I(dip->inode)->io_tree, 7843 dip->file_offset, 7844 dip->file_offset + dip->bytes - 1); 7845 } 7846 7847 kfree(dip->csums); 7848 bio_endio(&dip->bio); 7849 } 7850 7851 static void submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7852 int mirror_num, 7853 enum btrfs_compression_type compress_type) 7854 { 7855 struct btrfs_dio_private *dip = bio->bi_private; 7856 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7857 7858 BUG_ON(bio_op(bio) == REQ_OP_WRITE); 7859 7860 if (btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA)) 7861 return; 7862 7863 refcount_inc(&dip->refs); 7864 if (btrfs_map_bio(fs_info, bio, mirror_num)) 7865 refcount_dec(&dip->refs); 7866 } 7867 7868 static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip, 7869 struct btrfs_bio *bbio, 7870 const bool uptodate) 7871 { 7872 struct inode *inode = dip->inode; 7873 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 7874 const u32 sectorsize = fs_info->sectorsize; 7875 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 7876 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7877 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7878 struct bio_vec bvec; 7879 struct bvec_iter iter; 7880 u32 bio_offset = 0; 7881 blk_status_t err = BLK_STS_OK; 7882 7883 __bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) { 7884 unsigned int i, nr_sectors, pgoff; 7885 7886 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 7887 pgoff = bvec.bv_offset; 7888 for (i = 0; i < nr_sectors; i++) { 7889 u64 start = bbio->file_offset + bio_offset; 7890 7891 ASSERT(pgoff < PAGE_SIZE); 7892 if (uptodate && 7893 (!csum || !check_data_csum(inode, bbio, 7894 bio_offset, bvec.bv_page, 7895 pgoff, start))) { 7896 clean_io_failure(fs_info, failure_tree, io_tree, 7897 start, bvec.bv_page, 7898 btrfs_ino(BTRFS_I(inode)), 7899 pgoff); 7900 } else { 7901 int ret; 7902 7903 ret = btrfs_repair_one_sector(inode, &bbio->bio, 7904 bio_offset, bvec.bv_page, pgoff, 7905 start, bbio->mirror_num, 7906 submit_dio_repair_bio); 7907 if (ret) 7908 err = errno_to_blk_status(ret); 7909 } 7910 ASSERT(bio_offset + sectorsize > bio_offset); 7911 bio_offset += sectorsize; 7912 pgoff += sectorsize; 7913 } 7914 } 7915 return err; 7916 } 7917 7918 static void __endio_write_update_ordered(struct btrfs_inode *inode, 7919 const u64 offset, const u64 bytes, 7920 const bool uptodate) 7921 { 7922 btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, 7923 finish_ordered_fn, uptodate); 7924 } 7925 7926 static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode, 7927 struct bio *bio, 7928 u64 dio_file_offset) 7929 { 7930 return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false); 7931 } 7932 7933 static void btrfs_end_dio_bio(struct bio *bio) 7934 { 7935 struct btrfs_dio_private *dip = bio->bi_private; 7936 struct btrfs_bio *bbio = btrfs_bio(bio); 7937 blk_status_t err = bio->bi_status; 7938 7939 if (err) 7940 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 7941 "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err 
no %d", 7942 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), 7943 bio->bi_opf, bio->bi_iter.bi_sector, 7944 bio->bi_iter.bi_size, err); 7945 7946 if (bio_op(bio) == REQ_OP_READ) 7947 err = btrfs_check_read_dio_bio(dip, bbio, !err); 7948 7949 if (err) 7950 dip->bio.bi_status = err; 7951 7952 btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio); 7953 7954 bio_put(bio); 7955 btrfs_dio_private_put(dip); 7956 } 7957 7958 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, 7959 struct inode *inode, u64 file_offset, int async_submit) 7960 { 7961 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7962 struct btrfs_dio_private *dip = bio->bi_private; 7963 bool write = btrfs_op(bio) == BTRFS_MAP_WRITE; 7964 blk_status_t ret; 7965 7966 /* Check btrfs_submit_bio_hook() for rules about async submit. */ 7967 if (async_submit) 7968 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 7969 7970 if (!write) { 7971 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 7972 if (ret) 7973 goto err; 7974 } 7975 7976 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 7977 goto map; 7978 7979 if (write && async_submit) { 7980 ret = btrfs_wq_submit_bio(inode, bio, 0, file_offset, 7981 btrfs_submit_bio_start_direct_io); 7982 goto err; 7983 } else if (write) { 7984 /* 7985 * If we aren't doing async submit, calculate the csum of the 7986 * bio now. 7987 */ 7988 ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false); 7989 if (ret) 7990 goto err; 7991 } else { 7992 u64 csum_offset; 7993 7994 csum_offset = file_offset - dip->file_offset; 7995 csum_offset >>= fs_info->sectorsize_bits; 7996 csum_offset *= fs_info->csum_size; 7997 btrfs_bio(bio)->csum = dip->csums + csum_offset; 7998 } 7999 map: 8000 ret = btrfs_map_bio(fs_info, bio, 0); 8001 err: 8002 return ret; 8003 } 8004 8005 static void btrfs_submit_direct(const struct iomap_iter *iter, 8006 struct bio *dio_bio, loff_t file_offset) 8007 { 8008 struct btrfs_dio_private *dip = 8009 container_of(dio_bio, struct btrfs_dio_private, bio); 8010 struct inode *inode = iter->inode; 8011 const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE); 8012 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8013 const bool raid56 = (btrfs_data_alloc_profile(fs_info) & 8014 BTRFS_BLOCK_GROUP_RAID56_MASK); 8015 struct bio *bio; 8016 u64 start_sector; 8017 int async_submit = 0; 8018 u64 submit_len; 8019 u64 clone_offset = 0; 8020 u64 clone_len; 8021 u64 logical; 8022 int ret; 8023 blk_status_t status; 8024 struct btrfs_io_geometry geom; 8025 struct btrfs_dio_data *dio_data = iter->private; 8026 struct extent_map *em = NULL; 8027 8028 dip->inode = inode; 8029 dip->file_offset = file_offset; 8030 dip->bytes = dio_bio->bi_iter.bi_size; 8031 refcount_set(&dip->refs, 1); 8032 dip->csums = NULL; 8033 8034 if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 8035 unsigned int nr_sectors = 8036 (dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits); 8037 8038 /* 8039 * Load the csums up front to reduce csum tree searches and 8040 * contention when submitting bios. 
8041 */ 8042 status = BLK_STS_RESOURCE; 8043 dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS); 8044 if (!dip->csums) 8045 goto out_err; 8046 8047 status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums); 8048 if (status != BLK_STS_OK) 8049 goto out_err; 8050 } 8051 8052 start_sector = dio_bio->bi_iter.bi_sector; 8053 submit_len = dio_bio->bi_iter.bi_size; 8054 8055 do { 8056 logical = start_sector << 9; 8057 em = btrfs_get_chunk_map(fs_info, logical, submit_len); 8058 if (IS_ERR(em)) { 8059 status = errno_to_blk_status(PTR_ERR(em)); 8060 em = NULL; 8061 goto out_err_em; 8062 } 8063 ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio), 8064 logical, &geom); 8065 if (ret) { 8066 status = errno_to_blk_status(ret); 8067 goto out_err_em; 8068 } 8069 8070 clone_len = min(submit_len, geom.len); 8071 ASSERT(clone_len <= UINT_MAX); 8072 8073 /* 8074 * This will never fail as it's passing GFP_NOFS and 8075 * the allocation is backed by btrfs_bioset. 8076 */ 8077 bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len); 8078 bio->bi_private = dip; 8079 bio->bi_end_io = btrfs_end_dio_bio; 8080 btrfs_bio(bio)->file_offset = file_offset; 8081 8082 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 8083 status = extract_ordered_extent(BTRFS_I(inode), bio, 8084 file_offset); 8085 if (status) { 8086 bio_put(bio); 8087 goto out_err; 8088 } 8089 } 8090 8091 ASSERT(submit_len >= clone_len); 8092 submit_len -= clone_len; 8093 8094 /* 8095 * Increase the count before we submit the bio so we know 8096 * the end IO handler won't happen before we increase the 8097 * count. Otherwise, the dip might get freed before we're 8098 * done setting it up. 8099 * 8100 * We transfer the initial reference to the last bio, so we 8101 * don't need to increment the reference count for the last one. 8102 */ 8103 if (submit_len > 0) { 8104 refcount_inc(&dip->refs); 8105 /* 8106 * If we are submitting more than one bio, submit them 8107 * all asynchronously. The exception is RAID 5 or 6, as 8108 * asynchronous checksums make it difficult to collect 8109 * full stripe writes.
8110 */ 8111 if (!raid56) 8112 async_submit = 1; 8113 } 8114 8115 status = btrfs_submit_dio_bio(bio, inode, file_offset, 8116 async_submit); 8117 if (status) { 8118 bio_put(bio); 8119 if (submit_len > 0) 8120 refcount_dec(&dip->refs); 8121 goto out_err_em; 8122 } 8123 8124 dio_data->submitted += clone_len; 8125 clone_offset += clone_len; 8126 start_sector += clone_len >> 9; 8127 file_offset += clone_len; 8128 8129 free_extent_map(em); 8130 } while (submit_len > 0); 8131 return; 8132 8133 out_err_em: 8134 free_extent_map(em); 8135 out_err: 8136 dio_bio->bi_status = status; 8137 btrfs_dio_private_put(dip); 8138 } 8139 8140 static const struct iomap_ops btrfs_dio_iomap_ops = { 8141 .iomap_begin = btrfs_dio_iomap_begin, 8142 .iomap_end = btrfs_dio_iomap_end, 8143 }; 8144 8145 static const struct iomap_dio_ops btrfs_dio_ops = { 8146 .submit_io = btrfs_submit_direct, 8147 .bio_set = &btrfs_dio_bioset, 8148 }; 8149 8150 ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 8151 { 8152 struct btrfs_dio_data data; 8153 8154 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 8155 IOMAP_DIO_PARTIAL, &data, done_before); 8156 } 8157 8158 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8159 u64 start, u64 len) 8160 { 8161 int ret; 8162 8163 ret = fiemap_prep(inode, fieinfo, start, &len, 0); 8164 if (ret) 8165 return ret; 8166 8167 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); 8168 } 8169 8170 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8171 { 8172 struct inode *inode = page->mapping->host; 8173 int ret; 8174 8175 if (current->flags & PF_MEMALLOC) { 8176 redirty_page_for_writepage(wbc, page); 8177 unlock_page(page); 8178 return 0; 8179 } 8180 8181 /* 8182 * If we are under memory pressure we will call this directly from the 8183 * VM, we need to make sure we have the inode referenced for the ordered 8184 * extent. If not just return like we didn't do anything. 8185 */ 8186 if (!igrab(inode)) { 8187 redirty_page_for_writepage(wbc, page); 8188 return AOP_WRITEPAGE_ACTIVATE; 8189 } 8190 ret = extent_write_full_page(page, wbc); 8191 btrfs_add_delayed_iput(inode); 8192 return ret; 8193 } 8194 8195 static int btrfs_writepages(struct address_space *mapping, 8196 struct writeback_control *wbc) 8197 { 8198 return extent_writepages(mapping, wbc); 8199 } 8200 8201 static void btrfs_readahead(struct readahead_control *rac) 8202 { 8203 extent_readahead(rac); 8204 } 8205 8206 /* 8207 * For release_folio() and invalidate_folio() we have a race window where 8208 * folio_end_writeback() is called but the subpage spinlock is not yet released. 8209 * If we continue to release/invalidate the page, we could cause use-after-free 8210 * for subpage spinlock. So this function is to spin and wait for subpage 8211 * spinlock. 8212 */ 8213 static void wait_subpage_spinlock(struct page *page) 8214 { 8215 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); 8216 struct btrfs_subpage *subpage; 8217 8218 if (!btrfs_is_subpage(fs_info, page)) 8219 return; 8220 8221 ASSERT(PagePrivate(page) && page->private); 8222 subpage = (struct btrfs_subpage *)page->private; 8223 8224 /* 8225 * This may look insane as we just acquire the spinlock and release it, 8226 * without doing anything. But we just want to make sure no one is 8227 * still holding the subpage spinlock. 
8228 * And since the page is not dirty nor writeback, and we have page 8229 * locked, the only possible way to hold a spinlock is from the endio 8230 * function to clear page writeback. 8231 * 8232 * Here we just acquire the spinlock so that all existing callers 8233 * should exit and we're safe to release/invalidate the page. 8234 */ 8235 spin_lock_irq(&subpage->lock); 8236 spin_unlock_irq(&subpage->lock); 8237 } 8238 8239 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 8240 { 8241 int ret = try_release_extent_mapping(&folio->page, gfp_flags); 8242 8243 if (ret == 1) { 8244 wait_subpage_spinlock(&folio->page); 8245 clear_page_extent_mapped(&folio->page); 8246 } 8247 return ret; 8248 } 8249 8250 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 8251 { 8252 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 8253 return false; 8254 return __btrfs_release_folio(folio, gfp_flags); 8255 } 8256 8257 #ifdef CONFIG_MIGRATION 8258 static int btrfs_migratepage(struct address_space *mapping, 8259 struct page *newpage, struct page *page, 8260 enum migrate_mode mode) 8261 { 8262 int ret; 8263 8264 ret = migrate_page_move_mapping(mapping, newpage, page, 0); 8265 if (ret != MIGRATEPAGE_SUCCESS) 8266 return ret; 8267 8268 if (page_has_private(page)) 8269 attach_page_private(newpage, detach_page_private(page)); 8270 8271 if (PageOrdered(page)) { 8272 ClearPageOrdered(page); 8273 SetPageOrdered(newpage); 8274 } 8275 8276 if (mode != MIGRATE_SYNC_NO_COPY) 8277 migrate_page_copy(newpage, page); 8278 else 8279 migrate_page_states(newpage, page); 8280 return MIGRATEPAGE_SUCCESS; 8281 } 8282 #endif 8283 8284 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 8285 size_t length) 8286 { 8287 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); 8288 struct btrfs_fs_info *fs_info = inode->root->fs_info; 8289 struct extent_io_tree *tree = &inode->io_tree; 8290 struct extent_state *cached_state = NULL; 8291 u64 page_start = folio_pos(folio); 8292 u64 page_end = page_start + folio_size(folio) - 1; 8293 u64 cur; 8294 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 8295 8296 /* 8297 * We have folio locked so no new ordered extent can be created on this 8298 * page, nor bio can be submitted for this folio. 8299 * 8300 * But already submitted bio can still be finished on this folio. 8301 * Furthermore, endio function won't skip folio which has Ordered 8302 * (Private2) already cleared, so it's possible for endio and 8303 * invalidate_folio to do the same ordered extent accounting twice 8304 * on one folio. 8305 * 8306 * So here we wait for any submitted bios to finish, so that we won't 8307 * do double ordered extent accounting on the same folio. 8308 */ 8309 folio_wait_writeback(folio); 8310 wait_subpage_spinlock(&folio->page); 8311 8312 /* 8313 * For subpage case, we have call sites like 8314 * btrfs_punch_hole_lock_range() which passes range not aligned to 8315 * sectorsize. 8316 * If the range doesn't cover the full folio, we don't need to and 8317 * shouldn't clear page extent mapped, as folio->private can still 8318 * record subpage dirty bits for other part of the range. 8319 * 8320 * For cases that invalidate the full folio even the range doesn't 8321 * cover the full folio, like invalidating the last folio, we're 8322 * still safe to wait for ordered extent to finish. 
8323 */ 8324 if (!(offset == 0 && length == folio_size(folio))) { 8325 btrfs_release_folio(folio, GFP_NOFS); 8326 return; 8327 } 8328 8329 if (!inode_evicting) 8330 lock_extent_bits(tree, page_start, page_end, &cached_state); 8331 8332 cur = page_start; 8333 while (cur < page_end) { 8334 struct btrfs_ordered_extent *ordered; 8335 bool delete_states; 8336 u64 range_end; 8337 u32 range_len; 8338 8339 ordered = btrfs_lookup_first_ordered_range(inode, cur, 8340 page_end + 1 - cur); 8341 if (!ordered) { 8342 range_end = page_end; 8343 /* 8344 * No ordered extent covering this range, we are safe 8345 * to delete all extent states in the range. 8346 */ 8347 delete_states = true; 8348 goto next; 8349 } 8350 if (ordered->file_offset > cur) { 8351 /* 8352 * There is a range between [cur, oe->file_offset) not 8353 * covered by any ordered extent. 8354 * We are safe to delete all extent states, and handle 8355 * the ordered extent in the next iteration. 8356 */ 8357 range_end = ordered->file_offset - 1; 8358 delete_states = true; 8359 goto next; 8360 } 8361 8362 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 8363 page_end); 8364 ASSERT(range_end + 1 - cur < U32_MAX); 8365 range_len = range_end + 1 - cur; 8366 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { 8367 /* 8368 * If Ordered (Private2) is cleared, it means endio has 8369 * already been executed for the range. 8370 * We can't delete the extent states as 8371 * btrfs_finish_ordered_io() may still use some of them. 8372 */ 8373 delete_states = false; 8374 goto next; 8375 } 8376 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); 8377 8378 /* 8379 * IO on this page will never be started, so we need to account 8380 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 8381 * here, must leave that up for the ordered extent completion. 8382 * 8383 * This will also unlock the range for incoming 8384 * btrfs_finish_ordered_io(). 8385 */ 8386 if (!inode_evicting) 8387 clear_extent_bit(tree, cur, range_end, 8388 EXTENT_DELALLOC | 8389 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8390 EXTENT_DEFRAG, 1, 0, &cached_state); 8391 8392 spin_lock_irq(&inode->ordered_tree.lock); 8393 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8394 ordered->truncated_len = min(ordered->truncated_len, 8395 cur - ordered->file_offset); 8396 spin_unlock_irq(&inode->ordered_tree.lock); 8397 8398 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8399 cur, range_end + 1 - cur)) { 8400 btrfs_finish_ordered_io(ordered); 8401 /* 8402 * The ordered extent has finished, now we're again 8403 * safe to delete all extent states of the range. 8404 */ 8405 delete_states = true; 8406 } else { 8407 /* 8408 * btrfs_finish_ordered_io() will get executed by endio 8409 * of other pages, thus we can't delete extent states 8410 * anymore 8411 */ 8412 delete_states = false; 8413 } 8414 next: 8415 if (ordered) 8416 btrfs_put_ordered_extent(ordered); 8417 /* 8418 * Qgroup reserved space handler 8419 * Sector(s) here will be either: 8420 * 8421 * 1) Already written to disk or bio already finished 8422 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 8423 * Qgroup will be handled by its qgroup_record then. 8424 * btrfs_qgroup_free_data() call will do nothing here. 8425 * 8426 * 2) Not written to disk yet 8427 * Then btrfs_qgroup_free_data() call will clear the 8428 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 8429 * reserved data space. 8430 * Since the IO will never happen for this page. 
8431 */ 8432 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur); 8433 if (!inode_evicting) { 8434 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 8435 EXTENT_DELALLOC | EXTENT_UPTODATE | 8436 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 8437 delete_states, &cached_state); 8438 } 8439 cur = range_end + 1; 8440 } 8441 /* 8442 * We have iterated through all ordered extents of the page, the page 8443 * should not have Ordered (Private2) anymore, or the above iteration 8444 * did something wrong. 8445 */ 8446 ASSERT(!folio_test_ordered(folio)); 8447 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); 8448 if (!inode_evicting) 8449 __btrfs_release_folio(folio, GFP_NOFS); 8450 clear_page_extent_mapped(&folio->page); 8451 } 8452 8453 /* 8454 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8455 * called from a page fault handler when a page is first dirtied. Hence we must 8456 * be careful to check for EOF conditions here. We set the page up correctly 8457 * for a written page which means we get ENOSPC checking when writing into 8458 * holes and correct delalloc and unwritten extent mapping on filesystems that 8459 * support these features. 8460 * 8461 * We are not allowed to take the i_mutex here so we have to play games to 8462 * protect against truncate races as the page could now be beyond EOF. Because 8463 * truncate_setsize() writes the inode size before removing pages, once we have 8464 * the page lock we can determine safely if the page is beyond EOF. If it is not 8465 * beyond EOF, then the page is guaranteed safe against truncation until we 8466 * unlock the page. 8467 */ 8468 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8469 { 8470 struct page *page = vmf->page; 8471 struct inode *inode = file_inode(vmf->vma->vm_file); 8472 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8473 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8474 struct btrfs_ordered_extent *ordered; 8475 struct extent_state *cached_state = NULL; 8476 struct extent_changeset *data_reserved = NULL; 8477 unsigned long zero_start; 8478 loff_t size; 8479 vm_fault_t ret; 8480 int ret2; 8481 int reserved = 0; 8482 u64 reserved_space; 8483 u64 page_start; 8484 u64 page_end; 8485 u64 end; 8486 8487 reserved_space = PAGE_SIZE; 8488 8489 sb_start_pagefault(inode->i_sb); 8490 page_start = page_offset(page); 8491 page_end = page_start + PAGE_SIZE - 1; 8492 end = page_end; 8493 8494 /* 8495 * Reserving delalloc space after obtaining the page lock can lead to 8496 * deadlock. For example, if a dirty page is locked by this function 8497 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8498 * dirty page write out, then the btrfs_writepage() function could 8499 * end up waiting indefinitely to get a lock on the page currently 8500 * being processed by btrfs_page_mkwrite() function. 
8501 */ 8502 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 8503 page_start, reserved_space); 8504 if (!ret2) { 8505 ret2 = file_update_time(vmf->vma->vm_file); 8506 reserved = 1; 8507 } 8508 if (ret2) { 8509 ret = vmf_error(ret2); 8510 if (reserved) 8511 goto out; 8512 goto out_noreserve; 8513 } 8514 8515 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8516 again: 8517 down_read(&BTRFS_I(inode)->i_mmap_lock); 8518 lock_page(page); 8519 size = i_size_read(inode); 8520 8521 if ((page->mapping != inode->i_mapping) || 8522 (page_start >= size)) { 8523 /* page got truncated out from underneath us */ 8524 goto out_unlock; 8525 } 8526 wait_on_page_writeback(page); 8527 8528 lock_extent_bits(io_tree, page_start, page_end, &cached_state); 8529 ret2 = set_page_extent_mapped(page); 8530 if (ret2 < 0) { 8531 ret = vmf_error(ret2); 8532 unlock_extent_cached(io_tree, page_start, page_end, &cached_state); 8533 goto out_unlock; 8534 } 8535 8536 /* 8537 * we can't set the delalloc bits if there are pending ordered 8538 * extents. Drop our locks and wait for them to finish 8539 */ 8540 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8541 PAGE_SIZE); 8542 if (ordered) { 8543 unlock_extent_cached(io_tree, page_start, page_end, 8544 &cached_state); 8545 unlock_page(page); 8546 up_read(&BTRFS_I(inode)->i_mmap_lock); 8547 btrfs_start_ordered_extent(ordered, 1); 8548 btrfs_put_ordered_extent(ordered); 8549 goto again; 8550 } 8551 8552 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 8553 reserved_space = round_up(size - page_start, 8554 fs_info->sectorsize); 8555 if (reserved_space < PAGE_SIZE) { 8556 end = page_start + reserved_space - 1; 8557 btrfs_delalloc_release_space(BTRFS_I(inode), 8558 data_reserved, page_start, 8559 PAGE_SIZE - reserved_space, true); 8560 } 8561 } 8562 8563 /* 8564 * page_mkwrite gets called when the page is firstly dirtied after it's 8565 * faulted in, but write(2) could also dirty a page and set delalloc 8566 * bits, thus in this case for space account reason, we still need to 8567 * clear any delalloc bits within this page range since we have to 8568 * reserve data&meta space before lock_page() (see above comments). 
8569 */ 8570 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 8571 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8572 EXTENT_DEFRAG, 0, 0, &cached_state); 8573 8574 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, 8575 &cached_state); 8576 if (ret2) { 8577 unlock_extent_cached(io_tree, page_start, page_end, 8578 &cached_state); 8579 ret = VM_FAULT_SIGBUS; 8580 goto out_unlock; 8581 } 8582 8583 /* page is wholly or partially inside EOF */ 8584 if (page_start + PAGE_SIZE > size) 8585 zero_start = offset_in_page(size); 8586 else 8587 zero_start = PAGE_SIZE; 8588 8589 if (zero_start != PAGE_SIZE) { 8590 memzero_page(page, zero_start, PAGE_SIZE - zero_start); 8591 flush_dcache_page(page); 8592 } 8593 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE); 8594 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start); 8595 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start); 8596 8597 btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); 8598 8599 unlock_extent_cached(io_tree, page_start, page_end, &cached_state); 8600 up_read(&BTRFS_I(inode)->i_mmap_lock); 8601 8602 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8603 sb_end_pagefault(inode->i_sb); 8604 extent_changeset_free(data_reserved); 8605 return VM_FAULT_LOCKED; 8606 8607 out_unlock: 8608 unlock_page(page); 8609 up_read(&BTRFS_I(inode)->i_mmap_lock); 8610 out: 8611 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8612 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, 8613 reserved_space, (ret != 0)); 8614 out_noreserve: 8615 sb_end_pagefault(inode->i_sb); 8616 extent_changeset_free(data_reserved); 8617 return ret; 8618 } 8619 8620 static int btrfs_truncate(struct inode *inode, bool skip_writeback) 8621 { 8622 struct btrfs_truncate_control control = { 8623 .inode = BTRFS_I(inode), 8624 .ino = btrfs_ino(BTRFS_I(inode)), 8625 .min_type = BTRFS_EXTENT_DATA_KEY, 8626 .clear_extent_range = true, 8627 }; 8628 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8629 struct btrfs_root *root = BTRFS_I(inode)->root; 8630 struct btrfs_block_rsv *rsv; 8631 int ret; 8632 struct btrfs_trans_handle *trans; 8633 u64 mask = fs_info->sectorsize - 1; 8634 u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 8635 8636 if (!skip_writeback) { 8637 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 8638 (u64)-1); 8639 if (ret) 8640 return ret; 8641 } 8642 8643 /* 8644 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 8645 * things going on here: 8646 * 8647 * 1) We need to reserve space to update our inode. 8648 * 8649 * 2) We need to have something to cache all the space that is going to 8650 * be free'd up by the truncate operation, but also have some slack 8651 * space reserved in case it uses space during the truncate (thank you 8652 * very much snapshotting). 8653 * 8654 * And we need these to be separate. The fact is we can use a lot of 8655 * space doing the truncate, and we have no earthly idea how much space 8656 * we will use, so we need the truncate reservation to be separate so it 8657 * doesn't end up using space reserved for updating the inode. We also 8658 * need to be able to stop the transaction and start a new one, which 8659 * means we need to be able to update the inode several times, and we 8660 * have no idea of knowing how many times that will be, so we can't just 8661 * reserve 1 item for the entirety of the operation, so that has to be 8662 * done separately as well. 
8663 * 8664 * So that leaves us with 8665 * 8666 * 1) rsv - for the truncate reservation, which we will steal from the 8667 * transaction reservation. 8668 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 8669 * updating the inode. 8670 */ 8671 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 8672 if (!rsv) 8673 return -ENOMEM; 8674 rsv->size = min_size; 8675 rsv->failfast = 1; 8676 8677 /* 8678 * 1 for the truncate slack space 8679 * 1 for updating the inode. 8680 */ 8681 trans = btrfs_start_transaction(root, 2); 8682 if (IS_ERR(trans)) { 8683 ret = PTR_ERR(trans); 8684 goto out; 8685 } 8686 8687 /* Migrate the slack space for the truncate to our reserve */ 8688 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 8689 min_size, false); 8690 BUG_ON(ret); 8691 8692 trans->block_rsv = rsv; 8693 8694 while (1) { 8695 struct extent_state *cached_state = NULL; 8696 const u64 new_size = inode->i_size; 8697 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 8698 8699 control.new_size = new_size; 8700 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, 8701 &cached_state); 8702 /* 8703 * We want to drop from the next block forward in case this new 8704 * size is not block aligned since we will be keeping the last 8705 * block of the extent just the way it is. 8706 */ 8707 btrfs_drop_extent_cache(BTRFS_I(inode), 8708 ALIGN(new_size, fs_info->sectorsize), 8709 (u64)-1, 0); 8710 8711 ret = btrfs_truncate_inode_items(trans, root, &control); 8712 8713 inode_sub_bytes(inode, control.sub_bytes); 8714 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), control.last_size); 8715 8716 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, 8717 (u64)-1, &cached_state); 8718 8719 trans->block_rsv = &fs_info->trans_block_rsv; 8720 if (ret != -ENOSPC && ret != -EAGAIN) 8721 break; 8722 8723 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 8724 if (ret) 8725 break; 8726 8727 btrfs_end_transaction(trans); 8728 btrfs_btree_balance_dirty(fs_info); 8729 8730 trans = btrfs_start_transaction(root, 2); 8731 if (IS_ERR(trans)) { 8732 ret = PTR_ERR(trans); 8733 trans = NULL; 8734 break; 8735 } 8736 8737 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 8738 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 8739 rsv, min_size, false); 8740 BUG_ON(ret); /* shouldn't happen */ 8741 trans->block_rsv = rsv; 8742 } 8743 8744 /* 8745 * We can't call btrfs_truncate_block inside a trans handle as we could 8746 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 8747 * know we've truncated everything except the last little bit, and can 8748 * do btrfs_truncate_block and then update the disk_i_size. 
8749 */ 8750 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 8751 btrfs_end_transaction(trans); 8752 btrfs_btree_balance_dirty(fs_info); 8753 8754 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0); 8755 if (ret) 8756 goto out; 8757 trans = btrfs_start_transaction(root, 1); 8758 if (IS_ERR(trans)) { 8759 ret = PTR_ERR(trans); 8760 goto out; 8761 } 8762 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 8763 } 8764 8765 if (trans) { 8766 int ret2; 8767 8768 trans->block_rsv = &fs_info->trans_block_rsv; 8769 ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode)); 8770 if (ret2 && !ret) 8771 ret = ret2; 8772 8773 ret2 = btrfs_end_transaction(trans); 8774 if (ret2 && !ret) 8775 ret = ret2; 8776 btrfs_btree_balance_dirty(fs_info); 8777 } 8778 out: 8779 btrfs_free_block_rsv(fs_info, rsv); 8780 /* 8781 * So if we truncate and then write and fsync we normally would just 8782 * write the extents that changed, which is a problem if we need to 8783 * first truncate that entire inode. So set this flag so we write out 8784 * all of the extents in the inode to the sync log so we're completely 8785 * safe. 8786 * 8787 * If no extents were dropped or trimmed we don't need to force the next 8788 * fsync to truncate all the inode's items from the log and re-log them 8789 * all. This means the truncate operation did not change the file size, 8790 * or changed it to a smaller size but there was only an implicit hole 8791 * between the old i_size and the new i_size, and there were no prealloc 8792 * extents beyond i_size to drop. 8793 */ 8794 if (control.extents_found > 0) 8795 btrfs_set_inode_full_sync(BTRFS_I(inode)); 8796 8797 return ret; 8798 } 8799 8800 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, 8801 struct inode *dir) 8802 { 8803 struct inode *inode; 8804 8805 inode = new_inode(dir->i_sb); 8806 if (inode) { 8807 /* 8808 * Subvolumes don't inherit the sgid bit or the parent's gid if 8809 * the parent's sgid bit is set. This is probably a bug. 
8810 */ 8811 inode_init_owner(mnt_userns, inode, NULL, 8812 S_IFDIR | (~current_umask() & S_IRWXUGO)); 8813 inode->i_op = &btrfs_dir_inode_operations; 8814 inode->i_fop = &btrfs_dir_file_operations; 8815 } 8816 return inode; 8817 } 8818 8819 struct inode *btrfs_alloc_inode(struct super_block *sb) 8820 { 8821 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8822 struct btrfs_inode *ei; 8823 struct inode *inode; 8824 8825 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 8826 if (!ei) 8827 return NULL; 8828 8829 ei->root = NULL; 8830 ei->generation = 0; 8831 ei->last_trans = 0; 8832 ei->last_sub_trans = 0; 8833 ei->logged_trans = 0; 8834 ei->delalloc_bytes = 0; 8835 ei->new_delalloc_bytes = 0; 8836 ei->defrag_bytes = 0; 8837 ei->disk_i_size = 0; 8838 ei->flags = 0; 8839 ei->ro_flags = 0; 8840 ei->csum_bytes = 0; 8841 ei->index_cnt = (u64)-1; 8842 ei->dir_index = 0; 8843 ei->last_unlink_trans = 0; 8844 ei->last_reflink_trans = 0; 8845 ei->last_log_commit = 0; 8846 8847 spin_lock_init(&ei->lock); 8848 ei->outstanding_extents = 0; 8849 if (sb->s_magic != BTRFS_TEST_MAGIC) 8850 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8851 BTRFS_BLOCK_RSV_DELALLOC); 8852 ei->runtime_flags = 0; 8853 ei->prop_compress = BTRFS_COMPRESS_NONE; 8854 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8855 8856 ei->delayed_node = NULL; 8857 8858 ei->i_otime.tv_sec = 0; 8859 ei->i_otime.tv_nsec = 0; 8860 8861 inode = &ei->vfs_inode; 8862 extent_map_tree_init(&ei->extent_tree); 8863 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode); 8864 extent_io_tree_init(fs_info, &ei->io_failure_tree, 8865 IO_TREE_INODE_IO_FAILURE, inode); 8866 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8867 IO_TREE_INODE_FILE_EXTENT, inode); 8868 ei->io_tree.track_uptodate = true; 8869 ei->io_failure_tree.track_uptodate = true; 8870 atomic_set(&ei->sync_writers, 0); 8871 mutex_init(&ei->log_mutex); 8872 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8873 INIT_LIST_HEAD(&ei->delalloc_inodes); 8874 INIT_LIST_HEAD(&ei->delayed_iput); 8875 RB_CLEAR_NODE(&ei->rb_node); 8876 init_rwsem(&ei->i_mmap_lock); 8877 8878 return inode; 8879 } 8880 8881 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8882 void btrfs_test_destroy_inode(struct inode *inode) 8883 { 8884 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); 8885 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8886 } 8887 #endif 8888 8889 void btrfs_free_inode(struct inode *inode) 8890 { 8891 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8892 } 8893 8894 void btrfs_destroy_inode(struct inode *vfs_inode) 8895 { 8896 struct btrfs_ordered_extent *ordered; 8897 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8898 struct btrfs_root *root = inode->root; 8899 8900 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8901 WARN_ON(vfs_inode->i_data.nrpages); 8902 WARN_ON(inode->block_rsv.reserved); 8903 WARN_ON(inode->block_rsv.size); 8904 WARN_ON(inode->outstanding_extents); 8905 if (!S_ISDIR(vfs_inode->i_mode)) { 8906 WARN_ON(inode->delalloc_bytes); 8907 WARN_ON(inode->new_delalloc_bytes); 8908 } 8909 WARN_ON(inode->csum_bytes); 8910 WARN_ON(inode->defrag_bytes); 8911 8912 /* 8913 * This can happen where we create an inode, but somebody else also 8914 * created the same inode and we need to destroy the one we already 8915 * created. 
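* In that case the root was never set on this inode, so there is nothing else to tear down below.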
8916 */ 8917 if (!root) 8918 return; 8919 8920 while (1) { 8921 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8922 if (!ordered) 8923 break; 8924 else { 8925 btrfs_err(root->fs_info, 8926 "found ordered extent %llu %llu on inode cleanup", 8927 ordered->file_offset, ordered->num_bytes); 8928 btrfs_remove_ordered_extent(inode, ordered); /* Put twice: once for the lookup ref above and once for the tree's ref, since btrfs_finish_ordered_io() never ran for this extent. */ 8929 btrfs_put_ordered_extent(ordered); 8930 btrfs_put_ordered_extent(ordered); 8931 } 8932 } 8933 btrfs_qgroup_check_reserved_leak(inode); 8934 inode_tree_del(inode); 8935 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 8936 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 8937 btrfs_put_root(inode->root); 8938 } 8939 8940 int btrfs_drop_inode(struct inode *inode) 8941 { 8942 struct btrfs_root *root = BTRFS_I(inode)->root; 8943 8944 if (root == NULL) 8945 return 1; 8946 8947 /* The snapshot/subvolume tree is being deleted. */ 8948 if (btrfs_root_refs(&root->root_item) == 0) 8949 return 1; 8950 else 8951 return generic_drop_inode(inode); 8952 } 8953 8954 static void init_once(void *foo) 8955 { 8956 struct btrfs_inode *ei = foo; 8957 8958 inode_init_once(&ei->vfs_inode); 8959 } 8960 8961 void __cold btrfs_destroy_cachep(void) 8962 { 8963 /* 8964 * Make sure all delayed rcu free inodes are flushed before we 8965 * destroy cache. 8966 */ 8967 rcu_barrier(); 8968 bioset_exit(&btrfs_dio_bioset); 8969 kmem_cache_destroy(btrfs_inode_cachep); 8970 kmem_cache_destroy(btrfs_trans_handle_cachep); 8971 kmem_cache_destroy(btrfs_path_cachep); 8972 kmem_cache_destroy(btrfs_free_space_cachep); 8973 kmem_cache_destroy(btrfs_free_space_bitmap_cachep); 8974 } 8975 8976 int __init btrfs_init_cachep(void) 8977 { 8978 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 8979 sizeof(struct btrfs_inode), 0, 8980 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, 8981 init_once); 8982 if (!btrfs_inode_cachep) 8983 goto fail; 8984 8985 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 8986 sizeof(struct btrfs_trans_handle), 0, 8987 SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); 8988 if (!btrfs_trans_handle_cachep) 8989 goto fail; 8990 8991 btrfs_path_cachep = kmem_cache_create("btrfs_path", 8992 sizeof(struct btrfs_path), 0, 8993 SLAB_MEM_SPREAD, NULL); 8994 if (!btrfs_path_cachep) 8995 goto fail; 8996 8997 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 8998 sizeof(struct btrfs_free_space), 0, 8999 SLAB_MEM_SPREAD, NULL); 9000 if (!btrfs_free_space_cachep) 9001 goto fail; 9002 9003 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", 9004 PAGE_SIZE, PAGE_SIZE, 9005 SLAB_MEM_SPREAD, NULL); 9006 if (!btrfs_free_space_bitmap_cachep) 9007 goto fail; 9008 9009 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE, 9010 offsetof(struct btrfs_dio_private, bio), 9011 BIOSET_NEED_BVECS)) 9012 goto fail; 9013 9014 return 0; 9015 fail: 9016 btrfs_destroy_cachep(); 9017 return -ENOMEM; 9018 } 9019 9020 static int btrfs_getattr(struct user_namespace *mnt_userns, 9021 const struct path *path, struct kstat *stat, 9022 u32 request_mask, unsigned int flags) 9023 { 9024 u64 delalloc_bytes; 9025 u64 inode_bytes; 9026 struct inode *inode = d_inode(path->dentry); 9027 u32 blocksize = inode->i_sb->s_blocksize; 9028 u32 bi_flags = BTRFS_I(inode)->flags; 9029 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 9030 9031 stat->result_mask |= STATX_BTIME; 9032 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; 9033 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; 9034 if (bi_flags & BTRFS_INODE_APPEND) 9035 stat->attributes |= STATX_ATTR_APPEND;
9036 if (bi_flags & BTRFS_INODE_COMPRESS) 9037 stat->attributes |= STATX_ATTR_COMPRESSED; 9038 if (bi_flags & BTRFS_INODE_IMMUTABLE) 9039 stat->attributes |= STATX_ATTR_IMMUTABLE; 9040 if (bi_flags & BTRFS_INODE_NODUMP) 9041 stat->attributes |= STATX_ATTR_NODUMP; 9042 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 9043 stat->attributes |= STATX_ATTR_VERITY; 9044 9045 stat->attributes_mask |= (STATX_ATTR_APPEND | 9046 STATX_ATTR_COMPRESSED | 9047 STATX_ATTR_IMMUTABLE | 9048 STATX_ATTR_NODUMP); 9049 9050 generic_fillattr(mnt_userns, inode, stat); 9051 stat->dev = BTRFS_I(inode)->root->anon_dev; 9052 9053 spin_lock(&BTRFS_I(inode)->lock); 9054 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 9055 inode_bytes = inode_get_bytes(inode); 9056 spin_unlock(&BTRFS_I(inode)->lock); 9057 stat->blocks = (ALIGN(inode_bytes, blocksize) + 9058 ALIGN(delalloc_bytes, blocksize)) >> 9; 9059 return 0; 9060 } 9061 9062 static int btrfs_rename_exchange(struct inode *old_dir, 9063 struct dentry *old_dentry, 9064 struct inode *new_dir, 9065 struct dentry *new_dentry) 9066 { 9067 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9068 struct btrfs_trans_handle *trans; 9069 unsigned int trans_num_items; 9070 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9071 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9072 struct inode *new_inode = new_dentry->d_inode; 9073 struct inode *old_inode = old_dentry->d_inode; 9074 struct timespec64 ctime = current_time(old_inode); 9075 struct btrfs_rename_ctx old_rename_ctx; 9076 struct btrfs_rename_ctx new_rename_ctx; 9077 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9078 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 9079 u64 old_idx = 0; 9080 u64 new_idx = 0; 9081 int ret; 9082 int ret2; 9083 bool need_abort = false; 9084 9085 /* 9086 * For non-subvolumes allow exchange only within one subvolume, in the 9087 * same inode namespace. Two subvolumes (represented as directory) can 9088 * be exchanged as they're a logical link and have a fixed inode number. 9089 */ 9090 if (root != dest && 9091 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 9092 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 9093 return -EXDEV; 9094 9095 /* close the race window with snapshot create/destroy ioctl */ 9096 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 9097 new_ino == BTRFS_FIRST_FREE_OBJECTID) 9098 down_read(&fs_info->subvol_sem); 9099 9100 /* 9101 * For each inode: 9102 * 1 to remove old dir item 9103 * 1 to remove old dir index 9104 * 1 to add new dir item 9105 * 1 to add new dir index 9106 * 1 to update parent inode 9107 * 9108 * If the parents are the same, we only need to account for one 9109 */ 9110 trans_num_items = (old_dir == new_dir ? 
9 : 10); 9111 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9112 /* 9113 * 1 to remove old root ref 9114 * 1 to remove old root backref 9115 * 1 to add new root ref 9116 * 1 to add new root backref 9117 */ 9118 trans_num_items += 4; 9119 } else { 9120 /* 9121 * 1 to update inode item 9122 * 1 to remove old inode ref 9123 * 1 to add new inode ref 9124 */ 9125 trans_num_items += 3; 9126 } 9127 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9128 trans_num_items += 4; 9129 else 9130 trans_num_items += 3; 9131 trans = btrfs_start_transaction(root, trans_num_items); 9132 if (IS_ERR(trans)) { 9133 ret = PTR_ERR(trans); 9134 goto out_notrans; 9135 } 9136 9137 if (dest != root) { 9138 ret = btrfs_record_root_in_trans(trans, dest); 9139 if (ret) 9140 goto out_fail; 9141 } 9142 9143 /* 9144 * We need to find a free sequence number both in the source and 9145 * in the destination directory for the exchange. 9146 */ 9147 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 9148 if (ret) 9149 goto out_fail; 9150 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 9151 if (ret) 9152 goto out_fail; 9153 9154 BTRFS_I(old_inode)->dir_index = 0ULL; 9155 BTRFS_I(new_inode)->dir_index = 0ULL; 9156 9157 /* Reference for the source. */ 9158 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9159 /* force full log commit if subvolume involved. */ 9160 btrfs_set_log_full_commit(trans); 9161 } else { 9162 ret = btrfs_insert_inode_ref(trans, dest, 9163 new_dentry->d_name.name, 9164 new_dentry->d_name.len, 9165 old_ino, 9166 btrfs_ino(BTRFS_I(new_dir)), 9167 old_idx); 9168 if (ret) 9169 goto out_fail; 9170 need_abort = true; 9171 } 9172 9173 /* And now for the dest. */ 9174 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9175 /* force full log commit if subvolume involved. */ 9176 btrfs_set_log_full_commit(trans); 9177 } else { 9178 ret = btrfs_insert_inode_ref(trans, root, 9179 old_dentry->d_name.name, 9180 old_dentry->d_name.len, 9181 new_ino, 9182 btrfs_ino(BTRFS_I(old_dir)), 9183 new_idx); 9184 if (ret) { 9185 if (need_abort) 9186 btrfs_abort_transaction(trans, ret); 9187 goto out_fail; 9188 } 9189 } 9190 9191 /* Update inode version and ctime/mtime. 
*/ 9192 inode_inc_iversion(old_dir); 9193 inode_inc_iversion(new_dir); 9194 inode_inc_iversion(old_inode); 9195 inode_inc_iversion(new_inode); 9196 old_dir->i_ctime = old_dir->i_mtime = ctime; 9197 new_dir->i_ctime = new_dir->i_mtime = ctime; 9198 old_inode->i_ctime = ctime; 9199 new_inode->i_ctime = ctime; 9200 9201 if (old_dentry->d_parent != new_dentry->d_parent) { 9202 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9203 BTRFS_I(old_inode), 1); 9204 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 9205 BTRFS_I(new_inode), 1); 9206 } 9207 9208 /* src is a subvolume */ 9209 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9210 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); 9211 } else { /* src is an inode */ 9212 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9213 BTRFS_I(old_dentry->d_inode), 9214 old_dentry->d_name.name, 9215 old_dentry->d_name.len, 9216 &old_rename_ctx); 9217 if (!ret) 9218 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9219 } 9220 if (ret) { 9221 btrfs_abort_transaction(trans, ret); 9222 goto out_fail; 9223 } 9224 9225 /* dest is a subvolume */ 9226 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9227 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); 9228 } else { /* dest is an inode */ 9229 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9230 BTRFS_I(new_dentry->d_inode), 9231 new_dentry->d_name.name, 9232 new_dentry->d_name.len, 9233 &new_rename_ctx); 9234 if (!ret) 9235 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); 9236 } 9237 if (ret) { 9238 btrfs_abort_transaction(trans, ret); 9239 goto out_fail; 9240 } 9241 9242 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9243 new_dentry->d_name.name, 9244 new_dentry->d_name.len, 0, old_idx); 9245 if (ret) { 9246 btrfs_abort_transaction(trans, ret); 9247 goto out_fail; 9248 } 9249 9250 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 9251 old_dentry->d_name.name, 9252 old_dentry->d_name.len, 0, new_idx); 9253 if (ret) { 9254 btrfs_abort_transaction(trans, ret); 9255 goto out_fail; 9256 } 9257 9258 if (old_inode->i_nlink == 1) 9259 BTRFS_I(old_inode)->dir_index = old_idx; 9260 if (new_inode->i_nlink == 1) 9261 BTRFS_I(new_inode)->dir_index = new_idx; 9262 9263 /* 9264 * Now pin the logs of the roots. We do it to ensure that no other task 9265 * can sync the logs while we are in progress with the rename, because 9266 * that could result in an inconsistency in case any of the inodes that 9267 * are part of this rename operation were logged before. 9268 */ 9269 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9270 btrfs_pin_log_trans(root); 9271 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9272 btrfs_pin_log_trans(dest); 9273 9274 /* Do the log updates for all inodes. */ 9275 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9276 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9277 old_rename_ctx.index, new_dentry->d_parent); 9278 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9279 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 9280 new_rename_ctx.index, old_dentry->d_parent); 9281 9282 /* Now unpin the logs. */ 9283 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9284 btrfs_end_log_trans(root); 9285 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 9286 btrfs_end_log_trans(dest); 9287 out_fail: 9288 ret2 = btrfs_end_transaction(trans); 9289 ret = ret ? 
ret : ret2; 9290 out_notrans: 9291 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 9292 old_ino == BTRFS_FIRST_FREE_OBJECTID) 9293 up_read(&fs_info->subvol_sem); 9294 9295 return ret; 9296 } 9297 9298 static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns, 9299 struct inode *dir) 9300 { 9301 struct inode *inode; 9302 9303 inode = new_inode(dir->i_sb); 9304 if (inode) { 9305 inode_init_owner(mnt_userns, inode, dir, 9306 S_IFCHR | WHITEOUT_MODE); 9307 inode->i_op = &btrfs_special_inode_operations; 9308 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 9309 } 9310 return inode; 9311 } 9312 9313 static int btrfs_rename(struct user_namespace *mnt_userns, 9314 struct inode *old_dir, struct dentry *old_dentry, 9315 struct inode *new_dir, struct dentry *new_dentry, 9316 unsigned int flags) 9317 { 9318 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9319 struct btrfs_new_inode_args whiteout_args = { 9320 .dir = old_dir, 9321 .dentry = old_dentry, 9322 }; 9323 struct btrfs_trans_handle *trans; 9324 unsigned int trans_num_items; 9325 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9326 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9327 struct inode *new_inode = d_inode(new_dentry); 9328 struct inode *old_inode = d_inode(old_dentry); 9329 struct btrfs_rename_ctx rename_ctx; 9330 u64 index = 0; 9331 int ret; 9332 int ret2; 9333 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9334 9335 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9336 return -EPERM; 9337 9338 /* we only allow rename subvolume link between subvolumes */ 9339 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9340 return -EXDEV; 9341 9342 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9343 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 9344 return -ENOTEMPTY; 9345 9346 if (S_ISDIR(old_inode->i_mode) && new_inode && 9347 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9348 return -ENOTEMPTY; 9349 9350 9351 /* check for collisions, even if the name isn't there */ 9352 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9353 new_dentry->d_name.name, 9354 new_dentry->d_name.len); 9355 9356 if (ret) { 9357 if (ret == -EEXIST) { 9358 /* we shouldn't get 9359 * eexist without a new_inode */ 9360 if (WARN_ON(!new_inode)) { 9361 return ret; 9362 } 9363 } else { 9364 /* maybe -EOVERFLOW */ 9365 return ret; 9366 } 9367 } 9368 ret = 0; 9369 9370 /* 9371 * we're using rename to replace one file with another. Start IO on it 9372 * now so we don't add too much work to the end of the transaction 9373 */ 9374 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9375 filemap_flush(old_inode->i_mapping); 9376 9377 if (flags & RENAME_WHITEOUT) { 9378 whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir); 9379 if (!whiteout_args.inode) 9380 return -ENOMEM; 9381 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 9382 if (ret) 9383 goto out_whiteout_inode; 9384 } else { 9385 /* 1 to update the old parent inode. 
*/ 9386 trans_num_items = 1; 9387 } 9388 9389 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9390 /* Close the race window with snapshot create/destroy ioctl */ 9391 down_read(&fs_info->subvol_sem); 9392 /* 9393 * 1 to remove old root ref 9394 * 1 to remove old root backref 9395 * 1 to add new root ref 9396 * 1 to add new root backref 9397 */ 9398 trans_num_items += 4; 9399 } else { 9400 /* 9401 * 1 to update inode 9402 * 1 to remove old inode ref 9403 * 1 to add new inode ref 9404 */ 9405 trans_num_items += 3; 9406 } 9407 /* 9408 * 1 to remove old dir item 9409 * 1 to remove old dir index 9410 * 1 to add new dir item 9411 * 1 to add new dir index 9412 */ 9413 trans_num_items += 4; 9414 /* 1 to update new parent inode if it's not the same as the old parent */ 9415 if (new_dir != old_dir) 9416 trans_num_items++; 9417 if (new_inode) { 9418 /* 9419 * 1 to update inode 9420 * 1 to remove inode ref 9421 * 1 to remove dir item 9422 * 1 to remove dir index 9423 * 1 to possibly add orphan item 9424 */ 9425 trans_num_items += 5; 9426 } 9427 trans = btrfs_start_transaction(root, trans_num_items); 9428 if (IS_ERR(trans)) { 9429 ret = PTR_ERR(trans); 9430 goto out_notrans; 9431 } 9432 9433 if (dest != root) { 9434 ret = btrfs_record_root_in_trans(trans, dest); 9435 if (ret) 9436 goto out_fail; 9437 } 9438 9439 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9440 if (ret) 9441 goto out_fail; 9442 9443 BTRFS_I(old_inode)->dir_index = 0ULL; 9444 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9445 /* force full log commit if subvolume involved. */ 9446 btrfs_set_log_full_commit(trans); 9447 } else { 9448 ret = btrfs_insert_inode_ref(trans, dest, 9449 new_dentry->d_name.name, 9450 new_dentry->d_name.len, 9451 old_ino, 9452 btrfs_ino(BTRFS_I(new_dir)), index); 9453 if (ret) 9454 goto out_fail; 9455 } 9456 9457 inode_inc_iversion(old_dir); 9458 inode_inc_iversion(new_dir); 9459 inode_inc_iversion(old_inode); 9460 old_dir->i_ctime = old_dir->i_mtime = 9461 new_dir->i_ctime = new_dir->i_mtime = 9462 old_inode->i_ctime = current_time(old_dir); 9463 9464 if (old_dentry->d_parent != new_dentry->d_parent) 9465 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9466 BTRFS_I(old_inode), 1); 9467 9468 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9469 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry); 9470 } else { 9471 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9472 BTRFS_I(d_inode(old_dentry)), 9473 old_dentry->d_name.name, 9474 old_dentry->d_name.len, 9475 &rename_ctx); 9476 if (!ret) 9477 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9478 } 9479 if (ret) { 9480 btrfs_abort_transaction(trans, ret); 9481 goto out_fail; 9482 } 9483 9484 if (new_inode) { 9485 inode_inc_iversion(new_inode); 9486 new_inode->i_ctime = current_time(new_inode); 9487 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9488 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9489 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry); 9490 BUG_ON(new_inode->i_nlink == 0); 9491 } else { 9492 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9493 BTRFS_I(d_inode(new_dentry)), 9494 new_dentry->d_name.name, 9495 new_dentry->d_name.len); 9496 } 9497 if (!ret && new_inode->i_nlink == 0) 9498 ret = btrfs_orphan_add(trans, 9499 BTRFS_I(d_inode(new_dentry))); 9500 if (ret) { 9501 btrfs_abort_transaction(trans, ret); 9502 goto out_fail; 9503 } 9504 } 9505 9506 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9507 new_dentry->d_name.name, 9508 new_dentry->d_name.len, 0, index); 9509 if (ret) { 9510 
btrfs_abort_transaction(trans, ret); 9511 goto out_fail; 9512 } 9513 9514 if (old_inode->i_nlink == 1) 9515 BTRFS_I(old_inode)->dir_index = index; 9516 9517 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9518 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9519 rename_ctx.index, new_dentry->d_parent); 9520 9521 if (flags & RENAME_WHITEOUT) { 9522 ret = btrfs_create_new_inode(trans, &whiteout_args); 9523 if (ret) { 9524 btrfs_abort_transaction(trans, ret); 9525 goto out_fail; 9526 } else { 9527 unlock_new_inode(whiteout_args.inode); 9528 iput(whiteout_args.inode); 9529 whiteout_args.inode = NULL; 9530 } 9531 } 9532 out_fail: 9533 ret2 = btrfs_end_transaction(trans); 9534 ret = ret ? ret : ret2; 9535 out_notrans: 9536 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9537 up_read(&fs_info->subvol_sem); 9538 if (flags & RENAME_WHITEOUT) 9539 btrfs_new_inode_args_destroy(&whiteout_args); 9540 out_whiteout_inode: 9541 if (flags & RENAME_WHITEOUT) 9542 iput(whiteout_args.inode); 9543 return ret; 9544 } 9545 9546 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir, 9547 struct dentry *old_dentry, struct inode *new_dir, 9548 struct dentry *new_dentry, unsigned int flags) 9549 { 9550 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9551 return -EINVAL; 9552 9553 if (flags & RENAME_EXCHANGE) 9554 return btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9555 new_dentry); 9556 9557 return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir, 9558 new_dentry, flags); 9559 } 9560 9561 struct btrfs_delalloc_work { 9562 struct inode *inode; 9563 struct completion completion; 9564 struct list_head list; 9565 struct btrfs_work work; 9566 }; 9567 9568 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9569 { 9570 struct btrfs_delalloc_work *delalloc_work; 9571 struct inode *inode; 9572 9573 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9574 work); 9575 inode = delalloc_work->inode; 9576 filemap_flush(inode->i_mapping); 9577 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9578 &BTRFS_I(inode)->runtime_flags)) 9579 filemap_flush(inode->i_mapping); 9580 9581 iput(inode); 9582 complete(&delalloc_work->completion); 9583 } 9584 9585 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9586 { 9587 struct btrfs_delalloc_work *work; 9588 9589 work = kmalloc(sizeof(*work), GFP_NOFS); 9590 if (!work) 9591 return NULL; 9592 9593 init_completion(&work->completion); 9594 INIT_LIST_HEAD(&work->list); 9595 work->inode = inode; 9596 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9597 9598 return work; 9599 } 9600 9601 /* 9602 * some fairly slow code that needs optimization. This walks the list 9603 * of all the inodes with pending delalloc and forces them to disk. 
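*
* Roughly: for a full flush (wbc->nr_to_write == LONG_MAX) each inode gets
* its own btrfs_delalloc_work item queued on fs_info->flush_workers and we
* wait for all of them at the end; otherwise writeback is done inline with
* filemap_fdatawrite_wbc() until the wbc budget (nr_to_write) runs out.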
9604 */ 9605 static int start_delalloc_inodes(struct btrfs_root *root, 9606 struct writeback_control *wbc, bool snapshot, 9607 bool in_reclaim_context) 9608 { 9609 struct btrfs_inode *binode; 9610 struct inode *inode; 9611 struct btrfs_delalloc_work *work, *next; 9612 struct list_head works; 9613 struct list_head splice; 9614 int ret = 0; 9615 bool full_flush = wbc->nr_to_write == LONG_MAX; 9616 9617 INIT_LIST_HEAD(&works); 9618 INIT_LIST_HEAD(&splice); 9619 9620 mutex_lock(&root->delalloc_mutex); 9621 spin_lock(&root->delalloc_lock); 9622 list_splice_init(&root->delalloc_inodes, &splice); 9623 while (!list_empty(&splice)) { 9624 binode = list_entry(splice.next, struct btrfs_inode, 9625 delalloc_inodes); 9626 9627 list_move_tail(&binode->delalloc_inodes, 9628 &root->delalloc_inodes); 9629 9630 if (in_reclaim_context && 9631 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9632 continue; 9633 9634 inode = igrab(&binode->vfs_inode); 9635 if (!inode) { 9636 cond_resched_lock(&root->delalloc_lock); 9637 continue; 9638 } 9639 spin_unlock(&root->delalloc_lock); 9640 9641 if (snapshot) 9642 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9643 &binode->runtime_flags); 9644 if (full_flush) { 9645 work = btrfs_alloc_delalloc_work(inode); 9646 if (!work) { 9647 iput(inode); 9648 ret = -ENOMEM; 9649 goto out; 9650 } 9651 list_add_tail(&work->list, &works); 9652 btrfs_queue_work(root->fs_info->flush_workers, 9653 &work->work); 9654 } else { 9655 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9656 btrfs_add_delayed_iput(inode); 9657 if (ret || wbc->nr_to_write <= 0) 9658 goto out; 9659 } 9660 cond_resched(); 9661 spin_lock(&root->delalloc_lock); 9662 } 9663 spin_unlock(&root->delalloc_lock); 9664 9665 out: 9666 list_for_each_entry_safe(work, next, &works, list) { 9667 list_del_init(&work->list); 9668 wait_for_completion(&work->completion); 9669 kfree(work); 9670 } 9671 9672 if (!list_empty(&splice)) { 9673 spin_lock(&root->delalloc_lock); 9674 list_splice_tail(&splice, &root->delalloc_inodes); 9675 spin_unlock(&root->delalloc_lock); 9676 } 9677 mutex_unlock(&root->delalloc_mutex); 9678 return ret; 9679 } 9680 9681 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9682 { 9683 struct writeback_control wbc = { 9684 .nr_to_write = LONG_MAX, 9685 .sync_mode = WB_SYNC_NONE, 9686 .range_start = 0, 9687 .range_end = LLONG_MAX, 9688 }; 9689 struct btrfs_fs_info *fs_info = root->fs_info; 9690 9691 if (BTRFS_FS_ERROR(fs_info)) 9692 return -EROFS; 9693 9694 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9695 } 9696 9697 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9698 bool in_reclaim_context) 9699 { 9700 struct writeback_control wbc = { 9701 .nr_to_write = nr, 9702 .sync_mode = WB_SYNC_NONE, 9703 .range_start = 0, 9704 .range_end = LLONG_MAX, 9705 }; 9706 struct btrfs_root *root; 9707 struct list_head splice; 9708 int ret; 9709 9710 if (BTRFS_FS_ERROR(fs_info)) 9711 return -EROFS; 9712 9713 INIT_LIST_HEAD(&splice); 9714 9715 mutex_lock(&fs_info->delalloc_root_mutex); 9716 spin_lock(&fs_info->delalloc_root_lock); 9717 list_splice_init(&fs_info->delalloc_roots, &splice); 9718 while (!list_empty(&splice)) { 9719 /* 9720 * Reset nr_to_write here so we know that we're doing a full 9721 * flush. 
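*
* For a bounded flush (nr != LONG_MAX) the budget left in wbc.nr_to_write
* carries over from one root to the next, and the loop below stops once it
* drops to zero.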
9722 */ 9723 if (nr == LONG_MAX) 9724 wbc.nr_to_write = LONG_MAX; 9725 9726 root = list_first_entry(&splice, struct btrfs_root, 9727 delalloc_root); 9728 root = btrfs_grab_root(root); 9729 BUG_ON(!root); 9730 list_move_tail(&root->delalloc_root, 9731 &fs_info->delalloc_roots); 9732 spin_unlock(&fs_info->delalloc_root_lock); 9733 9734 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9735 btrfs_put_root(root); 9736 if (ret < 0 || wbc.nr_to_write <= 0) 9737 goto out; 9738 spin_lock(&fs_info->delalloc_root_lock); 9739 } 9740 spin_unlock(&fs_info->delalloc_root_lock); 9741 9742 ret = 0; 9743 out: 9744 if (!list_empty(&splice)) { 9745 spin_lock(&fs_info->delalloc_root_lock); 9746 list_splice_tail(&splice, &fs_info->delalloc_roots); 9747 spin_unlock(&fs_info->delalloc_root_lock); 9748 } 9749 mutex_unlock(&fs_info->delalloc_root_mutex); 9750 return ret; 9751 } 9752 9753 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, 9754 struct dentry *dentry, const char *symname) 9755 { 9756 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9757 struct btrfs_trans_handle *trans; 9758 struct btrfs_root *root = BTRFS_I(dir)->root; 9759 struct btrfs_path *path; 9760 struct btrfs_key key; 9761 struct inode *inode; 9762 struct btrfs_new_inode_args new_inode_args = { 9763 .dir = dir, 9764 .dentry = dentry, 9765 }; 9766 unsigned int trans_num_items; 9767 int err; 9768 int name_len; 9769 int datasize; 9770 unsigned long ptr; 9771 struct btrfs_file_extent_item *ei; 9772 struct extent_buffer *leaf; 9773 9774 name_len = strlen(symname); 9775 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9776 return -ENAMETOOLONG; 9777 9778 inode = new_inode(dir->i_sb); 9779 if (!inode) 9780 return -ENOMEM; 9781 inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO); 9782 inode->i_op = &btrfs_symlink_inode_operations; 9783 inode_nohighmem(inode); 9784 inode->i_mapping->a_ops = &btrfs_aops; 9785 btrfs_i_size_write(BTRFS_I(inode), name_len); 9786 inode_set_bytes(inode, name_len); 9787 9788 new_inode_args.inode = inode; 9789 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9790 if (err) 9791 goto out_inode; 9792 /* 1 additional item for the inline extent */ 9793 trans_num_items++; 9794 9795 trans = btrfs_start_transaction(root, trans_num_items); 9796 if (IS_ERR(trans)) { 9797 err = PTR_ERR(trans); 9798 goto out_new_inode_args; 9799 } 9800 9801 err = btrfs_create_new_inode(trans, &new_inode_args); 9802 if (err) 9803 goto out; 9804 9805 path = btrfs_alloc_path(); 9806 if (!path) { 9807 err = -ENOMEM; 9808 btrfs_abort_transaction(trans, err); 9809 discard_new_inode(inode); 9810 inode = NULL; 9811 goto out; 9812 } 9813 key.objectid = btrfs_ino(BTRFS_I(inode)); 9814 key.offset = 0; 9815 key.type = BTRFS_EXTENT_DATA_KEY; 9816 datasize = btrfs_file_extent_calc_inline_size(name_len); 9817 err = btrfs_insert_empty_item(trans, root, path, &key, 9818 datasize); 9819 if (err) { 9820 btrfs_abort_transaction(trans, err); 9821 btrfs_free_path(path); 9822 discard_new_inode(inode); 9823 inode = NULL; 9824 goto out; 9825 } 9826 leaf = path->nodes[0]; 9827 ei = btrfs_item_ptr(leaf, path->slots[0], 9828 struct btrfs_file_extent_item); 9829 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9830 btrfs_set_file_extent_type(leaf, ei, 9831 BTRFS_FILE_EXTENT_INLINE); 9832 btrfs_set_file_extent_encryption(leaf, ei, 0); 9833 btrfs_set_file_extent_compression(leaf, ei, 0); 9834 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9835 btrfs_set_file_extent_ram_bytes(leaf, ei, 
name_len); 9836 9837 ptr = btrfs_file_extent_inline_start(ei); 9838 write_extent_buffer(leaf, symname, ptr, name_len); 9839 btrfs_mark_buffer_dirty(leaf); 9840 btrfs_free_path(path); 9841 9842 d_instantiate_new(dentry, inode); 9843 err = 0; 9844 out: 9845 btrfs_end_transaction(trans); 9846 btrfs_btree_balance_dirty(fs_info); 9847 out_new_inode_args: 9848 btrfs_new_inode_args_destroy(&new_inode_args); 9849 out_inode: 9850 if (err) 9851 iput(inode); 9852 return err; 9853 } 9854 9855 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9856 struct btrfs_trans_handle *trans_in, 9857 struct btrfs_inode *inode, 9858 struct btrfs_key *ins, 9859 u64 file_offset) 9860 { 9861 struct btrfs_file_extent_item stack_fi; 9862 struct btrfs_replace_extent_info extent_info; 9863 struct btrfs_trans_handle *trans = trans_in; 9864 struct btrfs_path *path; 9865 u64 start = ins->objectid; 9866 u64 len = ins->offset; 9867 int qgroup_released; 9868 int ret; 9869 9870 memset(&stack_fi, 0, sizeof(stack_fi)); 9871 9872 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9873 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9874 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9875 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9876 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9877 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9878 /* Encryption and other encoding is reserved and all 0 */ 9879 9880 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9881 if (qgroup_released < 0) 9882 return ERR_PTR(qgroup_released); 9883 9884 if (trans) { 9885 ret = insert_reserved_file_extent(trans, inode, 9886 file_offset, &stack_fi, 9887 true, qgroup_released); 9888 if (ret) 9889 goto free_qgroup; 9890 return trans; 9891 } 9892 9893 extent_info.disk_offset = start; 9894 extent_info.disk_len = len; 9895 extent_info.data_offset = 0; 9896 extent_info.data_len = len; 9897 extent_info.file_offset = file_offset; 9898 extent_info.extent_buf = (char *)&stack_fi; 9899 extent_info.is_new_extent = true; 9900 extent_info.qgroup_reserved = qgroup_released; 9901 extent_info.insertions = 0; 9902 9903 path = btrfs_alloc_path(); 9904 if (!path) { 9905 ret = -ENOMEM; 9906 goto free_qgroup; 9907 } 9908 9909 ret = btrfs_replace_file_extents(inode, path, file_offset, 9910 file_offset + len - 1, &extent_info, 9911 &trans); 9912 btrfs_free_path(path); 9913 if (ret) 9914 goto free_qgroup; 9915 return trans; 9916 9917 free_qgroup: 9918 /* 9919 * We have released qgroup data range at the beginning of the function, 9920 * and normally qgroup_released bytes will be freed when committing 9921 * transaction. 9922 * But if we error out early, we have to free what we have released 9923 * or we leak qgroup data reservation. 
9924 */ 9925 btrfs_qgroup_free_refroot(inode->root->fs_info, 9926 inode->root->root_key.objectid, qgroup_released, 9927 BTRFS_QGROUP_RSV_DATA); 9928 return ERR_PTR(ret); 9929 } 9930 9931 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9932 u64 start, u64 num_bytes, u64 min_size, 9933 loff_t actual_len, u64 *alloc_hint, 9934 struct btrfs_trans_handle *trans) 9935 { 9936 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9937 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 9938 struct extent_map *em; 9939 struct btrfs_root *root = BTRFS_I(inode)->root; 9940 struct btrfs_key ins; 9941 u64 cur_offset = start; 9942 u64 clear_offset = start; 9943 u64 i_size; 9944 u64 cur_bytes; 9945 u64 last_alloc = (u64)-1; 9946 int ret = 0; 9947 bool own_trans = true; 9948 u64 end = start + num_bytes - 1; 9949 9950 if (trans) 9951 own_trans = false; 9952 while (num_bytes > 0) { 9953 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9954 cur_bytes = max(cur_bytes, min_size); 9955 /* 9956 * If we are severely fragmented we could end up with really 9957 * small allocations, so if the allocator is returning small 9958 * chunks lets make its job easier by only searching for those 9959 * sized chunks. 9960 */ 9961 cur_bytes = min(cur_bytes, last_alloc); 9962 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9963 min_size, 0, *alloc_hint, &ins, 1, 0); 9964 if (ret) 9965 break; 9966 9967 /* 9968 * We've reserved this space, and thus converted it from 9969 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9970 * from here on out we will only need to clear our reservation 9971 * for the remaining unreserved area, so advance our 9972 * clear_offset by our extent size. 9973 */ 9974 clear_offset += ins.offset; 9975 9976 last_alloc = ins.offset; 9977 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9978 &ins, cur_offset); 9979 /* 9980 * Now that we inserted the prealloc extent we can finally 9981 * decrement the number of reservations in the block group. 9982 * If we did it before, we could race with relocation and have 9983 * relocation miss the reserved extent, making it fail later. 
9984 */ 9985 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9986 if (IS_ERR(trans)) { 9987 ret = PTR_ERR(trans); 9988 btrfs_free_reserved_extent(fs_info, ins.objectid, 9989 ins.offset, 0); 9990 break; 9991 } 9992 9993 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 9994 cur_offset + ins.offset -1, 0); 9995 9996 em = alloc_extent_map(); 9997 if (!em) { 9998 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9999 goto next; 10000 } 10001 10002 em->start = cur_offset; 10003 em->orig_start = cur_offset; 10004 em->len = ins.offset; 10005 em->block_start = ins.objectid; 10006 em->block_len = ins.offset; 10007 em->orig_block_len = ins.offset; 10008 em->ram_bytes = ins.offset; 10009 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 10010 em->generation = trans->transid; 10011 10012 while (1) { 10013 write_lock(&em_tree->lock); 10014 ret = add_extent_mapping(em_tree, em, 1); 10015 write_unlock(&em_tree->lock); 10016 if (ret != -EEXIST) 10017 break; 10018 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 10019 cur_offset + ins.offset - 1, 10020 0); 10021 } 10022 free_extent_map(em); 10023 next: 10024 num_bytes -= ins.offset; 10025 cur_offset += ins.offset; 10026 *alloc_hint = ins.objectid + ins.offset; 10027 10028 inode_inc_iversion(inode); 10029 inode->i_ctime = current_time(inode); 10030 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 10031 if (!(mode & FALLOC_FL_KEEP_SIZE) && 10032 (actual_len > inode->i_size) && 10033 (cur_offset > inode->i_size)) { 10034 if (cur_offset > actual_len) 10035 i_size = actual_len; 10036 else 10037 i_size = cur_offset; 10038 i_size_write(inode, i_size); 10039 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 10040 } 10041 10042 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 10043 10044 if (ret) { 10045 btrfs_abort_transaction(trans, ret); 10046 if (own_trans) 10047 btrfs_end_transaction(trans); 10048 break; 10049 } 10050 10051 if (own_trans) { 10052 btrfs_end_transaction(trans); 10053 trans = NULL; 10054 } 10055 } 10056 if (clear_offset < end) 10057 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 10058 end - clear_offset + 1); 10059 return ret; 10060 } 10061 10062 int btrfs_prealloc_file_range(struct inode *inode, int mode, 10063 u64 start, u64 num_bytes, u64 min_size, 10064 loff_t actual_len, u64 *alloc_hint) 10065 { 10066 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10067 min_size, actual_len, alloc_hint, 10068 NULL); 10069 } 10070 10071 int btrfs_prealloc_file_range_trans(struct inode *inode, 10072 struct btrfs_trans_handle *trans, int mode, 10073 u64 start, u64 num_bytes, u64 min_size, 10074 loff_t actual_len, u64 *alloc_hint) 10075 { 10076 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10077 min_size, actual_len, alloc_hint, trans); 10078 } 10079 10080 static int btrfs_permission(struct user_namespace *mnt_userns, 10081 struct inode *inode, int mask) 10082 { 10083 struct btrfs_root *root = BTRFS_I(inode)->root; 10084 umode_t mode = inode->i_mode; 10085 10086 if (mask & MAY_WRITE && 10087 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 10088 if (btrfs_root_readonly(root)) 10089 return -EROFS; 10090 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 10091 return -EACCES; 10092 } 10093 return generic_permission(mnt_userns, inode, mask); 10094 } 10095 10096 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 10097 struct dentry *dentry, umode_t mode) 10098 { 10099 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 10100 struct btrfs_trans_handle *trans; 10101 
struct btrfs_root *root = BTRFS_I(dir)->root; 10102 struct inode *inode; 10103 struct btrfs_new_inode_args new_inode_args = { 10104 .dir = dir, 10105 .dentry = dentry, 10106 .orphan = true, 10107 }; 10108 unsigned int trans_num_items; 10109 int ret; 10110 10111 inode = new_inode(dir->i_sb); 10112 if (!inode) 10113 return -ENOMEM; 10114 inode_init_owner(mnt_userns, inode, dir, mode); 10115 inode->i_fop = &btrfs_file_operations; 10116 inode->i_op = &btrfs_file_inode_operations; 10117 inode->i_mapping->a_ops = &btrfs_aops; 10118 10119 new_inode_args.inode = inode; 10120 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 10121 if (ret) 10122 goto out_inode; 10123 10124 trans = btrfs_start_transaction(root, trans_num_items); 10125 if (IS_ERR(trans)) { 10126 ret = PTR_ERR(trans); 10127 goto out_new_inode_args; 10128 } 10129 10130 ret = btrfs_create_new_inode(trans, &new_inode_args); 10131 10132 /* 10133 * We set number of links to 0 in btrfs_create_new_inode(), and here we 10134 * set it to 1 because d_tmpfile() will issue a warning if the count is 10135 * 0, through: 10136 * 10137 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 10138 */ 10139 set_nlink(inode, 1); 10140 10141 if (!ret) { 10142 d_tmpfile(dentry, inode); 10143 unlock_new_inode(inode); 10144 mark_inode_dirty(inode); 10145 } 10146 10147 btrfs_end_transaction(trans); 10148 btrfs_btree_balance_dirty(fs_info); 10149 out_new_inode_args: 10150 btrfs_new_inode_args_destroy(&new_inode_args); 10151 out_inode: 10152 if (ret) 10153 iput(inode); 10154 return ret; 10155 } 10156 10157 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 10158 { 10159 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10160 unsigned long index = start >> PAGE_SHIFT; 10161 unsigned long end_index = end >> PAGE_SHIFT; 10162 struct page *page; 10163 u32 len; 10164 10165 ASSERT(end + 1 - start <= U32_MAX); 10166 len = end + 1 - start; 10167 while (index <= end_index) { 10168 page = find_get_page(inode->vfs_inode.i_mapping, index); 10169 ASSERT(page); /* Pages should be in the extent_io_tree */ 10170 10171 btrfs_page_set_writeback(fs_info, page, start, len); 10172 put_page(page); 10173 index++; 10174 } 10175 } 10176 10177 static int btrfs_encoded_io_compression_from_extent( 10178 struct btrfs_fs_info *fs_info, 10179 int compress_type) 10180 { 10181 switch (compress_type) { 10182 case BTRFS_COMPRESS_NONE: 10183 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 10184 case BTRFS_COMPRESS_ZLIB: 10185 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 10186 case BTRFS_COMPRESS_LZO: 10187 /* 10188 * The LZO format depends on the sector size. 64K is the maximum 10189 * sector size that we support. 
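*
* The return value encodes the sector size: e.g. a 4K sector size
* (sectorsize_bits == 12) maps to BTRFS_ENCODED_IO_COMPRESSION_LZO_4K and a
* 64K sector size (sectorsize_bits == 16) maps to
* BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 4, i.e. the 64K variant (the LZO_*
* values are consecutive, see the inverse check in btrfs_do_encoded_write()).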
10190 */ 10191 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 10192 return -EINVAL; 10193 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 10194 (fs_info->sectorsize_bits - 12); 10195 case BTRFS_COMPRESS_ZSTD: 10196 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 10197 default: 10198 return -EUCLEAN; 10199 } 10200 } 10201 10202 static ssize_t btrfs_encoded_read_inline( 10203 struct kiocb *iocb, 10204 struct iov_iter *iter, u64 start, 10205 u64 lockend, 10206 struct extent_state **cached_state, 10207 u64 extent_start, size_t count, 10208 struct btrfs_ioctl_encoded_io_args *encoded, 10209 bool *unlocked) 10210 { 10211 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10212 struct btrfs_root *root = inode->root; 10213 struct btrfs_fs_info *fs_info = root->fs_info; 10214 struct extent_io_tree *io_tree = &inode->io_tree; 10215 struct btrfs_path *path; 10216 struct extent_buffer *leaf; 10217 struct btrfs_file_extent_item *item; 10218 u64 ram_bytes; 10219 unsigned long ptr; 10220 void *tmp; 10221 ssize_t ret; 10222 10223 path = btrfs_alloc_path(); 10224 if (!path) { 10225 ret = -ENOMEM; 10226 goto out; 10227 } 10228 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 10229 extent_start, 0); 10230 if (ret) { 10231 if (ret > 0) { 10232 /* The extent item disappeared? */ 10233 ret = -EIO; 10234 } 10235 goto out; 10236 } 10237 leaf = path->nodes[0]; 10238 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 10239 10240 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 10241 ptr = btrfs_file_extent_inline_start(item); 10242 10243 encoded->len = min_t(u64, extent_start + ram_bytes, 10244 inode->vfs_inode.i_size) - iocb->ki_pos; 10245 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10246 btrfs_file_extent_compression(leaf, item)); 10247 if (ret < 0) 10248 goto out; 10249 encoded->compression = ret; 10250 if (encoded->compression) { 10251 size_t inline_size; 10252 10253 inline_size = btrfs_file_extent_inline_item_len(leaf, 10254 path->slots[0]); 10255 if (inline_size > count) { 10256 ret = -ENOBUFS; 10257 goto out; 10258 } 10259 count = inline_size; 10260 encoded->unencoded_len = ram_bytes; 10261 encoded->unencoded_offset = iocb->ki_pos - extent_start; 10262 } else { 10263 count = min_t(u64, count, encoded->len); 10264 encoded->len = count; 10265 encoded->unencoded_len = count; 10266 ptr += iocb->ki_pos - extent_start; 10267 } 10268 10269 tmp = kmalloc(count, GFP_NOFS); 10270 if (!tmp) { 10271 ret = -ENOMEM; 10272 goto out; 10273 } 10274 read_extent_buffer(leaf, tmp, ptr, count); 10275 btrfs_release_path(path); 10276 unlock_extent_cached(io_tree, start, lockend, cached_state); 10277 btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10278 *unlocked = true; 10279 10280 ret = copy_to_iter(tmp, count, iter); 10281 if (ret != count) 10282 ret = -EFAULT; 10283 kfree(tmp); 10284 out: 10285 btrfs_free_path(path); 10286 return ret; 10287 } 10288 10289 struct btrfs_encoded_read_private { 10290 struct btrfs_inode *inode; 10291 u64 file_offset; 10292 wait_queue_head_t wait; 10293 atomic_t pending; 10294 blk_status_t status; 10295 bool skip_csum; 10296 }; 10297 10298 static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode, 10299 struct bio *bio, int mirror_num) 10300 { 10301 struct btrfs_encoded_read_private *priv = bio->bi_private; 10302 struct btrfs_bio *bbio = btrfs_bio(bio); 10303 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10304 blk_status_t ret; 10305 10306 if (!priv->skip_csum) { 10307 ret = 
btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL); 10308 if (ret) 10309 return ret; 10310 } 10311 10312 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 10313 if (ret) { 10314 btrfs_bio_free_csum(bbio); 10315 return ret; 10316 } 10317 10318 atomic_inc(&priv->pending); 10319 ret = btrfs_map_bio(fs_info, bio, mirror_num); 10320 if (ret) { 10321 atomic_dec(&priv->pending); 10322 btrfs_bio_free_csum(bbio); 10323 } 10324 return ret; 10325 } 10326 10327 static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio) 10328 { 10329 const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK); 10330 struct btrfs_encoded_read_private *priv = bbio->bio.bi_private; 10331 struct btrfs_inode *inode = priv->inode; 10332 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10333 u32 sectorsize = fs_info->sectorsize; 10334 struct bio_vec *bvec; 10335 struct bvec_iter_all iter_all; 10336 u64 start = priv->file_offset; 10337 u32 bio_offset = 0; 10338 10339 if (priv->skip_csum || !uptodate) 10340 return bbio->bio.bi_status; 10341 10342 bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { 10343 unsigned int i, nr_sectors, pgoff; 10344 10345 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len); 10346 pgoff = bvec->bv_offset; 10347 for (i = 0; i < nr_sectors; i++) { 10348 ASSERT(pgoff < PAGE_SIZE); 10349 if (check_data_csum(&inode->vfs_inode, bbio, bio_offset, 10350 bvec->bv_page, pgoff, start)) 10351 return BLK_STS_IOERR; 10352 start += sectorsize; 10353 bio_offset += sectorsize; 10354 pgoff += sectorsize; 10355 } 10356 } 10357 return BLK_STS_OK; 10358 } 10359 10360 static void btrfs_encoded_read_endio(struct bio *bio) 10361 { 10362 struct btrfs_encoded_read_private *priv = bio->bi_private; 10363 struct btrfs_bio *bbio = btrfs_bio(bio); 10364 blk_status_t status; 10365 10366 status = btrfs_encoded_read_verify_csum(bbio); 10367 if (status) { 10368 /* 10369 * The memory barrier implied by the atomic_dec_return() here 10370 * pairs with the memory barrier implied by the 10371 * atomic_dec_return() or io_wait_event() in 10372 * btrfs_encoded_read_regular_fill_pages() to ensure that this 10373 * write is observed before the load of status in 10374 * btrfs_encoded_read_regular_fill_pages(). 10375 */ 10376 WRITE_ONCE(priv->status, status); 10377 } 10378 if (!atomic_dec_return(&priv->pending)) 10379 wake_up(&priv->wait); 10380 btrfs_bio_free_csum(bbio); 10381 bio_put(bio); 10382 } 10383 10384 static int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 10385 u64 file_offset, 10386 u64 disk_bytenr, 10387 u64 disk_io_size, 10388 struct page **pages) 10389 { 10390 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10391 struct btrfs_encoded_read_private priv = { 10392 .inode = inode, 10393 .file_offset = file_offset, 10394 .pending = ATOMIC_INIT(1), 10395 .skip_csum = (inode->flags & BTRFS_INODE_NODATASUM), 10396 }; 10397 unsigned long i = 0; 10398 u64 cur = 0; 10399 int ret; 10400 10401 init_waitqueue_head(&priv.wait); 10402 /* 10403 * Submit bios for the extent, splitting due to bio or stripe limits as 10404 * necessary. 
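*
* Each pass of the outer loop asks btrfs_get_chunk_map() and
* btrfs_get_io_geometry() how far the current stripe extends from
* disk_bytenr + cur; the inner loop then adds one page at a time with
* bio_add_page() and submits the bio once a page no longer fits (or the
* stripe is exhausted) before starting the next one.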
10405 */ 10406 while (cur < disk_io_size) { 10407 struct extent_map *em; 10408 struct btrfs_io_geometry geom; 10409 struct bio *bio = NULL; 10410 u64 remaining; 10411 10412 em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur, 10413 disk_io_size - cur); 10414 if (IS_ERR(em)) { 10415 ret = PTR_ERR(em); 10416 } else { 10417 ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ, 10418 disk_bytenr + cur, &geom); 10419 free_extent_map(em); 10420 } 10421 if (ret) { 10422 WRITE_ONCE(priv.status, errno_to_blk_status(ret)); 10423 break; 10424 } 10425 remaining = min(geom.len, disk_io_size - cur); 10426 while (bio || remaining) { 10427 size_t bytes = min_t(u64, remaining, PAGE_SIZE); 10428 10429 if (!bio) { 10430 bio = btrfs_bio_alloc(BIO_MAX_VECS); 10431 bio->bi_iter.bi_sector = 10432 (disk_bytenr + cur) >> SECTOR_SHIFT; 10433 bio->bi_end_io = btrfs_encoded_read_endio; 10434 bio->bi_private = &priv; 10435 bio->bi_opf = REQ_OP_READ; 10436 } 10437 10438 if (!bytes || 10439 bio_add_page(bio, pages[i], bytes, 0) < bytes) { 10440 blk_status_t status; 10441 10442 status = submit_encoded_read_bio(inode, bio, 0); 10443 if (status) { 10444 WRITE_ONCE(priv.status, status); 10445 bio_put(bio); 10446 goto out; 10447 } 10448 bio = NULL; 10449 continue; 10450 } 10451 10452 i++; 10453 cur += bytes; 10454 remaining -= bytes; 10455 } 10456 } 10457 10458 out: 10459 if (atomic_dec_return(&priv.pending)) 10460 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 10461 /* See btrfs_encoded_read_endio() for ordering. */ 10462 return blk_status_to_errno(READ_ONCE(priv.status)); 10463 } 10464 10465 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 10466 struct iov_iter *iter, 10467 u64 start, u64 lockend, 10468 struct extent_state **cached_state, 10469 u64 disk_bytenr, u64 disk_io_size, 10470 size_t count, bool compressed, 10471 bool *unlocked) 10472 { 10473 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10474 struct extent_io_tree *io_tree = &inode->io_tree; 10475 struct page **pages; 10476 unsigned long nr_pages, i; 10477 u64 cur; 10478 size_t page_offset; 10479 ssize_t ret; 10480 10481 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 10482 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 10483 if (!pages) 10484 return -ENOMEM; 10485 ret = btrfs_alloc_page_array(nr_pages, pages); 10486 if (ret) { 10487 ret = -ENOMEM; 10488 goto out; 10489 } 10490 10491 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 10492 disk_io_size, pages); 10493 if (ret) 10494 goto out; 10495 10496 unlock_extent_cached(io_tree, start, lockend, cached_state); 10497 btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10498 *unlocked = true; 10499 10500 if (compressed) { 10501 i = 0; 10502 page_offset = 0; 10503 } else { 10504 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 10505 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 10506 } 10507 cur = 0; 10508 while (cur < count) { 10509 size_t bytes = min_t(size_t, count - cur, 10510 PAGE_SIZE - page_offset); 10511 10512 if (copy_page_to_iter(pages[i], page_offset, bytes, 10513 iter) != bytes) { 10514 ret = -EFAULT; 10515 goto out; 10516 } 10517 i++; 10518 cur += bytes; 10519 page_offset = 0; 10520 } 10521 ret = count; 10522 out: 10523 for (i = 0; i < nr_pages; i++) { 10524 if (pages[i]) 10525 __free_page(pages[i]); 10526 } 10527 kfree(pages); 10528 return ret; 10529 } 10530 10531 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10532 struct btrfs_ioctl_encoded_io_args *encoded) 10533 { 10534 struct btrfs_inode 
*inode = BTRFS_I(file_inode(iocb->ki_filp)); 10535 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10536 struct extent_io_tree *io_tree = &inode->io_tree; 10537 ssize_t ret; 10538 size_t count = iov_iter_count(iter); 10539 u64 start, lockend, disk_bytenr, disk_io_size; 10540 struct extent_state *cached_state = NULL; 10541 struct extent_map *em; 10542 bool unlocked = false; 10543 10544 file_accessed(iocb->ki_filp); 10545 10546 btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10547 10548 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10549 btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10550 return 0; 10551 } 10552 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10553 /* 10554 * We don't know how long the extent containing iocb->ki_pos is, but if 10555 * it's compressed we know that it won't be longer than this. 10556 */ 10557 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10558 10559 for (;;) { 10560 struct btrfs_ordered_extent *ordered; 10561 10562 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10563 lockend - start + 1); 10564 if (ret) 10565 goto out_unlock_inode; 10566 lock_extent_bits(io_tree, start, lockend, &cached_state); 10567 ordered = btrfs_lookup_ordered_range(inode, start, 10568 lockend - start + 1); 10569 if (!ordered) 10570 break; 10571 btrfs_put_ordered_extent(ordered); 10572 unlock_extent_cached(io_tree, start, lockend, &cached_state); 10573 cond_resched(); 10574 } 10575 10576 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10577 if (IS_ERR(em)) { 10578 ret = PTR_ERR(em); 10579 goto out_unlock_extent; 10580 } 10581 10582 if (em->block_start == EXTENT_MAP_INLINE) { 10583 u64 extent_start = em->start; 10584 10585 /* 10586 * For inline extents we get everything we need out of the 10587 * extent item. 10588 */ 10589 free_extent_map(em); 10590 em = NULL; 10591 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10592 &cached_state, extent_start, 10593 count, encoded, &unlocked); 10594 goto out; 10595 } 10596 10597 /* 10598 * We only want to return up to EOF even if the extent extends beyond 10599 * that. 10600 */ 10601 encoded->len = min_t(u64, extent_map_end(em), 10602 inode->vfs_inode.i_size) - iocb->ki_pos; 10603 if (em->block_start == EXTENT_MAP_HOLE || 10604 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10605 disk_bytenr = EXTENT_MAP_HOLE; 10606 count = min_t(u64, count, encoded->len); 10607 encoded->len = count; 10608 encoded->unencoded_len = count; 10609 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10610 disk_bytenr = em->block_start; 10611 /* 10612 * Bail if the buffer isn't large enough to return the whole 10613 * compressed extent. 10614 */ 10615 if (em->block_len > count) { 10616 ret = -ENOBUFS; 10617 goto out_em; 10618 } 10619 disk_io_size = count = em->block_len; 10620 encoded->unencoded_len = em->ram_bytes; 10621 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10622 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10623 em->compress_type); 10624 if (ret < 0) 10625 goto out_em; 10626 encoded->compression = ret; 10627 } else { 10628 disk_bytenr = em->block_start + (start - em->start); 10629 if (encoded->len > count) 10630 encoded->len = count; 10631 /* 10632 * Don't read beyond what we locked. This also limits the page 10633 * allocations that we'll do. 
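*
* Note that disk_io_size is measured from the sector-aligned 'start' while
* count (what the caller gets back) is measured from iocb->ki_pos, so the
* two can differ by the leading bytes inside the first sector; disk_io_size
* is then rounded up to a sector boundary for the actual read.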
10634 */ 10635 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10636 count = start + disk_io_size - iocb->ki_pos; 10637 encoded->len = count; 10638 encoded->unencoded_len = count; 10639 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10640 } 10641 free_extent_map(em); 10642 em = NULL; 10643 10644 if (disk_bytenr == EXTENT_MAP_HOLE) { 10645 unlock_extent_cached(io_tree, start, lockend, &cached_state); 10646 btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10647 unlocked = true; 10648 ret = iov_iter_zero(count, iter); 10649 if (ret != count) 10650 ret = -EFAULT; 10651 } else { 10652 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10653 &cached_state, disk_bytenr, 10654 disk_io_size, count, 10655 encoded->compression, 10656 &unlocked); 10657 } 10658 10659 out: 10660 if (ret >= 0) 10661 iocb->ki_pos += encoded->len; 10662 out_em: 10663 free_extent_map(em); 10664 out_unlock_extent: 10665 if (!unlocked) 10666 unlock_extent_cached(io_tree, start, lockend, &cached_state); 10667 out_unlock_inode: 10668 if (!unlocked) 10669 btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED); 10670 return ret; 10671 } 10672 10673 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10674 const struct btrfs_ioctl_encoded_io_args *encoded) 10675 { 10676 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10677 struct btrfs_root *root = inode->root; 10678 struct btrfs_fs_info *fs_info = root->fs_info; 10679 struct extent_io_tree *io_tree = &inode->io_tree; 10680 struct extent_changeset *data_reserved = NULL; 10681 struct extent_state *cached_state = NULL; 10682 int compression; 10683 size_t orig_count; 10684 u64 start, end; 10685 u64 num_bytes, ram_bytes, disk_num_bytes; 10686 unsigned long nr_pages, i; 10687 struct page **pages; 10688 struct btrfs_key ins; 10689 bool extent_reserved = false; 10690 struct extent_map *em; 10691 ssize_t ret; 10692 10693 switch (encoded->compression) { 10694 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10695 compression = BTRFS_COMPRESS_ZLIB; 10696 break; 10697 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10698 compression = BTRFS_COMPRESS_ZSTD; 10699 break; 10700 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10701 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10702 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10703 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10704 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10705 /* The sector size must match for LZO. */ 10706 if (encoded->compression - 10707 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10708 fs_info->sectorsize_bits) 10709 return -EINVAL; 10710 compression = BTRFS_COMPRESS_LZO; 10711 break; 10712 default: 10713 return -EINVAL; 10714 } 10715 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10716 return -EINVAL; 10717 10718 orig_count = iov_iter_count(from); 10719 10720 /* The extent size must be sane. */ 10721 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10722 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10723 return -EINVAL; 10724 10725 /* 10726 * The compressed data must be smaller than the decompressed data. 10727 * 10728 * It's of course possible for data to compress to larger or the same 10729 * size, but the buffered I/O path falls back to no compression for such 10730 * data, and we don't want to break any assumptions by creating these 10731 * extents. 10732 * 10733 * Note that this is less strict than the current check we have that the 10734 * compressed data must be at least one sector smaller than the 10735 * decompressed data. 
We only want to enforce the weaker requirement 10736 * from old kernels that it is at least one byte smaller. 10737 */ 10738 if (orig_count >= encoded->unencoded_len) 10739 return -EINVAL; 10740 10741 /* The extent must start on a sector boundary. */ 10742 start = iocb->ki_pos; 10743 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10744 return -EINVAL; 10745 10746 /* 10747 * The extent must end on a sector boundary. However, we allow a write 10748 * which ends at or extends i_size to have an unaligned length; we round 10749 * up the extent size and set i_size to the unaligned end. 10750 */ 10751 if (start + encoded->len < inode->vfs_inode.i_size && 10752 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10753 return -EINVAL; 10754 10755 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10756 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10757 return -EINVAL; 10758 10759 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10760 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10761 end = start + num_bytes - 1; 10762 10763 /* 10764 * If the extent cannot be inline, the compressed data on disk must be 10765 * sector-aligned. For convenience, we extend it with zeroes if it 10766 * isn't. 10767 */ 10768 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10769 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10770 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10771 if (!pages) 10772 return -ENOMEM; 10773 for (i = 0; i < nr_pages; i++) { 10774 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10775 char *kaddr; 10776 10777 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10778 if (!pages[i]) { 10779 ret = -ENOMEM; 10780 goto out_pages; 10781 } 10782 kaddr = kmap(pages[i]); 10783 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10784 kunmap(pages[i]); 10785 ret = -EFAULT; 10786 goto out_pages; 10787 } 10788 if (bytes < PAGE_SIZE) 10789 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10790 kunmap(pages[i]); 10791 } 10792 10793 for (;;) { 10794 struct btrfs_ordered_extent *ordered; 10795 10796 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10797 if (ret) 10798 goto out_pages; 10799 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10800 start >> PAGE_SHIFT, 10801 end >> PAGE_SHIFT); 10802 if (ret) 10803 goto out_pages; 10804 lock_extent_bits(io_tree, start, end, &cached_state); 10805 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10806 if (!ordered && 10807 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10808 break; 10809 if (ordered) 10810 btrfs_put_ordered_extent(ordered); 10811 unlock_extent_cached(io_tree, start, end, &cached_state); 10812 cond_resched(); 10813 } 10814 10815 /* 10816 * We don't use the higher-level delalloc space functions because our 10817 * num_bytes and disk_num_bytes are different. 10818 */ 10819 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10820 if (ret) 10821 goto out_unlock; 10822 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10823 if (ret) 10824 goto out_free_data_space; 10825 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10826 false); 10827 if (ret) 10828 goto out_qgroup_free_data; 10829 10830 /* Try an inline extent first. 
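* This is only attempted for a write at file offset zero that covers the
* whole unencoded extent (unencoded_len == len, unencoded_offset == 0);
* cow_file_range_inline() returning 0 means the data was stored inline, in
* which case the full orig_count is reported as written.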
*/ 10831 if (start == 0 && encoded->unencoded_len == encoded->len && 10832 encoded->unencoded_offset == 0) { 10833 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10834 compression, pages, true); 10835 if (ret <= 0) { 10836 if (ret == 0) 10837 ret = orig_count; 10838 goto out_delalloc_release; 10839 } 10840 } 10841 10842 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10843 disk_num_bytes, 0, 0, &ins, 1, 1); 10844 if (ret) 10845 goto out_delalloc_release; 10846 extent_reserved = true; 10847 10848 em = create_io_em(inode, start, num_bytes, 10849 start - encoded->unencoded_offset, ins.objectid, 10850 ins.offset, ins.offset, ram_bytes, compression, 10851 BTRFS_ORDERED_COMPRESSED); 10852 if (IS_ERR(em)) { 10853 ret = PTR_ERR(em); 10854 goto out_free_reserved; 10855 } 10856 free_extent_map(em); 10857 10858 ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes, 10859 ins.objectid, ins.offset, 10860 encoded->unencoded_offset, 10861 (1 << BTRFS_ORDERED_ENCODED) | 10862 (1 << BTRFS_ORDERED_COMPRESSED), 10863 compression); 10864 if (ret) { 10865 btrfs_drop_extent_cache(inode, start, end, 0); 10866 goto out_free_reserved; 10867 } 10868 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10869 10870 if (start + encoded->len > inode->vfs_inode.i_size) 10871 i_size_write(&inode->vfs_inode, start + encoded->len); 10872 10873 unlock_extent_cached(io_tree, start, end, &cached_state); 10874 10875 btrfs_delalloc_release_extents(inode, num_bytes); 10876 10877 if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid, 10878 ins.offset, pages, nr_pages, 0, NULL, 10879 false)) { 10880 btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0); 10881 ret = -EIO; 10882 goto out_pages; 10883 } 10884 ret = orig_count; 10885 goto out; 10886 10887 out_free_reserved: 10888 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10889 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10890 out_delalloc_release: 10891 btrfs_delalloc_release_extents(inode, num_bytes); 10892 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10893 out_qgroup_free_data: 10894 if (ret < 0) 10895 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10896 out_free_data_space: 10897 /* 10898 * If btrfs_reserve_extent() succeeded, then we already decremented 10899 * bytes_may_use. 10900 */ 10901 if (!extent_reserved) 10902 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10903 out_unlock: 10904 unlock_extent_cached(io_tree, start, end, &cached_state); 10905 out_pages: 10906 for (i = 0; i < nr_pages; i++) { 10907 if (pages[i]) 10908 __free_page(pages[i]); 10909 } 10910 kvfree(pages); 10911 out: 10912 if (ret >= 0) 10913 iocb->ki_pos += encoded->len; 10914 return ret; 10915 } 10916 10917 #ifdef CONFIG_SWAP 10918 /* 10919 * Add an entry indicating a block group or device which is pinned by a 10920 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10921 * negative errno on failure. 
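*
* Entries live in the fs_info->swapfile_pins rbtree, ordered by the
* (ptr, inode) pair, so each block group or device is pinned at most once
* per swapfile; a duplicate insert for a block group only bumps
* bg_extent_count on the existing entry.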
10922 */ 10923 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10924 bool is_block_group) 10925 { 10926 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10927 struct btrfs_swapfile_pin *sp, *entry; 10928 struct rb_node **p; 10929 struct rb_node *parent = NULL; 10930 10931 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10932 if (!sp) 10933 return -ENOMEM; 10934 sp->ptr = ptr; 10935 sp->inode = inode; 10936 sp->is_block_group = is_block_group; 10937 sp->bg_extent_count = 1; 10938 10939 spin_lock(&fs_info->swapfile_pins_lock); 10940 p = &fs_info->swapfile_pins.rb_node; 10941 while (*p) { 10942 parent = *p; 10943 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10944 if (sp->ptr < entry->ptr || 10945 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10946 p = &(*p)->rb_left; 10947 } else if (sp->ptr > entry->ptr || 10948 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10949 p = &(*p)->rb_right; 10950 } else { 10951 if (is_block_group) 10952 entry->bg_extent_count++; 10953 spin_unlock(&fs_info->swapfile_pins_lock); 10954 kfree(sp); 10955 return 1; 10956 } 10957 } 10958 rb_link_node(&sp->node, parent, p); 10959 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10960 spin_unlock(&fs_info->swapfile_pins_lock); 10961 return 0; 10962 } 10963 10964 /* Free all of the entries pinned by this swapfile. */ 10965 static void btrfs_free_swapfile_pins(struct inode *inode) 10966 { 10967 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10968 struct btrfs_swapfile_pin *sp; 10969 struct rb_node *node, *next; 10970 10971 spin_lock(&fs_info->swapfile_pins_lock); 10972 node = rb_first(&fs_info->swapfile_pins); 10973 while (node) { 10974 next = rb_next(node); 10975 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10976 if (sp->inode == inode) { 10977 rb_erase(&sp->node, &fs_info->swapfile_pins); 10978 if (sp->is_block_group) { 10979 btrfs_dec_block_group_swap_extents(sp->ptr, 10980 sp->bg_extent_count); 10981 btrfs_put_block_group(sp->ptr); 10982 } 10983 kfree(sp); 10984 } 10985 node = next; 10986 } 10987 spin_unlock(&fs_info->swapfile_pins_lock); 10988 } 10989 10990 struct btrfs_swap_info { 10991 u64 start; 10992 u64 block_start; 10993 u64 block_len; 10994 u64 lowest_ppage; 10995 u64 highest_ppage; 10996 unsigned long nr_pages; 10997 int nr_extents; 10998 }; 10999 11000 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 11001 struct btrfs_swap_info *bsi) 11002 { 11003 unsigned long nr_pages; 11004 unsigned long max_pages; 11005 u64 first_ppage, first_ppage_reported, next_ppage; 11006 int ret; 11007 11008 /* 11009 * Our swapfile may have had its size extended after the swap header was 11010 * written. In that case activating the swapfile should not go beyond 11011 * the max size set in the swap header. 
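*
* The physical range is also trimmed to whole pages below: block_start is
* rounded up and block_start + block_len is rounded down, so e.g. a run
* from 0x1200 to 0x4200 with 4K pages contributes only pages 2 and 3
* (nr_pages == 2), dropping the partial pages at either end.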
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
				PAGE_SIZE) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}

static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	   "cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation.
	 * If snapshot creation already started before we bumped nr_swapfiles
	 * from 0 to 1 and completes before the first write into the swap file
	 * after it is activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that a subvolume is marked for deletion but has not
	 * been removed yet. To prevent this race, we check the root status
	 * before activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	   "cannot activate swapfile because subvolume %llu is being deleted",
			   root->root_key.objectid);
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = em->block_start + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = em->map_lookup->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != em->map_lookup->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (em->map_lookup->stripes[0].physical +
					(logical_block_start - em->start));
		len = min(len, em->len - (logical_block_start - em->start));
		free_extent_map(em);
		em = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
				   " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);

	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS' inode. When we replace extents
 * in a range (clone, dedupe, fallocate's zero range), we must update the
 * number of bytes used by the inode in an atomic manner, so that concurrent
 * stat(2) calls always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}

/**
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value + 1 should be
 *         sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock
 * in exclusive mode, we have also locked the inode's i_mmap_lock in exclusive
 * mode, we have flushed all delalloc in the range, we have waited for all
 * ordered extents in the range to complete and finally we have locked the
 * file range in the inode's io_tree.
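 *
 * A caller is expected to have done roughly the following before calling this
 * helper (sketch only, the exact helpers vary by call site):
 *
 *   btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_MMAP);
 *   btrfs_wait_ordered_range(&inode->vfs_inode, start, end + 1 - start);
 *   lock_extent_bits(&inode->io_tree, start, end, &cached_state);
 *   btrfs_assert_inode_range_clean(inode, start, end);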
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), root->root_key.objectid,
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * Btrfs doesn't support the bmap operation because swapfiles use bmap to make
 * a mapping of extents in the file. They assume these extents won't change
 * over the life of the file and they use the bmap result to do IO directly to
 * the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't suitable for
 * IO and they also will change frequently as COW operations happen. So,
 * swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
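 *
 * Swapfile support is instead provided through the ->swap_activate() /
 * ->swap_deactivate() address_space operations wired up in btrfs_aops below,
 * which map the swap extents up front while pinning the underlying block
 * groups and device so they cannot be relocated.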
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.direct_IO	= noop_direct_IO,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
#ifdef CONFIG_MIGRATION
	.migratepage	= btrfs_migratepage,
#endif
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_page = generic_error_remove_page,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};