// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	struct btrfs_ordered_extent *ordered;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	/* Range of I/O */
	u64 file_offset;
	u32 bytes;

	/* This must be last */
	struct btrfs_bio bbio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because with our io_tree
 * we hold the tree lock and take the inode lock when setting delalloc. These
 * two things are unrelated, so make a separate class for the file_extent_tree
 * so we don't get the two locking patterns mixed up.
 */
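/*
 * Illustrative note (not from the original source): the class below is
 * expected to be attached to each inode's file extent tree lock when the
 * inode is set up, along these lines (hypothetical sketch; the actual hook
 * lives in the inode allocation path):
 *
 *	lockdep_set_class(&inode->file_extent_tree->lock,
 *			  &file_extent_tree_class);
 *
 * With a distinct class, lockdep tracks the io_tree and file_extent_tree
 * lock ordering independently and does not report a false deadlock.
 */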
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}
/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * If the backref lookup fails, fall back to the old, less detailed error
 * message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(inode->root),
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
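/*
 * Illustrative usage of the two locking helpers above (hypothetical caller,
 * not part of the original file). A write path that must also exclude page
 * faults could do:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_TRY | BTRFS_ILOCK_MMAP);
 *	if (ret == -EAGAIN)
 *		return ret;	(somebody else holds the lock, retry later)
 *	...modify the file...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_TRY | BTRFS_ILOCK_MMAP);
 *
 * The same flags must be passed to btrfs_inode_unlock() so it releases the
 * matching shared/exclusive lock (BTRFS_ILOCK_TRY itself is ignored there).
 */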
/*
 * Cleanup all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct page *page;

	if (locked_page) {
		page_start = page_offset(locked_page);
		page_end = page_start + PAGE_SIZE - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked page, btrfs_mark_ordered_io_finished() will
		 * be called on it in run_delalloc_range() for the error
		 * handling, which will clear page Ordered and run the ordered
		 * extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
						page_folio(page), offset, bytes);
		put_page(page);
	}

	if (locked_page) {
		/* The locked page covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + PAGE_SIZE)
			return;
		/*
		 * In case this page belongs to the delalloc range being
		 * instantiated then skip it, since the first page of a range is
		 * going to be properly cleaned up by the caller of
		 * run_delalloc_range().
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
			offset = page_offset(locked_page) + PAGE_SIZE;
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector. Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a sector.
	 * That's also why we only need one page as the parameter.
	 */
	if (compressed_folio)
		ASSERT(compressed_size <= sectorsize);
	else
		ASSERT(compressed_size == 0);

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_page(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of a page, which means the data
	 * writeback is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we could create an inline extent even when we didn't
	 * want to. So here we skip inline extent creation completely.
	 */
	if (fs_info->sectorsize != PAGE_SIZE)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	return true;
}
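/*
 * Worked example for the checks above (illustrative numbers, assuming a 4K
 * sector size and the default max_inline of 2048): a new 3000 byte file
 * written at offset 0 whose data compresses to 800 bytes has
 * size <= sectorsize and data_len == 800, within both
 * BTRFS_MAX_INLINE_DATA_SIZE() and max_inline, so it may be inlined. The
 * same data written at offset 4096, or data that only compresses down to
 * 2500 bytes (data_len > max_inline), would be rejected.
 */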
/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 *
 * If being used directly, you must have already checked that we're allowed to
 * COW the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inlined extent doesn't
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time the space is always aligned to the page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
					  u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, offset, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
				     clear_flags,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct folio **folios;
	unsigned long nr_folios;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct folio **folios,
				     unsigned long nr_folios,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->folios = folios;
	async_extent->nr_folios = nr_folios;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
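/*
 * Relationship of the three structures above (illustrative summary, not from
 * the original source): one async_cow allocation carries an array of
 * async_chunk work items, each covering up to 512K of the delalloc range.
 * compress_file_range() fills each chunk's ->extents list with async_extent
 * entries, which submit_compressed_extents() later drains and writes out:
 *
 *	async_cow
 *	  chunks[0..num_chunks-1]   (async_chunk, one queued work item each)
 *	    -> extents              (list of async_extent, one per extent)
 */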
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In above case, both range A and range B will try to unlock the full
	 * page [0, 64K), causing the one finished later will have page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit that subpage compression can only
	 * be triggered if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page will be locked until the full compression
	 * finishes, delaying the writeback of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges first
	 * to prevent any submitted async extent from unlocking the full page.
	 * That way we can ensure that, for the subpage case, only the last
	 * async_cow unlocks the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
 * Work queue callback to start compression on a file and pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct folio **folios;
	unsigned long nr_folios;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	folios = NULL;
	nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment of the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
	if (!folios) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to the uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
				    mapping, start, folios, &nr_folios, &total_in,
				    &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, start, end, total_compressed,
					    compress_type, folios[0], false);
	if (ret <= 0) {
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		goto free_pages;
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
			       nr_folios, compress_type);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			       BTRFS_COMPRESS_NONE);
	BUG_ON(ret);
free_pages:
	if (folios) {
		for (i = 0; i < nr_folios; i++) {
			WARN_ON(folios[i]->mapping);
			btrfs_free_compr_folio(folios[i]);
		}
		kfree(folios);
	}
}
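/*
 * Worked example of the profitability check in compress_file_range() above
 * (illustrative numbers, assuming a 4K sector size): if 64K of input
 * (total_in, already sector aligned) compresses to 61K, that rounds up to
 * total_compressed = 64K at the block boundary, and 64K + 4K > 64K, so the
 * range is treated as incompressible. At 60K it stays 60K after alignment,
 * and 60K + 4K <= 64K saves a full sector, so the compressed extent is kept.
 */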
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->folios)
		return;

	for (i = 0; i < async_extent->nr_folios; i++) {
		WARN_ON(async_extent->folios[i]->mapping);
		btrfs_free_compr_folio(async_extent->folios[i]);
	}
	kfree(async_extent->folios);
	async_extent->nr_folios = 0;
	async_extent->folios = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
		if (locked_page) {
			const u64 page_start = page_offset(locked_page);

			set_page_writeback(locked_page);
			end_page_writeback(locked_page);
			btrfs_mark_ordered_io_finished(inode, locked_page,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_page->mapping, ret);
			unlock_page(locked_page);
		}
	}
}
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead. So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_page);
		goto done;
	}

	lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->folios,	/* compressed_folios */
			    async_extent->nr_folios,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * When extent_io.c finds a delayed allocation range in the file, the
 * callbacks end up in this code. The basic idea is to allocate extents on
 * disk for the range, and create ordered data structs in RAM to track those
 * extents.
 *
 * locked_page is the page that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_page.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_page and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_page is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_page are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for an
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page, u64 start, u64 end,
				   u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached = NULL;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (!no_inline) {
		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret <= 0) {
			/*
			 * We succeeded, return 1 so the caller knows we're done
			 * with this page and already handled the IO.
			 *
			 * If there was an error then cow_file_range_inline() has
			 * already done the cleanup.
			 */
			if (ret == 0)
				ret = 1;
			goto done;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;
	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;

		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation. Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;

		lock_extent(&inode->io_tree, start, start + ram_size - 1,
			    &cached);

		em = create_io_em(inode, start, ins.offset,	/* len */
				  start,			/* orig_start */
				  ins.objectid,			/* block_start */
				  ins.offset,			/* block_len */
				  ins.offset,			/* orig_block_len */
				  ram_size,			/* ram_bytes */
				  BTRFS_COMPRESS_NONE,		/* compress_type */
				  BTRFS_ORDERED_REGULAR		/* type */);
		if (IS_ERR(em)) {
			unlock_extent(&inode->io_tree, start,
				      start + ram_size - 1, &cached);
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
					ram_size, ins.objectid, cur_alloc_size,
					0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
		if (IS_ERR(ordered)) {
			unlock_extent(&inode->io_tree, start,
				      start + ram_size - 1, &cached);
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + ram_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly set up for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page, &cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error, since @start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g.
	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_page) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_page)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_page, NULL, 0, page_ops);
	}

	/*
	 * At this point we're unlocked, we want to make sure we're only
	 * clearing these flags under the extent lock, so lock the rest of the
	 * range and clear everything up.
	 */
	lock_extent(&inode->io_tree, start, end, NULL);

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page, &cached,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     &cached, clear_bits, page_ops);
	}
	return ret;
}
/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true, it finishes the work and eventually frees
 * the work struct.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_chunk *async_chunk;
		struct async_cow *async_cow;

		async_chunk = container_of(work, struct async_chunk, work);
		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct page *locked_page, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime.
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_page comes all the way from writepage and it's
		 * the original page we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_page.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_page) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_page,
						 cur_end - start);
			async_chunk[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			async_chunk[i].locked_page = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}
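/*
 * Illustrative chunking arithmetic for the function above (not from the
 * original source): for a 2M delalloc range (inclusive, so end - start is
 * 2M - 1), num_chunks = DIV_ROUND_UP(end - start, SZ_512K) yields 4, and
 * four async_chunk work items get queued covering
 * [start, start + 512K - 1], [start + 512K, start + 1M - 1], and so on.
 * Only the first chunk keeps a pointer to locked_page.
 */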
/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct page *locked_page, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
				     true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
					  done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we did
	 * not reserve data space for it, since we thought we could do NOCOW for
	 * the respective file range (either there is a prealloc extent or the
	 * inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	lock_extent(io_tree, start, end, &cached_state);
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}
	unlock_extent(io_tree, start, end, &cached_state);

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 */
	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
	ASSERT(ret != 1);
	return ret;
}
1798 * 1799 * If we need to fall back to COW and the inode corresponds to a free 1800 * space cache inode or an inode of the data relocation tree, we must 1801 * also increment bytes_may_use of the data space_info for the same 1802 * reason. Space caches and relocated data extents always get a prealloc 1803 * extent for them, however scrub or balance may have set the block 1804 * group that contains that extent to RO mode and therefore force COW 1805 * when starting writeback. 1806 */ 1807 lock_extent(io_tree, start, end, &cached_state); 1808 count = count_range_bits(io_tree, &range_start, end, range_bytes, 1809 EXTENT_NORESERVE, 0, NULL); 1810 if (count > 0 || is_space_ino || is_reloc_ino) { 1811 u64 bytes = count; 1812 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1813 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1814 1815 if (is_space_ino || is_reloc_ino) 1816 bytes = range_bytes; 1817 1818 spin_lock(&sinfo->lock); 1819 btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); 1820 spin_unlock(&sinfo->lock); 1821 1822 if (count > 0) 1823 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1824 NULL); 1825 } 1826 unlock_extent(io_tree, start, end, &cached_state); 1827 1828 /* 1829 * Don't try to create inline extents, as a mix of inline extent that 1830 * is written out and unlocked directly and a normal NOCOW extent 1831 * doesn't work. 1832 */ 1833 ret = cow_file_range(inode, locked_page, start, end, NULL, false, true); 1834 ASSERT(ret != 1); 1835 return ret; 1836 } 1837 1838 struct can_nocow_file_extent_args { 1839 /* Input fields. */ 1840 1841 /* Start file offset of the range we want to NOCOW. */ 1842 u64 start; 1843 /* End file offset (inclusive) of the range we want to NOCOW. */ 1844 u64 end; 1845 bool writeback_path; 1846 bool strict; 1847 /* 1848 * Free the path passed to can_nocow_file_extent() once it's not needed 1849 * anymore. 1850 */ 1851 bool free_path; 1852 1853 /* Output fields. Only set when can_nocow_file_extent() returns 1. */ 1854 1855 u64 disk_bytenr; 1856 u64 disk_num_bytes; 1857 u64 extent_offset; 1858 /* Number of bytes that can be written to in NOCOW mode. */ 1859 u64 num_bytes; 1860 }; 1861 1862 /* 1863 * Check if we can NOCOW the file extent that the path points to. 1864 * This function may return with the path released, so the caller should check 1865 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1866 * 1867 * Returns: < 0 on error 1868 * 0 if we cannot NOCOW 1869 * 1 if we can NOCOW 1870 */ 1871 static int can_nocow_file_extent(struct btrfs_path *path, 1872 struct btrfs_key *key, 1873 struct btrfs_inode *inode, 1874 struct can_nocow_file_extent_args *args) 1875 { 1876 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1877 struct extent_buffer *leaf = path->nodes[0]; 1878 struct btrfs_root *root = inode->root; 1879 struct btrfs_file_extent_item *fi; 1880 struct btrfs_root *csum_root; 1881 u64 extent_end; 1882 u8 extent_type; 1883 int can_nocow = 0; 1884 int ret = 0; 1885 bool nowait = path->nowait; 1886 1887 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1888 extent_type = btrfs_file_extent_type(leaf, fi); 1889 1890 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1891 goto out; 1892 1893 /* Can't access these fields unless we know it's not an inline extent.
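* Inline extents store their data in the leaf item itself, so they have no disk_bytenr, disk_num_bytes or extent_offset to read.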
*/ 1894 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1895 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1896 args->extent_offset = btrfs_file_extent_offset(leaf, fi); 1897 1898 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1899 extent_type == BTRFS_FILE_EXTENT_REG) 1900 goto out; 1901 1902 /* 1903 * If the extent was created before the generation where the last snapshot 1904 * for its subvolume was created, then this implies the extent is shared, 1905 * hence we must COW. 1906 */ 1907 if (!args->strict && 1908 btrfs_file_extent_generation(leaf, fi) <= 1909 btrfs_root_last_snapshot(&root->root_item)) 1910 goto out; 1911 1912 /* An explicit hole, must COW. */ 1913 if (args->disk_bytenr == 0) 1914 goto out; 1915 1916 /* Compressed/encrypted/encoded extents must be COWed. */ 1917 if (btrfs_file_extent_compression(leaf, fi) || 1918 btrfs_file_extent_encryption(leaf, fi) || 1919 btrfs_file_extent_other_encoding(leaf, fi)) 1920 goto out; 1921 1922 extent_end = btrfs_file_extent_end(path); 1923 1924 /* 1925 * The following checks can be expensive, as they need to take other 1926 * locks and do btree or rbtree searches, so release the path to avoid 1927 * blocking other tasks for too long. 1928 */ 1929 btrfs_release_path(path); 1930 1931 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1932 key->offset - args->extent_offset, 1933 args->disk_bytenr, args->strict, path); 1934 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1935 if (ret != 0) 1936 goto out; 1937 1938 if (args->free_path) { 1939 /* 1940 * We don't need the path anymore, plus through the 1941 * btrfs_lookup_csums_list() call below we will end up allocating 1942 * another path. So free the path to avoid unnecessary extra 1943 * memory usage. 1944 */ 1945 btrfs_free_path(path); 1946 path = NULL; 1947 } 1948 1949 /* If there are pending snapshots for this root, we must COW. */ 1950 if (args->writeback_path && !is_freespace_inode && 1951 atomic_read(&root->snapshot_force_cow)) 1952 goto out; 1953 1954 args->disk_bytenr += args->extent_offset; 1955 args->disk_bytenr += args->start - key->offset; 1956 args->num_bytes = min(args->end + 1, extent_end) - args->start; 1957 1958 /* 1959 * Force COW if csums exist in the range. This ensures that csums for a 1960 * given extent are either valid or do not exist. 1961 */ 1962 1963 csum_root = btrfs_csum_root(root->fs_info, args->disk_bytenr); 1964 ret = btrfs_lookup_csums_list(csum_root, args->disk_bytenr, 1965 args->disk_bytenr + args->num_bytes - 1, 1966 NULL, nowait); 1967 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1968 if (ret != 0) 1969 goto out; 1970 1971 can_nocow = 1; 1972 out: 1973 if (args->free_path && path) 1974 btrfs_free_path(path); 1975 1976 return ret < 0 ? ret : can_nocow; 1977 } 1978 1979 /* 1980 * Run for the NOCOW writeback path. This checks for snapshots or COW copies 1981 * of the extents that exist in the file, and COWs the file as required.
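* * The range is walked one file extent item at a time: ranges that pass can_nocow_file_extent() get an ordered extent against the existing disk extent, while everything else is accumulated and handed to fallback_to_cow().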
1982 * 1983 * If no cow copies or snapshots exist, we write directly to the existing 1984 * blocks on disk 1985 */ 1986 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1987 struct page *locked_page, 1988 const u64 start, const u64 end) 1989 { 1990 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1991 struct btrfs_root *root = inode->root; 1992 struct btrfs_path *path; 1993 u64 cow_start = (u64)-1; 1994 u64 cur_offset = start; 1995 int ret; 1996 bool check_prev = true; 1997 u64 ino = btrfs_ino(inode); 1998 struct can_nocow_file_extent_args nocow_args = { 0 }; 1999 2000 /* 2001 * Normally on a zoned device we're only doing COW writes, but relocation 2002 * on a zoned filesystem serializes the I/O so that we're only 2003 * writing sequentially, and then we can end up here as well. 2004 */ 2005 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root)); 2006 2007 path = btrfs_alloc_path(); 2008 if (!path) { 2009 ret = -ENOMEM; 2010 goto error; 2011 } 2012 2013 nocow_args.end = end; 2014 nocow_args.writeback_path = true; 2015 2016 while (cur_offset <= end) { 2017 struct btrfs_block_group *nocow_bg = NULL; 2018 struct btrfs_ordered_extent *ordered; 2019 struct btrfs_key found_key; 2020 struct btrfs_file_extent_item *fi; 2021 struct extent_buffer *leaf; 2022 struct extent_state *cached_state = NULL; 2023 u64 extent_end; 2024 u64 ram_bytes; 2025 u64 nocow_end; 2026 int extent_type; 2027 bool is_prealloc; 2028 2029 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 2030 cur_offset, 0); 2031 if (ret < 0) 2032 goto error; 2033 2034 /* 2035 * If there is no extent for our range when doing the initial 2036 * search, then go back to the previous slot as it will be the 2037 * one containing the search offset. 2038 */ 2039 if (ret > 0 && path->slots[0] > 0 && check_prev) { 2040 leaf = path->nodes[0]; 2041 btrfs_item_key_to_cpu(leaf, &found_key, 2042 path->slots[0] - 1); 2043 if (found_key.objectid == ino && 2044 found_key.type == BTRFS_EXTENT_DATA_KEY) 2045 path->slots[0]--; 2046 } 2047 check_prev = false; 2048 next_slot: 2049 /* Go to next leaf if we have exhausted the current one */ 2050 leaf = path->nodes[0]; 2051 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2052 ret = btrfs_next_leaf(root, path); 2053 if (ret < 0) 2054 goto error; 2055 if (ret > 0) 2056 break; 2057 leaf = path->nodes[0]; 2058 } 2059 2060 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2061 2062 /* Didn't find anything for our INO */ 2063 if (found_key.objectid > ino) 2064 break; 2065 /* 2066 * Keep searching until we find an EXTENT_DATA key or there are no 2067 * more extents for this inode 2068 */ 2069 if (WARN_ON_ONCE(found_key.objectid < ino) || 2070 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2071 path->slots[0]++; 2072 goto next_slot; 2073 } 2074 2075 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2076 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2077 found_key.offset > end) 2078 break; 2079 2080 /* 2081 * If the found extent starts after the requested offset, then 2082 * adjust extent_end to be right before this extent begins 2083 */ 2084 if (found_key.offset > cur_offset) { 2085 extent_end = found_key.offset; 2086 extent_type = 0; 2087 goto must_cow; 2088 } 2089 2090 /* 2091 * Found an extent which begins before our range and potentially 2092 * intersects it 2093 */ 2094 fi = btrfs_item_ptr(leaf, path->slots[0], 2095 struct btrfs_file_extent_item); 2096 extent_type = btrfs_file_extent_type(leaf, fi); 2097 /* If this is triggered then we have a memory corruption.
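* The extent type is read straight from the on-disk leaf, so an out-of-range value means the leaf is corrupted; warn and return -EUCLEAN instead of carrying on.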
*/ 2098 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2099 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2100 ret = -EUCLEAN; 2101 goto error; 2102 } 2103 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 2104 extent_end = btrfs_file_extent_end(path); 2105 2106 /* 2107 * If the extent we got ends before our current offset, skip to 2108 * the next extent. 2109 */ 2110 if (extent_end <= cur_offset) { 2111 path->slots[0]++; 2112 goto next_slot; 2113 } 2114 2115 nocow_args.start = cur_offset; 2116 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2117 if (ret < 0) 2118 goto error; 2119 if (ret == 0) 2120 goto must_cow; 2121 2122 ret = 0; 2123 nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 2124 if (!nocow_bg) { 2125 must_cow: 2126 /* 2127 * If we can't perform NOCOW writeback for the range, 2128 * then record the beginning of the range that needs to 2129 * be COWed. It will be written out before the next 2130 * NOCOW range if we find one, or when exiting this 2131 * loop. 2132 */ 2133 if (cow_start == (u64)-1) 2134 cow_start = cur_offset; 2135 cur_offset = extent_end; 2136 if (cur_offset > end) 2137 break; 2138 if (!path->nodes[0]) 2139 continue; 2140 path->slots[0]++; 2141 goto next_slot; 2142 } 2143 2144 /* 2145 * COW range from cow_start to found_key.offset - 1. As the key 2146 * will contain the beginning of the first extent that can be 2147 * NOCOW, following one which needs to be COW'ed 2148 */ 2149 if (cow_start != (u64)-1) { 2150 ret = fallback_to_cow(inode, locked_page, 2151 cow_start, found_key.offset - 1); 2152 cow_start = (u64)-1; 2153 if (ret) { 2154 btrfs_dec_nocow_writers(nocow_bg); 2155 goto error; 2156 } 2157 } 2158 2159 nocow_end = cur_offset + nocow_args.num_bytes - 1; 2160 lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state); 2161 2162 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; 2163 if (is_prealloc) { 2164 u64 orig_start = found_key.offset - nocow_args.extent_offset; 2165 struct extent_map *em; 2166 2167 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 2168 orig_start, 2169 nocow_args.disk_bytenr, /* block_start */ 2170 nocow_args.num_bytes, /* block_len */ 2171 nocow_args.disk_num_bytes, /* orig_block_len */ 2172 ram_bytes, BTRFS_COMPRESS_NONE, 2173 BTRFS_ORDERED_PREALLOC); 2174 if (IS_ERR(em)) { 2175 unlock_extent(&inode->io_tree, cur_offset, 2176 nocow_end, &cached_state); 2177 btrfs_dec_nocow_writers(nocow_bg); 2178 ret = PTR_ERR(em); 2179 goto error; 2180 } 2181 free_extent_map(em); 2182 } 2183 2184 ordered = btrfs_alloc_ordered_extent(inode, cur_offset, 2185 nocow_args.num_bytes, nocow_args.num_bytes, 2186 nocow_args.disk_bytenr, nocow_args.num_bytes, 0, 2187 is_prealloc 2188 ? (1 << BTRFS_ORDERED_PREALLOC) 2189 : (1 << BTRFS_ORDERED_NOCOW), 2190 BTRFS_COMPRESS_NONE); 2191 btrfs_dec_nocow_writers(nocow_bg); 2192 if (IS_ERR(ordered)) { 2193 if (is_prealloc) { 2194 btrfs_drop_extent_map_range(inode, cur_offset, 2195 nocow_end, false); 2196 } 2197 unlock_extent(&inode->io_tree, cur_offset, 2198 nocow_end, &cached_state); 2199 ret = PTR_ERR(ordered); 2200 goto error; 2201 } 2202 2203 if (btrfs_is_data_reloc_root(root)) 2204 /* 2205 * Error handled later, as we must prevent 2206 * extent_clear_unlock_delalloc() in error handler 2207 * from freeing metadata of created ordered extent. 
2208 */ 2209 ret = btrfs_reloc_clone_csums(ordered); 2210 btrfs_put_ordered_extent(ordered); 2211 2212 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2213 locked_page, &cached_state, 2214 EXTENT_LOCKED | EXTENT_DELALLOC | 2215 EXTENT_CLEAR_DATA_RESV, 2216 PAGE_UNLOCK | PAGE_SET_ORDERED); 2217 2218 cur_offset = extent_end; 2219 2220 /* 2221 * btrfs_reloc_clone_csums() error, now we're OK to call error 2222 * handler, as metadata for created ordered extent will only 2223 * be freed by btrfs_finish_ordered_io(). 2224 */ 2225 if (ret) 2226 goto error; 2227 } 2228 btrfs_release_path(path); 2229 2230 if (cur_offset <= end && cow_start == (u64)-1) 2231 cow_start = cur_offset; 2232 2233 if (cow_start != (u64)-1) { 2234 cur_offset = end; 2235 ret = fallback_to_cow(inode, locked_page, cow_start, end); 2236 cow_start = (u64)-1; 2237 if (ret) 2238 goto error; 2239 } 2240 2241 btrfs_free_path(path); 2242 return 0; 2243 2244 error: 2245 /* 2246 * If an error happened while a COW region is outstanding, cur_offset 2247 * needs to be reset to cow_start to ensure the COW region is unlocked 2248 * as well. 2249 */ 2250 if (cow_start != (u64)-1) 2251 cur_offset = cow_start; 2252 2253 /* 2254 * We need to lock the extent here because we're clearing DELALLOC and 2255 * we're not locked at this point. 2256 */ 2257 if (cur_offset < end) { 2258 struct extent_state *cached = NULL; 2259 2260 lock_extent(&inode->io_tree, cur_offset, end, &cached); 2261 extent_clear_unlock_delalloc(inode, cur_offset, end, 2262 locked_page, &cached, 2263 EXTENT_LOCKED | EXTENT_DELALLOC | 2264 EXTENT_DEFRAG | 2265 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2266 PAGE_START_WRITEBACK | 2267 PAGE_END_WRITEBACK); 2268 } 2269 btrfs_free_path(path); 2270 return ret; 2271 } 2272 2273 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2274 { 2275 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2276 if (inode->defrag_bytes && 2277 test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) 2278 return false; 2279 return true; 2280 } 2281 return false; 2282 } 2283 2284 /* 2285 * Function to process delayed allocation (create CoW) for ranges which are 2286 * being touched for the first time. 2287 */ 2288 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2289 u64 start, u64 end, struct writeback_control *wbc) 2290 { 2291 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2292 int ret; 2293 2294 /* 2295 * The range must cover part of the @locked_page, or a return of 1 2296 * can confuse the caller. 
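* A return of 1 means the whole range, locked_page included, was already submitted and unlocked (by async compressed writeback or the zoned COW path below), so the caller must not touch the pages again.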
2297 */ 2298 ASSERT(!(end <= page_offset(locked_page) || 2299 start >= page_offset(locked_page) + PAGE_SIZE)); 2300 2301 if (should_nocow(inode, start, end)) { 2302 ret = run_delalloc_nocow(inode, locked_page, start, end); 2303 goto out; 2304 } 2305 2306 if (btrfs_inode_can_compress(inode) && 2307 inode_need_compress(inode, start, end) && 2308 run_delalloc_compressed(inode, locked_page, start, end, wbc)) 2309 return 1; 2310 2311 if (zoned) 2312 ret = run_delalloc_cow(inode, locked_page, start, end, wbc, 2313 true); 2314 else 2315 ret = cow_file_range(inode, locked_page, start, end, NULL, 2316 false, false); 2317 2318 out: 2319 if (ret < 0) 2320 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2321 end - start + 1); 2322 return ret; 2323 } 2324 2325 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2326 struct extent_state *orig, u64 split) 2327 { 2328 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2329 u64 size; 2330 2331 lockdep_assert_held(&inode->io_tree.lock); 2332 2333 /* not delalloc, ignore it */ 2334 if (!(orig->state & EXTENT_DELALLOC)) 2335 return; 2336 2337 size = orig->end - orig->start + 1; 2338 if (size > fs_info->max_extent_size) { 2339 u32 num_extents; 2340 u64 new_size; 2341 2342 /* 2343 * See the explanation in btrfs_merge_delalloc_extent, the same 2344 * applies here, just in reverse. 2345 */ 2346 new_size = orig->end - split + 1; 2347 num_extents = count_max_extents(fs_info, new_size); 2348 new_size = split - orig->start; 2349 num_extents += count_max_extents(fs_info, new_size); 2350 if (count_max_extents(fs_info, size) >= num_extents) 2351 return; 2352 } 2353 2354 spin_lock(&inode->lock); 2355 btrfs_mod_outstanding_extents(inode, 1); 2356 spin_unlock(&inode->lock); 2357 } 2358 2359 /* 2360 * Handle merged delayed allocation extents so we can keep track of new extents 2361 * that are just merged onto old extents, such as when we are doing sequential 2362 * writes, so we can properly account for the metadata space we'll need. 2363 */ 2364 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2365 struct extent_state *other) 2366 { 2367 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2368 u64 new_size, old_size; 2369 u32 num_extents; 2370 2371 lockdep_assert_held(&inode->io_tree.lock); 2372 2373 /* not delalloc, ignore it */ 2374 if (!(other->state & EXTENT_DELALLOC)) 2375 return; 2376 2377 if (new->start > other->start) 2378 new_size = new->end - other->start + 1; 2379 else 2380 new_size = other->end - new->start + 1; 2381 2382 /* we're not bigger than the max, unreserve the space and go */ 2383 if (new_size <= fs_info->max_extent_size) { 2384 spin_lock(&inode->lock); 2385 btrfs_mod_outstanding_extents(inode, -1); 2386 spin_unlock(&inode->lock); 2387 return; 2388 } 2389 2390 /* 2391 * We have to add up either side to figure out how many extents were 2392 * accounted for before we merged into one big extent. If the number of 2393 * extents we accounted for is <= the amount we need for the new range 2394 * then we can return, otherwise drop. Think of it like this 2395 * 2396 * [ 4k][MAX_SIZE] 2397 * 2398 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2399 * need 2 outstanding extents, on one side we have 1 and the other side 2400 * we have 1 so they are == and we can return. 
But in this case 2401 * 2402 * [MAX_SIZE+4k][MAX_SIZE+4k] 2403 * 2404 * Each range on its own accounts for 2 extents, but merged together 2405 * they are only 3 extents worth of accounting, so we need to drop in 2406 * this case. 2407 */ 2408 old_size = other->end - other->start + 1; 2409 num_extents = count_max_extents(fs_info, old_size); 2410 old_size = new->end - new->start + 1; 2411 num_extents += count_max_extents(fs_info, old_size); 2412 if (count_max_extents(fs_info, new_size) >= num_extents) 2413 return; 2414 2415 spin_lock(&inode->lock); 2416 btrfs_mod_outstanding_extents(inode, -1); 2417 spin_unlock(&inode->lock); 2418 } 2419 2420 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode) 2421 { 2422 struct btrfs_root *root = inode->root; 2423 struct btrfs_fs_info *fs_info = root->fs_info; 2424 2425 spin_lock(&root->delalloc_lock); 2426 ASSERT(list_empty(&inode->delalloc_inodes)); 2427 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2428 root->nr_delalloc_inodes++; 2429 if (root->nr_delalloc_inodes == 1) { 2430 spin_lock(&fs_info->delalloc_root_lock); 2431 ASSERT(list_empty(&root->delalloc_root)); 2432 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots); 2433 spin_unlock(&fs_info->delalloc_root_lock); 2434 } 2435 spin_unlock(&root->delalloc_lock); 2436 } 2437 2438 void btrfs_del_delalloc_inode(struct btrfs_inode *inode) 2439 { 2440 struct btrfs_root *root = inode->root; 2441 struct btrfs_fs_info *fs_info = root->fs_info; 2442 2443 lockdep_assert_held(&root->delalloc_lock); 2444 2445 /* 2446 * We may be called after the inode was already deleted from the list, 2447 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(), 2448 * and then later through btrfs_clear_delalloc_extent() while the inode 2449 * still has ->delalloc_bytes > 0. 2450 */ 2451 if (!list_empty(&inode->delalloc_inodes)) { 2452 list_del_init(&inode->delalloc_inodes); 2453 root->nr_delalloc_inodes--; 2454 if (!root->nr_delalloc_inodes) { 2455 ASSERT(list_empty(&root->delalloc_inodes)); 2456 spin_lock(&fs_info->delalloc_root_lock); 2457 ASSERT(!list_empty(&root->delalloc_root)); 2458 list_del_init(&root->delalloc_root); 2459 spin_unlock(&fs_info->delalloc_root_lock); 2460 } 2461 } 2462 } 2463 2464 /* 2465 * Properly track delayed allocation bytes in the inode and maintain the 2466 * list of inodes that have pending delalloc work to be done.
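* * This is the io_tree's set-bit hook, so it always runs with the io_tree lock held (see the lockdep assertion below).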
2467 */ 2468 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2469 u32 bits) 2470 { 2471 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2472 2473 lockdep_assert_held(&inode->io_tree.lock); 2474 2475 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2476 WARN_ON(1); 2477 /* 2478 * set_bit and clear bit hooks normally require _irqsave/restore 2479 * but in this case, we are only testing for the DELALLOC 2480 * bit, which is only set or cleared with irqs on 2481 */ 2482 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2483 u64 len = state->end + 1 - state->start; 2484 u64 prev_delalloc_bytes; 2485 u32 num_extents = count_max_extents(fs_info, len); 2486 2487 spin_lock(&inode->lock); 2488 btrfs_mod_outstanding_extents(inode, num_extents); 2489 spin_unlock(&inode->lock); 2490 2491 /* For sanity tests */ 2492 if (btrfs_is_testing(fs_info)) 2493 return; 2494 2495 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2496 fs_info->delalloc_batch); 2497 spin_lock(&inode->lock); 2498 prev_delalloc_bytes = inode->delalloc_bytes; 2499 inode->delalloc_bytes += len; 2500 if (bits & EXTENT_DEFRAG) 2501 inode->defrag_bytes += len; 2502 spin_unlock(&inode->lock); 2503 2504 /* 2505 * We don't need to be under the protection of the inode's lock, 2506 * because we are called while holding the inode's io_tree lock 2507 * and are therefore protected against concurrent calls of this 2508 * function and btrfs_clear_delalloc_extent(). 2509 */ 2510 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0) 2511 btrfs_add_delalloc_inode(inode); 2512 } 2513 2514 if (!(state->state & EXTENT_DELALLOC_NEW) && 2515 (bits & EXTENT_DELALLOC_NEW)) { 2516 spin_lock(&inode->lock); 2517 inode->new_delalloc_bytes += state->end + 1 - state->start; 2518 spin_unlock(&inode->lock); 2519 } 2520 } 2521 2522 /* 2523 * Once a range is no longer delalloc this function ensures that proper 2524 * accounting happens. 2525 */ 2526 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2527 struct extent_state *state, u32 bits) 2528 { 2529 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2530 u64 len = state->end + 1 - state->start; 2531 u32 num_extents = count_max_extents(fs_info, len); 2532 2533 lockdep_assert_held(&inode->io_tree.lock); 2534 2535 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2536 spin_lock(&inode->lock); 2537 inode->defrag_bytes -= len; 2538 spin_unlock(&inode->lock); 2539 } 2540 2541 /* 2542 * set_bit and clear bit hooks normally require _irqsave/restore 2543 * but in this case, we are only testing for the DELALLOC 2544 * bit, which is only set or cleared with irqs on 2545 */ 2546 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2547 struct btrfs_root *root = inode->root; 2548 u64 new_delalloc_bytes; 2549 2550 spin_lock(&inode->lock); 2551 btrfs_mod_outstanding_extents(inode, -num_extents); 2552 spin_unlock(&inode->lock); 2553 2554 /* 2555 * We don't reserve metadata space for space cache inodes so we 2556 * don't need to call delalloc_release_metadata if there is an 2557 * error. 2558 */ 2559 if (bits & EXTENT_CLEAR_META_RESV && 2560 root != fs_info->tree_root) 2561 btrfs_delalloc_release_metadata(inode, len, true); 2562 2563 /* For sanity tests. 
*/ 2564 if (btrfs_is_testing(fs_info)) 2565 return; 2566 2567 if (!btrfs_is_data_reloc_root(root) && 2568 !btrfs_is_free_space_inode(inode) && 2569 !(state->state & EXTENT_NORESERVE) && 2570 (bits & EXTENT_CLEAR_DATA_RESV)) 2571 btrfs_free_reserved_data_space_noquota(fs_info, len); 2572 2573 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2574 fs_info->delalloc_batch); 2575 spin_lock(&inode->lock); 2576 inode->delalloc_bytes -= len; 2577 new_delalloc_bytes = inode->delalloc_bytes; 2578 spin_unlock(&inode->lock); 2579 2580 /* 2581 * We don't need to be under the protection of the inode's lock, 2582 * because we are called while holding the inode's io_tree lock 2583 * and are therefore protected against concurrent calls of this 2584 * function and btrfs_set_delalloc_extent(). 2585 */ 2586 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) { 2587 spin_lock(&root->delalloc_lock); 2588 btrfs_del_delalloc_inode(inode); 2589 spin_unlock(&root->delalloc_lock); 2590 } 2591 } 2592 2593 if ((state->state & EXTENT_DELALLOC_NEW) && 2594 (bits & EXTENT_DELALLOC_NEW)) { 2595 spin_lock(&inode->lock); 2596 ASSERT(inode->new_delalloc_bytes >= len); 2597 inode->new_delalloc_bytes -= len; 2598 if (bits & EXTENT_ADD_INODE_BYTES) 2599 inode_add_bytes(&inode->vfs_inode, len); 2600 spin_unlock(&inode->lock); 2601 } 2602 } 2603 2604 static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, 2605 struct btrfs_ordered_extent *ordered) 2606 { 2607 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 2608 u64 len = bbio->bio.bi_iter.bi_size; 2609 struct btrfs_ordered_extent *new; 2610 int ret; 2611 2612 /* Must always be called for the beginning of an ordered extent. */ 2613 if (WARN_ON_ONCE(start != ordered->disk_bytenr)) 2614 return -EINVAL; 2615 2616 /* No need to split if the ordered extent covers the entire bio. */ 2617 if (ordered->disk_num_bytes == len) { 2618 refcount_inc(&ordered->refs); 2619 bbio->ordered = ordered; 2620 return 0; 2621 } 2622 2623 /* 2624 * Don't split the extent_map for NOCOW extents, as we're writing into 2625 * a pre-existing one. 2626 */ 2627 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 2628 ret = split_extent_map(bbio->inode, bbio->file_offset, 2629 ordered->num_bytes, len, 2630 ordered->disk_bytenr); 2631 if (ret) 2632 return ret; 2633 } 2634 2635 new = btrfs_split_ordered_extent(ordered, len); 2636 if (IS_ERR(new)) 2637 return PTR_ERR(new); 2638 bbio->ordered = new; 2639 return 0; 2640 } 2641 2642 /* 2643 * Given a list of ordered sums, record them in the inode. This happens 2644 * at IO completion time based on sums calculated at bio submission time.
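* * All sums in the list belong to a single ordered extent and thus live in the same csum root, so the root is looked up only once, from the logical address of the first sum.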
2645 */ 2646 static int add_pending_csums(struct btrfs_trans_handle *trans, 2647 struct list_head *list) 2648 { 2649 struct btrfs_ordered_sum *sum; 2650 struct btrfs_root *csum_root = NULL; 2651 int ret; 2652 2653 list_for_each_entry(sum, list, list) { 2654 trans->adding_csums = true; 2655 if (!csum_root) 2656 csum_root = btrfs_csum_root(trans->fs_info, 2657 sum->logical); 2658 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2659 trans->adding_csums = false; 2660 if (ret) 2661 return ret; 2662 } 2663 return 0; 2664 } 2665 2666 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2667 const u64 start, 2668 const u64 len, 2669 struct extent_state **cached_state) 2670 { 2671 u64 search_start = start; 2672 const u64 end = start + len - 1; 2673 2674 while (search_start < end) { 2675 const u64 search_len = end - search_start + 1; 2676 struct extent_map *em; 2677 u64 em_len; 2678 int ret = 0; 2679 2680 em = btrfs_get_extent(inode, NULL, search_start, search_len); 2681 if (IS_ERR(em)) 2682 return PTR_ERR(em); 2683 2684 if (em->block_start != EXTENT_MAP_HOLE) 2685 goto next; 2686 2687 em_len = em->len; 2688 if (em->start < search_start) 2689 em_len -= search_start - em->start; 2690 if (em_len > search_len) 2691 em_len = search_len; 2692 2693 ret = set_extent_bit(&inode->io_tree, search_start, 2694 search_start + em_len - 1, 2695 EXTENT_DELALLOC_NEW, cached_state); 2696 next: 2697 search_start = extent_map_end(em); 2698 free_extent_map(em); 2699 if (ret) 2700 return ret; 2701 } 2702 return 0; 2703 } 2704 2705 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2706 unsigned int extra_bits, 2707 struct extent_state **cached_state) 2708 { 2709 WARN_ON(PAGE_ALIGNED(end)); 2710 2711 if (start >= i_size_read(&inode->vfs_inode) && 2712 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2713 /* 2714 * There can't be any extents following eof in this case so just 2715 * set the delalloc new bit for the range directly. 2716 */ 2717 extra_bits |= EXTENT_DELALLOC_NEW; 2718 } else { 2719 int ret; 2720 2721 ret = btrfs_find_new_delalloc_bytes(inode, start, 2722 end + 1 - start, 2723 cached_state); 2724 if (ret) 2725 return ret; 2726 } 2727 2728 return set_extent_bit(&inode->io_tree, start, end, 2729 EXTENT_DELALLOC | extra_bits, cached_state); 2730 } 2731 2732 /* see btrfs_writepage_start_hook for details on why this is required */ 2733 struct btrfs_writepage_fixup { 2734 struct page *page; 2735 struct btrfs_inode *inode; 2736 struct btrfs_work work; 2737 }; 2738 2739 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2740 { 2741 struct btrfs_writepage_fixup *fixup = 2742 container_of(work, struct btrfs_writepage_fixup, work); 2743 struct btrfs_ordered_extent *ordered; 2744 struct extent_state *cached_state = NULL; 2745 struct extent_changeset *data_reserved = NULL; 2746 struct page *page = fixup->page; 2747 struct btrfs_inode *inode = fixup->inode; 2748 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2749 u64 page_start = page_offset(page); 2750 u64 page_end = page_offset(page) + PAGE_SIZE - 1; 2751 int ret = 0; 2752 bool free_delalloc_space = true; 2753 2754 /* 2755 * This is similar to page_mkwrite, we need to reserve the space before 2756 * we take the page lock. 2757 */ 2758 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2759 PAGE_SIZE); 2760 again: 2761 lock_page(page); 2762 2763 /* 2764 * Before we queued this fixup, we took a reference on the page. 
2765 * page->mapping may go NULL, but it shouldn't be moved to a different 2766 * address space. 2767 */ 2768 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2769 /* 2770 * Unfortunately this is a little tricky, either 2771 * 2772 * 1) We got here and our page had already been dealt with and 2773 * we reserved our space, thus ret == 0, so we need to just 2774 * drop our space reservation and bail. This can happen the 2775 * first time we come into the fixup worker, or could happen 2776 * while waiting for the ordered extent. 2777 * 2) Our page was already dealt with, but we happened to get an 2778 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2779 * this case we obviously don't have anything to release, but 2780 * because the page was already dealt with we don't want to 2781 * mark the page with an error, so make sure we're resetting 2782 * ret to 0. This is why we have this check _before_ the ret 2783 * check, because we do not want to have a surprise ENOSPC 2784 * when the page was already properly dealt with. 2785 */ 2786 if (!ret) { 2787 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2788 btrfs_delalloc_release_space(inode, data_reserved, 2789 page_start, PAGE_SIZE, 2790 true); 2791 } 2792 ret = 0; 2793 goto out_page; 2794 } 2795 2796 /* 2797 * We can't mess with the page state unless it is locked, so now that 2798 * it is locked bail if we failed to make our space reservation. 2799 */ 2800 if (ret) 2801 goto out_page; 2802 2803 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2804 2805 /* already ordered? We're done */ 2806 if (PageOrdered(page)) 2807 goto out_reserved; 2808 2809 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2810 if (ordered) { 2811 unlock_extent(&inode->io_tree, page_start, page_end, 2812 &cached_state); 2813 unlock_page(page); 2814 btrfs_start_ordered_extent(ordered); 2815 btrfs_put_ordered_extent(ordered); 2816 goto again; 2817 } 2818 2819 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2820 &cached_state); 2821 if (ret) 2822 goto out_reserved; 2823 2824 /* 2825 * Everything went as planned, we're now the owner of a dirty page with 2826 * delayed allocation bits set and space reserved for our COW 2827 * destination. 2828 * 2829 * The page was dirty when we started, nothing should have cleaned it. 2830 */ 2831 BUG_ON(!PageDirty(page)); 2832 free_delalloc_space = false; 2833 out_reserved: 2834 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2835 if (free_delalloc_space) 2836 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2837 PAGE_SIZE, true); 2838 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2839 out_page: 2840 if (ret) { 2841 /* 2842 * We hit ENOSPC or other errors. Update the mapping and page 2843 * to reflect the errors and clean the page. 2844 */ 2845 mapping_set_error(page->mapping, ret); 2846 btrfs_mark_ordered_io_finished(inode, page, page_start, 2847 PAGE_SIZE, !ret); 2848 clear_page_dirty_for_io(page); 2849 } 2850 btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE); 2851 unlock_page(page); 2852 put_page(page); 2853 kfree(fixup); 2854 extent_changeset_free(data_reserved); 2855 /* 2856 * As a precaution, do a delayed iput in case it would be the last iput 2857 * that could need flushing space. Recursing back to fixup worker would 2858 * deadlock. 
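* Delayed iputs are processed at transaction commit time, superblock commit or by the cleaner kthread, safely outside this worker.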
2859 */ 2860 btrfs_add_delayed_iput(inode); 2861 } 2862 2863 /* 2864 * There are a few paths in the higher layers of the kernel that directly 2865 * set the page dirty bit without asking the filesystem if it is a 2866 * good idea. This causes problems because we want to make sure COW 2867 * properly happens and the data=ordered rules are followed. 2868 * 2869 * In our case any range that doesn't have the ORDERED bit set 2870 * hasn't been properly setup for IO. We kick off an async process 2871 * to fix it up. The async helper will wait for ordered extents, set 2872 * the delalloc bit and make it safe to write the page. 2873 */ 2874 int btrfs_writepage_cow_fixup(struct page *page) 2875 { 2876 struct inode *inode = page->mapping->host; 2877 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 2878 struct btrfs_writepage_fixup *fixup; 2879 2880 /* This page has ordered extent covering it already */ 2881 if (PageOrdered(page)) 2882 return 0; 2883 2884 /* 2885 * PageChecked is set below when we create a fixup worker for this page, 2886 * don't try to create another one if we're already PageChecked() 2887 * 2888 * The extent_io writepage code will redirty the page if we send back 2889 * EAGAIN. 2890 */ 2891 if (PageChecked(page)) 2892 return -EAGAIN; 2893 2894 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2895 if (!fixup) 2896 return -EAGAIN; 2897 2898 /* 2899 * We are already holding a reference to this inode from 2900 * write_cache_pages. We need to hold it because the space reservation 2901 * takes place outside of the page lock, and we can't trust 2902 * page->mapping outside of the page lock. 2903 */ 2904 ihold(inode); 2905 btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE); 2906 get_page(page); 2907 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL); 2908 fixup->page = page; 2909 fixup->inode = BTRFS_I(inode); 2910 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2911 2912 return -EAGAIN; 2913 } 2914 2915 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2916 struct btrfs_inode *inode, u64 file_pos, 2917 struct btrfs_file_extent_item *stack_fi, 2918 const bool update_inode_bytes, 2919 u64 qgroup_reserved) 2920 { 2921 struct btrfs_root *root = inode->root; 2922 const u64 sectorsize = root->fs_info->sectorsize; 2923 struct btrfs_path *path; 2924 struct extent_buffer *leaf; 2925 struct btrfs_key ins; 2926 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2927 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2928 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2929 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2930 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2931 struct btrfs_drop_extents_args drop_args = { 0 }; 2932 int ret; 2933 2934 path = btrfs_alloc_path(); 2935 if (!path) 2936 return -ENOMEM; 2937 2938 /* 2939 * we may be replacing one extent in the tree with another. 2940 * The new extent is pinned in the extent map, and we don't want 2941 * to drop it from the cache until it is completely in the btree. 2942 * 2943 * So, tell btrfs_drop_extents to leave this extent in the cache. 2944 * the caller is expected to unpin it and allow it to be merged 2945 * with the others. 
*/ 2947 drop_args.path = path; 2948 drop_args.start = file_pos; 2949 drop_args.end = file_pos + num_bytes; 2950 drop_args.replace_extent = true; 2951 drop_args.extent_item_size = sizeof(*stack_fi); 2952 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2953 if (ret) 2954 goto out; 2955 2956 if (!drop_args.extent_inserted) { 2957 ins.objectid = btrfs_ino(inode); 2958 ins.offset = file_pos; 2959 ins.type = BTRFS_EXTENT_DATA_KEY; 2960 2961 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2962 sizeof(*stack_fi)); 2963 if (ret) 2964 goto out; 2965 } 2966 leaf = path->nodes[0]; 2967 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2968 write_extent_buffer(leaf, stack_fi, 2969 btrfs_item_ptr_offset(leaf, path->slots[0]), 2970 sizeof(struct btrfs_file_extent_item)); 2971 2972 btrfs_mark_buffer_dirty(trans, leaf); 2973 btrfs_release_path(path); 2974 2975 /* 2976 * If we dropped an inline extent here, we know the range where it was 2977 * located was not marked with the EXTENT_DELALLOC_NEW bit, so we update 2978 * the number of bytes only for that range containing the inline extent. 2979 * The remainder of the range will be processed when clearing the 2980 * EXTENT_DELALLOC_NEW bit through the ordered extent completion. 2981 */ 2982 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 2983 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 2984 2985 inline_size = drop_args.bytes_found - inline_size; 2986 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 2987 drop_args.bytes_found -= inline_size; 2988 num_bytes -= sectorsize; 2989 } 2990 2991 if (update_inode_bytes) 2992 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 2993 2994 ins.objectid = disk_bytenr; 2995 ins.offset = disk_num_bytes; 2996 ins.type = BTRFS_EXTENT_ITEM_KEY; 2997 2998 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2999 if (ret) 3000 goto out; 3001 3002 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 3003 file_pos - offset, 3004 qgroup_reserved, &ins); 3005 out: 3006 btrfs_free_path(path); 3007 3008 return ret; 3009 } 3010 3011 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 3012 u64 start, u64 len) 3013 { 3014 struct btrfs_block_group *cache; 3015 3016 cache = btrfs_lookup_block_group(fs_info, start); 3017 ASSERT(cache); 3018 3019 spin_lock(&cache->lock); 3020 cache->delalloc_bytes -= len; 3021 spin_unlock(&cache->lock); 3022 3023 btrfs_put_block_group(cache); 3024 } 3025 3026 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 3027 struct btrfs_ordered_extent *oe) 3028 { 3029 struct btrfs_file_extent_item stack_fi; 3030 bool update_inode_bytes; 3031 u64 num_bytes = oe->num_bytes; 3032 u64 ram_bytes = oe->ram_bytes; 3033 3034 memset(&stack_fi, 0, sizeof(stack_fi)); 3035 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 3036 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 3037 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 3038 oe->disk_num_bytes); 3039 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 3040 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { 3041 num_bytes = oe->truncated_len; 3042 ram_bytes = num_bytes; 3043 } 3044 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 3045 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 3046 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 3047 /* Encryption and other encoding is reserved and all 0 */ 3048
3049 /* 3050 * For delalloc, when completing an ordered extent we update the inode's 3051 * bytes when clearing the range in the inode's io tree, so pass false 3052 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3053 * except if the ordered extent was truncated. 3054 */ 3055 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3056 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3057 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3058 3059 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3060 oe->file_offset, &stack_fi, 3061 update_inode_bytes, oe->qgroup_rsv); 3062 } 3063 3064 /* 3065 * As ordered data IO finishes, this gets called so we can finish 3066 * an ordered extent if the range of bytes in the file it covers are 3067 * fully written. 3068 */ 3069 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3070 { 3071 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3072 struct btrfs_root *root = inode->root; 3073 struct btrfs_fs_info *fs_info = root->fs_info; 3074 struct btrfs_trans_handle *trans = NULL; 3075 struct extent_io_tree *io_tree = &inode->io_tree; 3076 struct extent_state *cached_state = NULL; 3077 u64 start, end; 3078 int compress_type = 0; 3079 int ret = 0; 3080 u64 logical_len = ordered_extent->num_bytes; 3081 bool freespace_inode; 3082 bool truncated = false; 3083 bool clear_reserved_extent = true; 3084 unsigned int clear_bits = EXTENT_DEFRAG; 3085 3086 start = ordered_extent->file_offset; 3087 end = start + ordered_extent->num_bytes - 1; 3088 3089 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3090 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3091 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3092 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3093 clear_bits |= EXTENT_DELALLOC_NEW; 3094 3095 freespace_inode = btrfs_is_free_space_inode(inode); 3096 if (!freespace_inode) 3097 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3098 3099 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3100 ret = -EIO; 3101 goto out; 3102 } 3103 3104 if (btrfs_is_zoned(fs_info)) 3105 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3106 ordered_extent->disk_num_bytes); 3107 3108 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3109 truncated = true; 3110 logical_len = ordered_extent->truncated_len; 3111 /* Truncated the entire extent, don't bother adding */ 3112 if (!logical_len) 3113 goto out; 3114 } 3115 3116 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3117 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3118 3119 btrfs_inode_safe_disk_i_size_write(inode, 0); 3120 if (freespace_inode) 3121 trans = btrfs_join_transaction_spacecache(root); 3122 else 3123 trans = btrfs_join_transaction(root); 3124 if (IS_ERR(trans)) { 3125 ret = PTR_ERR(trans); 3126 trans = NULL; 3127 goto out; 3128 } 3129 trans->block_rsv = &inode->block_rsv; 3130 ret = btrfs_update_inode_fallback(trans, inode); 3131 if (ret) /* -ENOMEM or corruption */ 3132 btrfs_abort_transaction(trans, ret); 3133 goto out; 3134 } 3135 3136 clear_bits |= EXTENT_LOCKED; 3137 lock_extent(io_tree, start, end, &cached_state); 3138 3139 if (freespace_inode) 3140 trans = btrfs_join_transaction_spacecache(root); 3141 else 3142 trans = btrfs_join_transaction(root); 3143 if (IS_ERR(trans)) { 3144 ret = PTR_ERR(trans); 3145 trans = NULL; 3146 goto out; 3147 } 3148 3149 trans->block_rsv = &inode->block_rsv; 3150 3151 ret = 
btrfs_insert_raid_extent(trans, ordered_extent); 3152 if (ret) 3153 goto out; 3154 3155 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3156 compress_type = ordered_extent->compress_type; 3157 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3158 BUG_ON(compress_type); 3159 ret = btrfs_mark_extent_written(trans, inode, 3160 ordered_extent->file_offset, 3161 ordered_extent->file_offset + 3162 logical_len); 3163 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3164 ordered_extent->disk_num_bytes); 3165 } else { 3166 BUG_ON(root == fs_info->tree_root); 3167 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3168 if (!ret) { 3169 clear_reserved_extent = false; 3170 btrfs_release_delalloc_bytes(fs_info, 3171 ordered_extent->disk_bytenr, 3172 ordered_extent->disk_num_bytes); 3173 } 3174 } 3175 if (ret < 0) { 3176 btrfs_abort_transaction(trans, ret); 3177 goto out; 3178 } 3179 3180 ret = unpin_extent_cache(inode, ordered_extent->file_offset, 3181 ordered_extent->num_bytes, trans->transid); 3182 if (ret < 0) { 3183 btrfs_abort_transaction(trans, ret); 3184 goto out; 3185 } 3186 3187 ret = add_pending_csums(trans, &ordered_extent->list); 3188 if (ret) { 3189 btrfs_abort_transaction(trans, ret); 3190 goto out; 3191 } 3192 3193 /* 3194 * If this is a new delalloc range, clear its new delalloc flag to 3195 * update the inode's number of bytes. This needs to be done first 3196 * before updating the inode item. 3197 */ 3198 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3199 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3200 clear_extent_bit(&inode->io_tree, start, end, 3201 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3202 &cached_state); 3203 3204 btrfs_inode_safe_disk_i_size_write(inode, 0); 3205 ret = btrfs_update_inode_fallback(trans, inode); 3206 if (ret) { /* -ENOMEM or corruption */ 3207 btrfs_abort_transaction(trans, ret); 3208 goto out; 3209 } 3210 out: 3211 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3212 &cached_state); 3213 3214 if (trans) 3215 btrfs_end_transaction(trans); 3216 3217 if (ret || truncated) { 3218 u64 unwritten_start = start; 3219 3220 /* 3221 * If we failed to finish this ordered extent for any reason we 3222 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3223 * extent, and mark the inode with the error if it wasn't 3224 * already set. Any error during writeback would have already 3225 * set the mapping error, so we need to set it if we're the ones 3226 * marking this ordered extent as failed. 3227 */ 3228 if (ret) 3229 btrfs_mark_ordered_extent_error(ordered_extent); 3230 3231 if (truncated) 3232 unwritten_start += logical_len; 3233 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3234 3235 /* 3236 * Drop extent maps for the part of the extent we didn't write. 3237 * 3238 * We have an exception here for the free_space_inode, this is 3239 * because when we do btrfs_get_extent() on the free space inode 3240 * we will search the commit root. If this is a new block group 3241 * we won't find anything, and we will trip over the assert in 3242 * writepage where we do ASSERT(em->block_start != 3243 * EXTENT_MAP_HOLE). 3244 * 3245 * Theoretically we could also skip this for any NOCOW extent as 3246 * we don't mess with the extent map tree in the NOCOW case, but 3247 * for now simply skip this if we are the free space inode. 
3248 */ 3249 if (!btrfs_is_free_space_inode(inode)) 3250 btrfs_drop_extent_map_range(inode, unwritten_start, 3251 end, false); 3252 3253 /* 3254 * If the ordered extent had an IOERR or something else went 3255 * wrong we need to return the space for this ordered extent 3256 * back to the allocator. We only free the extent in the 3257 * truncated case if we didn't write out the extent at all. 3258 * 3259 * If we made it past insert_reserved_file_extent before we 3260 * errored out then we don't need to do this as the accounting 3261 * has already been done. 3262 */ 3263 if ((ret || !logical_len) && 3264 clear_reserved_extent && 3265 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3266 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3267 /* 3268 * Discard the range before returning it back to the 3269 * free space pool 3270 */ 3271 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3272 btrfs_discard_extent(fs_info, 3273 ordered_extent->disk_bytenr, 3274 ordered_extent->disk_num_bytes, 3275 NULL); 3276 btrfs_free_reserved_extent(fs_info, 3277 ordered_extent->disk_bytenr, 3278 ordered_extent->disk_num_bytes, 1); 3279 /* 3280 * Actually free the qgroup rsv which was released when 3281 * the ordered extent was created. 3282 */ 3283 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root), 3284 ordered_extent->qgroup_rsv, 3285 BTRFS_QGROUP_RSV_DATA); 3286 } 3287 } 3288 3289 /* 3290 * This needs to be done to make sure anybody waiting knows we are done 3291 * updating everything for this ordered extent. 3292 */ 3293 btrfs_remove_ordered_extent(inode, ordered_extent); 3294 3295 /* once for us */ 3296 btrfs_put_ordered_extent(ordered_extent); 3297 /* once for the tree */ 3298 btrfs_put_ordered_extent(ordered_extent); 3299 3300 return ret; 3301 } 3302 3303 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) 3304 { 3305 if (btrfs_is_zoned(inode_to_fs_info(ordered->inode)) && 3306 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) && 3307 list_empty(&ordered->bioc_list)) 3308 btrfs_finish_ordered_zoned(ordered); 3309 return btrfs_finish_one_ordered(ordered); 3310 } 3311 3312 /* 3313 * Verify the checksum for a single sector without any extra action that depend 3314 * on the type of I/O. 3315 */ 3316 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3317 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3318 { 3319 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3320 char *kaddr; 3321 3322 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3323 3324 shash->tfm = fs_info->csum_shash; 3325 3326 kaddr = kmap_local_page(page) + pgoff; 3327 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3328 kunmap_local(kaddr); 3329 3330 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3331 return -EIO; 3332 return 0; 3333 } 3334 3335 /* 3336 * Verify the checksum of a single data sector. 3337 * 3338 * @bbio: btrfs_io_bio which contains the csum 3339 * @dev: device the sector is on 3340 * @bio_offset: offset to the beginning of the bio (in bytes) 3341 * @bv: bio_vec to check 3342 * 3343 * Check if the checksum on a data block is valid. When a checksum mismatch is 3344 * detected, report the error and fill the corrupted range with zero. 3345 * 3346 * Return %true if the sector is ok or had no checksum to start with, else %false. 
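* A NULL bbio->csum means there are no checksums for this I/O (e.g. a NODATASUM inode), in which case the sector is accepted without verification.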
3347 */ 3348 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3349 u32 bio_offset, struct bio_vec *bv) 3350 { 3351 struct btrfs_inode *inode = bbio->inode; 3352 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3353 u64 file_offset = bbio->file_offset + bio_offset; 3354 u64 end = file_offset + bv->bv_len - 1; 3355 u8 *csum_expected; 3356 u8 csum[BTRFS_CSUM_SIZE]; 3357 3358 ASSERT(bv->bv_len == fs_info->sectorsize); 3359 3360 if (!bbio->csum) 3361 return true; 3362 3363 if (btrfs_is_data_reloc_root(inode->root) && 3364 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3365 NULL)) { 3366 /* Skip the range without csum for data reloc inode */ 3367 clear_extent_bits(&inode->io_tree, file_offset, end, 3368 EXTENT_NODATASUM); 3369 return true; 3370 } 3371 3372 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3373 fs_info->csum_size; 3374 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3375 csum_expected)) 3376 goto zeroit; 3377 return true; 3378 3379 zeroit: 3380 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3381 bbio->mirror_num); 3382 if (dev) 3383 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3384 memzero_bvec(bv); 3385 return false; 3386 } 3387 3388 /* 3389 * Perform a delayed iput on @inode. 3390 * 3391 * @inode: The inode we want to perform iput on 3392 * 3393 * This function uses the generic vfs_inode::i_count to track whether we should 3394 * just decrement it (in case it's > 1) or if this is the last iput then link 3395 * the inode to the delayed iput machinery. Delayed iputs are processed at 3396 * transaction commit time/superblock commit/cleaner kthread. 3397 */ 3398 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3399 { 3400 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3401 unsigned long flags; 3402 3403 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3404 return; 3405 3406 atomic_inc(&fs_info->nr_delayed_iputs); 3407 /* 3408 * Need to be irq safe here because we can be called from either an irq 3409 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3410 * context. 
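* An example of the irq case: a bio completion drops the last reference on an ordered extent via btrfs_put_ordered_extent(), which ends up calling this function.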
3411 */ 3412 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3413 ASSERT(list_empty(&inode->delayed_iput)); 3414 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3415 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3416 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3417 wake_up_process(fs_info->cleaner_kthread); 3418 } 3419 3420 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3421 struct btrfs_inode *inode) 3422 { 3423 list_del_init(&inode->delayed_iput); 3424 spin_unlock_irq(&fs_info->delayed_iput_lock); 3425 iput(&inode->vfs_inode); 3426 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3427 wake_up(&fs_info->delayed_iputs_wait); 3428 spin_lock_irq(&fs_info->delayed_iput_lock); 3429 } 3430 3431 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3432 struct btrfs_inode *inode) 3433 { 3434 if (!list_empty(&inode->delayed_iput)) { 3435 spin_lock_irq(&fs_info->delayed_iput_lock); 3436 if (!list_empty(&inode->delayed_iput)) 3437 run_delayed_iput_locked(fs_info, inode); 3438 spin_unlock_irq(&fs_info->delayed_iput_lock); 3439 } 3440 } 3441 3442 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3443 { 3444 /* 3445 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3446 * calls btrfs_add_delayed_iput() and that needs to lock 3447 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3448 * prevent a deadlock. 3449 */ 3450 spin_lock_irq(&fs_info->delayed_iput_lock); 3451 while (!list_empty(&fs_info->delayed_iputs)) { 3452 struct btrfs_inode *inode; 3453 3454 inode = list_first_entry(&fs_info->delayed_iputs, 3455 struct btrfs_inode, delayed_iput); 3456 run_delayed_iput_locked(fs_info, inode); 3457 if (need_resched()) { 3458 spin_unlock_irq(&fs_info->delayed_iput_lock); 3459 cond_resched(); 3460 spin_lock_irq(&fs_info->delayed_iput_lock); 3461 } 3462 } 3463 spin_unlock_irq(&fs_info->delayed_iput_lock); 3464 } 3465 3466 /* 3467 * Wait for flushing all delayed iputs 3468 * 3469 * @fs_info: the filesystem 3470 * 3471 * This will wait on any delayed iputs that are currently running with KILLABLE 3472 * set. Once they are all done running we will return, unless we are killed in 3473 * which case we return EINTR. This helps in user operations like fallocate etc 3474 * that might get blocked on the iputs. 3475 * 3476 * Return EINTR if we were killed, 0 if nothing's pending 3477 */ 3478 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3479 { 3480 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3481 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3482 if (ret) 3483 return -EINTR; 3484 return 0; 3485 } 3486 3487 /* 3488 * This creates an orphan entry for the given inode in case something goes wrong 3489 * in the middle of an unlink. 3490 */ 3491 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3492 struct btrfs_inode *inode) 3493 { 3494 int ret; 3495 3496 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3497 if (ret && ret != -EEXIST) { 3498 btrfs_abort_transaction(trans, ret); 3499 return ret; 3500 } 3501 3502 return 0; 3503 } 3504 3505 /* 3506 * We have done the delete so we can go ahead and remove the orphan item for 3507 * this particular inode. 
3508 */ 3509 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3510 struct btrfs_inode *inode) 3511 { 3512 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3513 } 3514 3515 /* 3516 * this cleans up any orphans that may be left on the list from the last use 3517 * of this root. 3518 */ 3519 int btrfs_orphan_cleanup(struct btrfs_root *root) 3520 { 3521 struct btrfs_fs_info *fs_info = root->fs_info; 3522 struct btrfs_path *path; 3523 struct extent_buffer *leaf; 3524 struct btrfs_key key, found_key; 3525 struct btrfs_trans_handle *trans; 3526 struct inode *inode; 3527 u64 last_objectid = 0; 3528 int ret = 0, nr_unlink = 0; 3529 3530 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3531 return 0; 3532 3533 path = btrfs_alloc_path(); 3534 if (!path) { 3535 ret = -ENOMEM; 3536 goto out; 3537 } 3538 path->reada = READA_BACK; 3539 3540 key.objectid = BTRFS_ORPHAN_OBJECTID; 3541 key.type = BTRFS_ORPHAN_ITEM_KEY; 3542 key.offset = (u64)-1; 3543 3544 while (1) { 3545 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3546 if (ret < 0) 3547 goto out; 3548 3549 /* 3550 * if ret == 0 means we found what we were searching for, which 3551 * is weird, but possible, so only screw with path if we didn't 3552 * find the key and see if we have stuff that matches 3553 */ 3554 if (ret > 0) { 3555 ret = 0; 3556 if (path->slots[0] == 0) 3557 break; 3558 path->slots[0]--; 3559 } 3560 3561 /* pull out the item */ 3562 leaf = path->nodes[0]; 3563 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3564 3565 /* make sure the item matches what we want */ 3566 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3567 break; 3568 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3569 break; 3570 3571 /* release the path since we're done with it */ 3572 btrfs_release_path(path); 3573 3574 /* 3575 * this is where we are basically btrfs_lookup, without the 3576 * crossing root thing. we store the inode number in the 3577 * offset of the orphan item. 3578 */ 3579 3580 if (found_key.offset == last_objectid) { 3581 /* 3582 * We found the same inode as before. This means we were 3583 * not able to remove its items via eviction triggered 3584 * by an iput(). A transaction abort may have happened, 3585 * due to -ENOSPC for example, so try to grab the error 3586 * that lead to a transaction abort, if any. 3587 */ 3588 btrfs_err(fs_info, 3589 "Error removing orphan entry, stopping orphan cleanup"); 3590 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3591 goto out; 3592 } 3593 3594 last_objectid = found_key.offset; 3595 3596 found_key.objectid = found_key.offset; 3597 found_key.type = BTRFS_INODE_ITEM_KEY; 3598 found_key.offset = 0; 3599 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3600 if (IS_ERR(inode)) { 3601 ret = PTR_ERR(inode); 3602 inode = NULL; 3603 if (ret != -ENOENT) 3604 goto out; 3605 } 3606 3607 if (!inode && root == fs_info->tree_root) { 3608 struct btrfs_root *dead_root; 3609 int is_dead_root = 0; 3610 3611 /* 3612 * This is an orphan in the tree root. Currently these 3613 * could come from 2 sources: 3614 * a) a root (snapshot/subvolume) deletion in progress 3615 * b) a free space cache inode 3616 * We need to distinguish those two, as the orphan item 3617 * for a root must not get deleted before the deletion 3618 * of the snapshot/subvolume's tree completes. 3619 * 3620 * btrfs_find_orphan_roots() ran before us, which has 3621 * found all deleted roots and loaded them into 3622 * fs_info->fs_roots_radix. 
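 * (A deleted root stays in that radix tree until the deletion of its
 * tree completes.)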
So here we can find if an 3623 * orphan item corresponds to a deleted root by looking 3624 * up the root from that radix tree. 3625 */ 3626 3627 spin_lock(&fs_info->fs_roots_radix_lock); 3628 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3629 (unsigned long)found_key.objectid); 3630 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3631 is_dead_root = 1; 3632 spin_unlock(&fs_info->fs_roots_radix_lock); 3633 3634 if (is_dead_root) { 3635 /* prevent this orphan from being found again */ 3636 key.offset = found_key.objectid - 1; 3637 continue; 3638 } 3639 3640 } 3641 3642 /* 3643 * If we have an inode with links, there are a couple of 3644 * possibilities: 3645 * 3646 * 1. We were halfway through creating fsverity metadata for the 3647 * file. In that case, the orphan item represents incomplete 3648 * fsverity metadata which must be cleaned up with 3649 * btrfs_drop_verity_items and deleting the orphan item. 3650 3651 * 2. Old kernels (before v3.12) used to create an 3652 * orphan item for truncate indicating that there were possibly 3653 * extent items past i_size that needed to be deleted. In v3.12, 3654 * truncate was changed to update i_size in sync with the extent 3655 * items, but the (useless) orphan item was still created. Since 3656 * v4.18, we don't create the orphan item for truncate at all. 3657 * 3658 * So, this item could mean that we need to do a truncate, but 3659 * only if this filesystem was last used on a pre-v3.12 kernel 3660 * and was not cleanly unmounted. The odds of that are quite 3661 * slim, and it's a pain to do the truncate now, so just delete 3662 * the orphan item. 3663 * 3664 * It's also possible that this orphan item was supposed to be 3665 * deleted but wasn't. The inode number may have been reused, 3666 * but either way, we can delete the orphan item. 3667 */ 3668 if (!inode || inode->i_nlink) { 3669 if (inode) { 3670 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3671 iput(inode); 3672 inode = NULL; 3673 if (ret) 3674 goto out; 3675 } 3676 trans = btrfs_start_transaction(root, 1); 3677 if (IS_ERR(trans)) { 3678 ret = PTR_ERR(trans); 3679 goto out; 3680 } 3681 btrfs_debug(fs_info, "auto deleting %Lu", 3682 found_key.objectid); 3683 ret = btrfs_del_orphan_item(trans, root, 3684 found_key.objectid); 3685 btrfs_end_transaction(trans); 3686 if (ret) 3687 goto out; 3688 continue; 3689 } 3690 3691 nr_unlink++; 3692 3693 /* this will do delete_inode and everything for us */ 3694 iput(inode); 3695 } 3696 /* release the path since we're done with it */ 3697 btrfs_release_path(path); 3698 3699 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3700 trans = btrfs_join_transaction(root); 3701 if (!IS_ERR(trans)) 3702 btrfs_end_transaction(trans); 3703 } 3704 3705 if (nr_unlink) 3706 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3707 3708 out: 3709 if (ret) 3710 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3711 btrfs_free_path(path); 3712 return ret; 3713 } 3714 3715 /* 3716 * very simple check to peek ahead in the leaf looking for xattrs. If we 3717 * don't find any xattrs, we know there can't be any acls. 
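 * This works because on btrfs ACLs are stored as POSIX ACL xattrs, so an
 * inode with no xattr items cannot have any ACL items either.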
3718 * 3719 * slot is the slot the inode is in, objectid is the objectid of the inode 3720 */ 3721 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3722 int slot, u64 objectid, 3723 int *first_xattr_slot) 3724 { 3725 u32 nritems = btrfs_header_nritems(leaf); 3726 struct btrfs_key found_key; 3727 static u64 xattr_access = 0; 3728 static u64 xattr_default = 0; 3729 int scanned = 0; 3730 3731 if (!xattr_access) { 3732 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3733 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3734 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3735 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3736 } 3737 3738 slot++; 3739 *first_xattr_slot = -1; 3740 while (slot < nritems) { 3741 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3742 3743 /* we found a different objectid, there must not be acls */ 3744 if (found_key.objectid != objectid) 3745 return 0; 3746 3747 /* we found an xattr, assume we've got an acl */ 3748 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3749 if (*first_xattr_slot == -1) 3750 *first_xattr_slot = slot; 3751 if (found_key.offset == xattr_access || 3752 found_key.offset == xattr_default) 3753 return 1; 3754 } 3755 3756 /* 3757 * we found a key greater than an xattr key, there can't 3758 * be any acls later on 3759 */ 3760 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3761 return 0; 3762 3763 slot++; 3764 scanned++; 3765 3766 /* 3767 * it goes inode, inode backrefs, xattrs, extents, 3768 * so if there are a ton of hard links to an inode there can 3769 * be a lot of backrefs. Don't waste time searching too hard, 3770 * this is just an optimization 3771 */ 3772 if (scanned >= 8) 3773 break; 3774 } 3775 /* we hit the end of the leaf before we found an xattr or 3776 * something larger than an xattr. 
We have to assume the inode 3777 * has acls 3778 */ 3779 if (*first_xattr_slot == -1) 3780 *first_xattr_slot = slot; 3781 return 1; 3782 } 3783 3784 /* 3785 * read an inode from the btree into the in-memory inode 3786 */ 3787 static int btrfs_read_locked_inode(struct inode *inode, 3788 struct btrfs_path *in_path) 3789 { 3790 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 3791 struct btrfs_path *path = in_path; 3792 struct extent_buffer *leaf; 3793 struct btrfs_inode_item *inode_item; 3794 struct btrfs_root *root = BTRFS_I(inode)->root; 3795 struct btrfs_key location; 3796 unsigned long ptr; 3797 int maybe_acls; 3798 u32 rdev; 3799 int ret; 3800 bool filled = false; 3801 int first_xattr_slot; 3802 3803 ret = btrfs_fill_inode(inode, &rdev); 3804 if (!ret) 3805 filled = true; 3806 3807 if (!path) { 3808 path = btrfs_alloc_path(); 3809 if (!path) 3810 return -ENOMEM; 3811 } 3812 3813 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3814 3815 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3816 if (ret) { 3817 if (path != in_path) 3818 btrfs_free_path(path); 3819 return ret; 3820 } 3821 3822 leaf = path->nodes[0]; 3823 3824 if (filled) 3825 goto cache_index; 3826 3827 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3828 struct btrfs_inode_item); 3829 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3830 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3831 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3832 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3833 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3834 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3835 round_up(i_size_read(inode), fs_info->sectorsize)); 3836 3837 inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime), 3838 btrfs_timespec_nsec(leaf, &inode_item->atime)); 3839 3840 inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime), 3841 btrfs_timespec_nsec(leaf, &inode_item->mtime)); 3842 3843 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), 3844 btrfs_timespec_nsec(leaf, &inode_item->ctime)); 3845 3846 BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); 3847 BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime); 3848 3849 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3850 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3851 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3852 3853 inode_set_iversion_queried(inode, 3854 btrfs_inode_sequence(leaf, inode_item)); 3855 inode->i_generation = BTRFS_I(inode)->generation; 3856 inode->i_rdev = 0; 3857 rdev = btrfs_inode_rdev(leaf, inode_item); 3858 3859 BTRFS_I(inode)->index_cnt = (u64)-1; 3860 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3861 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3862 3863 cache_index: 3864 /* 3865 * If we were modified in the current generation and evicted from memory 3866 * and then re-read we need to do a full sync since we don't have any 3867 * idea about which extents were modified before we were evicted from 3868 * cache. 3869 * 3870 * This is required for both inode re-read from disk and delayed inode 3871 * in the delayed_nodes xarray. 
3872 */ 3873 if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info)) 3874 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3875 &BTRFS_I(inode)->runtime_flags); 3876 3877 /* 3878 * We don't persist the id of the transaction where an unlink operation 3879 * against the inode was last made. So here we assume the inode might 3880 * have been evicted, and therefore the exact value of last_unlink_trans 3881 * lost, and set it to last_trans to avoid metadata inconsistencies 3882 * between the inode and its parent if the inode is fsync'ed and the log 3883 * replayed. For example, in the scenario: 3884 * 3885 * touch mydir/foo 3886 * ln mydir/foo mydir/bar 3887 * sync 3888 * unlink mydir/bar 3889 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3890 * xfs_io -c fsync mydir/foo 3891 * <power failure> 3892 * mount fs, triggers fsync log replay 3893 * 3894 * We must make sure that when we fsync our inode foo we also log its 3895 * parent inode, otherwise after log replay the parent still has the 3896 * dentry with the "bar" name but our inode foo has a link count of 1 3897 * and doesn't have an inode ref with the name "bar" anymore. 3898 * 3899 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3900 * but it guarantees correctness at the expense of occasional full 3901 * transaction commits on fsync if our inode is a directory, or if our 3902 * inode is not a directory, logging its parent unnecessarily. 3903 */ 3904 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3905 3906 /* 3907 * Same logic as for last_unlink_trans. We don't persist the generation 3908 * of the last transaction where this inode was used for a reflink 3909 * operation, so after eviction and reloading the inode we must be 3910 * pessimistic and assume the last transaction that modified the inode. 
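 * This may make a future fsync do more work than strictly needed, but it
 * keeps fsync correct for extents shared by a recent reflink.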
3911 */ 3912 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3913 3914 path->slots[0]++; 3915 if (inode->i_nlink != 1 || 3916 path->slots[0] >= btrfs_header_nritems(leaf)) 3917 goto cache_acl; 3918 3919 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3920 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3921 goto cache_acl; 3922 3923 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3924 if (location.type == BTRFS_INODE_REF_KEY) { 3925 struct btrfs_inode_ref *ref; 3926 3927 ref = (struct btrfs_inode_ref *)ptr; 3928 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3929 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3930 struct btrfs_inode_extref *extref; 3931 3932 extref = (struct btrfs_inode_extref *)ptr; 3933 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3934 extref); 3935 } 3936 cache_acl: 3937 /* 3938 * try to precache a NULL acl entry for files that don't have 3939 * any xattrs or acls 3940 */ 3941 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3942 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3943 if (first_xattr_slot != -1) { 3944 path->slots[0] = first_xattr_slot; 3945 ret = btrfs_load_inode_props(inode, path); 3946 if (ret) 3947 btrfs_err(fs_info, 3948 "error loading props for ino %llu (root %llu): %d", 3949 btrfs_ino(BTRFS_I(inode)), 3950 btrfs_root_id(root), ret); 3951 } 3952 if (path != in_path) 3953 btrfs_free_path(path); 3954 3955 if (!maybe_acls) 3956 cache_no_acl(inode); 3957 3958 switch (inode->i_mode & S_IFMT) { 3959 case S_IFREG: 3960 inode->i_mapping->a_ops = &btrfs_aops; 3961 inode->i_fop = &btrfs_file_operations; 3962 inode->i_op = &btrfs_file_inode_operations; 3963 break; 3964 case S_IFDIR: 3965 inode->i_fop = &btrfs_dir_file_operations; 3966 inode->i_op = &btrfs_dir_inode_operations; 3967 break; 3968 case S_IFLNK: 3969 inode->i_op = &btrfs_symlink_inode_operations; 3970 inode_nohighmem(inode); 3971 inode->i_mapping->a_ops = &btrfs_aops; 3972 break; 3973 default: 3974 inode->i_op = &btrfs_special_inode_operations; 3975 init_special_inode(inode, inode->i_mode, rdev); 3976 break; 3977 } 3978 3979 btrfs_sync_inode_flags_to_i_flags(inode); 3980 return 0; 3981 } 3982 3983 /* 3984 * given a leaf and an inode, copy the inode fields into the leaf 3985 */ 3986 static void fill_inode_item(struct btrfs_trans_handle *trans, 3987 struct extent_buffer *leaf, 3988 struct btrfs_inode_item *item, 3989 struct inode *inode) 3990 { 3991 struct btrfs_map_token token; 3992 u64 flags; 3993 3994 btrfs_init_map_token(&token, leaf); 3995 3996 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3997 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3998 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 3999 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4000 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4001 4002 btrfs_set_token_timespec_sec(&token, &item->atime, 4003 inode_get_atime_sec(inode)); 4004 btrfs_set_token_timespec_nsec(&token, &item->atime, 4005 inode_get_atime_nsec(inode)); 4006 4007 btrfs_set_token_timespec_sec(&token, &item->mtime, 4008 inode_get_mtime_sec(inode)); 4009 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4010 inode_get_mtime_nsec(inode)); 4011 4012 btrfs_set_token_timespec_sec(&token, &item->ctime, 4013 inode_get_ctime_sec(inode)); 4014 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4015 inode_get_ctime_nsec(inode)); 4016 4017 btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); 4018 
btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec); 4019 4020 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4021 btrfs_set_token_inode_generation(&token, item, 4022 BTRFS_I(inode)->generation); 4023 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4024 btrfs_set_token_inode_transid(&token, item, trans->transid); 4025 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4026 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4027 BTRFS_I(inode)->ro_flags); 4028 btrfs_set_token_inode_flags(&token, item, flags); 4029 btrfs_set_token_inode_block_group(&token, item, 0); 4030 } 4031 4032 /* 4033 * copy everything in the in-memory inode into the btree. 4034 */ 4035 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4036 struct btrfs_inode *inode) 4037 { 4038 struct btrfs_inode_item *inode_item; 4039 struct btrfs_path *path; 4040 struct extent_buffer *leaf; 4041 int ret; 4042 4043 path = btrfs_alloc_path(); 4044 if (!path) 4045 return -ENOMEM; 4046 4047 ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1); 4048 if (ret) { 4049 if (ret > 0) 4050 ret = -ENOENT; 4051 goto failed; 4052 } 4053 4054 leaf = path->nodes[0]; 4055 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4056 struct btrfs_inode_item); 4057 4058 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4059 btrfs_mark_buffer_dirty(trans, leaf); 4060 btrfs_set_inode_last_trans(trans, inode); 4061 ret = 0; 4062 failed: 4063 btrfs_free_path(path); 4064 return ret; 4065 } 4066 4067 /* 4068 * copy everything in the in-memory inode into the btree. 4069 */ 4070 int btrfs_update_inode(struct btrfs_trans_handle *trans, 4071 struct btrfs_inode *inode) 4072 { 4073 struct btrfs_root *root = inode->root; 4074 struct btrfs_fs_info *fs_info = root->fs_info; 4075 int ret; 4076 4077 /* 4078 * If the inode is a free space inode, we can deadlock during commit 4079 * if we put it into the delayed code. 4080 * 4081 * The data relocation inode should also be directly updated 4082 * without delay 4083 */ 4084 if (!btrfs_is_free_space_inode(inode) 4085 && !btrfs_is_data_reloc_root(root) 4086 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4087 btrfs_update_root_times(trans, root); 4088 4089 ret = btrfs_delayed_update_inode(trans, inode); 4090 if (!ret) 4091 btrfs_set_inode_last_trans(trans, inode); 4092 return ret; 4093 } 4094 4095 return btrfs_update_inode_item(trans, inode); 4096 } 4097 4098 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4099 struct btrfs_inode *inode) 4100 { 4101 int ret; 4102 4103 ret = btrfs_update_inode(trans, inode); 4104 if (ret == -ENOSPC) 4105 return btrfs_update_inode_item(trans, inode); 4106 return ret; 4107 } 4108 4109 /* 4110 * unlink helper that gets used here in inode.c and in the tree logging 4111 * recovery code. 
It removes a link in a directory with a given name, and 4112 * also drops the back refs in the inode to the directory 4113 */ 4114 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4115 struct btrfs_inode *dir, 4116 struct btrfs_inode *inode, 4117 const struct fscrypt_str *name, 4118 struct btrfs_rename_ctx *rename_ctx) 4119 { 4120 struct btrfs_root *root = dir->root; 4121 struct btrfs_fs_info *fs_info = root->fs_info; 4122 struct btrfs_path *path; 4123 int ret = 0; 4124 struct btrfs_dir_item *di; 4125 u64 index; 4126 u64 ino = btrfs_ino(inode); 4127 u64 dir_ino = btrfs_ino(dir); 4128 4129 path = btrfs_alloc_path(); 4130 if (!path) { 4131 ret = -ENOMEM; 4132 goto out; 4133 } 4134 4135 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4136 if (IS_ERR_OR_NULL(di)) { 4137 ret = di ? PTR_ERR(di) : -ENOENT; 4138 goto err; 4139 } 4140 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4141 if (ret) 4142 goto err; 4143 btrfs_release_path(path); 4144 4145 /* 4146 * If we don't have the dir index cached, we have to look it up via 4147 * the inode ref, and since we then have the ref at hand, remove it 4148 * directly instead of going through delayed deletion. 4149 * 4150 * But if the dir index is cached, there is no need to search for the 4151 * inode ref. Since the inode ref is close to the inode item, it is 4152 * better to delay its deletion and do it when we update the inode 4153 * item. 4154 */ 4155 if (inode->dir_index) { 4156 ret = btrfs_delayed_delete_inode_ref(inode); 4157 if (!ret) { 4158 index = inode->dir_index; 4159 goto skip_backref; 4160 } 4161 } 4162 4163 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4164 if (ret) { 4165 btrfs_info(fs_info, 4166 "failed to delete reference to %.*s, inode %llu parent %llu", 4167 name->len, name->name, ino, dir_ino); 4168 btrfs_abort_transaction(trans, ret); 4169 goto err; 4170 } 4171 skip_backref: 4172 if (rename_ctx) 4173 rename_ctx->index = index; 4174 4175 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4176 if (ret) { 4177 btrfs_abort_transaction(trans, ret); 4178 goto err; 4179 } 4180 4181 /* 4182 * If we are in a rename context, we don't need to update anything in the 4183 * log. That will be done later during the rename by btrfs_log_new_name(). 4184 * Besides that, doing it here would only cause extra unnecessary btree 4185 * operations on the log tree, increasing latency for applications. 4186 */ 4187 if (!rename_ctx) { 4188 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4189 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4190 } 4191 4192 /* 4193 * If we have a pending delayed iput we could end up with the final iput 4194 * being run in btrfs-cleaner context. If we have enough of these built 4195 * up we can end up burning a lot of time in btrfs-cleaner without any 4196 * way to throttle the unlinks. Since we're currently holding a ref on 4197 * the inode we can run the delayed iput here without any issues as the 4198 * final iput won't be done until after we drop the ref we're currently 4199 * holding.
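 * Running the iput here also throttles unlink-heavy workloads instead of
 * deferring every final iput to the cleaner thread.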
4200 */ 4201 btrfs_run_delayed_iput(fs_info, inode); 4202 err: 4203 btrfs_free_path(path); 4204 if (ret) 4205 goto out; 4206 4207 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4208 inode_inc_iversion(&inode->vfs_inode); 4209 inode_inc_iversion(&dir->vfs_inode); 4210 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4211 ret = btrfs_update_inode(trans, dir); 4212 out: 4213 return ret; 4214 } 4215 4216 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4217 struct btrfs_inode *dir, struct btrfs_inode *inode, 4218 const struct fscrypt_str *name) 4219 { 4220 int ret; 4221 4222 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4223 if (!ret) { 4224 drop_nlink(&inode->vfs_inode); 4225 ret = btrfs_update_inode(trans, inode); 4226 } 4227 return ret; 4228 } 4229 4230 /* 4231 * helper to start transaction for unlink and rmdir. 4232 * 4233 * unlink and rmdir are special in btrfs, they do not always free space, so 4234 * if we cannot make our reservations the normal way try and see if there is 4235 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4236 * allow the unlink to occur. 4237 */ 4238 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4239 { 4240 struct btrfs_root *root = dir->root; 4241 4242 return btrfs_start_transaction_fallback_global_rsv(root, 4243 BTRFS_UNLINK_METADATA_UNITS); 4244 } 4245 4246 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4247 { 4248 struct btrfs_trans_handle *trans; 4249 struct inode *inode = d_inode(dentry); 4250 int ret; 4251 struct fscrypt_name fname; 4252 4253 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4254 if (ret) 4255 return ret; 4256 4257 /* This needs to handle no-key deletions later on */ 4258 4259 trans = __unlink_start_trans(BTRFS_I(dir)); 4260 if (IS_ERR(trans)) { 4261 ret = PTR_ERR(trans); 4262 goto fscrypt_free; 4263 } 4264 4265 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4266 false); 4267 4268 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4269 &fname.disk_name); 4270 if (ret) 4271 goto end_trans; 4272 4273 if (inode->i_nlink == 0) { 4274 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4275 if (ret) 4276 goto end_trans; 4277 } 4278 4279 end_trans: 4280 btrfs_end_transaction(trans); 4281 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4282 fscrypt_free: 4283 fscrypt_free_filename(&fname); 4284 return ret; 4285 } 4286 4287 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4288 struct btrfs_inode *dir, struct dentry *dentry) 4289 { 4290 struct btrfs_root *root = dir->root; 4291 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4292 struct btrfs_path *path; 4293 struct extent_buffer *leaf; 4294 struct btrfs_dir_item *di; 4295 struct btrfs_key key; 4296 u64 index; 4297 int ret; 4298 u64 objectid; 4299 u64 dir_ino = btrfs_ino(dir); 4300 struct fscrypt_name fname; 4301 4302 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4303 if (ret) 4304 return ret; 4305 4306 /* This needs to handle no-key deletions later on */ 4307 4308 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4309 objectid = btrfs_root_id(inode->root); 4310 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4311 objectid = inode->location.objectid; 4312 } else { 4313 WARN_ON(1); 4314 fscrypt_free_filename(&fname); 4315 return -EINVAL; 4316 } 4317 4318 path = btrfs_alloc_path(); 4319 if (!path) { 4320 ret = -ENOMEM; 
4321 goto out; 4322 } 4323 4324 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4325 &fname.disk_name, -1); 4326 if (IS_ERR_OR_NULL(di)) { 4327 ret = di ? PTR_ERR(di) : -ENOENT; 4328 goto out; 4329 } 4330 4331 leaf = path->nodes[0]; 4332 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4333 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4334 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4335 if (ret) { 4336 btrfs_abort_transaction(trans, ret); 4337 goto out; 4338 } 4339 btrfs_release_path(path); 4340 4341 /* 4342 * This is a placeholder inode for a subvolume we didn't have a 4343 * reference to at the time of the snapshot creation. In the meantime 4344 * we could have renamed the real subvol link into our snapshot, so 4345 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4346 * Instead simply lookup the dir_index_item for this entry so we can 4347 * remove it. Otherwise we know we have a ref to the root and we can 4348 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4349 */ 4350 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4351 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4352 if (IS_ERR_OR_NULL(di)) { 4353 if (!di) 4354 ret = -ENOENT; 4355 else 4356 ret = PTR_ERR(di); 4357 btrfs_abort_transaction(trans, ret); 4358 goto out; 4359 } 4360 4361 leaf = path->nodes[0]; 4362 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4363 index = key.offset; 4364 btrfs_release_path(path); 4365 } else { 4366 ret = btrfs_del_root_ref(trans, objectid, 4367 btrfs_root_id(root), dir_ino, 4368 &index, &fname.disk_name); 4369 if (ret) { 4370 btrfs_abort_transaction(trans, ret); 4371 goto out; 4372 } 4373 } 4374 4375 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4376 if (ret) { 4377 btrfs_abort_transaction(trans, ret); 4378 goto out; 4379 } 4380 4381 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4382 inode_inc_iversion(&dir->vfs_inode); 4383 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4384 ret = btrfs_update_inode_fallback(trans, dir); 4385 if (ret) 4386 btrfs_abort_transaction(trans, ret); 4387 out: 4388 btrfs_free_path(path); 4389 fscrypt_free_filename(&fname); 4390 return ret; 4391 } 4392 4393 /* 4394 * Helper to check if the subvolume references other subvolumes or if it's 4395 * default. 
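 * Return 0 if the subvolume can be deleted, -EPERM if it is the default
 * subvolume, -ENOTEMPTY if it still has child subvolume references, or
 * another negative errno on lookup errors.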
4396 */ 4397 static noinline int may_destroy_subvol(struct btrfs_root *root) 4398 { 4399 struct btrfs_fs_info *fs_info = root->fs_info; 4400 struct btrfs_path *path; 4401 struct btrfs_dir_item *di; 4402 struct btrfs_key key; 4403 struct fscrypt_str name = FSTR_INIT("default", 7); 4404 u64 dir_id; 4405 int ret; 4406 4407 path = btrfs_alloc_path(); 4408 if (!path) 4409 return -ENOMEM; 4410 4411 /* Make sure this root isn't set as the default subvol */ 4412 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4413 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4414 dir_id, &name, 0); 4415 if (di && !IS_ERR(di)) { 4416 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4417 if (key.objectid == btrfs_root_id(root)) { 4418 ret = -EPERM; 4419 btrfs_err(fs_info, 4420 "deleting default subvolume %llu is not allowed", 4421 key.objectid); 4422 goto out; 4423 } 4424 btrfs_release_path(path); 4425 } 4426 4427 key.objectid = btrfs_root_id(root); 4428 key.type = BTRFS_ROOT_REF_KEY; 4429 key.offset = (u64)-1; 4430 4431 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4432 if (ret < 0) 4433 goto out; 4434 if (ret == 0) { 4435 /* 4436 * Key with offset -1 found, there would have to exist a root 4437 * with such id, but this is out of valid range. 4438 */ 4439 ret = -EUCLEAN; 4440 goto out; 4441 } 4442 4443 ret = 0; 4444 if (path->slots[0] > 0) { 4445 path->slots[0]--; 4446 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4447 if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY) 4448 ret = -ENOTEMPTY; 4449 } 4450 out: 4451 btrfs_free_path(path); 4452 return ret; 4453 } 4454 4455 /* Delete all dentries for inodes belonging to the root */ 4456 static void btrfs_prune_dentries(struct btrfs_root *root) 4457 { 4458 struct btrfs_fs_info *fs_info = root->fs_info; 4459 struct btrfs_inode *inode; 4460 u64 min_ino = 0; 4461 4462 if (!BTRFS_FS_ERROR(fs_info)) 4463 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4464 4465 inode = btrfs_find_first_inode(root, min_ino); 4466 while (inode) { 4467 if (atomic_read(&inode->vfs_inode.i_count) > 1) 4468 d_prune_aliases(&inode->vfs_inode); 4469 4470 min_ino = btrfs_ino(inode) + 1; 4471 /* 4472 * btrfs_drop_inode() will have it removed from the inode 4473 * cache when its usage count hits zero. 4474 */ 4475 iput(&inode->vfs_inode); 4476 cond_resched(); 4477 inode = btrfs_find_first_inode(root, min_ino); 4478 } 4479 } 4480 4481 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4482 { 4483 struct btrfs_root *root = dir->root; 4484 struct btrfs_fs_info *fs_info = root->fs_info; 4485 struct inode *inode = d_inode(dentry); 4486 struct btrfs_root *dest = BTRFS_I(inode)->root; 4487 struct btrfs_trans_handle *trans; 4488 struct btrfs_block_rsv block_rsv; 4489 u64 root_flags; 4490 u64 qgroup_reserved = 0; 4491 int ret; 4492 4493 down_write(&fs_info->subvol_sem); 4494 4495 /* 4496 * Don't allow to delete a subvolume with send in progress. This is 4497 * inside the inode lock so the error handling that has to drop the bit 4498 * again is not run concurrently. 
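 * The BTRFS_ROOT_SUBVOL_DEAD flag set below is what makes any send started
 * after this point fail for the subvolume.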
4499 */ 4500 spin_lock(&dest->root_item_lock); 4501 if (dest->send_in_progress) { 4502 spin_unlock(&dest->root_item_lock); 4503 btrfs_warn(fs_info, 4504 "attempt to delete subvolume %llu during send", 4505 btrfs_root_id(dest)); 4506 ret = -EPERM; 4507 goto out_up_write; 4508 } 4509 if (atomic_read(&dest->nr_swapfiles)) { 4510 spin_unlock(&dest->root_item_lock); 4511 btrfs_warn(fs_info, 4512 "attempt to delete subvolume %llu with active swapfile", 4513 btrfs_root_id(root)); 4514 ret = -EPERM; 4515 goto out_up_write; 4516 } 4517 root_flags = btrfs_root_flags(&dest->root_item); 4518 btrfs_set_root_flags(&dest->root_item, 4519 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4520 spin_unlock(&dest->root_item_lock); 4521 4522 ret = may_destroy_subvol(dest); 4523 if (ret) 4524 goto out_undead; 4525 4526 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4527 /* 4528 * One for dir inode, 4529 * two for dir entries, 4530 * two for root ref/backref. 4531 */ 4532 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4533 if (ret) 4534 goto out_undead; 4535 qgroup_reserved = block_rsv.qgroup_rsv_reserved; 4536 4537 trans = btrfs_start_transaction(root, 0); 4538 if (IS_ERR(trans)) { 4539 ret = PTR_ERR(trans); 4540 goto out_release; 4541 } 4542 ret = btrfs_record_root_in_trans(trans, root); 4543 if (ret) { 4544 btrfs_abort_transaction(trans, ret); 4545 goto out_end_trans; 4546 } 4547 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); 4548 qgroup_reserved = 0; 4549 trans->block_rsv = &block_rsv; 4550 trans->bytes_reserved = block_rsv.size; 4551 4552 btrfs_record_snapshot_destroy(trans, dir); 4553 4554 ret = btrfs_unlink_subvol(trans, dir, dentry); 4555 if (ret) { 4556 btrfs_abort_transaction(trans, ret); 4557 goto out_end_trans; 4558 } 4559 4560 ret = btrfs_record_root_in_trans(trans, dest); 4561 if (ret) { 4562 btrfs_abort_transaction(trans, ret); 4563 goto out_end_trans; 4564 } 4565 4566 memset(&dest->root_item.drop_progress, 0, 4567 sizeof(dest->root_item.drop_progress)); 4568 btrfs_set_root_drop_level(&dest->root_item, 0); 4569 btrfs_set_root_refs(&dest->root_item, 0); 4570 4571 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4572 ret = btrfs_insert_orphan_item(trans, 4573 fs_info->tree_root, 4574 btrfs_root_id(dest)); 4575 if (ret) { 4576 btrfs_abort_transaction(trans, ret); 4577 goto out_end_trans; 4578 } 4579 } 4580 4581 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4582 BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest)); 4583 if (ret && ret != -ENOENT) { 4584 btrfs_abort_transaction(trans, ret); 4585 goto out_end_trans; 4586 } 4587 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4588 ret = btrfs_uuid_tree_remove(trans, 4589 dest->root_item.received_uuid, 4590 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4591 btrfs_root_id(dest)); 4592 if (ret && ret != -ENOENT) { 4593 btrfs_abort_transaction(trans, ret); 4594 goto out_end_trans; 4595 } 4596 } 4597 4598 free_anon_bdev(dest->anon_dev); 4599 dest->anon_dev = 0; 4600 out_end_trans: 4601 trans->block_rsv = NULL; 4602 trans->bytes_reserved = 0; 4603 ret = btrfs_end_transaction(trans); 4604 inode->i_flags |= S_DEAD; 4605 out_release: 4606 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); 4607 if (qgroup_reserved) 4608 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); 4609 out_undead: 4610 if (ret) { 4611 spin_lock(&dest->root_item_lock); 4612 root_flags = btrfs_root_flags(&dest->root_item); 4613 btrfs_set_root_flags(&dest->root_item, 4614 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4615 
spin_unlock(&dest->root_item_lock); 4616 } 4617 out_up_write: 4618 up_write(&fs_info->subvol_sem); 4619 if (!ret) { 4620 d_invalidate(dentry); 4621 btrfs_prune_dentries(dest); 4622 ASSERT(dest->send_in_progress == 0); 4623 } 4624 4625 return ret; 4626 } 4627 4628 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4629 { 4630 struct inode *inode = d_inode(dentry); 4631 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4632 int ret = 0; 4633 struct btrfs_trans_handle *trans; 4634 u64 last_unlink_trans; 4635 struct fscrypt_name fname; 4636 4637 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4638 return -ENOTEMPTY; 4639 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4640 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4641 btrfs_err(fs_info, 4642 "extent tree v2 doesn't support snapshot deletion yet"); 4643 return -EOPNOTSUPP; 4644 } 4645 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4646 } 4647 4648 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4649 if (ret) 4650 return ret; 4651 4652 /* This needs to handle no-key deletions later on */ 4653 4654 trans = __unlink_start_trans(BTRFS_I(dir)); 4655 if (IS_ERR(trans)) { 4656 ret = PTR_ERR(trans); 4657 goto out_notrans; 4658 } 4659 4660 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4661 ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4662 goto out; 4663 } 4664 4665 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4666 if (ret) 4667 goto out; 4668 4669 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4670 4671 /* now the directory is empty */ 4672 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4673 &fname.disk_name); 4674 if (!ret) { 4675 btrfs_i_size_write(BTRFS_I(inode), 0); 4676 /* 4677 * Propagate the last_unlink_trans value of the deleted dir to 4678 * its parent directory. This is to prevent an unrecoverable 4679 * log tree in the case we do something like this: 4680 * 1) create dir foo 4681 * 2) create snapshot under dir foo 4682 * 3) delete the snapshot 4683 * 4) rmdir foo 4684 * 5) mkdir foo 4685 * 6) fsync foo or some file inside foo 4686 */ 4687 if (last_unlink_trans >= trans->transid) 4688 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4689 } 4690 out: 4691 btrfs_end_transaction(trans); 4692 out_notrans: 4693 btrfs_btree_balance_dirty(fs_info); 4694 fscrypt_free_filename(&fname); 4695 4696 return ret; 4697 } 4698 4699 /* 4700 * Read, zero a chunk and write a block. 4701 * 4702 * @inode - inode that we're zeroing 4703 * @from - the offset to start zeroing 4704 * @len - the length to zero, 0 to zero the entire range respective to the 4705 * offset 4706 * @front - zero up to the offset instead of from the offset on 4707 * 4708 * This will find the block for the "from" offset and cow the block and zero the 4709 * part we want to zero. This is used with truncate and hole punching. 
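 * For example, hole punching zeroes the partial block at the start of the
 * hole with @front == 0 and the partial block at the end of the hole with
 * @front == 1.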
4710 */ 4711 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4712 int front) 4713 { 4714 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4715 struct address_space *mapping = inode->vfs_inode.i_mapping; 4716 struct extent_io_tree *io_tree = &inode->io_tree; 4717 struct btrfs_ordered_extent *ordered; 4718 struct extent_state *cached_state = NULL; 4719 struct extent_changeset *data_reserved = NULL; 4720 bool only_release_metadata = false; 4721 u32 blocksize = fs_info->sectorsize; 4722 pgoff_t index = from >> PAGE_SHIFT; 4723 unsigned offset = from & (blocksize - 1); 4724 struct folio *folio; 4725 gfp_t mask = btrfs_alloc_write_mask(mapping); 4726 size_t write_bytes = blocksize; 4727 int ret = 0; 4728 u64 block_start; 4729 u64 block_end; 4730 4731 if (IS_ALIGNED(offset, blocksize) && 4732 (!len || IS_ALIGNED(len, blocksize))) 4733 goto out; 4734 4735 block_start = round_down(from, blocksize); 4736 block_end = block_start + blocksize - 1; 4737 4738 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4739 blocksize, false); 4740 if (ret < 0) { 4741 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4742 /* For nocow case, no need to reserve data space */ 4743 only_release_metadata = true; 4744 } else { 4745 goto out; 4746 } 4747 } 4748 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4749 if (ret < 0) { 4750 if (!only_release_metadata) 4751 btrfs_free_reserved_data_space(inode, data_reserved, 4752 block_start, blocksize); 4753 goto out; 4754 } 4755 again: 4756 folio = __filemap_get_folio(mapping, index, 4757 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); 4758 if (IS_ERR(folio)) { 4759 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4760 blocksize, true); 4761 btrfs_delalloc_release_extents(inode, blocksize); 4762 ret = -ENOMEM; 4763 goto out; 4764 } 4765 4766 if (!folio_test_uptodate(folio)) { 4767 ret = btrfs_read_folio(NULL, folio); 4768 folio_lock(folio); 4769 if (folio->mapping != mapping) { 4770 folio_unlock(folio); 4771 folio_put(folio); 4772 goto again; 4773 } 4774 if (!folio_test_uptodate(folio)) { 4775 ret = -EIO; 4776 goto out_unlock; 4777 } 4778 } 4779 4780 /* 4781 * We unlock the page after the io is completed and then re-lock it 4782 * above. release_folio() could have come in between that and cleared 4783 * folio private, but left the page in the mapping. Set the page mapped 4784 * here to make sure it's properly set for the subpage stuff. 
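 * (If folio private is already attached, set_folio_extent_mapped() below
 * is a cheap no-op.)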
4785 */ 4786 ret = set_folio_extent_mapped(folio); 4787 if (ret < 0) 4788 goto out_unlock; 4789 4790 folio_wait_writeback(folio); 4791 4792 lock_extent(io_tree, block_start, block_end, &cached_state); 4793 4794 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4795 if (ordered) { 4796 unlock_extent(io_tree, block_start, block_end, &cached_state); 4797 folio_unlock(folio); 4798 folio_put(folio); 4799 btrfs_start_ordered_extent(ordered); 4800 btrfs_put_ordered_extent(ordered); 4801 goto again; 4802 } 4803 4804 clear_extent_bit(&inode->io_tree, block_start, block_end, 4805 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4806 &cached_state); 4807 4808 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4809 &cached_state); 4810 if (ret) { 4811 unlock_extent(io_tree, block_start, block_end, &cached_state); 4812 goto out_unlock; 4813 } 4814 4815 if (offset != blocksize) { 4816 if (!len) 4817 len = blocksize - offset; 4818 if (front) 4819 folio_zero_range(folio, block_start - folio_pos(folio), 4820 offset); 4821 else 4822 folio_zero_range(folio, 4823 (block_start - folio_pos(folio)) + offset, 4824 len); 4825 } 4826 btrfs_folio_clear_checked(fs_info, folio, block_start, 4827 block_end + 1 - block_start); 4828 btrfs_folio_set_dirty(fs_info, folio, block_start, 4829 block_end + 1 - block_start); 4830 unlock_extent(io_tree, block_start, block_end, &cached_state); 4831 4832 if (only_release_metadata) 4833 set_extent_bit(&inode->io_tree, block_start, block_end, 4834 EXTENT_NORESERVE, NULL); 4835 4836 out_unlock: 4837 if (ret) { 4838 if (only_release_metadata) 4839 btrfs_delalloc_release_metadata(inode, blocksize, true); 4840 else 4841 btrfs_delalloc_release_space(inode, data_reserved, 4842 block_start, blocksize, true); 4843 } 4844 btrfs_delalloc_release_extents(inode, blocksize); 4845 folio_unlock(folio); 4846 folio_put(folio); 4847 out: 4848 if (only_release_metadata) 4849 btrfs_check_nocow_unlock(inode); 4850 extent_changeset_free(data_reserved); 4851 return ret; 4852 } 4853 4854 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len) 4855 { 4856 struct btrfs_root *root = inode->root; 4857 struct btrfs_fs_info *fs_info = root->fs_info; 4858 struct btrfs_trans_handle *trans; 4859 struct btrfs_drop_extents_args drop_args = { 0 }; 4860 int ret; 4861 4862 /* 4863 * If NO_HOLES is enabled, we don't need to do anything. 4864 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4865 * or btrfs_update_inode() will be called, which guarantee that the next 4866 * fsync will know this inode was changed and needs to be logged. 4867 */ 4868 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4869 return 0; 4870 4871 /* 4872 * 1 - for the one we're dropping 4873 * 1 - for the one we're adding 4874 * 1 - for updating the inode. 
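 * Hence the btrfs_start_transaction(root, 3) call below.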
4875 */ 4876 trans = btrfs_start_transaction(root, 3); 4877 if (IS_ERR(trans)) 4878 return PTR_ERR(trans); 4879 4880 drop_args.start = offset; 4881 drop_args.end = offset + len; 4882 drop_args.drop_cache = true; 4883 4884 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4885 if (ret) { 4886 btrfs_abort_transaction(trans, ret); 4887 btrfs_end_transaction(trans); 4888 return ret; 4889 } 4890 4891 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4892 if (ret) { 4893 btrfs_abort_transaction(trans, ret); 4894 } else { 4895 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4896 btrfs_update_inode(trans, inode); 4897 } 4898 btrfs_end_transaction(trans); 4899 return ret; 4900 } 4901 4902 /* 4903 * This function puts in dummy file extents for the area we're creating a hole 4904 * for. So if we are truncating this file to a larger size we need to insert 4905 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4906 * the range between oldsize and size 4907 */ 4908 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4909 { 4910 struct btrfs_root *root = inode->root; 4911 struct btrfs_fs_info *fs_info = root->fs_info; 4912 struct extent_io_tree *io_tree = &inode->io_tree; 4913 struct extent_map *em = NULL; 4914 struct extent_state *cached_state = NULL; 4915 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4916 u64 block_end = ALIGN(size, fs_info->sectorsize); 4917 u64 last_byte; 4918 u64 cur_offset; 4919 u64 hole_size; 4920 int ret = 0; 4921 4922 /* 4923 * If our size started in the middle of a block we need to zero out the 4924 * rest of the block before we expand the i_size, otherwise we could 4925 * expose stale data. 4926 */ 4927 ret = btrfs_truncate_block(inode, oldsize, 0, 0); 4928 if (ret) 4929 return ret; 4930 4931 if (size <= hole_start) 4932 return 0; 4933 4934 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4935 &cached_state); 4936 cur_offset = hole_start; 4937 while (1) { 4938 em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset); 4939 if (IS_ERR(em)) { 4940 ret = PTR_ERR(em); 4941 em = NULL; 4942 break; 4943 } 4944 last_byte = min(extent_map_end(em), block_end); 4945 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4946 hole_size = last_byte - cur_offset; 4947 4948 if (!(em->flags & EXTENT_FLAG_PREALLOC)) { 4949 struct extent_map *hole_em; 4950 4951 ret = maybe_insert_hole(inode, cur_offset, hole_size); 4952 if (ret) 4953 break; 4954 4955 ret = btrfs_inode_set_file_extent_range(inode, 4956 cur_offset, hole_size); 4957 if (ret) 4958 break; 4959 4960 hole_em = alloc_extent_map(); 4961 if (!hole_em) { 4962 btrfs_drop_extent_map_range(inode, cur_offset, 4963 cur_offset + hole_size - 1, 4964 false); 4965 btrfs_set_inode_full_sync(inode); 4966 goto next; 4967 } 4968 hole_em->start = cur_offset; 4969 hole_em->len = hole_size; 4970 hole_em->orig_start = cur_offset; 4971 4972 hole_em->block_start = EXTENT_MAP_HOLE; 4973 hole_em->block_len = 0; 4974 hole_em->orig_block_len = 0; 4975 hole_em->ram_bytes = hole_size; 4976 hole_em->generation = btrfs_get_fs_generation(fs_info); 4977 4978 ret = btrfs_replace_extent_map_range(inode, hole_em, true); 4979 free_extent_map(hole_em); 4980 } else { 4981 ret = btrfs_inode_set_file_extent_range(inode, 4982 cur_offset, hole_size); 4983 if (ret) 4984 break; 4985 } 4986 next: 4987 free_extent_map(em); 4988 em = NULL; 4989 cur_offset = last_byte; 4990 if (cur_offset >= block_end) 4991 break; 4992 } 4993 free_extent_map(em); 
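/* Done expanding, release the range locked and flushed earlier. */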
4994 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 4995 return ret; 4996 } 4997 4998 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4999 { 5000 struct btrfs_root *root = BTRFS_I(inode)->root; 5001 struct btrfs_trans_handle *trans; 5002 loff_t oldsize = i_size_read(inode); 5003 loff_t newsize = attr->ia_size; 5004 int mask = attr->ia_valid; 5005 int ret; 5006 5007 /* 5008 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5009 * special case where we need to update the times despite not having 5010 * these flags set. For all other operations the VFS set these flags 5011 * explicitly if it wants a timestamp update. 5012 */ 5013 if (newsize != oldsize) { 5014 inode_inc_iversion(inode); 5015 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5016 inode_set_mtime_to_ts(inode, 5017 inode_set_ctime_current(inode)); 5018 } 5019 } 5020 5021 if (newsize > oldsize) { 5022 /* 5023 * Don't do an expanding truncate while snapshotting is ongoing. 5024 * This is to ensure the snapshot captures a fully consistent 5025 * state of this file - if the snapshot captures this expanding 5026 * truncation, it must capture all writes that happened before 5027 * this truncation. 5028 */ 5029 btrfs_drew_write_lock(&root->snapshot_lock); 5030 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5031 if (ret) { 5032 btrfs_drew_write_unlock(&root->snapshot_lock); 5033 return ret; 5034 } 5035 5036 trans = btrfs_start_transaction(root, 1); 5037 if (IS_ERR(trans)) { 5038 btrfs_drew_write_unlock(&root->snapshot_lock); 5039 return PTR_ERR(trans); 5040 } 5041 5042 i_size_write(inode, newsize); 5043 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5044 pagecache_isize_extended(inode, oldsize, newsize); 5045 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 5046 btrfs_drew_write_unlock(&root->snapshot_lock); 5047 btrfs_end_transaction(trans); 5048 } else { 5049 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 5050 5051 if (btrfs_is_zoned(fs_info)) { 5052 ret = btrfs_wait_ordered_range(inode, 5053 ALIGN(newsize, fs_info->sectorsize), 5054 (u64)-1); 5055 if (ret) 5056 return ret; 5057 } 5058 5059 /* 5060 * We're truncating a file that used to have good data down to 5061 * zero. Make sure any new writes to the file get on disk 5062 * on close. 5063 */ 5064 if (newsize == 0) 5065 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5066 &BTRFS_I(inode)->runtime_flags); 5067 5068 truncate_setsize(inode, newsize); 5069 5070 inode_dio_wait(inode); 5071 5072 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5073 if (ret && inode->i_nlink) { 5074 int err; 5075 5076 /* 5077 * Truncate failed, so fix up the in-memory size. We 5078 * adjusted disk_i_size down as we removed extents, so 5079 * wait for disk_i_size to be stable and then update the 5080 * in-memory size to match. 
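 * Note the truncate error is still returned to the caller; this fixup
 * only keeps the in-memory i_size consistent with disk_i_size.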
5081 */ 5082 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5083 if (err) 5084 return err; 5085 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5086 } 5087 } 5088 5089 return ret; 5090 } 5091 5092 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5093 struct iattr *attr) 5094 { 5095 struct inode *inode = d_inode(dentry); 5096 struct btrfs_root *root = BTRFS_I(inode)->root; 5097 int err; 5098 5099 if (btrfs_root_readonly(root)) 5100 return -EROFS; 5101 5102 err = setattr_prepare(idmap, dentry, attr); 5103 if (err) 5104 return err; 5105 5106 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5107 err = btrfs_setsize(inode, attr); 5108 if (err) 5109 return err; 5110 } 5111 5112 if (attr->ia_valid) { 5113 setattr_copy(idmap, inode, attr); 5114 inode_inc_iversion(inode); 5115 err = btrfs_dirty_inode(BTRFS_I(inode)); 5116 5117 if (!err && attr->ia_valid & ATTR_MODE) 5118 err = posix_acl_chmod(idmap, dentry, inode->i_mode); 5119 } 5120 5121 return err; 5122 } 5123 5124 /* 5125 * While truncating the inode pages during eviction, we get the VFS 5126 * calling btrfs_invalidate_folio() against each folio of the inode. This 5127 * is slow because the calls to btrfs_invalidate_folio() result in a 5128 * huge amount of calls to lock_extent() and clear_extent_bit(), 5129 * which keep merging and splitting extent_state structures over and over, 5130 * wasting lots of time. 5131 * 5132 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5133 * skip all those expensive operations on a per folio basis and do only 5134 * the ordered io finishing, while we release here the extent_map and 5135 * extent_state structures, without the excessive merging and splitting. 5136 */ 5137 static void evict_inode_truncate_pages(struct inode *inode) 5138 { 5139 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5140 struct rb_node *node; 5141 5142 ASSERT(inode->i_state & I_FREEING); 5143 truncate_inode_pages_final(&inode->i_data); 5144 5145 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5146 5147 /* 5148 * Keep looping until we have no more ranges in the io tree. 5149 * We can have ongoing bios started by readahead that have 5150 * their endio callback (extent_io.c:end_bio_extent_readpage) 5151 * still in progress (unlocked the pages in the bio but did not yet 5152 * unlocked the ranges in the io tree). Therefore this means some 5153 * ranges can still be locked and eviction started because before 5154 * submitting those bios, which are executed by a separate task (work 5155 * queue kthread), inode references (inode->i_count) were not taken 5156 * (which would be dropped in the end io callback of each bio). 5157 * Therefore here we effectively end up waiting for those bios and 5158 * anyone else holding locked ranges without having bumped the inode's 5159 * reference count - if we don't do it, when they access the inode's 5160 * io_tree to unlock a range it may be too late, leading to an 5161 * use-after-free issue. 
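 * The lock_extent() call in the loop below is what makes us wait on any
 * such bio still holding a locked range.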
5162 */ 5163 spin_lock(&io_tree->lock); 5164 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5165 struct extent_state *state; 5166 struct extent_state *cached_state = NULL; 5167 u64 start; 5168 u64 end; 5169 unsigned state_flags; 5170 5171 node = rb_first(&io_tree->state); 5172 state = rb_entry(node, struct extent_state, rb_node); 5173 start = state->start; 5174 end = state->end; 5175 state_flags = state->state; 5176 spin_unlock(&io_tree->lock); 5177 5178 lock_extent(io_tree, start, end, &cached_state); 5179 5180 /* 5181 * If still has DELALLOC flag, the extent didn't reach disk, 5182 * and its reserved space won't be freed by delayed_ref. 5183 * So we need to free its reserved space here. 5184 * (Refer to comment in btrfs_invalidate_folio, case 2) 5185 * 5186 * Note, end is the bytenr of last byte, so we need + 1 here. 5187 */ 5188 if (state_flags & EXTENT_DELALLOC) 5189 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5190 end - start + 1, NULL); 5191 5192 clear_extent_bit(io_tree, start, end, 5193 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5194 &cached_state); 5195 5196 cond_resched(); 5197 spin_lock(&io_tree->lock); 5198 } 5199 spin_unlock(&io_tree->lock); 5200 } 5201 5202 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5203 struct btrfs_block_rsv *rsv) 5204 { 5205 struct btrfs_fs_info *fs_info = root->fs_info; 5206 struct btrfs_trans_handle *trans; 5207 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5208 int ret; 5209 5210 /* 5211 * Eviction should be taking place at some place safe because of our 5212 * delayed iputs. However the normal flushing code will run delayed 5213 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5214 * 5215 * We reserve the delayed_refs_extra here again because we can't use 5216 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5217 * above. We reserve our extra bit here because we generate a ton of 5218 * delayed refs activity by truncating. 5219 * 5220 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5221 * if we fail to make this reservation we can re-try without the 5222 * delayed_refs_extra so we can make some forward progress. 
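 * So: first try to refill with the extra amount and, failing that, retry
 * with just rsv->size and give up the extra delayed refs reservation.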
5223 */ 5224 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5225 BTRFS_RESERVE_FLUSH_EVICT); 5226 if (ret) { 5227 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5228 BTRFS_RESERVE_FLUSH_EVICT); 5229 if (ret) { 5230 btrfs_warn(fs_info, 5231 "could not allocate space for delete; will truncate on mount"); 5232 return ERR_PTR(-ENOSPC); 5233 } 5234 delayed_refs_extra = 0; 5235 } 5236 5237 trans = btrfs_join_transaction(root); 5238 if (IS_ERR(trans)) 5239 return trans; 5240 5241 if (delayed_refs_extra) { 5242 trans->block_rsv = &fs_info->trans_block_rsv; 5243 trans->bytes_reserved = delayed_refs_extra; 5244 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5245 delayed_refs_extra, true); 5246 } 5247 return trans; 5248 } 5249 5250 void btrfs_evict_inode(struct inode *inode) 5251 { 5252 struct btrfs_fs_info *fs_info; 5253 struct btrfs_trans_handle *trans; 5254 struct btrfs_root *root = BTRFS_I(inode)->root; 5255 struct btrfs_block_rsv *rsv = NULL; 5256 int ret; 5257 5258 trace_btrfs_inode_evict(inode); 5259 5260 if (!root) { 5261 fsverity_cleanup_inode(inode); 5262 clear_inode(inode); 5263 return; 5264 } 5265 5266 fs_info = inode_to_fs_info(inode); 5267 evict_inode_truncate_pages(inode); 5268 5269 if (inode->i_nlink && 5270 ((btrfs_root_refs(&root->root_item) != 0 && 5271 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) || 5272 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5273 goto out; 5274 5275 if (is_bad_inode(inode)) 5276 goto out; 5277 5278 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5279 goto out; 5280 5281 if (inode->i_nlink > 0) { 5282 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5283 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID); 5284 goto out; 5285 } 5286 5287 /* 5288 * This makes sure the inode item in tree is uptodate and the space for 5289 * the inode update is released. 5290 */ 5291 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5292 if (ret) 5293 goto out; 5294 5295 /* 5296 * This drops any pending insert or delete operations we have for this 5297 * inode. We could have a delayed dir index deletion queued up, but 5298 * we're removing the inode completely so that'll be taken care of in 5299 * the truncate. 5300 */ 5301 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5302 5303 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5304 if (!rsv) 5305 goto out; 5306 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5307 rsv->failfast = true; 5308 5309 btrfs_i_size_write(BTRFS_I(inode), 0); 5310 5311 while (1) { 5312 struct btrfs_truncate_control control = { 5313 .inode = BTRFS_I(inode), 5314 .ino = btrfs_ino(BTRFS_I(inode)), 5315 .new_size = 0, 5316 .min_type = 0, 5317 }; 5318 5319 trans = evict_refill_and_join(root, rsv); 5320 if (IS_ERR(trans)) 5321 goto out; 5322 5323 trans->block_rsv = rsv; 5324 5325 ret = btrfs_truncate_inode_items(trans, root, &control); 5326 trans->block_rsv = &fs_info->trans_block_rsv; 5327 btrfs_end_transaction(trans); 5328 /* 5329 * We have not added new delayed items for our inode after we 5330 * have flushed its delayed items, so no need to throttle on 5331 * delayed items. However we have modified extent buffers. 5332 */ 5333 btrfs_btree_balance_dirty_nodelay(fs_info); 5334 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5335 goto out; 5336 else if (!ret) 5337 break; 5338 } 5339 5340 /* 5341 * Errors here aren't a big deal, it just means we leave orphan items in 5342 * the tree. They will be cleaned up on the next mount. 
If the inode 5343 * number gets reused, cleanup deletes the orphan item without doing 5344 * anything, and unlink reuses the existing orphan item. 5345 * 5346 * If it turns out that we are dropping too many of these, we might want 5347 * to add a mechanism for retrying these after a commit. 5348 */ 5349 trans = evict_refill_and_join(root, rsv); 5350 if (!IS_ERR(trans)) { 5351 trans->block_rsv = rsv; 5352 btrfs_orphan_del(trans, BTRFS_I(inode)); 5353 trans->block_rsv = &fs_info->trans_block_rsv; 5354 btrfs_end_transaction(trans); 5355 } 5356 5357 out: 5358 btrfs_free_block_rsv(fs_info, rsv); 5359 /* 5360 * If we didn't successfully delete, the orphan item will still be in 5361 * the tree and we'll retry on the next mount. Again, we might also want 5362 * to retry these periodically in the future. 5363 */ 5364 btrfs_remove_delayed_node(BTRFS_I(inode)); 5365 fsverity_cleanup_inode(inode); 5366 clear_inode(inode); 5367 } 5368 5369 /* 5370 * Return the key found in the dir entry in the location pointer, fill @type 5371 * with BTRFS_FT_*, and return 0. 5372 * 5373 * If no dir entries were found, returns -ENOENT. 5374 * If a corrupted location is found in the dir entry, returns -EUCLEAN. 5375 */ 5376 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5377 struct btrfs_key *location, u8 *type) 5378 { 5379 struct btrfs_dir_item *di; 5380 struct btrfs_path *path; 5381 struct btrfs_root *root = dir->root; 5382 int ret = 0; 5383 struct fscrypt_name fname; 5384 5385 path = btrfs_alloc_path(); 5386 if (!path) 5387 return -ENOMEM; 5388 5389 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5390 if (ret < 0) 5391 goto out; 5392 /* 5393 * fscrypt_setup_filename() should never return a positive value, but 5394 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5395 */ 5396 ASSERT(ret == 0); 5397 5398 /* This needs to handle no-key deletions later on */ 5399 5400 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5401 &fname.disk_name, 0); 5402 if (IS_ERR_OR_NULL(di)) { 5403 ret = di ? PTR_ERR(di) : -ENOENT; 5404 goto out; 5405 } 5406 5407 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5408 if (location->type != BTRFS_INODE_ITEM_KEY && 5409 location->type != BTRFS_ROOT_ITEM_KEY) { 5410 ret = -EUCLEAN; 5411 btrfs_warn(root->fs_info, 5412 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5413 __func__, fname.disk_name.name, btrfs_ino(dir), 5414 location->objectid, location->type, location->offset); 5415 } 5416 if (!ret) 5417 *type = btrfs_dir_ftype(path->nodes[0], di); 5418 out: 5419 fscrypt_free_filename(&fname); 5420 btrfs_free_path(path); 5421 return ret; 5422 } 5423 5424 /* 5425 * When we hit a tree root in a directory, the btrfs part of the inode 5426 * needs to be changed to reflect the root directory of the tree root. This 5427 * is kind of like crossing a mount point.
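* For example, looking up a subvolume entry yields a BTRFS_ROOT_ITEM_KEY location; after verifying the matching root ref below, we rewrite the location to point at the INODE_ITEM of the subvolume tree's own root directory.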
5428 */ 5429 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5430 struct btrfs_inode *dir, 5431 struct dentry *dentry, 5432 struct btrfs_key *location, 5433 struct btrfs_root **sub_root) 5434 { 5435 struct btrfs_path *path; 5436 struct btrfs_root *new_root; 5437 struct btrfs_root_ref *ref; 5438 struct extent_buffer *leaf; 5439 struct btrfs_key key; 5440 int ret; 5441 int err = 0; 5442 struct fscrypt_name fname; 5443 5444 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5445 if (ret) 5446 return ret; 5447 5448 path = btrfs_alloc_path(); 5449 if (!path) { 5450 err = -ENOMEM; 5451 goto out; 5452 } 5453 5454 err = -ENOENT; 5455 key.objectid = btrfs_root_id(dir->root); 5456 key.type = BTRFS_ROOT_REF_KEY; 5457 key.offset = location->objectid; 5458 5459 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5460 if (ret) { 5461 if (ret < 0) 5462 err = ret; 5463 goto out; 5464 } 5465 5466 leaf = path->nodes[0]; 5467 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5468 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5469 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5470 goto out; 5471 5472 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5473 (unsigned long)(ref + 1), fname.disk_name.len); 5474 if (ret) 5475 goto out; 5476 5477 btrfs_release_path(path); 5478 5479 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5480 if (IS_ERR(new_root)) { 5481 err = PTR_ERR(new_root); 5482 goto out; 5483 } 5484 5485 *sub_root = new_root; 5486 location->objectid = btrfs_root_dirid(&new_root->root_item); 5487 location->type = BTRFS_INODE_ITEM_KEY; 5488 location->offset = 0; 5489 err = 0; 5490 out: 5491 btrfs_free_path(path); 5492 fscrypt_free_filename(&fname); 5493 return err; 5494 } 5495 5496 static void inode_tree_add(struct btrfs_inode *inode) 5497 { 5498 struct btrfs_root *root = inode->root; 5499 struct btrfs_inode *entry; 5500 struct rb_node **p; 5501 struct rb_node *parent; 5502 struct rb_node *new = &inode->rb_node; 5503 u64 ino = btrfs_ino(inode); 5504 5505 if (inode_unhashed(&inode->vfs_inode)) 5506 return; 5507 parent = NULL; 5508 spin_lock(&root->inode_lock); 5509 p = &root->inode_tree.rb_node; 5510 while (*p) { 5511 parent = *p; 5512 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5513 5514 if (ino < btrfs_ino(entry)) 5515 p = &parent->rb_left; 5516 else if (ino > btrfs_ino(entry)) 5517 p = &parent->rb_right; 5518 else { 5519 WARN_ON(!(entry->vfs_inode.i_state & 5520 (I_WILL_FREE | I_FREEING))); 5521 rb_replace_node(parent, new, &root->inode_tree); 5522 RB_CLEAR_NODE(parent); 5523 spin_unlock(&root->inode_lock); 5524 return; 5525 } 5526 } 5527 rb_link_node(new, parent, p); 5528 rb_insert_color(new, &root->inode_tree); 5529 spin_unlock(&root->inode_lock); 5530 } 5531 5532 static void inode_tree_del(struct btrfs_inode *inode) 5533 { 5534 struct btrfs_root *root = inode->root; 5535 int empty = 0; 5536 5537 spin_lock(&root->inode_lock); 5538 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5539 rb_erase(&inode->rb_node, &root->inode_tree); 5540 RB_CLEAR_NODE(&inode->rb_node); 5541 empty = RB_EMPTY_ROOT(&root->inode_tree); 5542 } 5543 spin_unlock(&root->inode_lock); 5544 5545 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5546 spin_lock(&root->inode_lock); 5547 empty = RB_EMPTY_ROOT(&root->inode_tree); 5548 spin_unlock(&root->inode_lock); 5549 if (empty) 5550 btrfs_add_dead_root(root); 5551 } 5552 } 5553 5554 5555 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5556 
{ 5557 struct btrfs_iget_args *args = p; 5558 5559 inode->i_ino = args->ino; 5560 BTRFS_I(inode)->location.objectid = args->ino; 5561 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5562 BTRFS_I(inode)->location.offset = 0; 5563 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5564 5565 if (args->root && args->root == args->root->fs_info->tree_root && 5566 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5567 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5568 &BTRFS_I(inode)->runtime_flags); 5569 return 0; 5570 } 5571 5572 static int btrfs_find_actor(struct inode *inode, void *opaque) 5573 { 5574 struct btrfs_iget_args *args = opaque; 5575 5576 return args->ino == BTRFS_I(inode)->location.objectid && 5577 args->root == BTRFS_I(inode)->root; 5578 } 5579 5580 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5581 struct btrfs_root *root) 5582 { 5583 struct inode *inode; 5584 struct btrfs_iget_args args; 5585 unsigned long hashval = btrfs_inode_hash(ino, root); 5586 5587 args.ino = ino; 5588 args.root = root; 5589 5590 inode = iget5_locked(s, hashval, btrfs_find_actor, 5591 btrfs_init_locked_inode, 5592 (void *)&args); 5593 return inode; 5594 } 5595 5596 /* 5597 * Get an inode object given its inode number and corresponding root. A path 5598 * can be preallocated to prevent recursing back into iget through the 5599 * allocator; NULL is also valid but may require an additional allocation 5600 * later. 5601 */ 5602 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5603 struct btrfs_root *root, struct btrfs_path *path) 5604 { 5605 struct inode *inode; 5606 5607 inode = btrfs_iget_locked(s, ino, root); 5608 if (!inode) 5609 return ERR_PTR(-ENOMEM); 5610 5611 if (inode->i_state & I_NEW) { 5612 int ret; 5613 5614 ret = btrfs_read_locked_inode(inode, path); 5615 if (!ret) { 5616 inode_tree_add(BTRFS_I(inode)); 5617 unlock_new_inode(inode); 5618 } else { 5619 iget_failed(inode); 5620 /* 5621 * ret > 0 can come from btrfs_search_slot() called by 5622 * btrfs_read_locked_inode(); this means the inode item 5623 * was not found.
5624 */ 5625 if (ret > 0) 5626 ret = -ENOENT; 5627 inode = ERR_PTR(ret); 5628 } 5629 } 5630 5631 return inode; 5632 } 5633 5634 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5635 { 5636 return btrfs_iget_path(s, ino, root, NULL); 5637 } 5638 5639 static struct inode *new_simple_dir(struct inode *dir, 5640 struct btrfs_key *key, 5641 struct btrfs_root *root) 5642 { 5643 struct timespec64 ts; 5644 struct inode *inode = new_inode(dir->i_sb); 5645 5646 if (!inode) 5647 return ERR_PTR(-ENOMEM); 5648 5649 BTRFS_I(inode)->root = btrfs_grab_root(root); 5650 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5651 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5652 5653 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5654 /* 5655 * We only need lookup, the rest is read-only and there's no inode 5656 * associated with the dentry 5657 */ 5658 inode->i_op = &simple_dir_inode_operations; 5659 inode->i_opflags &= ~IOP_XATTR; 5660 inode->i_fop = &simple_dir_operations; 5661 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5662 5663 ts = inode_set_ctime_current(inode); 5664 inode_set_mtime_to_ts(inode, ts); 5665 inode_set_atime_to_ts(inode, inode_get_atime(dir)); 5666 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 5667 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 5668 5669 inode->i_uid = dir->i_uid; 5670 inode->i_gid = dir->i_gid; 5671 5672 return inode; 5673 } 5674 5675 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5676 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5677 static_assert(BTRFS_FT_DIR == FT_DIR); 5678 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5679 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5680 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5681 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5682 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5683 5684 static inline u8 btrfs_inode_type(struct inode *inode) 5685 { 5686 return fs_umode_to_ftype(inode->i_mode); 5687 } 5688 5689 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5690 { 5691 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 5692 struct inode *inode; 5693 struct btrfs_root *root = BTRFS_I(dir)->root; 5694 struct btrfs_root *sub_root = root; 5695 struct btrfs_key location; 5696 u8 di_type = 0; 5697 int ret = 0; 5698 5699 if (dentry->d_name.len > BTRFS_NAME_LEN) 5700 return ERR_PTR(-ENAMETOOLONG); 5701 5702 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5703 if (ret < 0) 5704 return ERR_PTR(ret); 5705 5706 if (location.type == BTRFS_INODE_ITEM_KEY) { 5707 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5708 if (IS_ERR(inode)) 5709 return inode; 5710 5711 /* Do extra check against inode mode with di_type */ 5712 if (btrfs_inode_type(inode) != di_type) { 5713 btrfs_crit(fs_info, 5714 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5715 inode->i_mode, btrfs_inode_type(inode), 5716 di_type); 5717 iput(inode); 5718 return ERR_PTR(-EUCLEAN); 5719 } 5720 return inode; 5721 } 5722 5723 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5724 &location, &sub_root); 5725 if (ret < 0) { 5726 if (ret != -ENOENT) 5727 inode = ERR_PTR(ret); 5728 else 5729 inode = new_simple_dir(dir, &location, root); 5730 } else { 5731 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5732 btrfs_put_root(sub_root); 5733 5734 if (IS_ERR(inode)) 5735 return inode; 5736 5737 down_read(&fs_info->cleanup_work_sem); 5738 if (!sb_rdonly(inode->i_sb)) 5739 ret = btrfs_orphan_cleanup(sub_root); 5740 
up_read(&fs_info->cleanup_work_sem); 5741 if (ret) { 5742 iput(inode); 5743 inode = ERR_PTR(ret); 5744 } 5745 } 5746 5747 return inode; 5748 } 5749 5750 static int btrfs_dentry_delete(const struct dentry *dentry) 5751 { 5752 struct btrfs_root *root; 5753 struct inode *inode = d_inode(dentry); 5754 5755 if (!inode && !IS_ROOT(dentry)) 5756 inode = d_inode(dentry->d_parent); 5757 5758 if (inode) { 5759 root = BTRFS_I(inode)->root; 5760 if (btrfs_root_refs(&root->root_item) == 0) 5761 return 1; 5762 5763 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5764 return 1; 5765 } 5766 return 0; 5767 } 5768 5769 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5770 unsigned int flags) 5771 { 5772 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5773 5774 if (inode == ERR_PTR(-ENOENT)) 5775 inode = NULL; 5776 return d_splice_alias(inode, dentry); 5777 } 5778 5779 /* 5780 * Find the highest existing sequence number in a directory and then set the 5781 * in-memory index_cnt variable to the first free sequence number. 5782 */ 5783 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5784 { 5785 struct btrfs_root *root = inode->root; 5786 struct btrfs_key key, found_key; 5787 struct btrfs_path *path; 5788 struct extent_buffer *leaf; 5789 int ret; 5790 5791 key.objectid = btrfs_ino(inode); 5792 key.type = BTRFS_DIR_INDEX_KEY; 5793 key.offset = (u64)-1; 5794 5795 path = btrfs_alloc_path(); 5796 if (!path) 5797 return -ENOMEM; 5798 5799 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5800 if (ret < 0) 5801 goto out; 5802 /* FIXME: we should be able to handle this */ 5803 if (ret == 0) 5804 goto out; 5805 ret = 0; 5806 5807 if (path->slots[0] == 0) { 5808 inode->index_cnt = BTRFS_DIR_START_INDEX; 5809 goto out; 5810 } 5811 5812 path->slots[0]--; 5813 5814 leaf = path->nodes[0]; 5815 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5816 5817 if (found_key.objectid != btrfs_ino(inode) || 5818 found_key.type != BTRFS_DIR_INDEX_KEY) { 5819 inode->index_cnt = BTRFS_DIR_START_INDEX; 5820 goto out; 5821 } 5822 5823 inode->index_cnt = found_key.offset + 1; 5824 out: 5825 btrfs_free_path(path); 5826 return ret; 5827 } 5828 5829 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5830 { 5831 int ret = 0; 5832 5833 btrfs_inode_lock(dir, 0); 5834 if (dir->index_cnt == (u64)-1) { 5835 ret = btrfs_inode_delayed_dir_index_count(dir); 5836 if (ret) { 5837 ret = btrfs_set_inode_index_count(dir); 5838 if (ret) 5839 goto out; 5840 } 5841 } 5842 5843 /* index_cnt is the index number of next new entry, so decrement it. */ 5844 *index = dir->index_cnt - 1; 5845 out: 5846 btrfs_inode_unlock(dir, 0); 5847 5848 return ret; 5849 } 5850 5851 /* 5852 * All this infrastructure exists because dir_emit can fault, and we are holding 5853 * the tree lock when doing readdir. For now just allocate a buffer and copy 5854 * our information into that, and then dir_emit from the buffer. This is 5855 * similar to what NFS does, only we don't keep the buffer around in pagecache 5856 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5857 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5858 * tree lock. 
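* Entries are packed back to back in the page-sized buffer: each struct dir_entry is immediately followed by its name bytes, so the next entry starts at addr + sizeof(struct dir_entry) + name_len (see btrfs_filldir() below).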
5859 */ 5860 static int btrfs_opendir(struct inode *inode, struct file *file) 5861 { 5862 struct btrfs_file_private *private; 5863 u64 last_index; 5864 int ret; 5865 5866 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5867 if (ret) 5868 return ret; 5869 5870 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5871 if (!private) 5872 return -ENOMEM; 5873 private->last_index = last_index; 5874 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5875 if (!private->filldir_buf) { 5876 kfree(private); 5877 return -ENOMEM; 5878 } 5879 file->private_data = private; 5880 return 0; 5881 } 5882 5883 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence) 5884 { 5885 struct btrfs_file_private *private = file->private_data; 5886 int ret; 5887 5888 ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)), 5889 &private->last_index); 5890 if (ret) 5891 return ret; 5892 5893 return generic_file_llseek(file, offset, whence); 5894 } 5895 5896 struct dir_entry { 5897 u64 ino; 5898 u64 offset; 5899 unsigned type; 5900 int name_len; 5901 }; 5902 5903 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5904 { 5905 while (entries--) { 5906 struct dir_entry *entry = addr; 5907 char *name = (char *)(entry + 1); 5908 5909 ctx->pos = get_unaligned(&entry->offset); 5910 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5911 get_unaligned(&entry->ino), 5912 get_unaligned(&entry->type))) 5913 return 1; 5914 addr += sizeof(struct dir_entry) + 5915 get_unaligned(&entry->name_len); 5916 ctx->pos++; 5917 } 5918 return 0; 5919 } 5920 5921 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5922 { 5923 struct inode *inode = file_inode(file); 5924 struct btrfs_root *root = BTRFS_I(inode)->root; 5925 struct btrfs_file_private *private = file->private_data; 5926 struct btrfs_dir_item *di; 5927 struct btrfs_key key; 5928 struct btrfs_key found_key; 5929 struct btrfs_path *path; 5930 void *addr; 5931 LIST_HEAD(ins_list); 5932 LIST_HEAD(del_list); 5933 int ret; 5934 char *name_ptr; 5935 int name_len; 5936 int entries = 0; 5937 int total_len = 0; 5938 bool put = false; 5939 struct btrfs_key location; 5940 5941 if (!dir_emit_dots(file, ctx)) 5942 return 0; 5943 5944 path = btrfs_alloc_path(); 5945 if (!path) 5946 return -ENOMEM; 5947 5948 addr = private->filldir_buf; 5949 path->reada = READA_FORWARD; 5950 5951 put = btrfs_readdir_get_delayed_items(inode, private->last_index, 5952 &ins_list, &del_list); 5953 5954 again: 5955 key.type = BTRFS_DIR_INDEX_KEY; 5956 key.offset = ctx->pos; 5957 key.objectid = btrfs_ino(BTRFS_I(inode)); 5958 5959 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5960 struct dir_entry *entry; 5961 struct extent_buffer *leaf = path->nodes[0]; 5962 u8 ftype; 5963 5964 if (found_key.objectid != key.objectid) 5965 break; 5966 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5967 break; 5968 if (found_key.offset < ctx->pos) 5969 continue; 5970 if (found_key.offset > private->last_index) 5971 break; 5972 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5973 continue; 5974 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5975 name_len = btrfs_dir_name_len(leaf, di); 5976 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5977 PAGE_SIZE) { 5978 btrfs_release_path(path); 5979 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5980 if (ret) 5981 goto nopos; 5982 addr = private->filldir_buf; 5983 entries = 0; 5984 total_len = 0; 5985 goto again; 5986 } 5987 5988 ftype = 
btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 5989 entry = addr; 5990 name_ptr = (char *)(entry + 1); 5991 read_extent_buffer(leaf, name_ptr, 5992 (unsigned long)(di + 1), name_len); 5993 put_unaligned(name_len, &entry->name_len); 5994 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 5995 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5996 put_unaligned(location.objectid, &entry->ino); 5997 put_unaligned(found_key.offset, &entry->offset); 5998 entries++; 5999 addr += sizeof(struct dir_entry) + name_len; 6000 total_len += sizeof(struct dir_entry) + name_len; 6001 } 6002 /* Catch any error encountered during iteration */ 6003 if (ret < 0) 6004 goto err; 6005 6006 btrfs_release_path(path); 6007 6008 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6009 if (ret) 6010 goto nopos; 6011 6012 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 6013 if (ret) 6014 goto nopos; 6015 6016 /* 6017 * Stop new entries from being returned after we return the last 6018 * entry. 6019 * 6020 * New directory entries are assigned a strictly increasing 6021 * offset. This means that new entries created during readdir 6022 * are *guaranteed* to be seen in the future by that readdir. 6023 * This has broken buggy programs which operate on names as 6024 * they're returned by readdir. Until we re-use freed offsets 6025 * we have this hack to stop new entries from being returned 6026 * under the assumption that they'll never reach this huge 6027 * offset. 6028 * 6029 * This is being careful not to overflow 32bit loff_t unless the 6030 * last entry requires it because doing so has broken 32bit apps 6031 * in the past. 6032 */ 6033 if (ctx->pos >= INT_MAX) 6034 ctx->pos = LLONG_MAX; 6035 else 6036 ctx->pos = INT_MAX; 6037 nopos: 6038 ret = 0; 6039 err: 6040 if (put) 6041 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 6042 btrfs_free_path(path); 6043 return ret; 6044 } 6045 6046 /* 6047 * This is somewhat expensive, updating the tree every time the 6048 * inode changes. But it is most likely to find the inode in cache. 6049 * FIXME: needs more benchmarking... there are no reasons other than performance 6050 * to keep or drop this code. 6051 */ 6052 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6053 { 6054 struct btrfs_root *root = inode->root; 6055 struct btrfs_fs_info *fs_info = root->fs_info; 6056 struct btrfs_trans_handle *trans; 6057 int ret; 6058 6059 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6060 return 0; 6061 6062 trans = btrfs_join_transaction(root); 6063 if (IS_ERR(trans)) 6064 return PTR_ERR(trans); 6065 6066 ret = btrfs_update_inode(trans, inode); 6067 if (ret == -ENOSPC || ret == -EDQUOT) { 6068 /* whoops, let's try again with the full transaction */ 6069 btrfs_end_transaction(trans); 6070 trans = btrfs_start_transaction(root, 1); 6071 if (IS_ERR(trans)) 6072 return PTR_ERR(trans); 6073 6074 ret = btrfs_update_inode(trans, inode); 6075 } 6076 btrfs_end_transaction(trans); 6077 if (inode->delayed_node) 6078 btrfs_balance_delayed_items(fs_info); 6079 6080 return ret; 6081 } 6082 6083 /* 6084 * This is a copy of file_update_time. We need it so that we can return an 6085 * error on ENOSPC when updating the inode, for both file writes and mmap writes. 6086 */ 6087 static int btrfs_update_time(struct inode *inode, int flags) 6088 { 6089 struct btrfs_root *root = BTRFS_I(inode)->root; 6090 bool dirty; 6091 6092 if (btrfs_root_readonly(root)) 6093 return -EROFS; 6094 6095 dirty = inode_update_timestamps(inode, flags); 6096 return dirty ?
btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6097 } 6098 6099 /* 6100 * Helper to find a free sequence number in a given directory. The current 6101 * code is very simple; later versions will do smarter things in the btree. 6102 */ 6103 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6104 { 6105 int ret = 0; 6106 6107 if (dir->index_cnt == (u64)-1) { 6108 ret = btrfs_inode_delayed_dir_index_count(dir); 6109 if (ret) { 6110 ret = btrfs_set_inode_index_count(dir); 6111 if (ret) 6112 return ret; 6113 } 6114 } 6115 6116 *index = dir->index_cnt; 6117 dir->index_cnt++; 6118 6119 return ret; 6120 } 6121 6122 static int btrfs_insert_inode_locked(struct inode *inode) 6123 { 6124 struct btrfs_iget_args args; 6125 6126 args.ino = BTRFS_I(inode)->location.objectid; 6127 args.root = BTRFS_I(inode)->root; 6128 6129 return insert_inode_locked4(inode, 6130 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6131 btrfs_find_actor, &args); 6132 } 6133 6134 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6135 unsigned int *trans_num_items) 6136 { 6137 struct inode *dir = args->dir; 6138 struct inode *inode = args->inode; 6139 int ret; 6140 6141 if (!args->orphan) { 6142 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6143 &args->fname); 6144 if (ret) 6145 return ret; 6146 } 6147 6148 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6149 if (ret) { 6150 fscrypt_free_filename(&args->fname); 6151 return ret; 6152 } 6153 6154 /* 1 to add inode item */ 6155 *trans_num_items = 1; 6156 /* 1 to add compression property */ 6157 if (BTRFS_I(dir)->prop_compress) 6158 (*trans_num_items)++; 6159 /* 1 to add default ACL xattr */ 6160 if (args->default_acl) 6161 (*trans_num_items)++; 6162 /* 1 to add access ACL xattr */ 6163 if (args->acl) 6164 (*trans_num_items)++; 6165 #ifdef CONFIG_SECURITY 6166 /* 1 to add LSM xattr */ 6167 if (dir->i_security) 6168 (*trans_num_items)++; 6169 #endif 6170 if (args->orphan) { 6171 /* 1 to add orphan item */ 6172 (*trans_num_items)++; 6173 } else { 6174 /* 6175 * 1 to add dir item 6176 * 1 to add dir index 6177 * 1 to update parent inode item 6178 * 6179 * No need for 1 unit for the inode ref item because it is 6180 * inserted in a batch together with the inode item at 6181 * btrfs_create_new_inode(). 6182 */ 6183 *trans_num_items += 3; 6184 } 6185 return 0; 6186 } 6187 6188 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6189 { 6190 posix_acl_release(args->acl); 6191 posix_acl_release(args->default_acl); 6192 fscrypt_free_filename(&args->fname); 6193 } 6194 6195 /* 6196 * Inherit flags from the parent inode. 6197 * 6198 * Currently only the compression flags and the COW flags are inherited.
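* NOCOMPRESS takes precedence over COMPRESS when the parent somehow has both set, and NODATACOW on a regular file also implies NODATASUM, since checksums cannot be kept consistent for data that is overwritten in place.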
6199 */ 6200 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6201 { 6202 unsigned int flags; 6203 6204 flags = dir->flags; 6205 6206 if (flags & BTRFS_INODE_NOCOMPRESS) { 6207 inode->flags &= ~BTRFS_INODE_COMPRESS; 6208 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6209 } else if (flags & BTRFS_INODE_COMPRESS) { 6210 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6211 inode->flags |= BTRFS_INODE_COMPRESS; 6212 } 6213 6214 if (flags & BTRFS_INODE_NODATACOW) { 6215 inode->flags |= BTRFS_INODE_NODATACOW; 6216 if (S_ISREG(inode->vfs_inode.i_mode)) 6217 inode->flags |= BTRFS_INODE_NODATASUM; 6218 } 6219 6220 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6221 } 6222 6223 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6224 struct btrfs_new_inode_args *args) 6225 { 6226 struct timespec64 ts; 6227 struct inode *dir = args->dir; 6228 struct inode *inode = args->inode; 6229 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6230 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6231 struct btrfs_root *root; 6232 struct btrfs_inode_item *inode_item; 6233 struct btrfs_key *location; 6234 struct btrfs_path *path; 6235 u64 objectid; 6236 struct btrfs_inode_ref *ref; 6237 struct btrfs_key key[2]; 6238 u32 sizes[2]; 6239 struct btrfs_item_batch batch; 6240 unsigned long ptr; 6241 int ret; 6242 6243 path = btrfs_alloc_path(); 6244 if (!path) 6245 return -ENOMEM; 6246 6247 if (!args->subvol) 6248 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6249 root = BTRFS_I(inode)->root; 6250 6251 ret = btrfs_get_free_objectid(root, &objectid); 6252 if (ret) 6253 goto out; 6254 inode->i_ino = objectid; 6255 6256 if (args->orphan) { 6257 /* 6258 * For O_TMPFILE, set the link count to 0, so that from this 6259 * point on we fill in an inode item with the correct link count. 6260 */ 6261 set_nlink(inode, 0); 6262 } else { 6263 trace_btrfs_inode_request(dir); 6264 6265 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6266 if (ret) 6267 goto out; 6268 } 6269 /* index_cnt is ignored for everything but a dir. */ 6270 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6271 BTRFS_I(inode)->generation = trans->transid; 6272 inode->i_generation = BTRFS_I(inode)->generation; 6273 6274 /* 6275 * We don't have any capability xattrs set here yet, so shortcut any 6276 * queries for them here. If we add them later via the inode 6277 * security init path or any other path this flag will be cleared. 6278 */ 6279 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags); 6280 6281 /* 6282 * Subvolumes don't inherit flags from their parent directory. 6283 * Originally this was probably by accident, but we probably can't 6284 * change it now without compatibility issues.
6285 */ 6286 if (!args->subvol) 6287 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6288 6289 if (S_ISREG(inode->i_mode)) { 6290 if (btrfs_test_opt(fs_info, NODATASUM)) 6291 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6292 if (btrfs_test_opt(fs_info, NODATACOW)) 6293 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6294 BTRFS_INODE_NODATASUM; 6295 } 6296 6297 location = &BTRFS_I(inode)->location; 6298 location->objectid = objectid; 6299 location->offset = 0; 6300 location->type = BTRFS_INODE_ITEM_KEY; 6301 6302 ret = btrfs_insert_inode_locked(inode); 6303 if (ret < 0) { 6304 if (!args->orphan) 6305 BTRFS_I(dir)->index_cnt--; 6306 goto out; 6307 } 6308 6309 /* 6310 * We could have gotten an inode number from somebody who was fsynced 6311 * and then removed in this same transaction, so let's just set full 6312 * sync since it will be a full sync anyway and this will blow away the 6313 * old info in the log. 6314 */ 6315 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6316 6317 key[0].objectid = objectid; 6318 key[0].type = BTRFS_INODE_ITEM_KEY; 6319 key[0].offset = 0; 6320 6321 sizes[0] = sizeof(struct btrfs_inode_item); 6322 6323 if (!args->orphan) { 6324 /* 6325 * Start new inodes with an inode_ref. This is slightly more 6326 * efficient for small numbers of hard links since they will 6327 * be packed into one item. Extended refs will kick in if we 6328 * add more hard links than can fit in the ref item. 6329 */ 6330 key[1].objectid = objectid; 6331 key[1].type = BTRFS_INODE_REF_KEY; 6332 if (args->subvol) { 6333 key[1].offset = objectid; 6334 sizes[1] = 2 + sizeof(*ref); 6335 } else { 6336 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6337 sizes[1] = name->len + sizeof(*ref); 6338 } 6339 } 6340 6341 batch.keys = &key[0]; 6342 batch.data_sizes = &sizes[0]; 6343 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6344 batch.nr = args->orphan ? 1 : 2; 6345 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6346 if (ret != 0) { 6347 btrfs_abort_transaction(trans, ret); 6348 goto discard; 6349 } 6350 6351 ts = simple_inode_init_ts(inode); 6352 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 6353 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 6354 6355 /* 6356 * We're going to fill the inode item now, so at this point the inode 6357 * must be fully initialized. 6358 */ 6359 6360 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6361 struct btrfs_inode_item); 6362 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6363 sizeof(*inode_item)); 6364 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6365 6366 if (!args->orphan) { 6367 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6368 struct btrfs_inode_ref); 6369 ptr = (unsigned long)(ref + 1); 6370 if (args->subvol) { 6371 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6372 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6373 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6374 } else { 6375 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6376 name->len); 6377 btrfs_set_inode_ref_index(path->nodes[0], ref, 6378 BTRFS_I(inode)->dir_index); 6379 write_extent_buffer(path->nodes[0], name->name, ptr, 6380 name->len); 6381 } 6382 } 6383 6384 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 6385 /* 6386 * We don't need the path anymore, plus inheriting properties, adding 6387 * ACLs, security xattrs, orphan item or adding the link, will result in 6388 * allocating yet another path. So just free our path. 
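* Setting the path to NULL afterwards also keeps the btrfs_free_path() calls in the exit paths from freeing it a second time.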
6389 */ 6390 btrfs_free_path(path); 6391 path = NULL; 6392 6393 if (args->subvol) { 6394 struct inode *parent; 6395 6396 /* 6397 * Subvolumes inherit properties from their parent subvolume, 6398 * not the directory they were created in. 6399 */ 6400 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6401 BTRFS_I(dir)->root); 6402 if (IS_ERR(parent)) { 6403 ret = PTR_ERR(parent); 6404 } else { 6405 ret = btrfs_inode_inherit_props(trans, inode, parent); 6406 iput(parent); 6407 } 6408 } else { 6409 ret = btrfs_inode_inherit_props(trans, inode, dir); 6410 } 6411 if (ret) { 6412 btrfs_err(fs_info, 6413 "error inheriting props for ino %llu (root %llu): %d", 6414 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret); 6415 } 6416 6417 /* 6418 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6419 * probably a bug. 6420 */ 6421 if (!args->subvol) { 6422 ret = btrfs_init_inode_security(trans, args); 6423 if (ret) { 6424 btrfs_abort_transaction(trans, ret); 6425 goto discard; 6426 } 6427 } 6428 6429 inode_tree_add(BTRFS_I(inode)); 6430 6431 trace_btrfs_inode_new(inode); 6432 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6433 6434 btrfs_update_root_times(trans, root); 6435 6436 if (args->orphan) { 6437 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6438 } else { 6439 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6440 0, BTRFS_I(inode)->dir_index); 6441 } 6442 if (ret) { 6443 btrfs_abort_transaction(trans, ret); 6444 goto discard; 6445 } 6446 6447 return 0; 6448 6449 discard: 6450 /* 6451 * discard_new_inode() calls iput(), but the caller owns the reference 6452 * to the inode. 6453 */ 6454 ihold(inode); 6455 discard_new_inode(inode); 6456 out: 6457 btrfs_free_path(path); 6458 return ret; 6459 } 6460 6461 /* 6462 * Utility function to add 'inode' into 'parent_inode' with 6463 * a given name and a given sequence number. 6464 * If 'add_backref' is true, also insert a backref from the 6465 * inode to the parent directory.
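* Note that when 'inode' is the root of a subvolume (its ino is BTRFS_FIRST_FREE_OBJECTID), a root ref is added instead of an inode ref, and the parent directory's i_size grows by name->len * 2 because both a dir item and a dir index item are inserted.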
6466 */ 6467 int btrfs_add_link(struct btrfs_trans_handle *trans, 6468 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6469 const struct fscrypt_str *name, int add_backref, u64 index) 6470 { 6471 int ret = 0; 6472 struct btrfs_key key; 6473 struct btrfs_root *root = parent_inode->root; 6474 u64 ino = btrfs_ino(inode); 6475 u64 parent_ino = btrfs_ino(parent_inode); 6476 6477 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6478 memcpy(&key, &inode->root->root_key, sizeof(key)); 6479 } else { 6480 key.objectid = ino; 6481 key.type = BTRFS_INODE_ITEM_KEY; 6482 key.offset = 0; 6483 } 6484 6485 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6486 ret = btrfs_add_root_ref(trans, key.objectid, 6487 btrfs_root_id(root), parent_ino, 6488 index, name); 6489 } else if (add_backref) { 6490 ret = btrfs_insert_inode_ref(trans, root, name, 6491 ino, parent_ino, index); 6492 } 6493 6494 /* Nothing to clean up yet */ 6495 if (ret) 6496 return ret; 6497 6498 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6499 btrfs_inode_type(&inode->vfs_inode), index); 6500 if (ret == -EEXIST || ret == -EOVERFLOW) 6501 goto fail_dir_item; 6502 else if (ret) { 6503 btrfs_abort_transaction(trans, ret); 6504 return ret; 6505 } 6506 6507 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6508 name->len * 2); 6509 inode_inc_iversion(&parent_inode->vfs_inode); 6510 /* 6511 * If we are replaying a log tree, we do not want to update the mtime 6512 * and ctime of the parent directory with the current time, since the 6513 * log replay procedure is responsible for setting them to their correct 6514 * values (the ones it had when the fsync was done). 6515 */ 6516 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) 6517 inode_set_mtime_to_ts(&parent_inode->vfs_inode, 6518 inode_set_ctime_current(&parent_inode->vfs_inode)); 6519 6520 ret = btrfs_update_inode(trans, parent_inode); 6521 if (ret) 6522 btrfs_abort_transaction(trans, ret); 6523 return ret; 6524 6525 fail_dir_item: 6526 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6527 u64 local_index; 6528 int err; 6529 err = btrfs_del_root_ref(trans, key.objectid, 6530 btrfs_root_id(root), parent_ino, 6531 &local_index, name); 6532 if (err) 6533 btrfs_abort_transaction(trans, err); 6534 } else if (add_backref) { 6535 u64 local_index; 6536 int err; 6537 6538 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6539 &local_index); 6540 if (err) 6541 btrfs_abort_transaction(trans, err); 6542 } 6543 6544 /* Return the original error code */ 6545 return ret; 6546 } 6547 6548 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6549 struct inode *inode) 6550 { 6551 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6552 struct btrfs_root *root = BTRFS_I(dir)->root; 6553 struct btrfs_new_inode_args new_inode_args = { 6554 .dir = dir, 6555 .dentry = dentry, 6556 .inode = inode, 6557 }; 6558 unsigned int trans_num_items; 6559 struct btrfs_trans_handle *trans; 6560 int err; 6561 6562 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6563 if (err) 6564 goto out_inode; 6565 6566 trans = btrfs_start_transaction(root, trans_num_items); 6567 if (IS_ERR(trans)) { 6568 err = PTR_ERR(trans); 6569 goto out_new_inode_args; 6570 } 6571 6572 err = btrfs_create_new_inode(trans, &new_inode_args); 6573 if (!err) 6574 d_instantiate_new(dentry, inode); 6575 6576 btrfs_end_transaction(trans); 6577 btrfs_btree_balance_dirty(fs_info); 6578 out_new_inode_args: 6579 btrfs_new_inode_args_destroy(&new_inode_args); 
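/* On failure nothing has made the inode visible yet, so drop the reference our caller handed us; on success d_instantiate_new() took it over. */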
6580 out_inode: 6581 if (err) 6582 iput(inode); 6583 return err; 6584 } 6585 6586 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6587 struct dentry *dentry, umode_t mode, dev_t rdev) 6588 { 6589 struct inode *inode; 6590 6591 inode = new_inode(dir->i_sb); 6592 if (!inode) 6593 return -ENOMEM; 6594 inode_init_owner(idmap, inode, dir, mode); 6595 inode->i_op = &btrfs_special_inode_operations; 6596 init_special_inode(inode, inode->i_mode, rdev); 6597 return btrfs_create_common(dir, dentry, inode); 6598 } 6599 6600 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6601 struct dentry *dentry, umode_t mode, bool excl) 6602 { 6603 struct inode *inode; 6604 6605 inode = new_inode(dir->i_sb); 6606 if (!inode) 6607 return -ENOMEM; 6608 inode_init_owner(idmap, inode, dir, mode); 6609 inode->i_fop = &btrfs_file_operations; 6610 inode->i_op = &btrfs_file_inode_operations; 6611 inode->i_mapping->a_ops = &btrfs_aops; 6612 return btrfs_create_common(dir, dentry, inode); 6613 } 6614 6615 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6616 struct dentry *dentry) 6617 { 6618 struct btrfs_trans_handle *trans = NULL; 6619 struct btrfs_root *root = BTRFS_I(dir)->root; 6620 struct inode *inode = d_inode(old_dentry); 6621 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 6622 struct fscrypt_name fname; 6623 u64 index; 6624 int err; 6625 int drop_inode = 0; 6626 6627 /* do not allow sys_link's with other subvols of the same device */ 6628 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root)) 6629 return -EXDEV; 6630 6631 if (inode->i_nlink >= BTRFS_LINK_MAX) 6632 return -EMLINK; 6633 6634 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6635 if (err) 6636 goto fail; 6637 6638 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6639 if (err) 6640 goto fail; 6641 6642 /* 6643 * 2 items for inode and inode ref 6644 * 2 items for dir items 6645 * 1 item for parent inode 6646 * 1 item for orphan item deletion if O_TMPFILE 6647 */ 6648 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6649 if (IS_ERR(trans)) { 6650 err = PTR_ERR(trans); 6651 trans = NULL; 6652 goto fail; 6653 } 6654 6655 /* There are several dir indexes for this inode, clear the cache. */ 6656 BTRFS_I(inode)->dir_index = 0ULL; 6657 inc_nlink(inode); 6658 inode_inc_iversion(inode); 6659 inode_set_ctime_current(inode); 6660 ihold(inode); 6661 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6662 6663 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6664 &fname.disk_name, 1, index); 6665 6666 if (err) { 6667 drop_inode = 1; 6668 } else { 6669 struct dentry *parent = dentry->d_parent; 6670 6671 err = btrfs_update_inode(trans, BTRFS_I(inode)); 6672 if (err) 6673 goto fail; 6674 if (inode->i_nlink == 1) { 6675 /* 6676 * If new hard link count is 1, it's a file created 6677 * with open(2) O_TMPFILE flag. 
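* Linking it makes the inode reachable again, so delete the orphan item that was added when it was created.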
6678 */ 6679 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6680 if (err) 6681 goto fail; 6682 } 6683 d_instantiate(dentry, inode); 6684 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6685 } 6686 6687 fail: 6688 fscrypt_free_filename(&fname); 6689 if (trans) 6690 btrfs_end_transaction(trans); 6691 if (drop_inode) { 6692 inode_dec_link_count(inode); 6693 iput(inode); 6694 } 6695 btrfs_btree_balance_dirty(fs_info); 6696 return err; 6697 } 6698 6699 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6700 struct dentry *dentry, umode_t mode) 6701 { 6702 struct inode *inode; 6703 6704 inode = new_inode(dir->i_sb); 6705 if (!inode) 6706 return -ENOMEM; 6707 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6708 inode->i_op = &btrfs_dir_inode_operations; 6709 inode->i_fop = &btrfs_dir_file_operations; 6710 return btrfs_create_common(dir, dentry, inode); 6711 } 6712 6713 static noinline int uncompress_inline(struct btrfs_path *path, 6714 struct page *page, 6715 struct btrfs_file_extent_item *item) 6716 { 6717 int ret; 6718 struct extent_buffer *leaf = path->nodes[0]; 6719 char *tmp; 6720 size_t max_size; 6721 unsigned long inline_size; 6722 unsigned long ptr; 6723 int compress_type; 6724 6725 compress_type = btrfs_file_extent_compression(leaf, item); 6726 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6727 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6728 tmp = kmalloc(inline_size, GFP_NOFS); 6729 if (!tmp) 6730 return -ENOMEM; 6731 ptr = btrfs_file_extent_inline_start(item); 6732 6733 read_extent_buffer(leaf, tmp, ptr, inline_size); 6734 6735 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6736 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6737 6738 /* 6739 * decompression code contains a memset to fill in any space between the end 6740 * of the uncompressed data and the end of max_size in case the decompressed 6741 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6742 * the end of an inline extent and the beginning of the next block, so we 6743 * cover that region here. 6744 */ 6745 6746 if (max_size < PAGE_SIZE) 6747 memzero_page(page, max_size, PAGE_SIZE - max_size); 6748 kfree(tmp); 6749 return ret; 6750 } 6751 6752 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6753 struct page *page) 6754 { 6755 struct btrfs_file_extent_item *fi; 6756 void *kaddr; 6757 size_t copy_size; 6758 6759 if (!page || PageUptodate(page)) 6760 return 0; 6761 6762 ASSERT(page_offset(page) == 0); 6763 6764 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6765 struct btrfs_file_extent_item); 6766 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6767 return uncompress_inline(path, page, fi); 6768 6769 copy_size = min_t(u64, PAGE_SIZE, 6770 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6771 kaddr = kmap_local_page(page); 6772 read_extent_buffer(path->nodes[0], kaddr, 6773 btrfs_file_extent_inline_start(fi), copy_size); 6774 kunmap_local(kaddr); 6775 if (copy_size < PAGE_SIZE) 6776 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6777 return 0; 6778 } 6779 6780 /* 6781 * Lookup the first extent overlapping a range in a file. 
6782 * 6783 * @inode: file to search in 6784 * @page: page to read extent data into if the extent is inline 6785 * @start: file offset 6786 * @len: length of range starting at @start 6787 * 6788 * Return the first &struct extent_map which overlaps the given range, reading 6789 * it from the B-tree and caching it if necessary. Note that there may be more 6790 * extents which overlap the given range after the returned extent_map. 6791 * 6792 * If @page is not NULL and the extent is inline, this also reads the extent 6793 * data directly into the page and marks the extent up to date in the io_tree. 6794 * 6795 * Return: ERR_PTR on error, non-NULL extent_map on success. 6796 */ 6797 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6798 struct page *page, u64 start, u64 len) 6799 { 6800 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6801 int ret = 0; 6802 u64 extent_start = 0; 6803 u64 extent_end = 0; 6804 u64 objectid = btrfs_ino(inode); 6805 int extent_type = -1; 6806 struct btrfs_path *path = NULL; 6807 struct btrfs_root *root = inode->root; 6808 struct btrfs_file_extent_item *item; 6809 struct extent_buffer *leaf; 6810 struct btrfs_key found_key; 6811 struct extent_map *em = NULL; 6812 struct extent_map_tree *em_tree = &inode->extent_tree; 6813 6814 read_lock(&em_tree->lock); 6815 em = lookup_extent_mapping(em_tree, start, len); 6816 read_unlock(&em_tree->lock); 6817 6818 if (em) { 6819 if (em->start > start || em->start + em->len <= start) 6820 free_extent_map(em); 6821 else if (em->block_start == EXTENT_MAP_INLINE && page) 6822 free_extent_map(em); 6823 else 6824 goto out; 6825 } 6826 em = alloc_extent_map(); 6827 if (!em) { 6828 ret = -ENOMEM; 6829 goto out; 6830 } 6831 em->start = EXTENT_MAP_HOLE; 6832 em->orig_start = EXTENT_MAP_HOLE; 6833 em->len = (u64)-1; 6834 em->block_len = (u64)-1; 6835 6836 path = btrfs_alloc_path(); 6837 if (!path) { 6838 ret = -ENOMEM; 6839 goto out; 6840 } 6841 6842 /* Chances are we'll be called again, so go ahead and do readahead */ 6843 path->reada = READA_FORWARD; 6844 6845 /* 6846 * The same explanation in load_free_space_cache applies here as well: 6847 * we only read when we're loading the free space cache, and at that 6848 * point the commit_root has everything we need. 6849 */ 6850 if (btrfs_is_free_space_inode(inode)) { 6851 path->search_commit_root = 1; 6852 path->skip_locking = 1; 6853 } 6854 6855 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6856 if (ret < 0) { 6857 goto out; 6858 } else if (ret > 0) { 6859 if (path->slots[0] == 0) 6860 goto not_found; 6861 path->slots[0]--; 6862 ret = 0; 6863 } 6864 6865 leaf = path->nodes[0]; 6866 item = btrfs_item_ptr(leaf, path->slots[0], 6867 struct btrfs_file_extent_item); 6868 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6869 if (found_key.objectid != objectid || 6870 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6871 /* 6872 * If we back up past the first extent, we want to move forward 6873 * and see if there is an extent in front of us; otherwise we'd 6874 * say there is a hole for our whole search range, which can 6875 * cause problems.
6876 */ 6877 extent_end = start; 6878 goto next; 6879 } 6880 6881 extent_type = btrfs_file_extent_type(leaf, item); 6882 extent_start = found_key.offset; 6883 extent_end = btrfs_file_extent_end(path); 6884 if (extent_type == BTRFS_FILE_EXTENT_REG || 6885 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6886 /* Only regular file could have regular/prealloc extent */ 6887 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6888 ret = -EUCLEAN; 6889 btrfs_crit(fs_info, 6890 "regular/prealloc extent found for non-regular inode %llu", 6891 btrfs_ino(inode)); 6892 goto out; 6893 } 6894 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6895 extent_start); 6896 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6897 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6898 path->slots[0], 6899 extent_start); 6900 } 6901 next: 6902 if (start >= extent_end) { 6903 path->slots[0]++; 6904 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6905 ret = btrfs_next_leaf(root, path); 6906 if (ret < 0) 6907 goto out; 6908 else if (ret > 0) 6909 goto not_found; 6910 6911 leaf = path->nodes[0]; 6912 } 6913 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6914 if (found_key.objectid != objectid || 6915 found_key.type != BTRFS_EXTENT_DATA_KEY) 6916 goto not_found; 6917 if (start + len <= found_key.offset) 6918 goto not_found; 6919 if (start > found_key.offset) 6920 goto next; 6921 6922 /* New extent overlaps with existing one */ 6923 em->start = start; 6924 em->orig_start = start; 6925 em->len = found_key.offset - start; 6926 em->block_start = EXTENT_MAP_HOLE; 6927 goto insert; 6928 } 6929 6930 btrfs_extent_item_to_extent_map(inode, path, item, em); 6931 6932 if (extent_type == BTRFS_FILE_EXTENT_REG || 6933 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6934 goto insert; 6935 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6936 /* 6937 * Inline extent can only exist at file offset 0. This is 6938 * ensured by tree-checker and inline extent creation path. 6939 * Thus all members representing file offsets should be zero. 6940 */ 6941 ASSERT(extent_start == 0); 6942 ASSERT(em->start == 0); 6943 6944 /* 6945 * btrfs_extent_item_to_extent_map() should have properly 6946 * initialized em members already. 6947 * 6948 * Other members are not utilized for inline extents. 6949 */ 6950 ASSERT(em->block_start == EXTENT_MAP_INLINE); 6951 ASSERT(em->len == fs_info->sectorsize); 6952 6953 ret = read_inline_extent(inode, path, page); 6954 if (ret < 0) 6955 goto out; 6956 goto insert; 6957 } 6958 not_found: 6959 em->start = start; 6960 em->orig_start = start; 6961 em->len = len; 6962 em->block_start = EXTENT_MAP_HOLE; 6963 insert: 6964 ret = 0; 6965 btrfs_release_path(path); 6966 if (em->start > start || extent_map_end(em) <= start) { 6967 btrfs_err(fs_info, 6968 "bad extent! 
em: [%llu %llu] passed [%llu %llu]", 6969 em->start, em->len, start, len); 6970 ret = -EIO; 6971 goto out; 6972 } 6973 6974 write_lock(&em_tree->lock); 6975 ret = btrfs_add_extent_mapping(inode, &em, start, len); 6976 write_unlock(&em_tree->lock); 6977 out: 6978 btrfs_free_path(path); 6979 6980 trace_btrfs_get_extent(root, inode, em); 6981 6982 if (ret) { 6983 free_extent_map(em); 6984 return ERR_PTR(ret); 6985 } 6986 return em; 6987 } 6988 6989 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 6990 struct btrfs_dio_data *dio_data, 6991 const u64 start, 6992 const u64 len, 6993 const u64 orig_start, 6994 const u64 block_start, 6995 const u64 block_len, 6996 const u64 orig_block_len, 6997 const u64 ram_bytes, 6998 const int type) 6999 { 7000 struct extent_map *em = NULL; 7001 struct btrfs_ordered_extent *ordered; 7002 7003 if (type != BTRFS_ORDERED_NOCOW) { 7004 em = create_io_em(inode, start, len, orig_start, block_start, 7005 block_len, orig_block_len, ram_bytes, 7006 BTRFS_COMPRESS_NONE, /* compress_type */ 7007 type); 7008 if (IS_ERR(em)) 7009 goto out; 7010 } 7011 ordered = btrfs_alloc_ordered_extent(inode, start, len, len, 7012 block_start, block_len, 0, 7013 (1 << type) | 7014 (1 << BTRFS_ORDERED_DIRECT), 7015 BTRFS_COMPRESS_NONE); 7016 if (IS_ERR(ordered)) { 7017 if (em) { 7018 free_extent_map(em); 7019 btrfs_drop_extent_map_range(inode, start, 7020 start + len - 1, false); 7021 } 7022 em = ERR_CAST(ordered); 7023 } else { 7024 ASSERT(!dio_data->ordered); 7025 dio_data->ordered = ordered; 7026 } 7027 out: 7028 7029 return em; 7030 } 7031 7032 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 7033 struct btrfs_dio_data *dio_data, 7034 u64 start, u64 len) 7035 { 7036 struct btrfs_root *root = inode->root; 7037 struct btrfs_fs_info *fs_info = root->fs_info; 7038 struct extent_map *em; 7039 struct btrfs_key ins; 7040 u64 alloc_hint; 7041 int ret; 7042 7043 alloc_hint = get_extent_allocation_hint(inode, start, len); 7044 again: 7045 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7046 0, alloc_hint, &ins, 1, 1); 7047 if (ret == -EAGAIN) { 7048 ASSERT(btrfs_is_zoned(fs_info)); 7049 wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH, 7050 TASK_UNINTERRUPTIBLE); 7051 goto again; 7052 } 7053 if (ret) 7054 return ERR_PTR(ret); 7055 7056 em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start, 7057 ins.objectid, ins.offset, ins.offset, 7058 ins.offset, BTRFS_ORDERED_REGULAR); 7059 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7060 if (IS_ERR(em)) 7061 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 7062 1); 7063 7064 return em; 7065 } 7066 7067 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7068 { 7069 struct btrfs_block_group *block_group; 7070 bool readonly = false; 7071 7072 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7073 if (!block_group || block_group->ro) 7074 readonly = true; 7075 if (block_group) 7076 btrfs_put_block_group(block_group); 7077 return readonly; 7078 } 7079 7080 /* 7081 * Check if we can do a NOCOW write into the range [@offset, @offset + @len) 7082 * 7083 * @offset: File offset 7084 * @len: The length to write; will be updated to the NOCOW writeable 7085 * range 7086 * @orig_start: (optional) Return the original file offset of the file extent 7087 * @orig_block_len: (optional) Return the original on-disk length of the file extent 7088 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7089 * @nowait: If true, do not block; the btree search is done in nonblocking mode (path->nowait) *
@strict: if true, omit optimizations that might force us into unnecessary 7090 * cow. e.g., don't trust generation number. 7091 * 7092 * Return: 7093 * >0 and update @len if we can do nocow write 7094 * 0 if we can't do nocow write 7095 * <0 if error happened 7096 * 7097 * NOTE: This only checks the file extents, caller is responsible to wait for 7098 * any ordered extents. 7099 */ 7100 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7101 u64 *orig_start, u64 *orig_block_len, 7102 u64 *ram_bytes, bool nowait, bool strict) 7103 { 7104 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 7105 struct can_nocow_file_extent_args nocow_args = { 0 }; 7106 struct btrfs_path *path; 7107 int ret; 7108 struct extent_buffer *leaf; 7109 struct btrfs_root *root = BTRFS_I(inode)->root; 7110 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7111 struct btrfs_file_extent_item *fi; 7112 struct btrfs_key key; 7113 int found_type; 7114 7115 path = btrfs_alloc_path(); 7116 if (!path) 7117 return -ENOMEM; 7118 path->nowait = nowait; 7119 7120 ret = btrfs_lookup_file_extent(NULL, root, path, 7121 btrfs_ino(BTRFS_I(inode)), offset, 0); 7122 if (ret < 0) 7123 goto out; 7124 7125 if (ret == 1) { 7126 if (path->slots[0] == 0) { 7127 /* can't find the item, must cow */ 7128 ret = 0; 7129 goto out; 7130 } 7131 path->slots[0]--; 7132 } 7133 ret = 0; 7134 leaf = path->nodes[0]; 7135 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7136 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7137 key.type != BTRFS_EXTENT_DATA_KEY) { 7138 /* not our file or wrong item type, must cow */ 7139 goto out; 7140 } 7141 7142 if (key.offset > offset) { 7143 /* Wrong offset, must cow */ 7144 goto out; 7145 } 7146 7147 if (btrfs_file_extent_end(path) <= offset) 7148 goto out; 7149 7150 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7151 found_type = btrfs_file_extent_type(leaf, fi); 7152 if (ram_bytes) 7153 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7154 7155 nocow_args.start = offset; 7156 nocow_args.end = offset + *len - 1; 7157 nocow_args.strict = strict; 7158 nocow_args.free_path = true; 7159 7160 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7161 /* can_nocow_file_extent() has freed the path. */ 7162 path = NULL; 7163 7164 if (ret != 1) { 7165 /* Treat errors as not being able to NOCOW. 
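* Falling back to COW is always safe; we only lose the optimization.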
*/ 7166 ret = 0; 7167 goto out; 7168 } 7169 7170 ret = 0; 7171 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7172 goto out; 7173 7174 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7175 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7176 u64 range_end; 7177 7178 range_end = round_up(offset + nocow_args.num_bytes, 7179 root->fs_info->sectorsize) - 1; 7180 ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC); 7181 if (ret) { 7182 ret = -EAGAIN; 7183 goto out; 7184 } 7185 } 7186 7187 if (orig_start) 7188 *orig_start = key.offset - nocow_args.extent_offset; 7189 if (orig_block_len) 7190 *orig_block_len = nocow_args.disk_num_bytes; 7191 7192 *len = nocow_args.num_bytes; 7193 ret = 1; 7194 out: 7195 btrfs_free_path(path); 7196 return ret; 7197 } 7198 7199 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7200 struct extent_state **cached_state, 7201 unsigned int iomap_flags) 7202 { 7203 const bool writing = (iomap_flags & IOMAP_WRITE); 7204 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7205 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7206 struct btrfs_ordered_extent *ordered; 7207 int ret = 0; 7208 7209 while (1) { 7210 if (nowait) { 7211 if (!try_lock_extent(io_tree, lockstart, lockend, 7212 cached_state)) 7213 return -EAGAIN; 7214 } else { 7215 lock_extent(io_tree, lockstart, lockend, cached_state); 7216 } 7217 /* 7218 * We're concerned with the entire range that we're going to be 7219 * doing DIO to, so we need to make sure there are no ordered 7220 * extents in this range. 7221 */ 7222 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7223 lockend - lockstart + 1); 7224 7225 /* 7226 * We need to make sure there are no buffered pages in this 7227 * range either; we could have raced between the invalidate in 7228 * generic_file_direct_write and locking the extent. The 7229 * invalidate needs to happen so that reads after a write do not 7230 * get stale data. 7231 */ 7232 if (!ordered && 7233 (!writing || !filemap_range_has_page(inode->i_mapping, 7234 lockstart, lockend))) 7235 break; 7236 7237 unlock_extent(io_tree, lockstart, lockend, cached_state); 7238 7239 if (ordered) { 7240 if (nowait) { 7241 btrfs_put_ordered_extent(ordered); 7242 ret = -EAGAIN; 7243 break; 7244 } 7245 /* 7246 * If we are doing a DIO read and the ordered extent we 7247 * found is for a buffered write, we cannot wait for it 7248 * to complete and retry, because if we do so we can 7249 * deadlock with concurrent buffered writes on page 7250 * locks. This happens only if our DIO read covers more 7251 * than one extent map, if at this point it has already 7252 * created an ordered extent for a previous extent map 7253 * and locked its range in the inode's io tree, and a 7254 * concurrent write against that previous extent map's 7255 * range and this range has started (we unlock the ranges 7256 * in the io tree only when the bios complete and 7257 * buffered writes always lock pages before attempting 7258 * to lock the range in the io tree). 7259 */ 7260 if (writing || 7261 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7262 btrfs_start_ordered_extent(ordered); 7263 else 7264 ret = nowait ?
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                              struct extent_state **cached_state,
                              unsigned int iomap_flags)
{
        const bool writing = (iomap_flags & IOMAP_WRITE);
        const bool nowait = (iomap_flags & IOMAP_NOWAIT);
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
        int ret = 0;

        while (1) {
                if (nowait) {
                        if (!try_lock_extent(io_tree, lockstart, lockend,
                                             cached_state))
                                return -EAGAIN;
                } else {
                        lock_extent(io_tree, lockstart, lockend, cached_state);
                }
                /*
                 * We're concerned with the entire range that we're going to be
                 * doing DIO to, so we need to make sure there are no ordered
                 * extents in this range.
                 */
                ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
                                                     lockend - lockstart + 1);

                /*
                 * We need to make sure there are no buffered pages in this
                 * range either; we could have raced between the invalidate in
                 * generic_file_direct_write() and locking the extent. The
                 * invalidate needs to happen so that reads after a write do not
                 * get stale data.
                 */
                if (!ordered &&
                    (!writing || !filemap_range_has_page(inode->i_mapping,
                                                         lockstart, lockend)))
                        break;

                unlock_extent(io_tree, lockstart, lockend, cached_state);

                if (ordered) {
                        if (nowait) {
                                btrfs_put_ordered_extent(ordered);
                                ret = -EAGAIN;
                                break;
                        }
                        /*
                         * If we are doing a DIO read and the ordered extent we
                         * found is for a buffered write, we cannot wait for it
                         * to complete and retry, because if we do so we can
                         * deadlock with concurrent buffered writes on page
                         * locks. This happens only if our DIO read covers more
                         * than one extent map, if at this point it has already
                         * created an ordered extent for a previous extent map
                         * and locked its range in the inode's io tree, and a
                         * concurrent write against that previous extent map's
                         * range and this range has started (we unlock the
                         * ranges in the io tree only when the bios complete and
                         * buffered writes always lock pages before attempting
                         * to lock a range in the io tree).
                         */
                        if (writing ||
                            test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
                                btrfs_start_ordered_extent(ordered);
                        else
                                ret = nowait ? -EAGAIN : -ENOTBLK;
                        btrfs_put_ordered_extent(ordered);
                } else {
                        /*
                         * We could trigger writeback for this range (and wait
                         * for it to complete) and then invalidate the pages for
                         * this range (through invalidate_inode_pages2_range()),
                         * but that can lead us to a deadlock with a concurrent
                         * call to readahead (a buffered read or a defrag call
                         * triggered a readahead) on a page lock due to an
                         * ordered dio extent we created before but do not yet
                         * have a corresponding bio submitted (so it cannot
                         * complete), which makes readahead wait for that
                         * ordered extent to complete while holding a lock on
                         * that page.
                         */
                        ret = nowait ? -EAGAIN : -ENOTBLK;
                }

                if (ret)
                        break;

                cond_resched();
        }

        return ret;
}

/* The callers of this must take lock_extent(). */
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
                                       u64 len, u64 orig_start, u64 block_start,
                                       u64 block_len, u64 orig_block_len,
                                       u64 ram_bytes, int compress_type,
                                       int type)
{
        struct extent_map *em;
        int ret;

        /*
         * Note the missing NOCOW type.
         *
         * For pure NOCOW writes, we should not create an io extent map, but
         * just reuse the existing one.
         * Only PREALLOC writes (NOCOW writes into a preallocated range) can
         * create an io extent map.
         */
        ASSERT(type == BTRFS_ORDERED_PREALLOC ||
               type == BTRFS_ORDERED_COMPRESSED ||
               type == BTRFS_ORDERED_REGULAR);

        switch (type) {
        case BTRFS_ORDERED_PREALLOC:
                /* Uncompressed extents. */
                ASSERT(block_len == len);

                /* We're only referring to part of a larger preallocated extent. */
                ASSERT(block_len <= ram_bytes);
                break;
        case BTRFS_ORDERED_REGULAR:
                /* Uncompressed extents. */
                ASSERT(block_len == len);

                /* COW results in a new extent matching our file extent size. */
                ASSERT(orig_block_len == len);
                ASSERT(ram_bytes == len);

                /* Since it's a new extent, we should not have any offset. */
                ASSERT(orig_start == start);
                break;
        case BTRFS_ORDERED_COMPRESSED:
                /* Must be compressed. */
                ASSERT(compress_type != BTRFS_COMPRESS_NONE);

                /*
                 * An encoded write can make us refer to part of the
                 * uncompressed extent.
                 */
                ASSERT(len <= ram_bytes);
                break;
        }

        em = alloc_extent_map();
        if (!em)
                return ERR_PTR(-ENOMEM);

        em->start = start;
        em->orig_start = orig_start;
        em->len = len;
        em->block_len = block_len;
        em->block_start = block_start;
        em->orig_block_len = orig_block_len;
        em->ram_bytes = ram_bytes;
        em->generation = -1;
        em->flags |= EXTENT_FLAG_PINNED;
        if (type == BTRFS_ORDERED_COMPRESSED)
                extent_map_set_compression(em, compress_type);

        ret = btrfs_replace_extent_map_range(inode, em, true);
        if (ret) {
                free_extent_map(em);
                return ERR_PTR(ret);
        }

        /* The em has two refs now; the caller needs to do one free_extent_map(). */
        return em;
}
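
/*
 * Parameter sketch (illustrative, derived from the BTRFS_ORDERED_REGULAR
 * assertions above; not a call taken from this file): for a plain COW write
 * every length collapses to the file extent size and there is no offset
 * into a larger on-disk extent, so a call degenerates to
 *
 *	em = create_io_em(inode, start, len,
 *			  start,		(orig_start == start)
 *			  block_start,
 *			  len, len, len,	(block_len, orig_block_len, ram_bytes)
 *			  BTRFS_COMPRESS_NONE, BTRFS_ORDERED_REGULAR);
 *
 * PREALLOC and COMPRESSED relax this: both may refer to only part of a
 * larger extent, which is why they only assert block_len/len <= ram_bytes.
 */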
static int btrfs_get_blocks_direct_write(struct extent_map **map,
                                         struct inode *inode,
                                         struct btrfs_dio_data *dio_data,
                                         u64 start, u64 *lenp,
                                         unsigned int iomap_flags)
{
        const bool nowait = (iomap_flags & IOMAP_NOWAIT);
        struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
        struct extent_map *em = *map;
        int type;
        u64 block_start, orig_start, orig_block_len, ram_bytes;
        struct btrfs_block_group *bg;
        bool can_nocow = false;
        bool space_reserved = false;
        u64 len = *lenp;
        u64 prev_len;
        int ret = 0;

        /*
         * We don't allocate a new extent in the following cases:
         *
         * 1) The inode is marked as NODATACOW. In this case we'll just use the
         *    existing extent.
         * 2) The extent is marked as PREALLOC. We're good to go here and can
         *    just use the extent.
         */
        if ((em->flags & EXTENT_FLAG_PREALLOC) ||
            ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
             em->block_start != EXTENT_MAP_HOLE)) {
                if (em->flags & EXTENT_FLAG_PREALLOC)
                        type = BTRFS_ORDERED_PREALLOC;
                else
                        type = BTRFS_ORDERED_NOCOW;
                len = min(len, em->len - (start - em->start));
                block_start = em->block_start + (start - em->start);

                if (can_nocow_extent(inode, start, &len, &orig_start,
                                     &orig_block_len, &ram_bytes, false, false) == 1) {
                        bg = btrfs_inc_nocow_writers(fs_info, block_start);
                        if (bg)
                                can_nocow = true;
                }
        }

        prev_len = len;
        if (can_nocow) {
                struct extent_map *em2;

                /* We can NOCOW, so only need to reserve metadata space. */
                ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
                                                      nowait);
                if (ret < 0) {
                        /* Our caller expects us to free the input extent map. */
                        free_extent_map(em);
                        *map = NULL;
                        btrfs_dec_nocow_writers(bg);
                        if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
                                ret = -EAGAIN;
                        goto out;
                }
                space_reserved = true;

                em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
                                              orig_start, block_start,
                                              len, orig_block_len,
                                              ram_bytes, type);
                btrfs_dec_nocow_writers(bg);
                if (type == BTRFS_ORDERED_PREALLOC) {
                        free_extent_map(em);
                        *map = em2;
                        em = em2;
                }

                if (IS_ERR(em2)) {
                        ret = PTR_ERR(em2);
                        goto out;
                }

                dio_data->nocow_done = true;
        } else {
                /* Our caller expects us to free the input extent map. */
                free_extent_map(em);
                *map = NULL;

                if (nowait) {
                        ret = -EAGAIN;
                        goto out;
                }

                /*
                 * If we could not allocate data space before locking the file
                 * range and we can't do a NOCOW write, then we have to fail.
                 */
                if (!dio_data->data_space_reserved) {
                        ret = -ENOSPC;
                        goto out;
                }

                /*
                 * We have to COW and we have already reserved data space
                 * before, so now we reserve only metadata.
                 */
                ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
                                                      false);
                if (ret < 0)
                        goto out;
                space_reserved = true;

                em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
                if (IS_ERR(em)) {
                        ret = PTR_ERR(em);
                        goto out;
                }
                *map = em;
                len = min(len, em->len - (start - em->start));
                if (len < prev_len)
                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        prev_len - len, true);
        }

        /*
         * We have created our ordered extent, so we can now release our
         * reservation for an outstanding extent.
         */
        btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);

        /*
         * Need to update the i_size under the extent lock so buffered
         * readers will get the updated i_size when we unlock.
         */
        if (start + len > i_size_read(inode))
                i_size_write(inode, start + len);
out:
        if (ret && space_reserved) {
                btrfs_delalloc_release_extents(BTRFS_I(inode), len);
                btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
        }
        *lenp = len;
        return ret;
}
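
/*
 * Worked example (hypothetical numbers) for the COW branch above: if the
 * caller asked for len = 1 MiB but btrfs_new_extent_direct() could only
 * allocate a 256 KiB extent at @start, then len shrinks to 256 KiB and the
 * metadata reservation for the unused prev_len - len = 768 KiB is released
 * immediately. The btrfs_delalloc_release_extents() call that follows drops
 * the outstanding-extent reservation for the full prev_len, since the
 * ordered extent now tracks the in-flight I/O.
 */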
static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
                loff_t length, unsigned int flags, struct iomap *iomap,
                struct iomap *srcmap)
{
        struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
        struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
        struct extent_map *em;
        struct extent_state *cached_state = NULL;
        struct btrfs_dio_data *dio_data = iter->private;
        u64 lockstart, lockend;
        const bool write = !!(flags & IOMAP_WRITE);
        int ret = 0;
        u64 len = length;
        const u64 data_alloc_len = length;
        bool unlock_extents = false;

        /*
         * We could potentially fault if we have a buffer > PAGE_SIZE, and if
         * we're NOWAIT we may submit a bio for a partial range and return
         * EIOCBQUEUED, which would result in an errant short read.
         *
         * The best way to handle this would be to allow for partial completions
         * of iocb's, so we could submit the partial bio, return and fault in
         * the rest of the pages, and then submit the io for the rest of the
         * range. However we don't have that currently, so simply return
         * -EAGAIN at this point so that the normal path is used.
         */
        if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
                return -EAGAIN;

        /*
         * Cap the size of reads to that usually seen in buffered I/O as we need
         * to allocate a contiguous array for the checksums.
         */
        if (!write)
                len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);

        lockstart = start;
        lockend = start + len - 1;

        /*
         * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
         * enough if we've written compressed pages to this area, so we need to
         * flush the dirty pages again to make absolutely sure that any
         * outstanding dirty pages are on disk - the first flush only starts
         * compression on the data, while keeping the pages locked, so by the
         * time the second flush returns we know bios for the compressed pages
         * were submitted and finished, and the pages are no longer under
         * writeback.
         *
         * If we have a NOWAIT request and we have any pages in the range that
         * are locked, likely due to compression still in progress, we don't want
         * to block on page locks. We also don't want to block on pages marked as
         * dirty or under writeback (same as for the non-compression case).
         * iomap_dio_rw() did the same check, but after that and before we got
         * here, mmap'ed writes may have happened or buffered reads started
         * (readpage() and readahead(), which lock pages), as we haven't locked
         * the file range yet.
         */
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags)) {
                if (flags & IOMAP_NOWAIT) {
                        if (filemap_range_needs_writeback(inode->i_mapping,
                                                          lockstart, lockend))
                                return -EAGAIN;
                } else {
                        ret = filemap_fdatawrite_range(inode->i_mapping, start,
                                                       start + length - 1);
                        if (ret)
                                return ret;
                }
        }

        memset(dio_data, 0, sizeof(*dio_data));

        /*
         * We always try to allocate data space and must do it before locking
         * the file range, to avoid deadlocks with concurrent writes to the same
         * range if the range has several extents and the writes don't expand the
         * current i_size (the inode lock is taken in shared mode). If we fail to
         * allocate data space here we continue and later, after locking the
         * file range, we fail with ENOSPC only if we figure out we cannot do a
         * NOCOW write.
         */
        if (write && !(flags & IOMAP_NOWAIT)) {
                ret = btrfs_check_data_free_space(BTRFS_I(inode),
                                                  &dio_data->data_reserved,
                                                  start, data_alloc_len, false);
                if (!ret)
                        dio_data->data_space_reserved = true;
                else if (ret && !(BTRFS_I(inode)->flags &
                                  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
                        goto err;
        }

        /*
         * If this errors out it's because we couldn't invalidate the pagecache
         * for this range and we need to fall back to buffered IO, or we are
         * doing a NOWAIT read/write and we need to block.
         */
        ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
        if (ret < 0)
                goto err;

        em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
        if (IS_ERR(em)) {
                ret = PTR_ERR(em);
                goto unlock_err;
        }

        /*
         * Ok, for INLINE and COMPRESSED extents we need to fall back to
         * buffered io. INLINE is special, and we could probably kludge it in
         * here, but it's still buffered so for safety let's just fall back to
         * the generic buffered path.
         *
         * For COMPRESSED we _have_ to read the entire extent in so we can
         * decompress it, so there will be buffering required no matter what we
         * do, so go ahead and fall back to buffered.
         *
         * We return -ENOTBLK because that's what makes DIO go ahead and go back
         * to buffered IO. Don't blame me, this is the price we pay for using
         * the generic code.
         */
        if (extent_map_is_compressed(em) ||
            em->block_start == EXTENT_MAP_INLINE) {
                free_extent_map(em);
                /*
                 * If we are in a NOWAIT context, return -EAGAIN in order to
                 * fall back to buffered IO. This is not only because we can
                 * block with buffered IO (no support for NOWAIT semantics at
                 * the moment) but also to avoid returning short reads to user
                 * space - this happens if we were able to read some data from
                 * previous non-compressed extents and then when we fall back to
                 * buffered IO, at btrfs_file_read_iter() by calling
                 * filemap_read(), we fail to fault in pages for the read buffer,
                 * in which case filemap_read() returns a short read (the number
                 * of bytes previously read is > 0, so it does not return -EFAULT).
                 */
                ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
                goto unlock_err;
        }

        len = min(len, em->len - (start - em->start));

        /*
         * If we have a NOWAIT request and the range contains multiple extents
         * (or a mix of extents and holes), then we return -EAGAIN to make the
         * caller fall back to a context where it can do a blocking (without
         * NOWAIT) request. This way we avoid doing partial IO and returning
         * success to the caller, which is not optimal for writes and for reads
         * it can result in unexpected behaviour for an application.
         *
         * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
         * iomap_dio_rw(), we can end up returning less data than what the
         * caller asked for, resulting in an unexpected, and incorrect, short
         * read. That is, the caller asked to read N bytes and we return less
         * than that, which is wrong unless we are crossing EOF. This happens if
         * we get a page fault error when trying to fault in pages for the
         * buffer that is associated to the struct iov_iter passed to
         * iomap_dio_rw(), and we have previously submitted bios for other
         * extents in the range, in which case iomap_dio_rw() may return us
         * EIOCBQUEUED if not all of those bios have completed by the time we
         * get the page fault error, which we return back to our caller - we
         * should only return EIOCBQUEUED after we have submitted bios for all
         * the extents in the range.
         */
        if ((flags & IOMAP_NOWAIT) && len < length) {
                free_extent_map(em);
                ret = -EAGAIN;
                goto unlock_err;
        }

        if (write) {
                ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
                                                    start, &len, flags);
                if (ret < 0)
                        goto unlock_err;
                unlock_extents = true;
                /* Recalc len in case the new em is smaller than requested. */
                len = min(len, em->len - (start - em->start));
                if (dio_data->data_space_reserved) {
                        u64 release_offset;
                        u64 release_len = 0;

                        if (dio_data->nocow_done) {
                                release_offset = start;
                                release_len = data_alloc_len;
                        } else if (len < data_alloc_len) {
                                release_offset = start + len;
                                release_len = data_alloc_len - len;
                        }

                        if (release_len > 0)
                                btrfs_free_reserved_data_space(BTRFS_I(inode),
                                                               dio_data->data_reserved,
                                                               release_offset,
                                                               release_len);
                }
        } else {
                /*
                 * We need to unlock only the end area that we aren't using.
                 * The rest is going to be unlocked by the endio routine.
                 */
                lockstart = start + len;
                if (lockstart < lockend)
                        unlock_extents = true;
        }

        if (unlock_extents)
                unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                              &cached_state);
        else
                free_extent_state(cached_state);

        /*
         * Translate extent map information to iomap.
         * We trim the extents (and move the addr) even though iomap code does
         * that, since we have locked only the parts we are performing I/O in.
         */
        if ((em->block_start == EXTENT_MAP_HOLE) ||
            ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_HOLE;
        } else {
                iomap->addr = em->block_start + (start - em->start);
                iomap->type = IOMAP_MAPPED;
        }
        iomap->offset = start;
        iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
        iomap->length = len;
        free_extent_map(em);

        return 0;

unlock_err:
        unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                      &cached_state);
err:
        if (dio_data->data_space_reserved) {
                btrfs_free_reserved_data_space(BTRFS_I(inode),
                                               dio_data->data_reserved,
                                               start, data_alloc_len);
                extent_changeset_free(dio_data->data_reserved);
        }

        return ret;
}

static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                               ssize_t written, unsigned int flags, struct iomap *iomap)
{
        struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
        struct btrfs_dio_data *dio_data = iter->private;
        size_t submitted = dio_data->submitted;
        const bool write = !!(flags & IOMAP_WRITE);
        int ret = 0;

        if (!write && (iomap->type == IOMAP_HOLE)) {
                /* If reading from a hole, unlock and return. */
                unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
                              NULL);
                return 0;
        }

        if (submitted < length) {
                pos += submitted;
                length -= submitted;
                if (write)
                        btrfs_finish_ordered_extent(dio_data->ordered, NULL,
                                                    pos, length, false);
                else
                        unlock_extent(&BTRFS_I(inode)->io_tree, pos,
                                      pos + length - 1, NULL);
                ret = -ENOTBLK;
        }
        if (write) {
                btrfs_put_ordered_extent(dio_data->ordered);
                dio_data->ordered = NULL;
        }

        if (write)
                extent_changeset_free(dio_data->data_reserved);
        return ret;
}

static void btrfs_dio_end_io(struct btrfs_bio *bbio)
{
        struct btrfs_dio_private *dip =
                container_of(bbio, struct btrfs_dio_private, bbio);
        struct btrfs_inode *inode = bbio->inode;
        struct bio *bio = &bbio->bio;

        if (bio->bi_status) {
                btrfs_warn(inode->root->fs_info,
                           "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
                           btrfs_ino(inode), bio->bi_opf,
                           dip->file_offset, dip->bytes, bio->bi_status);
        }

        if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                btrfs_finish_ordered_extent(bbio->ordered, NULL,
                                            dip->file_offset, dip->bytes,
                                            !bio->bi_status);
        } else {
                unlock_extent(&inode->io_tree, dip->file_offset,
                              dip->file_offset + dip->bytes - 1, NULL);
        }

        bbio->bio.bi_private = bbio->private;
        iomap_dio_bio_end_io(bio);
}

static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
                                loff_t file_offset)
{
        struct btrfs_bio *bbio = btrfs_bio(bio);
        struct btrfs_dio_private *dip =
                container_of(bbio, struct btrfs_dio_private, bbio);
        struct btrfs_dio_data *dio_data = iter->private;

        btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
                       btrfs_dio_end_io, bio->bi_private);
        bbio->inode = BTRFS_I(iter->inode);
        bbio->file_offset = file_offset;

        dip->file_offset = file_offset;
        dip->bytes = bio->bi_iter.bi_size;

        dio_data->submitted += bio->bi_iter.bi_size;

        /*
         * Check if we are doing a partial write. If we are, we need to split
         * the ordered extent to match the submitted bio. Hang on to the
         * remaining unfinishable ordered_extent in dio_data so that it can be
         * cancelled in iomap_end to avoid a deadlock wherein faulting the
         * remaining pages is blocked on the outstanding ordered extent.
         */
        if (iter->flags & IOMAP_WRITE) {
                int ret;

                ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
                if (ret) {
                        btrfs_finish_ordered_extent(dio_data->ordered, NULL,
                                                    file_offset, dip->bytes,
                                                    !ret);
                        bio->bi_status = errno_to_blk_status(ret);
                        iomap_dio_bio_end_io(bio);
                        return;
                }
        }

        btrfs_submit_bio(bbio, 0);
}

static const struct iomap_ops btrfs_dio_iomap_ops = {
        .iomap_begin = btrfs_dio_iomap_begin,
        .iomap_end = btrfs_dio_iomap_end,
};

static const struct iomap_dio_ops btrfs_dio_ops = {
        .submit_io = btrfs_dio_submit_io,
        .bio_set = &btrfs_dio_bioset,
};

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
{
        struct btrfs_dio_data data = { 0 };

        return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
                            IOMAP_DIO_PARTIAL, &data, done_before);
}

struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
                                  size_t done_before)
{
        struct btrfs_dio_data data = { 0 };

        return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
                              IOMAP_DIO_PARTIAL, &data, done_before);
}
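
/*
 * Usage sketch (illustrative; the real retry loop lives in the read/write
 * iter code in file.c): because IOMAP_DIO_PARTIAL is passed above, a page
 * fault mid-I/O yields a short result instead of an error. The caller then
 * faults the buffer in and calls again, passing the bytes already completed
 * as @done_before so that iomap can return one combined result:
 *
 *	ret = btrfs_dio_read(iocb, to, 0);
 *	if (ret == -EFAULT) {
 *		fault_in_iov_iter_writeable(to, SZ_16K);
 *		ret = btrfs_dio_read(iocb, to, bytes_already_done);
 *	}
 *
 * bytes_already_done is a stand-in for the caller's bookkeeping of how much
 * of the request previously completed.
 */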
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        u64 start, u64 len)
{
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        int ret;

        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
        if (ret)
                return ret;

        /*
         * fiemap_prep() called filemap_write_and_wait() for the whole possible
         * file range (0 to LLONG_MAX), but that is not enough if we have
         * compression enabled. The first filemap_fdatawrite_range() only kicks
         * in the compression of data (in an async thread) and will return
         * before the compression is done and writeback is started. A second
         * filemap_fdatawrite_range() is needed to wait for the compression to
         * complete and writeback to start. We also need to wait for ordered
         * extents to complete, because our fiemap implementation uses mainly
         * file extent items to list the extents, searching for extent maps
         * only for file ranges with holes or prealloc extents to figure out
         * if we have delalloc in those ranges.
         */
        if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
                ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
                if (ret)
                        return ret;
        }

        btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);

        /*
         * We did an initial flush to avoid holding the inode's lock while
         * triggering writeback and waiting for the completion of IO and ordered
         * extents. Now after we locked the inode we do it again, because it's
         * possible a new write may have happened in between those two steps.
         */
        if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
                ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
                if (ret) {
                        btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
                        return ret;
                }
        }

        ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
        btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);

        return ret;
}
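
/*
 * Consumer sketch (illustrative userspace, not kernel code): btrfs_fiemap()
 * backs the generic FS_IOC_FIEMAP ioctl, so the double flush above runs
 * whenever a caller sets FIEMAP_FLAG_SYNC:
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   n * sizeof(struct fiemap_extent));
 *
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	fm->fm_extent_count = n;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * Without FIEMAP_FLAG_SYNC, not-yet-written delalloc ranges are detected via
 * the extent map checks mentioned in the comment above rather than read from
 * on-disk file extent items.
 */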
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet
 * released. If we continue to release/invalidate the page, we could cause
 * a use-after-free on the subpage spinlock. So this function is here to
 * spin and wait for the subpage spinlock.
 */
static void wait_subpage_spinlock(struct page *page)
{
        struct btrfs_fs_info *fs_info = page_to_fs_info(page);
        struct folio *folio = page_folio(page);
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, page->mapping))
                return;

        ASSERT(folio_test_private(folio) && folio_get_private(folio));
        subpage = folio_get_private(folio);

        /*
         * This may look insane as we just acquire the spinlock and release it,
         * without doing anything. But we just want to make sure no one is
         * still holding the subpage spinlock.
         * And since the page is neither dirty nor under writeback, and we have
         * the page locked, the only possible way to hold the spinlock is from
         * the endio function to clear page writeback.
         *
         * Here we just acquire the spinlock so that all existing callers
         * should exit and we're safe to release/invalidate the page.
         */
        spin_lock_irq(&subpage->lock);
        spin_unlock_irq(&subpage->lock);
}

static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
        if (try_release_extent_mapping(&folio->page, gfp_flags)) {
                wait_subpage_spinlock(&folio->page);
                clear_page_extent_mapped(&folio->page);
                return true;
        }
        return false;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
        if (folio_test_writeback(folio) || folio_test_dirty(folio))
                return false;
        return __btrfs_release_folio(folio, gfp_flags);
}

#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
                               struct folio *dst, struct folio *src,
                               enum migrate_mode mode)
{
        int ret = filemap_migrate_folio(mapping, dst, src, mode);

        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;

        if (folio_test_ordered(src)) {
                folio_clear_ordered(src);
                folio_set_ordered(dst);
        }

        return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif

static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
                                   size_t length)
{
        struct btrfs_inode *inode = folio_to_inode(folio);
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct extent_io_tree *tree = &inode->io_tree;
        struct extent_state *cached_state = NULL;
        u64 page_start = folio_pos(folio);
        u64 page_end = page_start + folio_size(folio) - 1;
        u64 cur;
        int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

        /*
         * We have the folio locked, so no new ordered extent can be created on
         * it, nor can a bio be submitted for it.
         *
         * But an already submitted bio can still be finished on this folio.
         * Furthermore, the endio function won't skip a folio that has Ordered
         * (Private2) already cleared, so it's possible for endio and
         * invalidate_folio to do the same ordered extent accounting twice
         * on one folio.
         *
         * So here we wait for any submitted bios to finish, so that we won't
         * do double ordered extent accounting on the same folio.
         */
        folio_wait_writeback(folio);
        wait_subpage_spinlock(&folio->page);

        /*
         * For the subpage case, we have call sites like
         * btrfs_punch_hole_lock_range() which pass a range not aligned to the
         * sectorsize.
         * If the range doesn't cover the full folio, we don't need to and
         * shouldn't clear page extent mapped, as folio->private can still
         * record subpage dirty bits for other parts of the range.
         *
         * For cases that invalidate the full folio even when the range doesn't
         * cover the full folio, like invalidating the last folio, we're
         * still safe to wait for the ordered extent to finish.
         */
        if (!(offset == 0 && length == folio_size(folio))) {
                btrfs_release_folio(folio, GFP_NOFS);
                return;
        }

        if (!inode_evicting)
                lock_extent(tree, page_start, page_end, &cached_state);

        cur = page_start;
        while (cur < page_end) {
                struct btrfs_ordered_extent *ordered;
                u64 range_end;
                u32 range_len;
                u32 extra_flags = 0;

                ordered = btrfs_lookup_first_ordered_range(inode, cur,
                                                           page_end + 1 - cur);
                if (!ordered) {
                        range_end = page_end;
                        /*
                         * No ordered extent covering this range, we are safe
                         * to delete all extent states in the range.
                         */
                        extra_flags = EXTENT_CLEAR_ALL_BITS;
                        goto next;
                }
                if (ordered->file_offset > cur) {
                        /*
                         * There is a range between [cur, oe->file_offset) not
                         * covered by any ordered extent.
                         * We are safe to delete all extent states, and handle
                         * the ordered extent in the next iteration.
                         */
                        range_end = ordered->file_offset - 1;
                        extra_flags = EXTENT_CLEAR_ALL_BITS;
                        goto next;
                }

                range_end = min(ordered->file_offset + ordered->num_bytes - 1,
                                page_end);
                ASSERT(range_end + 1 - cur < U32_MAX);
                range_len = range_end + 1 - cur;
                if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
                        /*
                         * If Ordered (Private2) is cleared, it means endio has
                         * already been executed for the range.
                         * We can't delete the extent states as
                         * btrfs_finish_ordered_io() may still use some of them.
                         */
                        goto next;
                }
                btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);

                /*
                 * IO on this page will never be started, so we need to account
                 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
                 * here; we must leave that up to the ordered extent completion.
                 *
                 * This will also unlock the range for incoming
                 * btrfs_finish_ordered_io().
                 */
                if (!inode_evicting)
                        clear_extent_bit(tree, cur, range_end,
                                         EXTENT_DELALLOC |
                                         EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                         EXTENT_DEFRAG, &cached_state);

                spin_lock_irq(&inode->ordered_tree_lock);
                set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
                ordered->truncated_len = min(ordered->truncated_len,
                                             cur - ordered->file_offset);
                spin_unlock_irq(&inode->ordered_tree_lock);

                /*
                 * If the ordered extent has finished, we're safe to delete all
                 * the extent states of the range, otherwise
                 * btrfs_finish_ordered_io() will get executed by endio for
                 * other pages, so we can't delete extent states.
                 */
                if (btrfs_dec_test_ordered_pending(inode, &ordered,
                                                   cur, range_end + 1 - cur)) {
                        btrfs_finish_ordered_io(ordered);
                        /*
                         * The ordered extent has finished, now we're again
                         * safe to delete all extent states of the range.
                         */
                        extra_flags = EXTENT_CLEAR_ALL_BITS;
                }
next:
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
                /*
                 * Qgroup reserved space handler
                 * Sector(s) here will be either:
                 *
                 * 1) Already written to disk or bio already finished
                 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
                 *    Qgroup will be handled by its qgroup_record then.
                 *    btrfs_qgroup_free_data() call will do nothing here.
                 *
                 * 2) Not written to disk yet
                 *    Then btrfs_qgroup_free_data() call will clear the
                 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
                 *    reserved data space, since the IO will never happen for
                 *    this page.
                 */
                btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
                if (!inode_evicting) {
                        clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
                                         EXTENT_DELALLOC | EXTENT_UPTODATE |
                                         EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
                                         extra_flags, &cached_state);
                }
                cur = range_end + 1;
        }
        /*
         * We have iterated through all ordered extents of the page; the page
         * should not have Ordered (Private2) anymore, or the above iteration
         * did something wrong.
         */
        ASSERT(!folio_test_ordered(folio));
        btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
        if (!inode_evicting)
                __btrfs_release_folio(folio, GFP_NOFS);
        clear_page_extent_mapped(&folio->page);
}

static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
        struct btrfs_truncate_control control = {
                .inode = inode,
                .ino = btrfs_ino(inode),
                .min_type = BTRFS_EXTENT_DATA_KEY,
                .clear_extent_range = true,
        };
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *rsv;
        int ret;
        struct btrfs_trans_handle *trans;
        u64 mask = fs_info->sectorsize - 1;
        const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

        if (!skip_writeback) {
                ret = btrfs_wait_ordered_range(&inode->vfs_inode,
                                               inode->vfs_inode.i_size & (~mask),
                                               (u64)-1);
                if (ret)
                        return ret;
        }

        /*
         * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
         * things going on here:
         *
         * 1) We need to reserve space to update our inode.
         *
         * 2) We need to have something to cache all the space that is going to
         *    be free'd up by the truncate operation, but also have some slack
         *    space reserved in case it uses space during the truncate (thank
         *    you very much snapshotting).
         *
         * And we need these to be separate. The fact is we can use a lot of
         * space doing the truncate, and we have no earthly idea how much space
         * we will use, so we need the truncate reservation to be separate so it
         * doesn't end up using space reserved for updating the inode. We also
         * need to be able to stop the transaction and start a new one, which
         * means we need to be able to update the inode several times, and we
         * have no way of knowing how many times that will be, so we can't just
         * reserve 1 item for the entirety of the operation, so that has to be
         * done separately as well.
         *
         * So that leaves us with:
         *
         * 1) rsv - for the truncate reservation, which we will steal from the
         *    transaction reservation.
         * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
         *    updating the inode.
         */
        rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;
        rsv->size = min_size;
        rsv->failfast = true;

        /*
         * 1 for the truncate slack space
         * 1 for updating the inode.
         */
        trans = btrfs_start_transaction(root, 2);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        /* Migrate the slack space for the truncate to our reserve. */
        ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
                                      min_size, false);
        /*
         * We have reserved 2 metadata units when we started the transaction and
         * min_size matches 1 unit, so this should never fail, but if it does,
         * it's not critical; we just fail the truncation.
         */
        if (WARN_ON(ret)) {
                btrfs_end_transaction(trans);
                goto out;
        }

        trans->block_rsv = rsv;

        while (1) {
                struct extent_state *cached_state = NULL;
                const u64 new_size = inode->vfs_inode.i_size;
                const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

                control.new_size = new_size;
                lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
                /*
                 * We want to drop from the next block forward in case this new
                 * size is not block aligned since we will be keeping the last
                 * block of the extent just the way it is.
                 */
                btrfs_drop_extent_map_range(inode,
                                            ALIGN(new_size, fs_info->sectorsize),
                                            (u64)-1, false);

                ret = btrfs_truncate_inode_items(trans, root, &control);

                inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
                btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

                unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

                trans->block_rsv = &fs_info->trans_block_rsv;
                if (ret != -ENOSPC && ret != -EAGAIN)
                        break;

                ret = btrfs_update_inode(trans, inode);
                if (ret)
                        break;

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(fs_info);

                trans = btrfs_start_transaction(root, 2);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                        break;
                }

                btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, false);
                /*
                 * We have reserved 2 metadata units when we started the
                 * transaction and min_size matches 1 unit, so this should never
                 * fail, but if it does, it's not critical; we just fail the
                 * truncation.
                 */
                if (WARN_ON(ret))
                        break;

                trans->block_rsv = rsv;
        }

        /*
         * We can't call btrfs_truncate_block() inside a trans handle as we
         * could deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then
         * we know we've truncated everything except the last little bit, and
         * can do btrfs_truncate_block() and then update the disk_i_size.
         */
        if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(fs_info);

                ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
                if (ret)
                        goto out;
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        goto out;
                }
                btrfs_inode_safe_disk_i_size_write(inode, 0);
        }

        if (trans) {
                int ret2;

                trans->block_rsv = &fs_info->trans_block_rsv;
                ret2 = btrfs_update_inode(trans, inode);
                if (ret2 && !ret)
                        ret = ret2;

                ret2 = btrfs_end_transaction(trans);
                if (ret2 && !ret)
                        ret = ret2;
                btrfs_btree_balance_dirty(fs_info);
        }
out:
        btrfs_free_block_rsv(fs_info, rsv);
        /*
         * So if we truncate and then write and fsync we normally would just
         * write the extents that changed, which is a problem if we need to
         * first truncate that entire inode. So set this flag so we write out
         * all of the extents in the inode to the sync log so we're completely
         * safe.
         *
         * If no extents were dropped or trimmed we don't need to force the next
         * fsync to truncate all the inode's items from the log and re-log them
         * all. This means the truncate operation did not change the file size,
         * or changed it to a smaller size but there was only an implicit hole
         * between the old i_size and the new i_size, and there were no prealloc
         * extents beyond i_size to drop.
         */
        if (control.extents_found > 0)
                btrfs_set_inode_full_sync(inode);

        return ret;
}

struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
                                     struct inode *dir)
{
        struct inode *inode;

        inode = new_inode(dir->i_sb);
        if (inode) {
                /*
                 * Subvolumes don't inherit the sgid bit or the parent's gid if
                 * the parent's sgid bit is set. This is probably a bug.
                 */
                inode_init_owner(idmap, inode, NULL,
                                 S_IFDIR | (~current_umask() & S_IRWXUGO));
                inode->i_op = &btrfs_dir_inode_operations;
                inode->i_fop = &btrfs_dir_file_operations;
        }
        return inode;
}

struct inode *btrfs_alloc_inode(struct super_block *sb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_inode *ei;
        struct inode *inode;
        struct extent_io_tree *file_extent_tree = NULL;

        /* Self tests may pass a NULL fs_info. */
        if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
                file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
                if (!file_extent_tree)
                        return NULL;
        }

        ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
        if (!ei) {
                kfree(file_extent_tree);
                return NULL;
        }

        ei->root = NULL;
        ei->generation = 0;
        ei->last_trans = 0;
        ei->last_sub_trans = 0;
        ei->logged_trans = 0;
        ei->delalloc_bytes = 0;
        ei->new_delalloc_bytes = 0;
        ei->defrag_bytes = 0;
        ei->disk_i_size = 0;
        ei->flags = 0;
        ei->ro_flags = 0;
        ei->csum_bytes = 0;
        ei->index_cnt = (u64)-1;
        ei->dir_index = 0;
        ei->last_unlink_trans = 0;
        ei->last_reflink_trans = 0;
        ei->last_log_commit = 0;

        spin_lock_init(&ei->lock);
        ei->outstanding_extents = 0;
        if (sb->s_magic != BTRFS_TEST_MAGIC)
                btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
                                              BTRFS_BLOCK_RSV_DELALLOC);
        ei->runtime_flags = 0;
        ei->prop_compress = BTRFS_COMPRESS_NONE;
        ei->defrag_compress = BTRFS_COMPRESS_NONE;

        ei->delayed_node = NULL;

        ei->i_otime_sec = 0;
        ei->i_otime_nsec = 0;

        inode = &ei->vfs_inode;
        extent_map_tree_init(&ei->extent_tree);

        /* This io tree sets the valid inode. */
        extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
        ei->io_tree.inode = ei;

        ei->file_extent_tree = file_extent_tree;
        if (file_extent_tree) {
                extent_io_tree_init(fs_info, ei->file_extent_tree,
                                    IO_TREE_INODE_FILE_EXTENT);
                /* The lockdep class is set only for the file extent tree. */
                lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
        }
        mutex_init(&ei->log_mutex);
        spin_lock_init(&ei->ordered_tree_lock);
        ei->ordered_tree = RB_ROOT;
        ei->ordered_tree_last = NULL;
        INIT_LIST_HEAD(&ei->delalloc_inodes);
        INIT_LIST_HEAD(&ei->delayed_iput);
        RB_CLEAR_NODE(&ei->rb_node);
        init_rwsem(&ei->i_mmap_lock);

        return inode;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
        btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
        kfree(BTRFS_I(inode)->file_extent_tree);
        kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
        kfree(BTRFS_I(inode)->file_extent_tree);
        kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *vfs_inode)
{
        struct btrfs_ordered_extent *ordered;
        struct btrfs_inode *inode = BTRFS_I(vfs_inode);
        struct btrfs_root *root = inode->root;
        bool freespace_inode;

        WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
        WARN_ON(vfs_inode->i_data.nrpages);
        WARN_ON(inode->block_rsv.reserved);
        WARN_ON(inode->block_rsv.size);
        WARN_ON(inode->outstanding_extents);
        if (!S_ISDIR(vfs_inode->i_mode)) {
                WARN_ON(inode->delalloc_bytes);
                WARN_ON(inode->new_delalloc_bytes);
        }
        WARN_ON(inode->csum_bytes);
        WARN_ON(inode->defrag_bytes);

        /*
         * This can happen when we create an inode, but somebody else also
         * created the same inode and we need to destroy the one we already
         * created.
         */
        if (!root)
                return;

        /*
         * If this is a free space inode, do not take the ordered extents
         * lockdep map.
         */
        freespace_inode = btrfs_is_free_space_inode(inode);

        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
                if (!ordered)
                        break;
                else {
                        btrfs_err(root->fs_info,
                                  "found ordered extent %llu %llu on inode cleanup",
                                  ordered->file_offset, ordered->num_bytes);

                        if (!freespace_inode)
                                btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

                        btrfs_remove_ordered_extent(inode, ordered);
                        /* Put twice: once for the lookup ref, once for the base ref. */
                        btrfs_put_ordered_extent(ordered);
                        btrfs_put_ordered_extent(ordered);
                }
        }
        btrfs_qgroup_check_reserved_leak(inode);
        inode_tree_del(inode);
        btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
        btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
        btrfs_put_root(inode->root);
}

int btrfs_drop_inode(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;

        if (root == NULL)
                return 1;

        /* The snapshot/subvolume tree is being deleted. */
        if (btrfs_root_refs(&root->root_item) == 0)
                return 1;
        else
                return generic_drop_inode(inode);
}

static void init_once(void *foo)
{
        struct btrfs_inode *ei = foo;

        inode_init_once(&ei->vfs_inode);
}

void __cold btrfs_destroy_cachep(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy the cache.
         */
        rcu_barrier();
        bioset_exit(&btrfs_dio_bioset);
        kmem_cache_destroy(btrfs_inode_cachep);
}

int __init btrfs_init_cachep(void)
{
        btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
                                               sizeof(struct btrfs_inode), 0,
                                               SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
                                               init_once);
        if (!btrfs_inode_cachep)
                goto fail;

        if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
                        offsetof(struct btrfs_dio_private, bbio.bio),
                        BIOSET_NEED_BVECS))
                goto fail;

        return 0;
fail:
        btrfs_destroy_cachep();
        return -ENOMEM;
}

static int btrfs_getattr(struct mnt_idmap *idmap,
                         const struct path *path, struct kstat *stat,
                         u32 request_mask, unsigned int flags)
{
        u64 delalloc_bytes;
        u64 inode_bytes;
        struct inode *inode = d_inode(path->dentry);
        u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
        u32 bi_flags = BTRFS_I(inode)->flags;
        u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

        stat->result_mask |= STATX_BTIME;
        stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
        stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
        if (bi_flags & BTRFS_INODE_APPEND)
                stat->attributes |= STATX_ATTR_APPEND;
        if (bi_flags & BTRFS_INODE_COMPRESS)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (bi_flags & BTRFS_INODE_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (bi_flags & BTRFS_INODE_NODUMP)
                stat->attributes |= STATX_ATTR_NODUMP;
        if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
                stat->attributes |= STATX_ATTR_VERITY;

        stat->attributes_mask |= (STATX_ATTR_APPEND |
                                  STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP);

        generic_fillattr(idmap, request_mask, inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;

        stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
        stat->result_mask |= STATX_SUBVOL;

        spin_lock(&BTRFS_I(inode)->lock);
        delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
        inode_bytes = inode_get_bytes(inode);
        spin_unlock(&BTRFS_I(inode)->lock);
        stat->blocks = (ALIGN(inode_bytes, blocksize) +
                        ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
        return 0;
}
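
/*
 * Consumer sketch (illustrative userspace): the btrfs-specific statx fields
 * set above can be requested explicitly; note that stx_blocks also counts
 * not-yet-flushed delalloc via the new_delalloc_bytes term:
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "file", 0, STATX_BTIME | STATX_SUBVOL, &stx);
 *	if (stx.stx_mask & STATX_BTIME)
 *		printf("otime %lld\n", (long long)stx.stx_btime.tv_sec);
 *	if (stx.stx_mask & STATX_SUBVOL)
 *		printf("subvol %llu\n", (unsigned long long)stx.stx_subvol);
 *
 * stx_dev is the per-subvolume anonymous device set just above, which is how
 * userspace can tell subvolume boundaries apart even without STATX_SUBVOL.
 */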
static int btrfs_rename_exchange(struct inode *old_dir,
                                 struct dentry *old_dentry,
                                 struct inode *new_dir,
                                 struct dentry *new_dentry)
{
        struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
        struct btrfs_trans_handle *trans;
        unsigned int trans_num_items;
        struct btrfs_root *root = BTRFS_I(old_dir)->root;
        struct btrfs_root *dest = BTRFS_I(new_dir)->root;
        struct inode *new_inode = new_dentry->d_inode;
        struct inode *old_inode = old_dentry->d_inode;
        struct btrfs_rename_ctx old_rename_ctx;
        struct btrfs_rename_ctx new_rename_ctx;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
        u64 old_idx = 0;
        u64 new_idx = 0;
        int ret;
        int ret2;
        bool need_abort = false;
        struct fscrypt_name old_fname, new_fname;
        struct fscrypt_str *old_name, *new_name;

        /*
         * For non-subvolumes, allow an exchange only within one subvolume, in
         * the same inode namespace. Two subvolumes (represented as directories)
         * can be exchanged as they're a logical link and have a fixed inode
         * number.
         */
        if (root != dest &&
            (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
             new_ino != BTRFS_FIRST_FREE_OBJECTID))
                return -EXDEV;

        ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
        if (ret)
                return ret;

        ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
        if (ret) {
                fscrypt_free_filename(&old_fname);
                return ret;
        }

        old_name = &old_fname.disk_name;
        new_name = &new_fname.disk_name;

        /* Close the race window with the snapshot create/destroy ioctl. */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
            new_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);

        /*
         * For each inode:
         * 1 to remove old dir item
         * 1 to remove old dir index
         * 1 to add new dir item
         * 1 to add new dir index
         * 1 to update parent inode
         *
         * If the parents are the same, we only need to account for one parent
         * update, hence 9 items instead of 2 * 5 = 10.
         */
        trans_num_items = (old_dir == new_dir ? 9 : 10);
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
                /*
                 * 1 to remove old root ref
                 * 1 to remove old root backref
                 * 1 to add new root ref
                 * 1 to add new root backref
                 */
                trans_num_items += 4;
        } else {
                /*
                 * 1 to update inode item
                 * 1 to remove old inode ref
                 * 1 to add new inode ref
                 */
                trans_num_items += 3;
        }
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                trans_num_items += 4;
        else
                trans_num_items += 3;
        trans = btrfs_start_transaction(root, trans_num_items);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out_notrans;
        }

        if (dest != root) {
                ret = btrfs_record_root_in_trans(trans, dest);
                if (ret)
                        goto out_fail;
        }

        /*
         * We need to find a free sequence number both in the source and
         * in the destination directory for the exchange.
         */
        ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
        if (ret)
                goto out_fail;
        ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
        if (ret)
                goto out_fail;

        BTRFS_I(old_inode)->dir_index = 0ULL;
        BTRFS_I(new_inode)->dir_index = 0ULL;

        /* Reference for the source. */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
                /* Force a full log commit if a subvolume is involved. */
                btrfs_set_log_full_commit(trans);
        } else {
                ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
                                             btrfs_ino(BTRFS_I(new_dir)),
                                             old_idx);
                if (ret)
                        goto out_fail;
                need_abort = true;
        }

        /* And now for the dest. */
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
                /* Force a full log commit if a subvolume is involved. */
                btrfs_set_log_full_commit(trans);
        } else {
                ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
                                             btrfs_ino(BTRFS_I(old_dir)),
                                             new_idx);
                if (ret) {
                        if (need_abort)
                                btrfs_abort_transaction(trans, ret);
                        goto out_fail;
                }
        }

        /* Update inode version and ctime/mtime. */
        inode_inc_iversion(old_dir);
        inode_inc_iversion(new_dir);
        inode_inc_iversion(old_inode);
        inode_inc_iversion(new_inode);
        simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

        if (old_dentry->d_parent != new_dentry->d_parent) {
                btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
                                        BTRFS_I(old_inode), true);
                btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
                                        BTRFS_I(new_inode), true);
        }

        /* The source is a subvolume. */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
        } else { /* The source is an inode. */
                ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
                                           BTRFS_I(old_dentry->d_inode),
                                           old_name, &old_rename_ctx);
                if (!ret)
                        ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
        }
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_fail;
        }

        /* The dest is a subvolume. */
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
        } else { /* The dest is an inode. */
                ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
                                           BTRFS_I(new_dentry->d_inode),
                                           new_name, &new_rename_ctx);
                if (!ret)
                        ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
        }
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_fail;
        }

        ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
                             new_name, 0, old_idx);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_fail;
        }

        ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
                             old_name, 0, new_idx);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out_fail;
        }

        if (old_inode->i_nlink == 1)
                BTRFS_I(old_inode)->dir_index = old_idx;
        if (new_inode->i_nlink == 1)
                BTRFS_I(new_inode)->dir_index = new_idx;

        /*
         * Now pin the logs of the roots. We do it to ensure that no other task
         * can sync the logs while we are in progress with the rename, because
         * that could result in an inconsistency in case any of the inodes that
         * are part of this rename operation were logged before.
         */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_pin_log_trans(root);
        if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_pin_log_trans(dest);

        /* Do the log updates for all inodes. */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
                                   old_rename_ctx.index, new_dentry->d_parent);
        if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
                                   new_rename_ctx.index, old_dentry->d_parent);

        /* Now unpin the logs. */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_end_log_trans(root);
        if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
                btrfs_end_log_trans(dest);
out_fail:
        ret2 = btrfs_end_transaction(trans);
        ret = ret ? ret : ret2;
out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
            old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);

        fscrypt_free_filename(&new_fname);
        fscrypt_free_filename(&old_fname);
        return ret;
}
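
/*
 * Consumer sketch (illustrative userspace): the function above implements
 * renameat2() with RENAME_EXCHANGE, i.e. an atomic swap of two names:
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * Within one subvolume this works for any pair of inodes; across roots it
 * fails with -EXDEV unless both names are themselves subvolumes, per the
 * check at the top of the function.
 */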
*/ 8978 trans_num_items = 1; 8979 } 8980 8981 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8982 /* Close the race window with snapshot create/destroy ioctl */ 8983 down_read(&fs_info->subvol_sem); 8984 /* 8985 * 1 to remove old root ref 8986 * 1 to remove old root backref 8987 * 1 to add new root ref 8988 * 1 to add new root backref 8989 */ 8990 trans_num_items += 4; 8991 } else { 8992 /* 8993 * 1 to update inode 8994 * 1 to remove old inode ref 8995 * 1 to add new inode ref 8996 */ 8997 trans_num_items += 3; 8998 } 8999 /* 9000 * 1 to remove old dir item 9001 * 1 to remove old dir index 9002 * 1 to add new dir item 9003 * 1 to add new dir index 9004 */ 9005 trans_num_items += 4; 9006 /* 1 to update new parent inode if it's not the same as the old parent */ 9007 if (new_dir != old_dir) 9008 trans_num_items++; 9009 if (new_inode) { 9010 /* 9011 * 1 to update inode 9012 * 1 to remove inode ref 9013 * 1 to remove dir item 9014 * 1 to remove dir index 9015 * 1 to possibly add orphan item 9016 */ 9017 trans_num_items += 5; 9018 } 9019 trans = btrfs_start_transaction(root, trans_num_items); 9020 if (IS_ERR(trans)) { 9021 ret = PTR_ERR(trans); 9022 goto out_notrans; 9023 } 9024 9025 if (dest != root) { 9026 ret = btrfs_record_root_in_trans(trans, dest); 9027 if (ret) 9028 goto out_fail; 9029 } 9030 9031 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9032 if (ret) 9033 goto out_fail; 9034 9035 BTRFS_I(old_inode)->dir_index = 0ULL; 9036 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9037 /* force full log commit if subvolume involved. */ 9038 btrfs_set_log_full_commit(trans); 9039 } else { 9040 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9041 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9042 index); 9043 if (ret) 9044 goto out_fail; 9045 } 9046 9047 inode_inc_iversion(old_dir); 9048 inode_inc_iversion(new_dir); 9049 inode_inc_iversion(old_inode); 9050 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 9051 9052 if (old_dentry->d_parent != new_dentry->d_parent) 9053 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9054 BTRFS_I(old_inode), true); 9055 9056 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9057 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9058 } else { 9059 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9060 BTRFS_I(d_inode(old_dentry)), 9061 &old_fname.disk_name, &rename_ctx); 9062 if (!ret) 9063 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 9064 } 9065 if (ret) { 9066 btrfs_abort_transaction(trans, ret); 9067 goto out_fail; 9068 } 9069 9070 if (new_inode) { 9071 inode_inc_iversion(new_inode); 9072 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9073 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9074 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9075 BUG_ON(new_inode->i_nlink == 0); 9076 } else { 9077 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9078 BTRFS_I(d_inode(new_dentry)), 9079 &new_fname.disk_name); 9080 } 9081 if (!ret && new_inode->i_nlink == 0) 9082 ret = btrfs_orphan_add(trans, 9083 BTRFS_I(d_inode(new_dentry))); 9084 if (ret) { 9085 btrfs_abort_transaction(trans, ret); 9086 goto out_fail; 9087 } 9088 } 9089 9090 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9091 &new_fname.disk_name, 0, index); 9092 if (ret) { 9093 btrfs_abort_transaction(trans, ret); 9094 goto out_fail; 9095 } 9096 9097 if (old_inode->i_nlink == 1) 9098 BTRFS_I(old_inode)->dir_index = index; 9099 9100 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9101 btrfs_log_new_name(trans, old_dentry, 
BTRFS_I(old_dir), 9102 rename_ctx.index, new_dentry->d_parent); 9103 9104 if (flags & RENAME_WHITEOUT) { 9105 ret = btrfs_create_new_inode(trans, &whiteout_args); 9106 if (ret) { 9107 btrfs_abort_transaction(trans, ret); 9108 goto out_fail; 9109 } else { 9110 unlock_new_inode(whiteout_args.inode); 9111 iput(whiteout_args.inode); 9112 whiteout_args.inode = NULL; 9113 } 9114 } 9115 out_fail: 9116 ret2 = btrfs_end_transaction(trans); 9117 ret = ret ? ret : ret2; 9118 out_notrans: 9119 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9120 up_read(&fs_info->subvol_sem); 9121 if (flags & RENAME_WHITEOUT) 9122 btrfs_new_inode_args_destroy(&whiteout_args); 9123 out_whiteout_inode: 9124 if (flags & RENAME_WHITEOUT) 9125 iput(whiteout_args.inode); 9126 out_fscrypt_names: 9127 fscrypt_free_filename(&old_fname); 9128 fscrypt_free_filename(&new_fname); 9129 return ret; 9130 } 9131 9132 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 9133 struct dentry *old_dentry, struct inode *new_dir, 9134 struct dentry *new_dentry, unsigned int flags) 9135 { 9136 int ret; 9137 9138 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9139 return -EINVAL; 9140 9141 if (flags & RENAME_EXCHANGE) 9142 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9143 new_dentry); 9144 else 9145 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 9146 new_dentry, flags); 9147 9148 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9149 9150 return ret; 9151 } 9152 9153 struct btrfs_delalloc_work { 9154 struct inode *inode; 9155 struct completion completion; 9156 struct list_head list; 9157 struct btrfs_work work; 9158 }; 9159 9160 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9161 { 9162 struct btrfs_delalloc_work *delalloc_work; 9163 struct inode *inode; 9164 9165 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9166 work); 9167 inode = delalloc_work->inode; 9168 filemap_flush(inode->i_mapping); 9169 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9170 &BTRFS_I(inode)->runtime_flags)) 9171 filemap_flush(inode->i_mapping); 9172 9173 iput(inode); 9174 complete(&delalloc_work->completion); 9175 } 9176 9177 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9178 { 9179 struct btrfs_delalloc_work *work; 9180 9181 work = kmalloc(sizeof(*work), GFP_NOFS); 9182 if (!work) 9183 return NULL; 9184 9185 init_completion(&work->completion); 9186 INIT_LIST_HEAD(&work->list); 9187 work->inode = inode; 9188 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL); 9189 9190 return work; 9191 } 9192 9193 /* 9194 * some fairly slow code that needs optimization. This walks the list 9195 * of all the inodes with pending delalloc and forces them to disk. 
9196 */ 9197 static int start_delalloc_inodes(struct btrfs_root *root, 9198 struct writeback_control *wbc, bool snapshot, 9199 bool in_reclaim_context) 9200 { 9201 struct btrfs_inode *binode; 9202 struct inode *inode; 9203 struct btrfs_delalloc_work *work, *next; 9204 LIST_HEAD(works); 9205 LIST_HEAD(splice); 9206 int ret = 0; 9207 bool full_flush = wbc->nr_to_write == LONG_MAX; 9208 9209 mutex_lock(&root->delalloc_mutex); 9210 spin_lock(&root->delalloc_lock); 9211 list_splice_init(&root->delalloc_inodes, &splice); 9212 while (!list_empty(&splice)) { 9213 binode = list_entry(splice.next, struct btrfs_inode, 9214 delalloc_inodes); 9215 9216 list_move_tail(&binode->delalloc_inodes, 9217 &root->delalloc_inodes); 9218 9219 if (in_reclaim_context && 9220 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9221 continue; 9222 9223 inode = igrab(&binode->vfs_inode); 9224 if (!inode) { 9225 cond_resched_lock(&root->delalloc_lock); 9226 continue; 9227 } 9228 spin_unlock(&root->delalloc_lock); 9229 9230 if (snapshot) 9231 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9232 &binode->runtime_flags); 9233 if (full_flush) { 9234 work = btrfs_alloc_delalloc_work(inode); 9235 if (!work) { 9236 iput(inode); 9237 ret = -ENOMEM; 9238 goto out; 9239 } 9240 list_add_tail(&work->list, &works); 9241 btrfs_queue_work(root->fs_info->flush_workers, 9242 &work->work); 9243 } else { 9244 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9245 btrfs_add_delayed_iput(BTRFS_I(inode)); 9246 if (ret || wbc->nr_to_write <= 0) 9247 goto out; 9248 } 9249 cond_resched(); 9250 spin_lock(&root->delalloc_lock); 9251 } 9252 spin_unlock(&root->delalloc_lock); 9253 9254 out: 9255 list_for_each_entry_safe(work, next, &works, list) { 9256 list_del_init(&work->list); 9257 wait_for_completion(&work->completion); 9258 kfree(work); 9259 } 9260 9261 if (!list_empty(&splice)) { 9262 spin_lock(&root->delalloc_lock); 9263 list_splice_tail(&splice, &root->delalloc_inodes); 9264 spin_unlock(&root->delalloc_lock); 9265 } 9266 mutex_unlock(&root->delalloc_mutex); 9267 return ret; 9268 } 9269 9270 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9271 { 9272 struct writeback_control wbc = { 9273 .nr_to_write = LONG_MAX, 9274 .sync_mode = WB_SYNC_NONE, 9275 .range_start = 0, 9276 .range_end = LLONG_MAX, 9277 }; 9278 struct btrfs_fs_info *fs_info = root->fs_info; 9279 9280 if (BTRFS_FS_ERROR(fs_info)) 9281 return -EROFS; 9282 9283 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9284 } 9285 9286 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9287 bool in_reclaim_context) 9288 { 9289 struct writeback_control wbc = { 9290 .nr_to_write = nr, 9291 .sync_mode = WB_SYNC_NONE, 9292 .range_start = 0, 9293 .range_end = LLONG_MAX, 9294 }; 9295 struct btrfs_root *root; 9296 LIST_HEAD(splice); 9297 int ret; 9298 9299 if (BTRFS_FS_ERROR(fs_info)) 9300 return -EROFS; 9301 9302 mutex_lock(&fs_info->delalloc_root_mutex); 9303 spin_lock(&fs_info->delalloc_root_lock); 9304 list_splice_init(&fs_info->delalloc_roots, &splice); 9305 while (!list_empty(&splice)) { 9306 /* 9307 * Reset nr_to_write here so we know that we're doing a full 9308 * flush. 
9309 */ 9310 if (nr == LONG_MAX) 9311 wbc.nr_to_write = LONG_MAX; 9312 9313 root = list_first_entry(&splice, struct btrfs_root, 9314 delalloc_root); 9315 root = btrfs_grab_root(root); 9316 BUG_ON(!root); 9317 list_move_tail(&root->delalloc_root, 9318 &fs_info->delalloc_roots); 9319 spin_unlock(&fs_info->delalloc_root_lock); 9320 9321 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9322 btrfs_put_root(root); 9323 if (ret < 0 || wbc.nr_to_write <= 0) 9324 goto out; 9325 spin_lock(&fs_info->delalloc_root_lock); 9326 } 9327 spin_unlock(&fs_info->delalloc_root_lock); 9328 9329 ret = 0; 9330 out: 9331 if (!list_empty(&splice)) { 9332 spin_lock(&fs_info->delalloc_root_lock); 9333 list_splice_tail(&splice, &fs_info->delalloc_roots); 9334 spin_unlock(&fs_info->delalloc_root_lock); 9335 } 9336 mutex_unlock(&fs_info->delalloc_root_mutex); 9337 return ret; 9338 } 9339 9340 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 9341 struct dentry *dentry, const char *symname) 9342 { 9343 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 9344 struct btrfs_trans_handle *trans; 9345 struct btrfs_root *root = BTRFS_I(dir)->root; 9346 struct btrfs_path *path; 9347 struct btrfs_key key; 9348 struct inode *inode; 9349 struct btrfs_new_inode_args new_inode_args = { 9350 .dir = dir, 9351 .dentry = dentry, 9352 }; 9353 unsigned int trans_num_items; 9354 int err; 9355 int name_len; 9356 int datasize; 9357 unsigned long ptr; 9358 struct btrfs_file_extent_item *ei; 9359 struct extent_buffer *leaf; 9360 9361 name_len = strlen(symname); 9362 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9363 return -ENAMETOOLONG; 9364 9365 inode = new_inode(dir->i_sb); 9366 if (!inode) 9367 return -ENOMEM; 9368 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 9369 inode->i_op = &btrfs_symlink_inode_operations; 9370 inode_nohighmem(inode); 9371 inode->i_mapping->a_ops = &btrfs_aops; 9372 btrfs_i_size_write(BTRFS_I(inode), name_len); 9373 inode_set_bytes(inode, name_len); 9374 9375 new_inode_args.inode = inode; 9376 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9377 if (err) 9378 goto out_inode; 9379 /* 1 additional item for the inline extent */ 9380 trans_num_items++; 9381 9382 trans = btrfs_start_transaction(root, trans_num_items); 9383 if (IS_ERR(trans)) { 9384 err = PTR_ERR(trans); 9385 goto out_new_inode_args; 9386 } 9387 9388 err = btrfs_create_new_inode(trans, &new_inode_args); 9389 if (err) 9390 goto out; 9391 9392 path = btrfs_alloc_path(); 9393 if (!path) { 9394 err = -ENOMEM; 9395 btrfs_abort_transaction(trans, err); 9396 discard_new_inode(inode); 9397 inode = NULL; 9398 goto out; 9399 } 9400 key.objectid = btrfs_ino(BTRFS_I(inode)); 9401 key.offset = 0; 9402 key.type = BTRFS_EXTENT_DATA_KEY; 9403 datasize = btrfs_file_extent_calc_inline_size(name_len); 9404 err = btrfs_insert_empty_item(trans, root, path, &key, 9405 datasize); 9406 if (err) { 9407 btrfs_abort_transaction(trans, err); 9408 btrfs_free_path(path); 9409 discard_new_inode(inode); 9410 inode = NULL; 9411 goto out; 9412 } 9413 leaf = path->nodes[0]; 9414 ei = btrfs_item_ptr(leaf, path->slots[0], 9415 struct btrfs_file_extent_item); 9416 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9417 btrfs_set_file_extent_type(leaf, ei, 9418 BTRFS_FILE_EXTENT_INLINE); 9419 btrfs_set_file_extent_encryption(leaf, ei, 0); 9420 btrfs_set_file_extent_compression(leaf, ei, 0); 9421 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9422 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9423 
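	/*
	 * The symlink target is stored as the data of an inline file extent
	 * item; it is guaranteed to fit because name_len was checked against
	 * BTRFS_MAX_INLINE_DATA_SIZE() above. Copy the target string into the
	 * leaf.
	 */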
9424 ptr = btrfs_file_extent_inline_start(ei); 9425 write_extent_buffer(leaf, symname, ptr, name_len); 9426 btrfs_mark_buffer_dirty(trans, leaf); 9427 btrfs_free_path(path); 9428 9429 d_instantiate_new(dentry, inode); 9430 err = 0; 9431 out: 9432 btrfs_end_transaction(trans); 9433 btrfs_btree_balance_dirty(fs_info); 9434 out_new_inode_args: 9435 btrfs_new_inode_args_destroy(&new_inode_args); 9436 out_inode: 9437 if (err) 9438 iput(inode); 9439 return err; 9440 } 9441 9442 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9443 struct btrfs_trans_handle *trans_in, 9444 struct btrfs_inode *inode, 9445 struct btrfs_key *ins, 9446 u64 file_offset) 9447 { 9448 struct btrfs_file_extent_item stack_fi; 9449 struct btrfs_replace_extent_info extent_info; 9450 struct btrfs_trans_handle *trans = trans_in; 9451 struct btrfs_path *path; 9452 u64 start = ins->objectid; 9453 u64 len = ins->offset; 9454 u64 qgroup_released = 0; 9455 int ret; 9456 9457 memset(&stack_fi, 0, sizeof(stack_fi)); 9458 9459 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9460 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9461 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9462 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9463 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9464 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9465 /* Encryption and other encoding is reserved and all 0 */ 9466 9467 ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released); 9468 if (ret < 0) 9469 return ERR_PTR(ret); 9470 9471 if (trans) { 9472 ret = insert_reserved_file_extent(trans, inode, 9473 file_offset, &stack_fi, 9474 true, qgroup_released); 9475 if (ret) 9476 goto free_qgroup; 9477 return trans; 9478 } 9479 9480 extent_info.disk_offset = start; 9481 extent_info.disk_len = len; 9482 extent_info.data_offset = 0; 9483 extent_info.data_len = len; 9484 extent_info.file_offset = file_offset; 9485 extent_info.extent_buf = (char *)&stack_fi; 9486 extent_info.is_new_extent = true; 9487 extent_info.update_times = true; 9488 extent_info.qgroup_reserved = qgroup_released; 9489 extent_info.insertions = 0; 9490 9491 path = btrfs_alloc_path(); 9492 if (!path) { 9493 ret = -ENOMEM; 9494 goto free_qgroup; 9495 } 9496 9497 ret = btrfs_replace_file_extents(inode, path, file_offset, 9498 file_offset + len - 1, &extent_info, 9499 &trans); 9500 btrfs_free_path(path); 9501 if (ret) 9502 goto free_qgroup; 9503 return trans; 9504 9505 free_qgroup: 9506 /* 9507 * We have released qgroup data range at the beginning of the function, 9508 * and normally qgroup_released bytes will be freed when committing 9509 * transaction. 9510 * But if we error out early, we have to free what we have released 9511 * or we leak qgroup data reservation. 
9512 */ 9513 btrfs_qgroup_free_refroot(inode->root->fs_info, 9514 btrfs_root_id(inode->root), qgroup_released, 9515 BTRFS_QGROUP_RSV_DATA); 9516 return ERR_PTR(ret); 9517 } 9518 9519 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9520 u64 start, u64 num_bytes, u64 min_size, 9521 loff_t actual_len, u64 *alloc_hint, 9522 struct btrfs_trans_handle *trans) 9523 { 9524 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 9525 struct extent_map *em; 9526 struct btrfs_root *root = BTRFS_I(inode)->root; 9527 struct btrfs_key ins; 9528 u64 cur_offset = start; 9529 u64 clear_offset = start; 9530 u64 i_size; 9531 u64 cur_bytes; 9532 u64 last_alloc = (u64)-1; 9533 int ret = 0; 9534 bool own_trans = true; 9535 u64 end = start + num_bytes - 1; 9536 9537 if (trans) 9538 own_trans = false; 9539 while (num_bytes > 0) { 9540 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9541 cur_bytes = max(cur_bytes, min_size); 9542 /* 9543 * If we are severely fragmented we could end up with really 9544 * small allocations, so if the allocator is returning small 9545 * chunks lets make its job easier by only searching for those 9546 * sized chunks. 9547 */ 9548 cur_bytes = min(cur_bytes, last_alloc); 9549 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9550 min_size, 0, *alloc_hint, &ins, 1, 0); 9551 if (ret) 9552 break; 9553 9554 /* 9555 * We've reserved this space, and thus converted it from 9556 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9557 * from here on out we will only need to clear our reservation 9558 * for the remaining unreserved area, so advance our 9559 * clear_offset by our extent size. 9560 */ 9561 clear_offset += ins.offset; 9562 9563 last_alloc = ins.offset; 9564 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9565 &ins, cur_offset); 9566 /* 9567 * Now that we inserted the prealloc extent we can finally 9568 * decrement the number of reservations in the block group. 9569 * If we did it before, we could race with relocation and have 9570 * relocation miss the reserved extent, making it fail later. 
9571 */ 9572 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9573 if (IS_ERR(trans)) { 9574 ret = PTR_ERR(trans); 9575 btrfs_free_reserved_extent(fs_info, ins.objectid, 9576 ins.offset, 0); 9577 break; 9578 } 9579 9580 em = alloc_extent_map(); 9581 if (!em) { 9582 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9583 cur_offset + ins.offset - 1, false); 9584 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9585 goto next; 9586 } 9587 9588 em->start = cur_offset; 9589 em->orig_start = cur_offset; 9590 em->len = ins.offset; 9591 em->block_start = ins.objectid; 9592 em->block_len = ins.offset; 9593 em->orig_block_len = ins.offset; 9594 em->ram_bytes = ins.offset; 9595 em->flags |= EXTENT_FLAG_PREALLOC; 9596 em->generation = trans->transid; 9597 9598 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9599 free_extent_map(em); 9600 next: 9601 num_bytes -= ins.offset; 9602 cur_offset += ins.offset; 9603 *alloc_hint = ins.objectid + ins.offset; 9604 9605 inode_inc_iversion(inode); 9606 inode_set_ctime_current(inode); 9607 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9608 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9609 (actual_len > inode->i_size) && 9610 (cur_offset > inode->i_size)) { 9611 if (cur_offset > actual_len) 9612 i_size = actual_len; 9613 else 9614 i_size = cur_offset; 9615 i_size_write(inode, i_size); 9616 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9617 } 9618 9619 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 9620 9621 if (ret) { 9622 btrfs_abort_transaction(trans, ret); 9623 if (own_trans) 9624 btrfs_end_transaction(trans); 9625 break; 9626 } 9627 9628 if (own_trans) { 9629 btrfs_end_transaction(trans); 9630 trans = NULL; 9631 } 9632 } 9633 if (clear_offset < end) 9634 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9635 end - clear_offset + 1); 9636 return ret; 9637 } 9638 9639 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9640 u64 start, u64 num_bytes, u64 min_size, 9641 loff_t actual_len, u64 *alloc_hint) 9642 { 9643 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9644 min_size, actual_len, alloc_hint, 9645 NULL); 9646 } 9647 9648 int btrfs_prealloc_file_range_trans(struct inode *inode, 9649 struct btrfs_trans_handle *trans, int mode, 9650 u64 start, u64 num_bytes, u64 min_size, 9651 loff_t actual_len, u64 *alloc_hint) 9652 { 9653 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9654 min_size, actual_len, alloc_hint, trans); 9655 } 9656 9657 static int btrfs_permission(struct mnt_idmap *idmap, 9658 struct inode *inode, int mask) 9659 { 9660 struct btrfs_root *root = BTRFS_I(inode)->root; 9661 umode_t mode = inode->i_mode; 9662 9663 if (mask & MAY_WRITE && 9664 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9665 if (btrfs_root_readonly(root)) 9666 return -EROFS; 9667 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9668 return -EACCES; 9669 } 9670 return generic_permission(idmap, inode, mask); 9671 } 9672 9673 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9674 struct file *file, umode_t mode) 9675 { 9676 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 9677 struct btrfs_trans_handle *trans; 9678 struct btrfs_root *root = BTRFS_I(dir)->root; 9679 struct inode *inode; 9680 struct btrfs_new_inode_args new_inode_args = { 9681 .dir = dir, 9682 .dentry = file->f_path.dentry, 9683 .orphan = true, 9684 }; 9685 unsigned int trans_num_items; 9686 int ret; 9687 9688 inode = new_inode(dir->i_sb); 9689 if (!inode) 9690 return -ENOMEM; 9691 
inode_init_owner(idmap, inode, dir, mode); 9692 inode->i_fop = &btrfs_file_operations; 9693 inode->i_op = &btrfs_file_inode_operations; 9694 inode->i_mapping->a_ops = &btrfs_aops; 9695 9696 new_inode_args.inode = inode; 9697 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9698 if (ret) 9699 goto out_inode; 9700 9701 trans = btrfs_start_transaction(root, trans_num_items); 9702 if (IS_ERR(trans)) { 9703 ret = PTR_ERR(trans); 9704 goto out_new_inode_args; 9705 } 9706 9707 ret = btrfs_create_new_inode(trans, &new_inode_args); 9708 9709 /* 9710 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9711 * set it to 1 because d_tmpfile() will issue a warning if the count is 9712 * 0, through: 9713 * 9714 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9715 */ 9716 set_nlink(inode, 1); 9717 9718 if (!ret) { 9719 d_tmpfile(file, inode); 9720 unlock_new_inode(inode); 9721 mark_inode_dirty(inode); 9722 } 9723 9724 btrfs_end_transaction(trans); 9725 btrfs_btree_balance_dirty(fs_info); 9726 out_new_inode_args: 9727 btrfs_new_inode_args_destroy(&new_inode_args); 9728 out_inode: 9729 if (ret) 9730 iput(inode); 9731 return finish_open_simple(file, ret); 9732 } 9733 9734 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 9735 { 9736 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9737 unsigned long index = start >> PAGE_SHIFT; 9738 unsigned long end_index = end >> PAGE_SHIFT; 9739 struct page *page; 9740 u32 len; 9741 9742 ASSERT(end + 1 - start <= U32_MAX); 9743 len = end + 1 - start; 9744 while (index <= end_index) { 9745 page = find_get_page(inode->vfs_inode.i_mapping, index); 9746 ASSERT(page); /* Pages should be in the extent_io_tree */ 9747 9748 /* This is for data, which doesn't yet support larger folio. */ 9749 ASSERT(folio_order(page_folio(page)) == 0); 9750 btrfs_folio_set_writeback(fs_info, page_folio(page), start, len); 9751 put_page(page); 9752 index++; 9753 } 9754 } 9755 9756 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9757 int compress_type) 9758 { 9759 switch (compress_type) { 9760 case BTRFS_COMPRESS_NONE: 9761 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9762 case BTRFS_COMPRESS_ZLIB: 9763 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9764 case BTRFS_COMPRESS_LZO: 9765 /* 9766 * The LZO format depends on the sector size. 64K is the maximum 9767 * sector size that we support. 
9768 */ 9769 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9770 return -EINVAL; 9771 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9772 (fs_info->sectorsize_bits - 12); 9773 case BTRFS_COMPRESS_ZSTD: 9774 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9775 default: 9776 return -EUCLEAN; 9777 } 9778 } 9779 9780 static ssize_t btrfs_encoded_read_inline( 9781 struct kiocb *iocb, 9782 struct iov_iter *iter, u64 start, 9783 u64 lockend, 9784 struct extent_state **cached_state, 9785 u64 extent_start, size_t count, 9786 struct btrfs_ioctl_encoded_io_args *encoded, 9787 bool *unlocked) 9788 { 9789 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9790 struct btrfs_root *root = inode->root; 9791 struct btrfs_fs_info *fs_info = root->fs_info; 9792 struct extent_io_tree *io_tree = &inode->io_tree; 9793 struct btrfs_path *path; 9794 struct extent_buffer *leaf; 9795 struct btrfs_file_extent_item *item; 9796 u64 ram_bytes; 9797 unsigned long ptr; 9798 void *tmp; 9799 ssize_t ret; 9800 9801 path = btrfs_alloc_path(); 9802 if (!path) { 9803 ret = -ENOMEM; 9804 goto out; 9805 } 9806 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9807 extent_start, 0); 9808 if (ret) { 9809 if (ret > 0) { 9810 /* The extent item disappeared? */ 9811 ret = -EIO; 9812 } 9813 goto out; 9814 } 9815 leaf = path->nodes[0]; 9816 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9817 9818 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9819 ptr = btrfs_file_extent_inline_start(item); 9820 9821 encoded->len = min_t(u64, extent_start + ram_bytes, 9822 inode->vfs_inode.i_size) - iocb->ki_pos; 9823 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9824 btrfs_file_extent_compression(leaf, item)); 9825 if (ret < 0) 9826 goto out; 9827 encoded->compression = ret; 9828 if (encoded->compression) { 9829 size_t inline_size; 9830 9831 inline_size = btrfs_file_extent_inline_item_len(leaf, 9832 path->slots[0]); 9833 if (inline_size > count) { 9834 ret = -ENOBUFS; 9835 goto out; 9836 } 9837 count = inline_size; 9838 encoded->unencoded_len = ram_bytes; 9839 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9840 } else { 9841 count = min_t(u64, count, encoded->len); 9842 encoded->len = count; 9843 encoded->unencoded_len = count; 9844 ptr += iocb->ki_pos - extent_start; 9845 } 9846 9847 tmp = kmalloc(count, GFP_NOFS); 9848 if (!tmp) { 9849 ret = -ENOMEM; 9850 goto out; 9851 } 9852 read_extent_buffer(leaf, tmp, ptr, count); 9853 btrfs_release_path(path); 9854 unlock_extent(io_tree, start, lockend, cached_state); 9855 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9856 *unlocked = true; 9857 9858 ret = copy_to_iter(tmp, count, iter); 9859 if (ret != count) 9860 ret = -EFAULT; 9861 kfree(tmp); 9862 out: 9863 btrfs_free_path(path); 9864 return ret; 9865 } 9866 9867 struct btrfs_encoded_read_private { 9868 wait_queue_head_t wait; 9869 atomic_t pending; 9870 blk_status_t status; 9871 }; 9872 9873 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9874 { 9875 struct btrfs_encoded_read_private *priv = bbio->private; 9876 9877 if (bbio->bio.bi_status) { 9878 /* 9879 * The memory barrier implied by the atomic_dec_return() here 9880 * pairs with the memory barrier implied by the 9881 * atomic_dec_return() or io_wait_event() in 9882 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9883 * write is observed before the load of status in 9884 * btrfs_encoded_read_regular_fill_pages(). 
9885 */ 9886 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9887 } 9888 if (!atomic_dec_return(&priv->pending)) 9889 wake_up(&priv->wait); 9890 bio_put(&bbio->bio); 9891 } 9892 9893 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9894 u64 file_offset, u64 disk_bytenr, 9895 u64 disk_io_size, struct page **pages) 9896 { 9897 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9898 struct btrfs_encoded_read_private priv = { 9899 .pending = ATOMIC_INIT(1), 9900 }; 9901 unsigned long i = 0; 9902 struct btrfs_bio *bbio; 9903 9904 init_waitqueue_head(&priv.wait); 9905 9906 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9907 btrfs_encoded_read_endio, &priv); 9908 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9909 bbio->inode = inode; 9910 9911 do { 9912 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 9913 9914 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 9915 atomic_inc(&priv.pending); 9916 btrfs_submit_bio(bbio, 0); 9917 9918 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9919 btrfs_encoded_read_endio, &priv); 9920 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9921 bbio->inode = inode; 9922 continue; 9923 } 9924 9925 i++; 9926 disk_bytenr += bytes; 9927 disk_io_size -= bytes; 9928 } while (disk_io_size); 9929 9930 atomic_inc(&priv.pending); 9931 btrfs_submit_bio(bbio, 0); 9932 9933 if (atomic_dec_return(&priv.pending)) 9934 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 9935 /* See btrfs_encoded_read_endio() for ordering. */ 9936 return blk_status_to_errno(READ_ONCE(priv.status)); 9937 } 9938 9939 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 9940 struct iov_iter *iter, 9941 u64 start, u64 lockend, 9942 struct extent_state **cached_state, 9943 u64 disk_bytenr, u64 disk_io_size, 9944 size_t count, bool compressed, 9945 bool *unlocked) 9946 { 9947 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9948 struct extent_io_tree *io_tree = &inode->io_tree; 9949 struct page **pages; 9950 unsigned long nr_pages, i; 9951 u64 cur; 9952 size_t page_offset; 9953 ssize_t ret; 9954 9955 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9956 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9957 if (!pages) 9958 return -ENOMEM; 9959 ret = btrfs_alloc_page_array(nr_pages, pages, 0); 9960 if (ret) { 9961 ret = -ENOMEM; 9962 goto out; 9963 } 9964 9965 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 9966 disk_io_size, pages); 9967 if (ret) 9968 goto out; 9969 9970 unlock_extent(io_tree, start, lockend, cached_state); 9971 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9972 *unlocked = true; 9973 9974 if (compressed) { 9975 i = 0; 9976 page_offset = 0; 9977 } else { 9978 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 9979 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 9980 } 9981 cur = 0; 9982 while (cur < count) { 9983 size_t bytes = min_t(size_t, count - cur, 9984 PAGE_SIZE - page_offset); 9985 9986 if (copy_page_to_iter(pages[i], page_offset, bytes, 9987 iter) != bytes) { 9988 ret = -EFAULT; 9989 goto out; 9990 } 9991 i++; 9992 cur += bytes; 9993 page_offset = 0; 9994 } 9995 ret = count; 9996 out: 9997 for (i = 0; i < nr_pages; i++) { 9998 if (pages[i]) 9999 __free_page(pages[i]); 10000 } 10001 kfree(pages); 10002 return ret; 10003 } 10004 10005 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10006 struct btrfs_ioctl_encoded_io_args *encoded) 10007 { 10008 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10009 
struct btrfs_fs_info *fs_info = inode->root->fs_info; 10010 struct extent_io_tree *io_tree = &inode->io_tree; 10011 ssize_t ret; 10012 size_t count = iov_iter_count(iter); 10013 u64 start, lockend, disk_bytenr, disk_io_size; 10014 struct extent_state *cached_state = NULL; 10015 struct extent_map *em; 10016 bool unlocked = false; 10017 10018 file_accessed(iocb->ki_filp); 10019 10020 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10021 10022 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10023 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10024 return 0; 10025 } 10026 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10027 /* 10028 * We don't know how long the extent containing iocb->ki_pos is, but if 10029 * it's compressed we know that it won't be longer than this. 10030 */ 10031 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10032 10033 for (;;) { 10034 struct btrfs_ordered_extent *ordered; 10035 10036 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10037 lockend - start + 1); 10038 if (ret) 10039 goto out_unlock_inode; 10040 lock_extent(io_tree, start, lockend, &cached_state); 10041 ordered = btrfs_lookup_ordered_range(inode, start, 10042 lockend - start + 1); 10043 if (!ordered) 10044 break; 10045 btrfs_put_ordered_extent(ordered); 10046 unlock_extent(io_tree, start, lockend, &cached_state); 10047 cond_resched(); 10048 } 10049 10050 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1); 10051 if (IS_ERR(em)) { 10052 ret = PTR_ERR(em); 10053 goto out_unlock_extent; 10054 } 10055 10056 if (em->block_start == EXTENT_MAP_INLINE) { 10057 u64 extent_start = em->start; 10058 10059 /* 10060 * For inline extents we get everything we need out of the 10061 * extent item. 10062 */ 10063 free_extent_map(em); 10064 em = NULL; 10065 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10066 &cached_state, extent_start, 10067 count, encoded, &unlocked); 10068 goto out; 10069 } 10070 10071 /* 10072 * We only want to return up to EOF even if the extent extends beyond 10073 * that. 10074 */ 10075 encoded->len = min_t(u64, extent_map_end(em), 10076 inode->vfs_inode.i_size) - iocb->ki_pos; 10077 if (em->block_start == EXTENT_MAP_HOLE || 10078 (em->flags & EXTENT_FLAG_PREALLOC)) { 10079 disk_bytenr = EXTENT_MAP_HOLE; 10080 count = min_t(u64, count, encoded->len); 10081 encoded->len = count; 10082 encoded->unencoded_len = count; 10083 } else if (extent_map_is_compressed(em)) { 10084 disk_bytenr = em->block_start; 10085 /* 10086 * Bail if the buffer isn't large enough to return the whole 10087 * compressed extent. 10088 */ 10089 if (em->block_len > count) { 10090 ret = -ENOBUFS; 10091 goto out_em; 10092 } 10093 disk_io_size = em->block_len; 10094 count = em->block_len; 10095 encoded->unencoded_len = em->ram_bytes; 10096 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10097 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10098 extent_map_compression(em)); 10099 if (ret < 0) 10100 goto out_em; 10101 encoded->compression = ret; 10102 } else { 10103 disk_bytenr = em->block_start + (start - em->start); 10104 if (encoded->len > count) 10105 encoded->len = count; 10106 /* 10107 * Don't read beyond what we locked. This also limits the page 10108 * allocations that we'll do. 
10109 */ 10110 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10111 count = start + disk_io_size - iocb->ki_pos; 10112 encoded->len = count; 10113 encoded->unencoded_len = count; 10114 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10115 } 10116 free_extent_map(em); 10117 em = NULL; 10118 10119 if (disk_bytenr == EXTENT_MAP_HOLE) { 10120 unlock_extent(io_tree, start, lockend, &cached_state); 10121 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10122 unlocked = true; 10123 ret = iov_iter_zero(count, iter); 10124 if (ret != count) 10125 ret = -EFAULT; 10126 } else { 10127 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10128 &cached_state, disk_bytenr, 10129 disk_io_size, count, 10130 encoded->compression, 10131 &unlocked); 10132 } 10133 10134 out: 10135 if (ret >= 0) 10136 iocb->ki_pos += encoded->len; 10137 out_em: 10138 free_extent_map(em); 10139 out_unlock_extent: 10140 if (!unlocked) 10141 unlock_extent(io_tree, start, lockend, &cached_state); 10142 out_unlock_inode: 10143 if (!unlocked) 10144 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10145 return ret; 10146 } 10147 10148 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10149 const struct btrfs_ioctl_encoded_io_args *encoded) 10150 { 10151 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10152 struct btrfs_root *root = inode->root; 10153 struct btrfs_fs_info *fs_info = root->fs_info; 10154 struct extent_io_tree *io_tree = &inode->io_tree; 10155 struct extent_changeset *data_reserved = NULL; 10156 struct extent_state *cached_state = NULL; 10157 struct btrfs_ordered_extent *ordered; 10158 int compression; 10159 size_t orig_count; 10160 u64 start, end; 10161 u64 num_bytes, ram_bytes, disk_num_bytes; 10162 unsigned long nr_folios, i; 10163 struct folio **folios; 10164 struct btrfs_key ins; 10165 bool extent_reserved = false; 10166 struct extent_map *em; 10167 ssize_t ret; 10168 10169 switch (encoded->compression) { 10170 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10171 compression = BTRFS_COMPRESS_ZLIB; 10172 break; 10173 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10174 compression = BTRFS_COMPRESS_ZSTD; 10175 break; 10176 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10177 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10178 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10179 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10180 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10181 /* The sector size must match for LZO. */ 10182 if (encoded->compression - 10183 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10184 fs_info->sectorsize_bits) 10185 return -EINVAL; 10186 compression = BTRFS_COMPRESS_LZO; 10187 break; 10188 default: 10189 return -EINVAL; 10190 } 10191 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10192 return -EINVAL; 10193 10194 /* 10195 * Compressed extents should always have checksums, so error out if we 10196 * have a NOCOW file or inode was created while mounted with NODATASUM. 10197 */ 10198 if (inode->flags & BTRFS_INODE_NODATASUM) 10199 return -EINVAL; 10200 10201 orig_count = iov_iter_count(from); 10202 10203 /* The extent size must be sane. */ 10204 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10205 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10206 return -EINVAL; 10207 10208 /* 10209 * The compressed data must be smaller than the decompressed data. 
10210 * 10211 * It's of course possible for data to compress to larger or the same 10212 * size, but the buffered I/O path falls back to no compression for such 10213 * data, and we don't want to break any assumptions by creating these 10214 * extents. 10215 * 10216 * Note that this is less strict than the current check we have that the 10217 * compressed data must be at least one sector smaller than the 10218 * decompressed data. We only want to enforce the weaker requirement 10219 * from old kernels that it is at least one byte smaller. 10220 */ 10221 if (orig_count >= encoded->unencoded_len) 10222 return -EINVAL; 10223 10224 /* The extent must start on a sector boundary. */ 10225 start = iocb->ki_pos; 10226 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10227 return -EINVAL; 10228 10229 /* 10230 * The extent must end on a sector boundary. However, we allow a write 10231 * which ends at or extends i_size to have an unaligned length; we round 10232 * up the extent size and set i_size to the unaligned end. 10233 */ 10234 if (start + encoded->len < inode->vfs_inode.i_size && 10235 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10236 return -EINVAL; 10237 10238 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10239 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10240 return -EINVAL; 10241 10242 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10243 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10244 end = start + num_bytes - 1; 10245 10246 /* 10247 * If the extent cannot be inline, the compressed data on disk must be 10248 * sector-aligned. For convenience, we extend it with zeroes if it 10249 * isn't. 10250 */ 10251 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10252 nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10253 folios = kvcalloc(nr_folios, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10254 if (!folios) 10255 return -ENOMEM; 10256 for (i = 0; i < nr_folios; i++) { 10257 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10258 char *kaddr; 10259 10260 folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0); 10261 if (!folios[i]) { 10262 ret = -ENOMEM; 10263 goto out_folios; 10264 } 10265 kaddr = kmap_local_folio(folios[i], 0); 10266 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10267 kunmap_local(kaddr); 10268 ret = -EFAULT; 10269 goto out_folios; 10270 } 10271 if (bytes < PAGE_SIZE) 10272 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10273 kunmap_local(kaddr); 10274 } 10275 10276 for (;;) { 10277 struct btrfs_ordered_extent *ordered; 10278 10279 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10280 if (ret) 10281 goto out_folios; 10282 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10283 start >> PAGE_SHIFT, 10284 end >> PAGE_SHIFT); 10285 if (ret) 10286 goto out_folios; 10287 lock_extent(io_tree, start, end, &cached_state); 10288 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10289 if (!ordered && 10290 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10291 break; 10292 if (ordered) 10293 btrfs_put_ordered_extent(ordered); 10294 unlock_extent(io_tree, start, end, &cached_state); 10295 cond_resched(); 10296 } 10297 10298 /* 10299 * We don't use the higher-level delalloc space functions because our 10300 * num_bytes and disk_num_bytes are different. 
10301 */ 10302 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10303 if (ret) 10304 goto out_unlock; 10305 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10306 if (ret) 10307 goto out_free_data_space; 10308 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10309 false); 10310 if (ret) 10311 goto out_qgroup_free_data; 10312 10313 /* Try an inline extent first. */ 10314 if (encoded->unencoded_len == encoded->len && 10315 encoded->unencoded_offset == 0 && 10316 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) { 10317 ret = __cow_file_range_inline(inode, start, encoded->len, 10318 orig_count, compression, folios[0], 10319 true); 10320 if (ret <= 0) { 10321 if (ret == 0) 10322 ret = orig_count; 10323 goto out_delalloc_release; 10324 } 10325 } 10326 10327 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10328 disk_num_bytes, 0, 0, &ins, 1, 1); 10329 if (ret) 10330 goto out_delalloc_release; 10331 extent_reserved = true; 10332 10333 em = create_io_em(inode, start, num_bytes, 10334 start - encoded->unencoded_offset, ins.objectid, 10335 ins.offset, ins.offset, ram_bytes, compression, 10336 BTRFS_ORDERED_COMPRESSED); 10337 if (IS_ERR(em)) { 10338 ret = PTR_ERR(em); 10339 goto out_free_reserved; 10340 } 10341 free_extent_map(em); 10342 10343 ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes, 10344 ins.objectid, ins.offset, 10345 encoded->unencoded_offset, 10346 (1 << BTRFS_ORDERED_ENCODED) | 10347 (1 << BTRFS_ORDERED_COMPRESSED), 10348 compression); 10349 if (IS_ERR(ordered)) { 10350 btrfs_drop_extent_map_range(inode, start, end, false); 10351 ret = PTR_ERR(ordered); 10352 goto out_free_reserved; 10353 } 10354 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10355 10356 if (start + encoded->len > inode->vfs_inode.i_size) 10357 i_size_write(&inode->vfs_inode, start + encoded->len); 10358 10359 unlock_extent(io_tree, start, end, &cached_state); 10360 10361 btrfs_delalloc_release_extents(inode, num_bytes); 10362 10363 btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false); 10364 ret = orig_count; 10365 goto out; 10366 10367 out_free_reserved: 10368 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10369 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10370 out_delalloc_release: 10371 btrfs_delalloc_release_extents(inode, num_bytes); 10372 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10373 out_qgroup_free_data: 10374 if (ret < 0) 10375 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL); 10376 out_free_data_space: 10377 /* 10378 * If btrfs_reserve_extent() succeeded, then we already decremented 10379 * bytes_may_use. 10380 */ 10381 if (!extent_reserved) 10382 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10383 out_unlock: 10384 unlock_extent(io_tree, start, end, &cached_state); 10385 out_folios: 10386 for (i = 0; i < nr_folios; i++) { 10387 if (folios[i]) 10388 __folio_put(folios[i]); 10389 } 10390 kvfree(folios); 10391 out: 10392 if (ret >= 0) 10393 iocb->ki_pos += encoded->len; 10394 return ret; 10395 } 10396 10397 #ifdef CONFIG_SWAP 10398 /* 10399 * Add an entry indicating a block group or device which is pinned by a 10400 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10401 * negative errno on failure. 
10402 */ 10403 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10404 bool is_block_group) 10405 { 10406 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10407 struct btrfs_swapfile_pin *sp, *entry; 10408 struct rb_node **p; 10409 struct rb_node *parent = NULL; 10410 10411 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10412 if (!sp) 10413 return -ENOMEM; 10414 sp->ptr = ptr; 10415 sp->inode = inode; 10416 sp->is_block_group = is_block_group; 10417 sp->bg_extent_count = 1; 10418 10419 spin_lock(&fs_info->swapfile_pins_lock); 10420 p = &fs_info->swapfile_pins.rb_node; 10421 while (*p) { 10422 parent = *p; 10423 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10424 if (sp->ptr < entry->ptr || 10425 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10426 p = &(*p)->rb_left; 10427 } else if (sp->ptr > entry->ptr || 10428 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10429 p = &(*p)->rb_right; 10430 } else { 10431 if (is_block_group) 10432 entry->bg_extent_count++; 10433 spin_unlock(&fs_info->swapfile_pins_lock); 10434 kfree(sp); 10435 return 1; 10436 } 10437 } 10438 rb_link_node(&sp->node, parent, p); 10439 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10440 spin_unlock(&fs_info->swapfile_pins_lock); 10441 return 0; 10442 } 10443 10444 /* Free all of the entries pinned by this swapfile. */ 10445 static void btrfs_free_swapfile_pins(struct inode *inode) 10446 { 10447 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10448 struct btrfs_swapfile_pin *sp; 10449 struct rb_node *node, *next; 10450 10451 spin_lock(&fs_info->swapfile_pins_lock); 10452 node = rb_first(&fs_info->swapfile_pins); 10453 while (node) { 10454 next = rb_next(node); 10455 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10456 if (sp->inode == inode) { 10457 rb_erase(&sp->node, &fs_info->swapfile_pins); 10458 if (sp->is_block_group) { 10459 btrfs_dec_block_group_swap_extents(sp->ptr, 10460 sp->bg_extent_count); 10461 btrfs_put_block_group(sp->ptr); 10462 } 10463 kfree(sp); 10464 } 10465 node = next; 10466 } 10467 spin_unlock(&fs_info->swapfile_pins_lock); 10468 } 10469 10470 struct btrfs_swap_info { 10471 u64 start; 10472 u64 block_start; 10473 u64 block_len; 10474 u64 lowest_ppage; 10475 u64 highest_ppage; 10476 unsigned long nr_pages; 10477 int nr_extents; 10478 }; 10479 10480 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10481 struct btrfs_swap_info *bsi) 10482 { 10483 unsigned long nr_pages; 10484 unsigned long max_pages; 10485 u64 first_ppage, first_ppage_reported, next_ppage; 10486 int ret; 10487 10488 /* 10489 * Our swapfile may have had its size extended after the swap header was 10490 * written. In that case activating the swapfile should not go beyond 10491 * the max size set in the swap header. 
10492 */ 10493 if (bsi->nr_pages >= sis->max) 10494 return 0; 10495 10496 max_pages = sis->max - bsi->nr_pages; 10497 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10498 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10499 10500 if (first_ppage >= next_ppage) 10501 return 0; 10502 nr_pages = next_ppage - first_ppage; 10503 nr_pages = min(nr_pages, max_pages); 10504 10505 first_ppage_reported = first_ppage; 10506 if (bsi->start == 0) 10507 first_ppage_reported++; 10508 if (bsi->lowest_ppage > first_ppage_reported) 10509 bsi->lowest_ppage = first_ppage_reported; 10510 if (bsi->highest_ppage < (next_ppage - 1)) 10511 bsi->highest_ppage = next_ppage - 1; 10512 10513 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10514 if (ret < 0) 10515 return ret; 10516 bsi->nr_extents += ret; 10517 bsi->nr_pages += nr_pages; 10518 return 0; 10519 } 10520 10521 static void btrfs_swap_deactivate(struct file *file) 10522 { 10523 struct inode *inode = file_inode(file); 10524 10525 btrfs_free_swapfile_pins(inode); 10526 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10527 } 10528 10529 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10530 sector_t *span) 10531 { 10532 struct inode *inode = file_inode(file); 10533 struct btrfs_root *root = BTRFS_I(inode)->root; 10534 struct btrfs_fs_info *fs_info = root->fs_info; 10535 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10536 struct extent_state *cached_state = NULL; 10537 struct extent_map *em = NULL; 10538 struct btrfs_chunk_map *map = NULL; 10539 struct btrfs_device *device = NULL; 10540 struct btrfs_swap_info bsi = { 10541 .lowest_ppage = (sector_t)-1ULL, 10542 }; 10543 int ret = 0; 10544 u64 isize; 10545 u64 start; 10546 10547 /* 10548 * If the swap file was just created, make sure delalloc is done. If the 10549 * file changes again after this, the user is doing something stupid and 10550 * we don't really care. 10551 */ 10552 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10553 if (ret) 10554 return ret; 10555 10556 /* 10557 * The inode is locked, so these flags won't change after we check them. 10558 */ 10559 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10560 btrfs_warn(fs_info, "swapfile must not be compressed"); 10561 return -EINVAL; 10562 } 10563 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10564 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10565 return -EINVAL; 10566 } 10567 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10568 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10569 return -EINVAL; 10570 } 10571 10572 /* 10573 * Balance or device remove/replace/resize can move stuff around from 10574 * under us. The exclop protection makes sure they aren't running/won't 10575 * run concurrently while we are mapping the swap extents, and 10576 * fs_info->swapfile_pins prevents them from running while the swap 10577 * file is active and moving the extents. Note that this also prevents 10578 * a concurrent device add which isn't actually necessary, but it's not 10579 * really worth the trouble to allow it. 10580 */ 10581 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 10582 btrfs_warn(fs_info, 10583 "cannot activate swapfile while exclusive operation is running"); 10584 return -EBUSY; 10585 } 10586 10587 /* 10588 * Prevent snapshot creation while we are activating the swap file. 10589 * We do not want to race with snapshot creation. 
If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that a subvolume is marked for deletion but not yet
	 * removed. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because subvolume %llu is being deleted",
			   btrfs_root_id(root));
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->block_start == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
10650 */ 10651 btrfs_warn(fs_info, "swapfile must not be inline"); 10652 ret = -EINVAL; 10653 goto out; 10654 } 10655 if (extent_map_is_compressed(em)) { 10656 btrfs_warn(fs_info, "swapfile must not be compressed"); 10657 ret = -EINVAL; 10658 goto out; 10659 } 10660 10661 logical_block_start = em->block_start + (start - em->start); 10662 len = min(len, em->len - (start - em->start)); 10663 free_extent_map(em); 10664 em = NULL; 10665 10666 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true); 10667 if (ret < 0) { 10668 goto out; 10669 } else if (ret) { 10670 ret = 0; 10671 } else { 10672 btrfs_warn(fs_info, 10673 "swapfile must not be copy-on-write"); 10674 ret = -EINVAL; 10675 goto out; 10676 } 10677 10678 map = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10679 if (IS_ERR(map)) { 10680 ret = PTR_ERR(map); 10681 goto out; 10682 } 10683 10684 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10685 btrfs_warn(fs_info, 10686 "swapfile must have single data profile"); 10687 ret = -EINVAL; 10688 goto out; 10689 } 10690 10691 if (device == NULL) { 10692 device = map->stripes[0].dev; 10693 ret = btrfs_add_swapfile_pin(inode, device, false); 10694 if (ret == 1) 10695 ret = 0; 10696 else if (ret) 10697 goto out; 10698 } else if (device != map->stripes[0].dev) { 10699 btrfs_warn(fs_info, "swapfile must be on one device"); 10700 ret = -EINVAL; 10701 goto out; 10702 } 10703 10704 physical_block_start = (map->stripes[0].physical + 10705 (logical_block_start - map->start)); 10706 len = min(len, map->chunk_len - (logical_block_start - map->start)); 10707 btrfs_free_chunk_map(map); 10708 map = NULL; 10709 10710 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10711 if (!bg) { 10712 btrfs_warn(fs_info, 10713 "could not find block group containing swapfile"); 10714 ret = -EINVAL; 10715 goto out; 10716 } 10717 10718 if (!btrfs_inc_block_group_swap_extents(bg)) { 10719 btrfs_warn(fs_info, 10720 "block group for swapfile at %llu is read-only%s", 10721 bg->start, 10722 atomic_read(&fs_info->scrubs_running) ? 
10723 " (scrub running)" : ""); 10724 btrfs_put_block_group(bg); 10725 ret = -EINVAL; 10726 goto out; 10727 } 10728 10729 ret = btrfs_add_swapfile_pin(inode, bg, true); 10730 if (ret) { 10731 btrfs_put_block_group(bg); 10732 if (ret == 1) 10733 ret = 0; 10734 else 10735 goto out; 10736 } 10737 10738 if (bsi.block_len && 10739 bsi.block_start + bsi.block_len == physical_block_start) { 10740 bsi.block_len += len; 10741 } else { 10742 if (bsi.block_len) { 10743 ret = btrfs_add_swap_extent(sis, &bsi); 10744 if (ret) 10745 goto out; 10746 } 10747 bsi.start = start; 10748 bsi.block_start = physical_block_start; 10749 bsi.block_len = len; 10750 } 10751 10752 start += len; 10753 } 10754 10755 if (bsi.block_len) 10756 ret = btrfs_add_swap_extent(sis, &bsi); 10757 10758 out: 10759 if (!IS_ERR_OR_NULL(em)) 10760 free_extent_map(em); 10761 if (!IS_ERR_OR_NULL(map)) 10762 btrfs_free_chunk_map(map); 10763 10764 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10765 10766 if (ret) 10767 btrfs_swap_deactivate(file); 10768 10769 btrfs_drew_write_unlock(&root->snapshot_lock); 10770 10771 btrfs_exclop_finish(fs_info); 10772 10773 if (ret) 10774 return ret; 10775 10776 if (device) 10777 sis->bdev = device->bdev; 10778 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10779 sis->max = bsi.nr_pages; 10780 sis->pages = bsi.nr_pages - 1; 10781 sis->highest_bit = bsi.nr_pages - 1; 10782 return bsi.nr_extents; 10783 } 10784 #else 10785 static void btrfs_swap_deactivate(struct file *file) 10786 { 10787 } 10788 10789 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10790 sector_t *span) 10791 { 10792 return -EOPNOTSUPP; 10793 } 10794 #endif 10795 10796 /* 10797 * Update the number of bytes used in the VFS' inode. When we replace extents in 10798 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10799 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10800 * always get a correct value. 10801 */ 10802 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10803 const u64 add_bytes, 10804 const u64 del_bytes) 10805 { 10806 if (add_bytes == del_bytes) 10807 return; 10808 10809 spin_lock(&inode->lock); 10810 if (del_bytes > 0) 10811 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10812 if (add_bytes > 0) 10813 inode_add_bytes(&inode->vfs_inode, add_bytes); 10814 spin_unlock(&inode->lock); 10815 } 10816 10817 /* 10818 * Verify that there are no ordered extents for a given file range. 10819 * 10820 * @inode: The target inode. 10821 * @start: Start offset of the file range, should be sector size aligned. 10822 * @end: End offset (inclusive) of the file range, its value +1 should be 10823 * sector size aligned. 10824 * 10825 * This should typically be used for cases where we locked an inode's VFS lock in 10826 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10827 * we have flushed all delalloc in the range, we have waited for all ordered 10828 * extents in the range to complete and finally we have locked the file range in 10829 * the inode's io_tree. 
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value +1 should be
 *         sector size aligned.
 *
 * This should typically be used for cases where we have locked an inode's VFS
 * lock in exclusive mode, we have also locked the inode's i_mmap_lock in
 * exclusive mode, we have flushed all delalloc in the range, we have waited
 * for all ordered extents in the range to complete and finally we have locked
 * the file range in the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), btrfs_root_id(root),
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
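/*
 * Illustrative sketch (not part of the original file): roughly the state a
 * caller is expected to establish before asserting, per the comment above.
 * Helper signatures are assumed from their common form in this tree and may
 * differ between versions; treat this as a sketch, not a drop-in caller.
 */
static inline int example_prepare_and_assert_clean(struct btrfs_inode *inode,
						   u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;
	int ret;

	/* Take the VFS inode lock and the i_mmap_lock, both exclusive. */
	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);

	/* Flush delalloc and wait for ordered extents in the range. */
	ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
				       end + 1 - start);
	if (ret)
		goto out;

	/* Finally lock the file range in the io_tree, then assert. */
	lock_extent(&inode->io_tree, start, end, &cached_state);
	btrfs_assert_inode_range_clean(inode, start, end);
	unlock_extent(&inode->io_tree, start, end, &cached_state);
out:
	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
	return ret;
}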
/*
 * Find the first inode with a minimum number.
 *
 * @root:	The root to search for.
 * @min_ino:	The minimum inode number.
 *
 * Find the first inode in the @root with a number >= @min_ino and return it.
 * Returns NULL if no such inode is found.  A typical caller's iteration
 * pattern is sketched at the end of this file.
 */
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		inode = rb_entry(node, struct btrfs_inode, rb_node);
		if (min_ino < btrfs_ino(inode))
			node = node->rb_left;
		else if (min_ino > btrfs_ino(inode))
			node = node->rb_right;
		else
			break;
	}

	if (!node) {
		while (prev) {
			inode = rb_entry(prev, struct btrfs_inode, rb_node);
			if (min_ino <= btrfs_ino(inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}

	while (node) {
		inode = rb_entry(node, struct btrfs_inode, rb_node);
		if (igrab(&inode->vfs_inode)) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		min_ino = btrfs_ino(inode) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= btrfs_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};
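/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern for btrfs_find_first_inode() walks every inode of a root by
 * restarting the search just past the inode it was handed back, dropping
 * each reference with iput() when done.  The function name is hypothetical.
 */
static inline void example_for_each_inode(struct btrfs_root *root)
{
	struct btrfs_inode *inode;
	u64 min_ino = 0;

	while ((inode = btrfs_find_first_inode(root, min_ino))) {
		/* ... operate on the inode here ... */
		min_ino = btrfs_ino(inode) + 1;
		iput(&inode->vfs_inode);
	}
}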