1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <crypto/hash.h> 7 #include <linux/kernel.h> 8 #include <linux/bio.h> 9 #include <linux/blk-cgroup.h> 10 #include <linux/file.h> 11 #include <linux/fs.h> 12 #include <linux/pagemap.h> 13 #include <linux/highmem.h> 14 #include <linux/time.h> 15 #include <linux/init.h> 16 #include <linux/string.h> 17 #include <linux/backing-dev.h> 18 #include <linux/writeback.h> 19 #include <linux/compat.h> 20 #include <linux/xattr.h> 21 #include <linux/posix_acl.h> 22 #include <linux/falloc.h> 23 #include <linux/slab.h> 24 #include <linux/ratelimit.h> 25 #include <linux/btrfs.h> 26 #include <linux/blkdev.h> 27 #include <linux/posix_acl_xattr.h> 28 #include <linux/uio.h> 29 #include <linux/magic.h> 30 #include <linux/iversion.h> 31 #include <linux/swap.h> 32 #include <linux/migrate.h> 33 #include <linux/sched/mm.h> 34 #include <linux/iomap.h> 35 #include <linux/unaligned.h> 36 #include <linux/fsverity.h> 37 #include "misc.h" 38 #include "ctree.h" 39 #include "disk-io.h" 40 #include "transaction.h" 41 #include "btrfs_inode.h" 42 #include "ordered-data.h" 43 #include "xattr.h" 44 #include "tree-log.h" 45 #include "bio.h" 46 #include "compression.h" 47 #include "locking.h" 48 #include "props.h" 49 #include "qgroup.h" 50 #include "delalloc-space.h" 51 #include "block-group.h" 52 #include "space-info.h" 53 #include "zoned.h" 54 #include "subpage.h" 55 #include "inode-item.h" 56 #include "fs.h" 57 #include "accessors.h" 58 #include "extent-tree.h" 59 #include "root-tree.h" 60 #include "defrag.h" 61 #include "dir-item.h" 62 #include "file-item.h" 63 #include "uuid-tree.h" 64 #include "ioctl.h" 65 #include "file.h" 66 #include "acl.h" 67 #include "relocation.h" 68 #include "verity.h" 69 #include "super.h" 70 #include "orphan.h" 71 #include "backref.h" 72 #include "raid-stripe-tree.h" 73 #include "fiemap.h" 74 75 struct btrfs_iget_args { 76 u64 ino; 77 struct btrfs_root *root; 78 }; 79 80 struct btrfs_rename_ctx { 81 /* Output field. Stores the index number of the old directory entry. */ 82 u64 index; 83 }; 84 85 /* 86 * Used by data_reloc_print_warning_inode() to pass needed info for filename 87 * resolution and output of error message. 88 */ 89 struct data_reloc_warn { 90 struct btrfs_path path; 91 struct btrfs_fs_info *fs_info; 92 u64 extent_item_size; 93 u64 logical; 94 int mirror_num; 95 }; 96 97 /* 98 * For the file_extent_tree, we want to hold the inode lock when we lookup and 99 * update the disk_i_size, but lockdep will complain because our io_tree we hold 100 * the tree lock and get the inode lock when setting delalloc. These two things 101 * are unrelated, so make a class for the file_extent_tree so we don't get the 102 * two locking patterns mixed up. 103 */ 104 static struct lock_class_key file_extent_tree_class; 105 106 static const struct inode_operations btrfs_dir_inode_operations; 107 static const struct inode_operations btrfs_symlink_inode_operations; 108 static const struct inode_operations btrfs_special_inode_operations; 109 static const struct inode_operations btrfs_file_inode_operations; 110 static const struct address_space_operations btrfs_aops; 111 static const struct file_operations btrfs_dir_file_operations; 112 113 static struct kmem_cache *btrfs_inode_cachep; 114 115 static int btrfs_setsize(struct inode *inode, struct iattr *attr); 116 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback); 117 118 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 119 struct folio *locked_folio, u64 start, 120 u64 end, struct writeback_control *wbc, 121 bool pages_dirty); 122 123 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, 124 u64 root, void *warn_ctx) 125 { 126 struct data_reloc_warn *warn = warn_ctx; 127 struct btrfs_fs_info *fs_info = warn->fs_info; 128 struct extent_buffer *eb; 129 struct btrfs_inode_item *inode_item; 130 struct inode_fs_paths *ipath = NULL; 131 struct btrfs_root *local_root; 132 struct btrfs_key key; 133 unsigned int nofs_flag; 134 u32 nlink; 135 int ret; 136 137 local_root = btrfs_get_fs_root(fs_info, root, true); 138 if (IS_ERR(local_root)) { 139 ret = PTR_ERR(local_root); 140 goto err; 141 } 142 143 /* This makes the path point to (inum INODE_ITEM ioff). */ 144 key.objectid = inum; 145 key.type = BTRFS_INODE_ITEM_KEY; 146 key.offset = 0; 147 148 ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0); 149 if (ret) { 150 btrfs_put_root(local_root); 151 btrfs_release_path(&warn->path); 152 goto err; 153 } 154 155 eb = warn->path.nodes[0]; 156 inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item); 157 nlink = btrfs_inode_nlink(eb, inode_item); 158 btrfs_release_path(&warn->path); 159 160 nofs_flag = memalloc_nofs_save(); 161 ipath = init_ipath(4096, local_root, &warn->path); 162 memalloc_nofs_restore(nofs_flag); 163 if (IS_ERR(ipath)) { 164 btrfs_put_root(local_root); 165 ret = PTR_ERR(ipath); 166 ipath = NULL; 167 /* 168 * -ENOMEM, not a critical error, just output an generic error 169 * without filename. 170 */ 171 btrfs_warn(fs_info, 172 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu", 173 warn->logical, warn->mirror_num, root, inum, offset); 174 return ret; 175 } 176 ret = paths_from_inode(inum, ipath); 177 if (ret < 0) 178 goto err; 179 180 /* 181 * We deliberately ignore the bit ipath might have been too small to 182 * hold all of the paths here 183 */ 184 for (int i = 0; i < ipath->fspath->elem_cnt; i++) { 185 btrfs_warn(fs_info, 186 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)", 187 warn->logical, warn->mirror_num, root, inum, offset, 188 fs_info->sectorsize, nlink, 189 (char *)(unsigned long)ipath->fspath->val[i]); 190 } 191 192 btrfs_put_root(local_root); 193 free_ipath(ipath); 194 return 0; 195 196 err: 197 btrfs_warn(fs_info, 198 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d", 199 warn->logical, warn->mirror_num, root, inum, offset, ret); 200 201 free_ipath(ipath); 202 return ret; 203 } 204 205 /* 206 * Do extra user-friendly error output (e.g. lookup all the affected files). 207 * 208 * Return true if we succeeded doing the backref lookup. 209 * Return false if such lookup failed, and has to fallback to the old error message. 210 */ 211 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off, 212 const u8 *csum, const u8 *csum_expected, 213 int mirror_num) 214 { 215 struct btrfs_fs_info *fs_info = inode->root->fs_info; 216 struct btrfs_path path = { 0 }; 217 struct btrfs_key found_key = { 0 }; 218 struct extent_buffer *eb; 219 struct btrfs_extent_item *ei; 220 const u32 csum_size = fs_info->csum_size; 221 u64 logical; 222 u64 flags; 223 u32 item_size; 224 int ret; 225 226 mutex_lock(&fs_info->reloc_mutex); 227 logical = btrfs_get_reloc_bg_bytenr(fs_info); 228 mutex_unlock(&fs_info->reloc_mutex); 229 230 if (logical == U64_MAX) { 231 btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation"); 232 btrfs_warn_rl(fs_info, 233 "csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 234 btrfs_root_id(inode->root), btrfs_ino(inode), file_off, 235 CSUM_FMT_VALUE(csum_size, csum), 236 CSUM_FMT_VALUE(csum_size, csum_expected), 237 mirror_num); 238 return; 239 } 240 241 logical += file_off; 242 btrfs_warn_rl(fs_info, 243 "csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 244 btrfs_root_id(inode->root), 245 btrfs_ino(inode), file_off, logical, 246 CSUM_FMT_VALUE(csum_size, csum), 247 CSUM_FMT_VALUE(csum_size, csum_expected), 248 mirror_num); 249 250 ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags); 251 if (ret < 0) { 252 btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d", 253 logical, ret); 254 return; 255 } 256 eb = path.nodes[0]; 257 ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item); 258 item_size = btrfs_item_size(eb, path.slots[0]); 259 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 260 unsigned long ptr = 0; 261 u64 ref_root; 262 u8 ref_level; 263 264 while (true) { 265 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, 266 item_size, &ref_root, 267 &ref_level); 268 if (ret < 0) { 269 btrfs_warn_rl(fs_info, 270 "failed to resolve tree backref for logical %llu: %d", 271 logical, ret); 272 break; 273 } 274 if (ret > 0) 275 break; 276 277 btrfs_warn_rl(fs_info, 278 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu", 279 logical, mirror_num, 280 (ref_level ? "node" : "leaf"), 281 ref_level, ref_root); 282 } 283 btrfs_release_path(&path); 284 } else { 285 struct btrfs_backref_walk_ctx ctx = { 0 }; 286 struct data_reloc_warn reloc_warn = { 0 }; 287 288 btrfs_release_path(&path); 289 290 ctx.bytenr = found_key.objectid; 291 ctx.extent_item_pos = logical - found_key.objectid; 292 ctx.fs_info = fs_info; 293 294 reloc_warn.logical = logical; 295 reloc_warn.extent_item_size = found_key.offset; 296 reloc_warn.mirror_num = mirror_num; 297 reloc_warn.fs_info = fs_info; 298 299 iterate_extent_inodes(&ctx, true, 300 data_reloc_print_warning_inode, &reloc_warn); 301 } 302 } 303 304 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, 305 u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num) 306 { 307 struct btrfs_root *root = inode->root; 308 const u32 csum_size = root->fs_info->csum_size; 309 310 /* For data reloc tree, it's better to do a backref lookup instead. */ 311 if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID) 312 return print_data_reloc_error(inode, logical_start, csum, 313 csum_expected, mirror_num); 314 315 /* Output without objectid, which is more meaningful */ 316 if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) { 317 btrfs_warn_rl(root->fs_info, 318 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 319 btrfs_root_id(root), btrfs_ino(inode), 320 logical_start, 321 CSUM_FMT_VALUE(csum_size, csum), 322 CSUM_FMT_VALUE(csum_size, csum_expected), 323 mirror_num); 324 } else { 325 btrfs_warn_rl(root->fs_info, 326 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 327 btrfs_root_id(root), btrfs_ino(inode), 328 logical_start, 329 CSUM_FMT_VALUE(csum_size, csum), 330 CSUM_FMT_VALUE(csum_size, csum_expected), 331 mirror_num); 332 } 333 } 334 335 /* 336 * Lock inode i_rwsem based on arguments passed. 337 * 338 * ilock_flags can have the following bit set: 339 * 340 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode 341 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt 342 * return -EAGAIN 343 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock 344 */ 345 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags) 346 { 347 if (ilock_flags & BTRFS_ILOCK_SHARED) { 348 if (ilock_flags & BTRFS_ILOCK_TRY) { 349 if (!inode_trylock_shared(&inode->vfs_inode)) 350 return -EAGAIN; 351 else 352 return 0; 353 } 354 inode_lock_shared(&inode->vfs_inode); 355 } else { 356 if (ilock_flags & BTRFS_ILOCK_TRY) { 357 if (!inode_trylock(&inode->vfs_inode)) 358 return -EAGAIN; 359 else 360 return 0; 361 } 362 inode_lock(&inode->vfs_inode); 363 } 364 if (ilock_flags & BTRFS_ILOCK_MMAP) 365 down_write(&inode->i_mmap_lock); 366 return 0; 367 } 368 369 /* 370 * Unock inode i_rwsem. 371 * 372 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock() 373 * to decide whether the lock acquired is shared or exclusive. 374 */ 375 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) 376 { 377 if (ilock_flags & BTRFS_ILOCK_MMAP) 378 up_write(&inode->i_mmap_lock); 379 if (ilock_flags & BTRFS_ILOCK_SHARED) 380 inode_unlock_shared(&inode->vfs_inode); 381 else 382 inode_unlock(&inode->vfs_inode); 383 } 384 385 /* 386 * Cleanup all submitted ordered extents in specified range to handle errors 387 * from the btrfs_run_delalloc_range() callback. 388 * 389 * NOTE: caller must ensure that when an error happens, it can not call 390 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING 391 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata 392 * to be released, which we want to happen only when finishing the ordered 393 * extent (btrfs_finish_ordered_io()). 394 */ 395 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, 396 u64 offset, u64 bytes) 397 { 398 unsigned long index = offset >> PAGE_SHIFT; 399 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; 400 struct folio *folio; 401 402 while (index <= end_index) { 403 folio = filemap_get_folio(inode->vfs_inode.i_mapping, index); 404 index++; 405 if (IS_ERR(folio)) 406 continue; 407 408 /* 409 * Here we just clear all Ordered bits for every page in the 410 * range, then btrfs_mark_ordered_io_finished() will handle 411 * the ordered extent accounting for the range. 412 */ 413 btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio, 414 offset, bytes); 415 folio_put(folio); 416 } 417 418 return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false); 419 } 420 421 static int btrfs_dirty_inode(struct btrfs_inode *inode); 422 423 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 424 struct btrfs_new_inode_args *args) 425 { 426 int err; 427 428 if (args->default_acl) { 429 err = __btrfs_set_acl(trans, args->inode, args->default_acl, 430 ACL_TYPE_DEFAULT); 431 if (err) 432 return err; 433 } 434 if (args->acl) { 435 err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); 436 if (err) 437 return err; 438 } 439 if (!args->default_acl && !args->acl) 440 cache_no_acl(args->inode); 441 return btrfs_xattr_security_init(trans, args->inode, args->dir, 442 &args->dentry->d_name); 443 } 444 445 /* 446 * this does all the hard work for inserting an inline extent into 447 * the btree. The caller should have done a btrfs_drop_extents so that 448 * no overlapping inline items exist in the btree 449 */ 450 static int insert_inline_extent(struct btrfs_trans_handle *trans, 451 struct btrfs_path *path, 452 struct btrfs_inode *inode, bool extent_inserted, 453 size_t size, size_t compressed_size, 454 int compress_type, 455 struct folio *compressed_folio, 456 bool update_i_size) 457 { 458 struct btrfs_root *root = inode->root; 459 struct extent_buffer *leaf; 460 const u32 sectorsize = trans->fs_info->sectorsize; 461 char *kaddr; 462 unsigned long ptr; 463 struct btrfs_file_extent_item *ei; 464 int ret; 465 size_t cur_size = size; 466 u64 i_size; 467 468 /* 469 * The decompressed size must still be no larger than a sector. Under 470 * heavy race, we can have size == 0 passed in, but that shouldn't be a 471 * big deal and we can continue the insertion. 472 */ 473 ASSERT(size <= sectorsize); 474 475 /* 476 * The compressed size also needs to be no larger than a sector. 477 * That's also why we only need one page as the parameter. 478 */ 479 if (compressed_folio) 480 ASSERT(compressed_size <= sectorsize); 481 else 482 ASSERT(compressed_size == 0); 483 484 if (compressed_size && compressed_folio) 485 cur_size = compressed_size; 486 487 if (!extent_inserted) { 488 struct btrfs_key key; 489 size_t datasize; 490 491 key.objectid = btrfs_ino(inode); 492 key.offset = 0; 493 key.type = BTRFS_EXTENT_DATA_KEY; 494 495 datasize = btrfs_file_extent_calc_inline_size(cur_size); 496 ret = btrfs_insert_empty_item(trans, root, path, &key, 497 datasize); 498 if (ret) 499 goto fail; 500 } 501 leaf = path->nodes[0]; 502 ei = btrfs_item_ptr(leaf, path->slots[0], 503 struct btrfs_file_extent_item); 504 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 505 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 506 btrfs_set_file_extent_encryption(leaf, ei, 0); 507 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 508 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 509 ptr = btrfs_file_extent_inline_start(ei); 510 511 if (compress_type != BTRFS_COMPRESS_NONE) { 512 kaddr = kmap_local_folio(compressed_folio, 0); 513 write_extent_buffer(leaf, kaddr, ptr, compressed_size); 514 kunmap_local(kaddr); 515 516 btrfs_set_file_extent_compression(leaf, ei, 517 compress_type); 518 } else { 519 struct folio *folio; 520 521 folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0); 522 ASSERT(!IS_ERR(folio)); 523 btrfs_set_file_extent_compression(leaf, ei, 0); 524 kaddr = kmap_local_folio(folio, 0); 525 write_extent_buffer(leaf, kaddr, ptr, size); 526 kunmap_local(kaddr); 527 folio_put(folio); 528 } 529 btrfs_release_path(path); 530 531 /* 532 * We align size to sectorsize for inline extents just for simplicity 533 * sake. 534 */ 535 ret = btrfs_inode_set_file_extent_range(inode, 0, 536 ALIGN(size, root->fs_info->sectorsize)); 537 if (ret) 538 goto fail; 539 540 /* 541 * We're an inline extent, so nobody can extend the file past i_size 542 * without locking a page we already have locked. 543 * 544 * We must do any i_size and inode updates before we unlock the pages. 545 * Otherwise we could end up racing with unlink. 546 */ 547 i_size = i_size_read(&inode->vfs_inode); 548 if (update_i_size && size > i_size) { 549 i_size_write(&inode->vfs_inode, size); 550 i_size = size; 551 } 552 inode->disk_i_size = i_size; 553 554 fail: 555 return ret; 556 } 557 558 static bool can_cow_file_range_inline(struct btrfs_inode *inode, 559 u64 offset, u64 size, 560 size_t compressed_size) 561 { 562 struct btrfs_fs_info *fs_info = inode->root->fs_info; 563 u64 data_len = (compressed_size ?: size); 564 565 /* Inline extents must start at offset 0. */ 566 if (offset != 0) 567 return false; 568 569 /* 570 * Due to the page size limit, for subpage we can only trigger the 571 * writeback for the dirty sectors of page, that means data writeback 572 * is doing more writeback than what we want. 573 * 574 * This is especially unexpected for some call sites like fallocate, 575 * where we only increase i_size after everything is done. 576 * This means we can trigger inline extent even if we didn't want to. 577 * So here we skip inline extent creation completely. 578 */ 579 if (fs_info->sectorsize != PAGE_SIZE) 580 return false; 581 582 /* Inline extents are limited to sectorsize. */ 583 if (size > fs_info->sectorsize) 584 return false; 585 586 /* We cannot exceed the maximum inline data size. */ 587 if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 588 return false; 589 590 /* We cannot exceed the user specified max_inline size. */ 591 if (data_len > fs_info->max_inline) 592 return false; 593 594 /* Inline extents must be the entirety of the file. */ 595 if (size < i_size_read(&inode->vfs_inode)) 596 return false; 597 598 return true; 599 } 600 601 /* 602 * conditionally insert an inline extent into the file. This 603 * does the checks required to make sure the data is small enough 604 * to fit as an inline extent. 605 * 606 * If being used directly, you must have already checked we're allowed to cow 607 * the range by getting true from can_cow_file_range_inline(). 608 */ 609 static noinline int __cow_file_range_inline(struct btrfs_inode *inode, 610 u64 size, size_t compressed_size, 611 int compress_type, 612 struct folio *compressed_folio, 613 bool update_i_size) 614 { 615 struct btrfs_drop_extents_args drop_args = { 0 }; 616 struct btrfs_root *root = inode->root; 617 struct btrfs_fs_info *fs_info = root->fs_info; 618 struct btrfs_trans_handle *trans; 619 u64 data_len = (compressed_size ?: size); 620 int ret; 621 struct btrfs_path *path; 622 623 path = btrfs_alloc_path(); 624 if (!path) 625 return -ENOMEM; 626 627 trans = btrfs_join_transaction(root); 628 if (IS_ERR(trans)) { 629 btrfs_free_path(path); 630 return PTR_ERR(trans); 631 } 632 trans->block_rsv = &inode->block_rsv; 633 634 drop_args.path = path; 635 drop_args.start = 0; 636 drop_args.end = fs_info->sectorsize; 637 drop_args.drop_cache = true; 638 drop_args.replace_extent = true; 639 drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len); 640 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 641 if (ret) { 642 btrfs_abort_transaction(trans, ret); 643 goto out; 644 } 645 646 ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted, 647 size, compressed_size, compress_type, 648 compressed_folio, update_i_size); 649 if (ret && ret != -ENOSPC) { 650 btrfs_abort_transaction(trans, ret); 651 goto out; 652 } else if (ret == -ENOSPC) { 653 ret = 1; 654 goto out; 655 } 656 657 btrfs_update_inode_bytes(inode, size, drop_args.bytes_found); 658 ret = btrfs_update_inode(trans, inode); 659 if (ret && ret != -ENOSPC) { 660 btrfs_abort_transaction(trans, ret); 661 goto out; 662 } else if (ret == -ENOSPC) { 663 ret = 1; 664 goto out; 665 } 666 667 btrfs_set_inode_full_sync(inode); 668 out: 669 /* 670 * Don't forget to free the reserved space, as for inlined extent 671 * it won't count as data extent, free them directly here. 672 * And at reserve time, it's always aligned to page size, so 673 * just free one page here. 674 */ 675 btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL); 676 btrfs_free_path(path); 677 btrfs_end_transaction(trans); 678 return ret; 679 } 680 681 static noinline int cow_file_range_inline(struct btrfs_inode *inode, 682 struct folio *locked_folio, 683 u64 offset, u64 end, 684 size_t compressed_size, 685 int compress_type, 686 struct folio *compressed_folio, 687 bool update_i_size) 688 { 689 struct extent_state *cached = NULL; 690 unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 691 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED; 692 u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1); 693 int ret; 694 695 if (!can_cow_file_range_inline(inode, offset, size, compressed_size)) 696 return 1; 697 698 lock_extent(&inode->io_tree, offset, end, &cached); 699 ret = __cow_file_range_inline(inode, size, compressed_size, 700 compress_type, compressed_folio, 701 update_i_size); 702 if (ret > 0) { 703 unlock_extent(&inode->io_tree, offset, end, &cached); 704 return ret; 705 } 706 707 /* 708 * In the successful case (ret == 0 here), cow_file_range will return 1. 709 * 710 * Quite a bit further up the callstack in extent_writepage(), ret == 1 711 * is treated as a short circuited success and does not unlock the folio, 712 * so we must do it here. 713 * 714 * In the failure case, the locked_folio does get unlocked by 715 * btrfs_folio_end_all_writers, which asserts that it is still locked 716 * at that point, so we must *not* unlock it here. 717 * 718 * The other two callsites in compress_file_range do not have a 719 * locked_folio, so they are not relevant to this logic. 720 */ 721 if (ret == 0) 722 locked_folio = NULL; 723 724 extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached, 725 clear_flags, PAGE_UNLOCK | 726 PAGE_START_WRITEBACK | PAGE_END_WRITEBACK); 727 return ret; 728 } 729 730 struct async_extent { 731 u64 start; 732 u64 ram_size; 733 u64 compressed_size; 734 struct folio **folios; 735 unsigned long nr_folios; 736 int compress_type; 737 struct list_head list; 738 }; 739 740 struct async_chunk { 741 struct btrfs_inode *inode; 742 struct folio *locked_folio; 743 u64 start; 744 u64 end; 745 blk_opf_t write_flags; 746 struct list_head extents; 747 struct cgroup_subsys_state *blkcg_css; 748 struct btrfs_work work; 749 struct async_cow *async_cow; 750 }; 751 752 struct async_cow { 753 atomic_t num_chunks; 754 struct async_chunk chunks[]; 755 }; 756 757 static noinline int add_async_extent(struct async_chunk *cow, 758 u64 start, u64 ram_size, 759 u64 compressed_size, 760 struct folio **folios, 761 unsigned long nr_folios, 762 int compress_type) 763 { 764 struct async_extent *async_extent; 765 766 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 767 if (!async_extent) 768 return -ENOMEM; 769 async_extent->start = start; 770 async_extent->ram_size = ram_size; 771 async_extent->compressed_size = compressed_size; 772 async_extent->folios = folios; 773 async_extent->nr_folios = nr_folios; 774 async_extent->compress_type = compress_type; 775 list_add_tail(&async_extent->list, &cow->extents); 776 return 0; 777 } 778 779 /* 780 * Check if the inode needs to be submitted to compression, based on mount 781 * options, defragmentation, properties or heuristics. 782 */ 783 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, 784 u64 end) 785 { 786 struct btrfs_fs_info *fs_info = inode->root->fs_info; 787 788 if (!btrfs_inode_can_compress(inode)) { 789 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), 790 KERN_ERR "BTRFS: unexpected compression for ino %llu\n", 791 btrfs_ino(inode)); 792 return 0; 793 } 794 /* 795 * Only enable sector perfect compression for experimental builds. 796 * 797 * This is a big feature change for subpage cases, and can hit 798 * different corner cases, so only limit this feature for 799 * experimental build for now. 800 * 801 * ETA for moving this out of experimental builds is 6.15. 802 */ 803 if (fs_info->sectorsize < PAGE_SIZE && 804 !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) { 805 if (!PAGE_ALIGNED(start) || 806 !PAGE_ALIGNED(end + 1)) 807 return 0; 808 } 809 810 /* force compress */ 811 if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) 812 return 1; 813 /* defrag ioctl */ 814 if (inode->defrag_compress) 815 return 1; 816 /* bad compression ratios */ 817 if (inode->flags & BTRFS_INODE_NOCOMPRESS) 818 return 0; 819 if (btrfs_test_opt(fs_info, COMPRESS) || 820 inode->flags & BTRFS_INODE_COMPRESS || 821 inode->prop_compress) 822 return btrfs_compress_heuristic(inode, start, end); 823 return 0; 824 } 825 826 static inline void inode_should_defrag(struct btrfs_inode *inode, 827 u64 start, u64 end, u64 num_bytes, u32 small_write) 828 { 829 /* If this is a small write inside eof, kick off a defrag */ 830 if (num_bytes < small_write && 831 (start > 0 || end + 1 < inode->disk_i_size)) 832 btrfs_add_inode_defrag(inode, small_write); 833 } 834 835 static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 836 { 837 unsigned long end_index = end >> PAGE_SHIFT; 838 struct folio *folio; 839 int ret = 0; 840 841 for (unsigned long index = start >> PAGE_SHIFT; 842 index <= end_index; index++) { 843 folio = filemap_get_folio(inode->i_mapping, index); 844 if (IS_ERR(folio)) { 845 if (!ret) 846 ret = PTR_ERR(folio); 847 continue; 848 } 849 btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start, 850 end + 1 - start); 851 folio_put(folio); 852 } 853 return ret; 854 } 855 856 /* 857 * Work queue call back to started compression on a file and pages. 858 * 859 * This is done inside an ordered work queue, and the compression is spread 860 * across many cpus. The actual IO submission is step two, and the ordered work 861 * queue takes care of making sure that happens in the same order things were 862 * put onto the queue by writepages and friends. 863 * 864 * If this code finds it can't get good compression, it puts an entry onto the 865 * work queue to write the uncompressed bytes. This makes sure that both 866 * compressed inodes and uncompressed inodes are written in the same order that 867 * the flusher thread sent them down. 868 */ 869 static void compress_file_range(struct btrfs_work *work) 870 { 871 struct async_chunk *async_chunk = 872 container_of(work, struct async_chunk, work); 873 struct btrfs_inode *inode = async_chunk->inode; 874 struct btrfs_fs_info *fs_info = inode->root->fs_info; 875 struct address_space *mapping = inode->vfs_inode.i_mapping; 876 u64 blocksize = fs_info->sectorsize; 877 u64 start = async_chunk->start; 878 u64 end = async_chunk->end; 879 u64 actual_end; 880 u64 i_size; 881 int ret = 0; 882 struct folio **folios; 883 unsigned long nr_folios; 884 unsigned long total_compressed = 0; 885 unsigned long total_in = 0; 886 unsigned int poff; 887 int i; 888 int compress_type = fs_info->compress_type; 889 890 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); 891 892 /* 893 * We need to call clear_page_dirty_for_io on each page in the range. 894 * Otherwise applications with the file mmap'd can wander in and change 895 * the page contents while we are compressing them. 896 */ 897 ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); 898 899 /* 900 * All the folios should have been locked thus no failure. 901 * 902 * And even if some folios are missing, btrfs_compress_folios() 903 * would handle them correctly, so here just do an ASSERT() check for 904 * early logic errors. 905 */ 906 ASSERT(ret == 0); 907 908 /* 909 * We need to save i_size before now because it could change in between 910 * us evaluating the size and assigning it. This is because we lock and 911 * unlock the page in truncate and fallocate, and then modify the i_size 912 * later on. 913 * 914 * The barriers are to emulate READ_ONCE, remove that once i_size_read 915 * does that for us. 916 */ 917 barrier(); 918 i_size = i_size_read(&inode->vfs_inode); 919 barrier(); 920 actual_end = min_t(u64, i_size, end + 1); 921 again: 922 folios = NULL; 923 nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; 924 nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES); 925 926 /* 927 * we don't want to send crud past the end of i_size through 928 * compression, that's just a waste of CPU time. So, if the 929 * end of the file is before the start of our current 930 * requested range of bytes, we bail out to the uncompressed 931 * cleanup code that can deal with all of this. 932 * 933 * It isn't really the fastest way to fix things, but this is a 934 * very uncommon corner. 935 */ 936 if (actual_end <= start) 937 goto cleanup_and_bail_uncompressed; 938 939 total_compressed = actual_end - start; 940 941 /* 942 * Skip compression for a small file range(<=blocksize) that 943 * isn't an inline extent, since it doesn't save disk space at all. 944 */ 945 if (total_compressed <= blocksize && 946 (start > 0 || end + 1 < inode->disk_i_size)) 947 goto cleanup_and_bail_uncompressed; 948 949 total_compressed = min_t(unsigned long, total_compressed, 950 BTRFS_MAX_UNCOMPRESSED); 951 total_in = 0; 952 ret = 0; 953 954 /* 955 * We do compression for mount -o compress and when the inode has not 956 * been flagged as NOCOMPRESS. This flag can change at any time if we 957 * discover bad compression ratios. 958 */ 959 if (!inode_need_compress(inode, start, end)) 960 goto cleanup_and_bail_uncompressed; 961 962 folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS); 963 if (!folios) { 964 /* 965 * Memory allocation failure is not a fatal error, we can fall 966 * back to uncompressed code. 967 */ 968 goto cleanup_and_bail_uncompressed; 969 } 970 971 if (inode->defrag_compress) 972 compress_type = inode->defrag_compress; 973 else if (inode->prop_compress) 974 compress_type = inode->prop_compress; 975 976 /* Compression level is applied here. */ 977 ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4), 978 mapping, start, folios, &nr_folios, &total_in, 979 &total_compressed); 980 if (ret) 981 goto mark_incompressible; 982 983 /* 984 * Zero the tail end of the last page, as we might be sending it down 985 * to disk. 986 */ 987 poff = offset_in_page(total_compressed); 988 if (poff) 989 folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff); 990 991 /* 992 * Try to create an inline extent. 993 * 994 * If we didn't compress the entire range, try to create an uncompressed 995 * inline extent, else a compressed one. 996 * 997 * Check cow_file_range() for why we don't even try to create inline 998 * extent for the subpage case. 999 */ 1000 if (total_in < actual_end) 1001 ret = cow_file_range_inline(inode, NULL, start, end, 0, 1002 BTRFS_COMPRESS_NONE, NULL, false); 1003 else 1004 ret = cow_file_range_inline(inode, NULL, start, end, total_compressed, 1005 compress_type, folios[0], false); 1006 if (ret <= 0) { 1007 if (ret < 0) 1008 mapping_set_error(mapping, -EIO); 1009 goto free_pages; 1010 } 1011 1012 /* 1013 * We aren't doing an inline extent. Round the compressed size up to a 1014 * block size boundary so the allocator does sane things. 1015 */ 1016 total_compressed = ALIGN(total_compressed, blocksize); 1017 1018 /* 1019 * One last check to make sure the compression is really a win, compare 1020 * the page count read with the blocks on disk, compression must free at 1021 * least one sector. 1022 */ 1023 total_in = round_up(total_in, fs_info->sectorsize); 1024 if (total_compressed + blocksize > total_in) 1025 goto mark_incompressible; 1026 1027 /* 1028 * The async work queues will take care of doing actual allocation on 1029 * disk for these compressed pages, and will submit the bios. 1030 */ 1031 ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios, 1032 nr_folios, compress_type); 1033 BUG_ON(ret); 1034 if (start + total_in < end) { 1035 start += total_in; 1036 cond_resched(); 1037 goto again; 1038 } 1039 return; 1040 1041 mark_incompressible: 1042 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) 1043 inode->flags |= BTRFS_INODE_NOCOMPRESS; 1044 cleanup_and_bail_uncompressed: 1045 ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, 1046 BTRFS_COMPRESS_NONE); 1047 BUG_ON(ret); 1048 free_pages: 1049 if (folios) { 1050 for (i = 0; i < nr_folios; i++) { 1051 WARN_ON(folios[i]->mapping); 1052 btrfs_free_compr_folio(folios[i]); 1053 } 1054 kfree(folios); 1055 } 1056 } 1057 1058 static void free_async_extent_pages(struct async_extent *async_extent) 1059 { 1060 int i; 1061 1062 if (!async_extent->folios) 1063 return; 1064 1065 for (i = 0; i < async_extent->nr_folios; i++) { 1066 WARN_ON(async_extent->folios[i]->mapping); 1067 btrfs_free_compr_folio(async_extent->folios[i]); 1068 } 1069 kfree(async_extent->folios); 1070 async_extent->nr_folios = 0; 1071 async_extent->folios = NULL; 1072 } 1073 1074 static void submit_uncompressed_range(struct btrfs_inode *inode, 1075 struct async_extent *async_extent, 1076 struct folio *locked_folio) 1077 { 1078 u64 start = async_extent->start; 1079 u64 end = async_extent->start + async_extent->ram_size - 1; 1080 int ret; 1081 struct writeback_control wbc = { 1082 .sync_mode = WB_SYNC_ALL, 1083 .range_start = start, 1084 .range_end = end, 1085 .no_cgroup_owner = 1, 1086 }; 1087 1088 wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); 1089 ret = run_delalloc_cow(inode, locked_folio, start, end, 1090 &wbc, false); 1091 wbc_detach_inode(&wbc); 1092 if (ret < 0) { 1093 btrfs_cleanup_ordered_extents(inode, start, end - start + 1); 1094 if (locked_folio) 1095 btrfs_folio_end_lock(inode->root->fs_info, locked_folio, 1096 start, async_extent->ram_size); 1097 btrfs_err_rl(inode->root->fs_info, 1098 "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d", 1099 __func__, btrfs_root_id(inode->root), 1100 btrfs_ino(inode), start, async_extent->ram_size, ret); 1101 } 1102 } 1103 1104 static void submit_one_async_extent(struct async_chunk *async_chunk, 1105 struct async_extent *async_extent, 1106 u64 *alloc_hint) 1107 { 1108 struct btrfs_inode *inode = async_chunk->inode; 1109 struct extent_io_tree *io_tree = &inode->io_tree; 1110 struct btrfs_root *root = inode->root; 1111 struct btrfs_fs_info *fs_info = root->fs_info; 1112 struct btrfs_ordered_extent *ordered; 1113 struct btrfs_file_extent file_extent; 1114 struct btrfs_key ins; 1115 struct folio *locked_folio = NULL; 1116 struct extent_state *cached = NULL; 1117 struct extent_map *em; 1118 int ret = 0; 1119 u64 start = async_extent->start; 1120 u64 end = async_extent->start + async_extent->ram_size - 1; 1121 1122 if (async_chunk->blkcg_css) 1123 kthread_associate_blkcg(async_chunk->blkcg_css); 1124 1125 /* 1126 * If async_chunk->locked_folio is in the async_extent range, we need to 1127 * handle it. 1128 */ 1129 if (async_chunk->locked_folio) { 1130 u64 locked_folio_start = folio_pos(async_chunk->locked_folio); 1131 u64 locked_folio_end = locked_folio_start + 1132 folio_size(async_chunk->locked_folio) - 1; 1133 1134 if (!(start >= locked_folio_end || end <= locked_folio_start)) 1135 locked_folio = async_chunk->locked_folio; 1136 } 1137 1138 if (async_extent->compress_type == BTRFS_COMPRESS_NONE) { 1139 submit_uncompressed_range(inode, async_extent, locked_folio); 1140 goto done; 1141 } 1142 1143 ret = btrfs_reserve_extent(root, async_extent->ram_size, 1144 async_extent->compressed_size, 1145 async_extent->compressed_size, 1146 0, *alloc_hint, &ins, 1, 1); 1147 if (ret) { 1148 /* 1149 * We can't reserve contiguous space for the compressed size. 1150 * Unlikely, but it's possible that we could have enough 1151 * non-contiguous space for the uncompressed size instead. So 1152 * fall back to uncompressed. 1153 */ 1154 submit_uncompressed_range(inode, async_extent, locked_folio); 1155 goto done; 1156 } 1157 1158 lock_extent(io_tree, start, end, &cached); 1159 1160 /* Here we're doing allocation and writeback of the compressed pages */ 1161 file_extent.disk_bytenr = ins.objectid; 1162 file_extent.disk_num_bytes = ins.offset; 1163 file_extent.ram_bytes = async_extent->ram_size; 1164 file_extent.num_bytes = async_extent->ram_size; 1165 file_extent.offset = 0; 1166 file_extent.compression = async_extent->compress_type; 1167 1168 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED); 1169 if (IS_ERR(em)) { 1170 ret = PTR_ERR(em); 1171 goto out_free_reserve; 1172 } 1173 free_extent_map(em); 1174 1175 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 1176 1 << BTRFS_ORDERED_COMPRESSED); 1177 if (IS_ERR(ordered)) { 1178 btrfs_drop_extent_map_range(inode, start, end, false); 1179 ret = PTR_ERR(ordered); 1180 goto out_free_reserve; 1181 } 1182 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1183 1184 /* Clear dirty, set writeback and unlock the pages. */ 1185 extent_clear_unlock_delalloc(inode, start, end, 1186 NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC, 1187 PAGE_UNLOCK | PAGE_START_WRITEBACK); 1188 btrfs_submit_compressed_write(ordered, 1189 async_extent->folios, /* compressed_folios */ 1190 async_extent->nr_folios, 1191 async_chunk->write_flags, true); 1192 *alloc_hint = ins.objectid + ins.offset; 1193 done: 1194 if (async_chunk->blkcg_css) 1195 kthread_associate_blkcg(NULL); 1196 kfree(async_extent); 1197 return; 1198 1199 out_free_reserve: 1200 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1201 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1202 mapping_set_error(inode->vfs_inode.i_mapping, -EIO); 1203 extent_clear_unlock_delalloc(inode, start, end, 1204 NULL, &cached, 1205 EXTENT_LOCKED | EXTENT_DELALLOC | 1206 EXTENT_DELALLOC_NEW | 1207 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 1208 PAGE_UNLOCK | PAGE_START_WRITEBACK | 1209 PAGE_END_WRITEBACK); 1210 free_async_extent_pages(async_extent); 1211 if (async_chunk->blkcg_css) 1212 kthread_associate_blkcg(NULL); 1213 btrfs_debug(fs_info, 1214 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d", 1215 btrfs_root_id(root), btrfs_ino(inode), start, 1216 async_extent->ram_size, ret); 1217 kfree(async_extent); 1218 } 1219 1220 u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, 1221 u64 num_bytes) 1222 { 1223 struct extent_map_tree *em_tree = &inode->extent_tree; 1224 struct extent_map *em; 1225 u64 alloc_hint = 0; 1226 1227 read_lock(&em_tree->lock); 1228 em = search_extent_mapping(em_tree, start, num_bytes); 1229 if (em) { 1230 /* 1231 * if block start isn't an actual block number then find the 1232 * first block in this inode and use that as a hint. If that 1233 * block is also bogus then just don't worry about it. 1234 */ 1235 if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) { 1236 free_extent_map(em); 1237 em = search_extent_mapping(em_tree, 0, 0); 1238 if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE) 1239 alloc_hint = extent_map_block_start(em); 1240 if (em) 1241 free_extent_map(em); 1242 } else { 1243 alloc_hint = extent_map_block_start(em); 1244 free_extent_map(em); 1245 } 1246 } 1247 read_unlock(&em_tree->lock); 1248 1249 return alloc_hint; 1250 } 1251 1252 /* 1253 * when extent_io.c finds a delayed allocation range in the file, 1254 * the call backs end up in this code. The basic idea is to 1255 * allocate extents on disk for the range, and create ordered data structs 1256 * in ram to track those extents. 1257 * 1258 * locked_folio is the folio that writepage had locked already. We use 1259 * it to make sure we don't do extra locks or unlocks. 1260 * 1261 * When this function fails, it unlocks all pages except @locked_folio. 1262 * 1263 * When this function successfully creates an inline extent, it returns 1 and 1264 * unlocks all pages including locked_folio and starts I/O on them. 1265 * (In reality inline extents are limited to a single page, so locked_folio is 1266 * the only page handled anyway). 1267 * 1268 * When this function succeed and creates a normal extent, the page locking 1269 * status depends on the passed in flags: 1270 * 1271 * - If @keep_locked is set, all pages are kept locked. 1272 * - Else all pages except for @locked_folio are unlocked. 1273 * 1274 * When a failure happens in the second or later iteration of the 1275 * while-loop, the ordered extents created in previous iterations are kept 1276 * intact. So, the caller must clean them up by calling 1277 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for 1278 * example. 1279 */ 1280 static noinline int cow_file_range(struct btrfs_inode *inode, 1281 struct folio *locked_folio, u64 start, 1282 u64 end, u64 *done_offset, 1283 bool keep_locked, bool no_inline) 1284 { 1285 struct btrfs_root *root = inode->root; 1286 struct btrfs_fs_info *fs_info = root->fs_info; 1287 struct extent_state *cached = NULL; 1288 u64 alloc_hint = 0; 1289 u64 orig_start = start; 1290 u64 num_bytes; 1291 u64 cur_alloc_size = 0; 1292 u64 min_alloc_size; 1293 u64 blocksize = fs_info->sectorsize; 1294 struct btrfs_key ins; 1295 struct extent_map *em; 1296 unsigned clear_bits; 1297 unsigned long page_ops; 1298 int ret = 0; 1299 1300 if (btrfs_is_free_space_inode(inode)) { 1301 ret = -EINVAL; 1302 goto out_unlock; 1303 } 1304 1305 num_bytes = ALIGN(end - start + 1, blocksize); 1306 num_bytes = max(blocksize, num_bytes); 1307 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); 1308 1309 inode_should_defrag(inode, start, end, num_bytes, SZ_64K); 1310 1311 if (!no_inline) { 1312 /* lets try to make an inline extent */ 1313 ret = cow_file_range_inline(inode, locked_folio, start, end, 0, 1314 BTRFS_COMPRESS_NONE, NULL, false); 1315 if (ret <= 0) { 1316 /* 1317 * We succeeded, return 1 so the caller knows we're done 1318 * with this page and already handled the IO. 1319 * 1320 * If there was an error then cow_file_range_inline() has 1321 * already done the cleanup. 1322 */ 1323 if (ret == 0) 1324 ret = 1; 1325 goto done; 1326 } 1327 } 1328 1329 alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes); 1330 1331 /* 1332 * We're not doing compressed IO, don't unlock the first page (which 1333 * the caller expects to stay locked), don't clear any dirty bits and 1334 * don't set any writeback bits. 1335 * 1336 * Do set the Ordered (Private2) bit so we know this page was properly 1337 * setup for writepage. 1338 */ 1339 page_ops = (keep_locked ? 0 : PAGE_UNLOCK); 1340 page_ops |= PAGE_SET_ORDERED; 1341 1342 /* 1343 * Relocation relies on the relocated extents to have exactly the same 1344 * size as the original extents. Normally writeback for relocation data 1345 * extents follows a NOCOW path because relocation preallocates the 1346 * extents. However, due to an operation such as scrub turning a block 1347 * group to RO mode, it may fallback to COW mode, so we must make sure 1348 * an extent allocated during COW has exactly the requested size and can 1349 * not be split into smaller extents, otherwise relocation breaks and 1350 * fails during the stage where it updates the bytenr of file extent 1351 * items. 1352 */ 1353 if (btrfs_is_data_reloc_root(root)) 1354 min_alloc_size = num_bytes; 1355 else 1356 min_alloc_size = fs_info->sectorsize; 1357 1358 while (num_bytes > 0) { 1359 struct btrfs_ordered_extent *ordered; 1360 struct btrfs_file_extent file_extent; 1361 1362 ret = btrfs_reserve_extent(root, num_bytes, num_bytes, 1363 min_alloc_size, 0, alloc_hint, 1364 &ins, 1, 1); 1365 if (ret == -EAGAIN) { 1366 /* 1367 * btrfs_reserve_extent only returns -EAGAIN for zoned 1368 * file systems, which is an indication that there are 1369 * no active zones to allocate from at the moment. 1370 * 1371 * If this is the first loop iteration, wait for at 1372 * least one zone to finish before retrying the 1373 * allocation. Otherwise ask the caller to write out 1374 * the already allocated blocks before coming back to 1375 * us, or return -ENOSPC if it can't handle retries. 1376 */ 1377 ASSERT(btrfs_is_zoned(fs_info)); 1378 if (start == orig_start) { 1379 wait_on_bit_io(&inode->root->fs_info->flags, 1380 BTRFS_FS_NEED_ZONE_FINISH, 1381 TASK_UNINTERRUPTIBLE); 1382 continue; 1383 } 1384 if (done_offset) { 1385 *done_offset = start - 1; 1386 return 0; 1387 } 1388 ret = -ENOSPC; 1389 } 1390 if (ret < 0) 1391 goto out_unlock; 1392 cur_alloc_size = ins.offset; 1393 1394 file_extent.disk_bytenr = ins.objectid; 1395 file_extent.disk_num_bytes = ins.offset; 1396 file_extent.num_bytes = ins.offset; 1397 file_extent.ram_bytes = ins.offset; 1398 file_extent.offset = 0; 1399 file_extent.compression = BTRFS_COMPRESS_NONE; 1400 1401 /* 1402 * Locked range will be released either during error clean up or 1403 * after the whole range is finished. 1404 */ 1405 lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1, 1406 &cached); 1407 1408 em = btrfs_create_io_em(inode, start, &file_extent, 1409 BTRFS_ORDERED_REGULAR); 1410 if (IS_ERR(em)) { 1411 unlock_extent(&inode->io_tree, start, 1412 start + cur_alloc_size - 1, &cached); 1413 ret = PTR_ERR(em); 1414 goto out_reserve; 1415 } 1416 free_extent_map(em); 1417 1418 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 1419 1 << BTRFS_ORDERED_REGULAR); 1420 if (IS_ERR(ordered)) { 1421 unlock_extent(&inode->io_tree, start, 1422 start + cur_alloc_size - 1, &cached); 1423 ret = PTR_ERR(ordered); 1424 goto out_drop_extent_cache; 1425 } 1426 1427 if (btrfs_is_data_reloc_root(root)) { 1428 ret = btrfs_reloc_clone_csums(ordered); 1429 1430 /* 1431 * Only drop cache here, and process as normal. 1432 * 1433 * We must not allow extent_clear_unlock_delalloc() 1434 * at out_unlock label to free meta of this ordered 1435 * extent, as its meta should be freed by 1436 * btrfs_finish_ordered_io(). 1437 * 1438 * So we must continue until @start is increased to 1439 * skip current ordered extent. 1440 */ 1441 if (ret) 1442 btrfs_drop_extent_map_range(inode, start, 1443 start + cur_alloc_size - 1, 1444 false); 1445 } 1446 btrfs_put_ordered_extent(ordered); 1447 1448 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1449 1450 if (num_bytes < cur_alloc_size) 1451 num_bytes = 0; 1452 else 1453 num_bytes -= cur_alloc_size; 1454 alloc_hint = ins.objectid + ins.offset; 1455 start += cur_alloc_size; 1456 cur_alloc_size = 0; 1457 1458 /* 1459 * btrfs_reloc_clone_csums() error, since start is increased 1460 * extent_clear_unlock_delalloc() at out_unlock label won't 1461 * free metadata of current ordered extent, we're OK to exit. 1462 */ 1463 if (ret) 1464 goto out_unlock; 1465 } 1466 extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached, 1467 EXTENT_LOCKED | EXTENT_DELALLOC, page_ops); 1468 done: 1469 if (done_offset) 1470 *done_offset = end; 1471 return ret; 1472 1473 out_drop_extent_cache: 1474 btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false); 1475 out_reserve: 1476 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1477 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1478 out_unlock: 1479 /* 1480 * Now, we have three regions to clean up: 1481 * 1482 * |-------(1)----|---(2)---|-------------(3)----------| 1483 * `- orig_start `- start `- start + cur_alloc_size `- end 1484 * 1485 * We process each region below. 1486 */ 1487 1488 /* 1489 * For the range (1). We have already instantiated the ordered extents 1490 * for this region. They are cleaned up by 1491 * btrfs_cleanup_ordered_extents() in e.g, 1492 * btrfs_run_delalloc_range(). 1493 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV 1494 * are also handled by the cleanup function. 1495 * 1496 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and 1497 * finish the writeback of the involved folios, which will be never submitted. 1498 */ 1499 if (orig_start < start) { 1500 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; 1501 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; 1502 1503 if (!locked_folio) 1504 mapping_set_error(inode->vfs_inode.i_mapping, ret); 1505 extent_clear_unlock_delalloc(inode, orig_start, start - 1, 1506 locked_folio, NULL, clear_bits, page_ops); 1507 } 1508 1509 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 1510 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; 1511 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; 1512 1513 /* 1514 * For the range (2). If we reserved an extent for our delalloc range 1515 * (or a subrange) and failed to create the respective ordered extent, 1516 * then it means that when we reserved the extent we decremented the 1517 * extent's size from the data space_info's bytes_may_use counter and 1518 * incremented the space_info's bytes_reserved counter by the same 1519 * amount. We must make sure extent_clear_unlock_delalloc() does not try 1520 * to decrement again the data space_info's bytes_may_use counter, 1521 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV. 1522 */ 1523 if (cur_alloc_size) { 1524 extent_clear_unlock_delalloc(inode, start, 1525 start + cur_alloc_size - 1, 1526 locked_folio, &cached, clear_bits, 1527 page_ops); 1528 btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); 1529 } 1530 1531 /* 1532 * For the range (3). We never touched the region. In addition to the 1533 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data 1534 * space_info's bytes_may_use counter, reserved in 1535 * btrfs_check_data_free_space(). 1536 */ 1537 if (start + cur_alloc_size < end) { 1538 clear_bits |= EXTENT_CLEAR_DATA_RESV; 1539 extent_clear_unlock_delalloc(inode, start + cur_alloc_size, 1540 end, locked_folio, 1541 &cached, clear_bits, page_ops); 1542 btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size, 1543 end - start - cur_alloc_size + 1, NULL); 1544 } 1545 btrfs_err_rl(fs_info, 1546 "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d", 1547 __func__, btrfs_root_id(inode->root), 1548 btrfs_ino(inode), orig_start, end + 1 - orig_start, ret); 1549 return ret; 1550 } 1551 1552 /* 1553 * Phase two of compressed writeback. This is the ordered portion of the code, 1554 * which only gets called in the order the work was queued. We walk all the 1555 * async extents created by compress_file_range and send them down to the disk. 1556 * 1557 * If called with @do_free == true then it'll try to finish the work and free 1558 * the work struct eventually. 1559 */ 1560 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free) 1561 { 1562 struct async_chunk *async_chunk = container_of(work, struct async_chunk, 1563 work); 1564 struct btrfs_fs_info *fs_info = btrfs_work_owner(work); 1565 struct async_extent *async_extent; 1566 unsigned long nr_pages; 1567 u64 alloc_hint = 0; 1568 1569 if (do_free) { 1570 struct async_cow *async_cow; 1571 1572 btrfs_add_delayed_iput(async_chunk->inode); 1573 if (async_chunk->blkcg_css) 1574 css_put(async_chunk->blkcg_css); 1575 1576 async_cow = async_chunk->async_cow; 1577 if (atomic_dec_and_test(&async_cow->num_chunks)) 1578 kvfree(async_cow); 1579 return; 1580 } 1581 1582 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> 1583 PAGE_SHIFT; 1584 1585 while (!list_empty(&async_chunk->extents)) { 1586 async_extent = list_entry(async_chunk->extents.next, 1587 struct async_extent, list); 1588 list_del(&async_extent->list); 1589 submit_one_async_extent(async_chunk, async_extent, &alloc_hint); 1590 } 1591 1592 /* atomic_sub_return implies a barrier */ 1593 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < 1594 5 * SZ_1M) 1595 cond_wake_up_nomb(&fs_info->async_submit_wait); 1596 } 1597 1598 static bool run_delalloc_compressed(struct btrfs_inode *inode, 1599 struct folio *locked_folio, u64 start, 1600 u64 end, struct writeback_control *wbc) 1601 { 1602 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1603 struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); 1604 struct async_cow *ctx; 1605 struct async_chunk *async_chunk; 1606 unsigned long nr_pages; 1607 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); 1608 int i; 1609 unsigned nofs_flag; 1610 const blk_opf_t write_flags = wbc_to_write_flags(wbc); 1611 1612 nofs_flag = memalloc_nofs_save(); 1613 ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL); 1614 memalloc_nofs_restore(nofs_flag); 1615 if (!ctx) 1616 return false; 1617 1618 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 1619 1620 async_chunk = ctx->chunks; 1621 atomic_set(&ctx->num_chunks, num_chunks); 1622 1623 for (i = 0; i < num_chunks; i++) { 1624 u64 cur_end = min(end, start + SZ_512K - 1); 1625 1626 /* 1627 * igrab is called higher up in the call chain, take only the 1628 * lightweight reference for the callback lifetime 1629 */ 1630 ihold(&inode->vfs_inode); 1631 async_chunk[i].async_cow = ctx; 1632 async_chunk[i].inode = inode; 1633 async_chunk[i].start = start; 1634 async_chunk[i].end = cur_end; 1635 async_chunk[i].write_flags = write_flags; 1636 INIT_LIST_HEAD(&async_chunk[i].extents); 1637 1638 /* 1639 * The locked_folio comes all the way from writepage and its 1640 * the original folio we were actually given. As we spread 1641 * this large delalloc region across multiple async_chunk 1642 * structs, only the first struct needs a pointer to 1643 * locked_folio. 1644 * 1645 * This way we don't need racey decisions about who is supposed 1646 * to unlock it. 1647 */ 1648 if (locked_folio) { 1649 /* 1650 * Depending on the compressibility, the pages might or 1651 * might not go through async. We want all of them to 1652 * be accounted against wbc once. Let's do it here 1653 * before the paths diverge. wbc accounting is used 1654 * only for foreign writeback detection and doesn't 1655 * need full accuracy. Just account the whole thing 1656 * against the first page. 1657 */ 1658 wbc_account_cgroup_owner(wbc, locked_folio, 1659 cur_end - start); 1660 async_chunk[i].locked_folio = locked_folio; 1661 locked_folio = NULL; 1662 } else { 1663 async_chunk[i].locked_folio = NULL; 1664 } 1665 1666 if (blkcg_css != blkcg_root_css) { 1667 css_get(blkcg_css); 1668 async_chunk[i].blkcg_css = blkcg_css; 1669 async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT; 1670 } else { 1671 async_chunk[i].blkcg_css = NULL; 1672 } 1673 1674 btrfs_init_work(&async_chunk[i].work, compress_file_range, 1675 submit_compressed_extents); 1676 1677 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); 1678 atomic_add(nr_pages, &fs_info->async_delalloc_pages); 1679 1680 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); 1681 1682 start = cur_end + 1; 1683 } 1684 return true; 1685 } 1686 1687 /* 1688 * Run the delalloc range from start to end, and write back any dirty pages 1689 * covered by the range. 1690 */ 1691 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 1692 struct folio *locked_folio, u64 start, 1693 u64 end, struct writeback_control *wbc, 1694 bool pages_dirty) 1695 { 1696 u64 done_offset = end; 1697 int ret; 1698 1699 while (start <= end) { 1700 ret = cow_file_range(inode, locked_folio, start, end, 1701 &done_offset, true, false); 1702 if (ret) 1703 return ret; 1704 extent_write_locked_range(&inode->vfs_inode, locked_folio, 1705 start, done_offset, wbc, pages_dirty); 1706 start = done_offset + 1; 1707 } 1708 1709 return 1; 1710 } 1711 1712 static int fallback_to_cow(struct btrfs_inode *inode, 1713 struct folio *locked_folio, const u64 start, 1714 const u64 end) 1715 { 1716 const bool is_space_ino = btrfs_is_free_space_inode(inode); 1717 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root); 1718 const u64 range_bytes = end + 1 - start; 1719 struct extent_io_tree *io_tree = &inode->io_tree; 1720 struct extent_state *cached_state = NULL; 1721 u64 range_start = start; 1722 u64 count; 1723 int ret; 1724 1725 /* 1726 * If EXTENT_NORESERVE is set it means that when the buffered write was 1727 * made we had not enough available data space and therefore we did not 1728 * reserve data space for it, since we though we could do NOCOW for the 1729 * respective file range (either there is prealloc extent or the inode 1730 * has the NOCOW bit set). 1731 * 1732 * However when we need to fallback to COW mode (because for example the 1733 * block group for the corresponding extent was turned to RO mode by a 1734 * scrub or relocation) we need to do the following: 1735 * 1736 * 1) We increment the bytes_may_use counter of the data space info. 1737 * If COW succeeds, it allocates a new data extent and after doing 1738 * that it decrements the space info's bytes_may_use counter and 1739 * increments its bytes_reserved counter by the same amount (we do 1740 * this at btrfs_add_reserved_bytes()). So we need to increment the 1741 * bytes_may_use counter to compensate (when space is reserved at 1742 * buffered write time, the bytes_may_use counter is incremented); 1743 * 1744 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1745 * that if the COW path fails for any reason, it decrements (through 1746 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1747 * data space info, which we incremented in the step above. 1748 * 1749 * If we need to fallback to cow and the inode corresponds to a free 1750 * space cache inode or an inode of the data relocation tree, we must 1751 * also increment bytes_may_use of the data space_info for the same 1752 * reason. Space caches and relocated data extents always get a prealloc 1753 * extent for them, however scrub or balance may have set the block 1754 * group that contains that extent to RO mode and therefore force COW 1755 * when starting writeback. 1756 */ 1757 lock_extent(io_tree, start, end, &cached_state); 1758 count = count_range_bits(io_tree, &range_start, end, range_bytes, 1759 EXTENT_NORESERVE, 0, NULL); 1760 if (count > 0 || is_space_ino || is_reloc_ino) { 1761 u64 bytes = count; 1762 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1763 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1764 1765 if (is_space_ino || is_reloc_ino) 1766 bytes = range_bytes; 1767 1768 spin_lock(&sinfo->lock); 1769 btrfs_space_info_update_bytes_may_use(sinfo, bytes); 1770 spin_unlock(&sinfo->lock); 1771 1772 if (count > 0) 1773 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1774 NULL); 1775 } 1776 unlock_extent(io_tree, start, end, &cached_state); 1777 1778 /* 1779 * Don't try to create inline extents, as a mix of inline extent that 1780 * is written out and unlocked directly and a normal NOCOW extent 1781 * doesn't work. 1782 */ 1783 ret = cow_file_range(inode, locked_folio, start, end, NULL, false, 1784 true); 1785 ASSERT(ret != 1); 1786 return ret; 1787 } 1788 1789 struct can_nocow_file_extent_args { 1790 /* Input fields. */ 1791 1792 /* Start file offset of the range we want to NOCOW. */ 1793 u64 start; 1794 /* End file offset (inclusive) of the range we want to NOCOW. */ 1795 u64 end; 1796 bool writeback_path; 1797 /* 1798 * Free the path passed to can_nocow_file_extent() once it's not needed 1799 * anymore. 1800 */ 1801 bool free_path; 1802 1803 /* 1804 * Output fields. Only set when can_nocow_file_extent() returns 1. 1805 * The expected file extent for the NOCOW write. 1806 */ 1807 struct btrfs_file_extent file_extent; 1808 }; 1809 1810 /* 1811 * Check if we can NOCOW the file extent that the path points to. 1812 * This function may return with the path released, so the caller should check 1813 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1814 * 1815 * Returns: < 0 on error 1816 * 0 if we can not NOCOW 1817 * 1 if we can NOCOW 1818 */ 1819 static int can_nocow_file_extent(struct btrfs_path *path, 1820 struct btrfs_key *key, 1821 struct btrfs_inode *inode, 1822 struct can_nocow_file_extent_args *args) 1823 { 1824 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1825 struct extent_buffer *leaf = path->nodes[0]; 1826 struct btrfs_root *root = inode->root; 1827 struct btrfs_file_extent_item *fi; 1828 struct btrfs_root *csum_root; 1829 u64 io_start; 1830 u64 extent_end; 1831 u8 extent_type; 1832 int can_nocow = 0; 1833 int ret = 0; 1834 bool nowait = path->nowait; 1835 1836 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1837 extent_type = btrfs_file_extent_type(leaf, fi); 1838 1839 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1840 goto out; 1841 1842 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1843 extent_type == BTRFS_FILE_EXTENT_REG) 1844 goto out; 1845 1846 /* 1847 * If the extent was created before the generation where the last snapshot 1848 * for its subvolume was created, then this implies the extent is shared, 1849 * hence we must COW. 1850 */ 1851 if (btrfs_file_extent_generation(leaf, fi) <= 1852 btrfs_root_last_snapshot(&root->root_item)) 1853 goto out; 1854 1855 /* An explicit hole, must COW. */ 1856 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 1857 goto out; 1858 1859 /* Compressed/encrypted/encoded extents must be COWed. */ 1860 if (btrfs_file_extent_compression(leaf, fi) || 1861 btrfs_file_extent_encryption(leaf, fi) || 1862 btrfs_file_extent_other_encoding(leaf, fi)) 1863 goto out; 1864 1865 extent_end = btrfs_file_extent_end(path); 1866 1867 args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1868 args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1869 args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1870 args->file_extent.offset = btrfs_file_extent_offset(leaf, fi); 1871 args->file_extent.compression = btrfs_file_extent_compression(leaf, fi); 1872 1873 /* 1874 * The following checks can be expensive, as they need to take other 1875 * locks and do btree or rbtree searches, so release the path to avoid 1876 * blocking other tasks for too long. 1877 */ 1878 btrfs_release_path(path); 1879 1880 ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset, 1881 args->file_extent.disk_bytenr, path); 1882 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1883 if (ret != 0) 1884 goto out; 1885 1886 if (args->free_path) { 1887 /* 1888 * We don't need the path anymore, plus through the 1889 * btrfs_lookup_csums_list() call below we will end up allocating 1890 * another path. So free the path to avoid unnecessary extra 1891 * memory usage. 1892 */ 1893 btrfs_free_path(path); 1894 path = NULL; 1895 } 1896 1897 /* If there are pending snapshots for this root, we must COW. */ 1898 if (args->writeback_path && !is_freespace_inode && 1899 atomic_read(&root->snapshot_force_cow)) 1900 goto out; 1901 1902 args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start; 1903 args->file_extent.offset += args->start - key->offset; 1904 io_start = args->file_extent.disk_bytenr + args->file_extent.offset; 1905 1906 /* 1907 * Force COW if csums exist in the range. This ensures that csums for a 1908 * given extent are either valid or do not exist. 1909 */ 1910 1911 csum_root = btrfs_csum_root(root->fs_info, io_start); 1912 ret = btrfs_lookup_csums_list(csum_root, io_start, 1913 io_start + args->file_extent.num_bytes - 1, 1914 NULL, nowait); 1915 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1916 if (ret != 0) 1917 goto out; 1918 1919 can_nocow = 1; 1920 out: 1921 if (args->free_path && path) 1922 btrfs_free_path(path); 1923 1924 return ret < 0 ? ret : can_nocow; 1925 } 1926 1927 /* 1928 * Cleanup the dirty folios which will never be submitted due to error. 1929 * 1930 * When running a delalloc range, we may need to split the ranges (due to 1931 * fragmentation or NOCOW). If we hit an error in the later part, we will error 1932 * out and previously successfully executed range will never be submitted, thus 1933 * we have to cleanup those folios by clearing their dirty flag, starting and 1934 * finishing the writeback. 1935 */ 1936 static void cleanup_dirty_folios(struct btrfs_inode *inode, 1937 struct folio *locked_folio, 1938 u64 start, u64 end, int error) 1939 { 1940 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1941 struct address_space *mapping = inode->vfs_inode.i_mapping; 1942 pgoff_t start_index = start >> PAGE_SHIFT; 1943 pgoff_t end_index = end >> PAGE_SHIFT; 1944 u32 len; 1945 1946 ASSERT(end + 1 - start < U32_MAX); 1947 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 1948 IS_ALIGNED(end + 1, fs_info->sectorsize)); 1949 len = end + 1 - start; 1950 1951 /* 1952 * Handle the locked folio first. 1953 * The btrfs_folio_clamp_*() helpers can handle range out of the folio case. 1954 */ 1955 btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len); 1956 1957 for (pgoff_t index = start_index; index <= end_index; index++) { 1958 struct folio *folio; 1959 1960 /* Already handled at the beginning. */ 1961 if (index == locked_folio->index) 1962 continue; 1963 folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS); 1964 /* Cache already dropped, no need to do any cleanup. */ 1965 if (IS_ERR(folio)) 1966 continue; 1967 btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len); 1968 folio_unlock(folio); 1969 folio_put(folio); 1970 } 1971 mapping_set_error(mapping, error); 1972 } 1973 1974 /* 1975 * when nowcow writeback call back. This checks for snapshots or COW copies 1976 * of the extents that exist in the file, and COWs the file as required. 1977 * 1978 * If no cow copies or snapshots exist, we write directly to the existing 1979 * blocks on disk 1980 */ 1981 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1982 struct folio *locked_folio, 1983 const u64 start, const u64 end) 1984 { 1985 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1986 struct btrfs_root *root = inode->root; 1987 struct btrfs_path *path; 1988 u64 cow_start = (u64)-1; 1989 /* 1990 * If not 0, represents the inclusive end of the last fallback_to_cow() 1991 * range. Only for error handling. 1992 */ 1993 u64 cow_end = 0; 1994 u64 cur_offset = start; 1995 int ret; 1996 bool check_prev = true; 1997 u64 ino = btrfs_ino(inode); 1998 struct can_nocow_file_extent_args nocow_args = { 0 }; 1999 2000 /* 2001 * Normally on a zoned device we're only doing COW writes, but in case 2002 * of relocation on a zoned filesystem serializes I/O so that we're only 2003 * writing sequentially and can end up here as well. 2004 */ 2005 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root)); 2006 2007 path = btrfs_alloc_path(); 2008 if (!path) { 2009 ret = -ENOMEM; 2010 goto error; 2011 } 2012 2013 nocow_args.end = end; 2014 nocow_args.writeback_path = true; 2015 2016 while (cur_offset <= end) { 2017 struct btrfs_block_group *nocow_bg = NULL; 2018 struct btrfs_ordered_extent *ordered; 2019 struct btrfs_key found_key; 2020 struct btrfs_file_extent_item *fi; 2021 struct extent_buffer *leaf; 2022 struct extent_state *cached_state = NULL; 2023 u64 extent_end; 2024 u64 nocow_end; 2025 int extent_type; 2026 bool is_prealloc; 2027 2028 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 2029 cur_offset, 0); 2030 if (ret < 0) 2031 goto error; 2032 2033 /* 2034 * If there is no extent for our range when doing the initial 2035 * search, then go back to the previous slot as it will be the 2036 * one containing the search offset 2037 */ 2038 if (ret > 0 && path->slots[0] > 0 && check_prev) { 2039 leaf = path->nodes[0]; 2040 btrfs_item_key_to_cpu(leaf, &found_key, 2041 path->slots[0] - 1); 2042 if (found_key.objectid == ino && 2043 found_key.type == BTRFS_EXTENT_DATA_KEY) 2044 path->slots[0]--; 2045 } 2046 check_prev = false; 2047 next_slot: 2048 /* Go to next leaf if we have exhausted the current one */ 2049 leaf = path->nodes[0]; 2050 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2051 ret = btrfs_next_leaf(root, path); 2052 if (ret < 0) 2053 goto error; 2054 if (ret > 0) 2055 break; 2056 leaf = path->nodes[0]; 2057 } 2058 2059 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2060 2061 /* Didn't find anything for our INO */ 2062 if (found_key.objectid > ino) 2063 break; 2064 /* 2065 * Keep searching until we find an EXTENT_ITEM or there are no 2066 * more extents for this inode 2067 */ 2068 if (WARN_ON_ONCE(found_key.objectid < ino) || 2069 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2070 path->slots[0]++; 2071 goto next_slot; 2072 } 2073 2074 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2075 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2076 found_key.offset > end) 2077 break; 2078 2079 /* 2080 * If the found extent starts after requested offset, then 2081 * adjust extent_end to be right before this extent begins 2082 */ 2083 if (found_key.offset > cur_offset) { 2084 extent_end = found_key.offset; 2085 extent_type = 0; 2086 goto must_cow; 2087 } 2088 2089 /* 2090 * Found extent which begins before our range and potentially 2091 * intersect it 2092 */ 2093 fi = btrfs_item_ptr(leaf, path->slots[0], 2094 struct btrfs_file_extent_item); 2095 extent_type = btrfs_file_extent_type(leaf, fi); 2096 /* If this is triggered then we have a memory corruption. */ 2097 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2098 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2099 ret = -EUCLEAN; 2100 goto error; 2101 } 2102 extent_end = btrfs_file_extent_end(path); 2103 2104 /* 2105 * If the extent we got ends before our current offset, skip to 2106 * the next extent. 2107 */ 2108 if (extent_end <= cur_offset) { 2109 path->slots[0]++; 2110 goto next_slot; 2111 } 2112 2113 nocow_args.start = cur_offset; 2114 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2115 if (ret < 0) 2116 goto error; 2117 if (ret == 0) 2118 goto must_cow; 2119 2120 ret = 0; 2121 nocow_bg = btrfs_inc_nocow_writers(fs_info, 2122 nocow_args.file_extent.disk_bytenr + 2123 nocow_args.file_extent.offset); 2124 if (!nocow_bg) { 2125 must_cow: 2126 /* 2127 * If we can't perform NOCOW writeback for the range, 2128 * then record the beginning of the range that needs to 2129 * be COWed. It will be written out before the next 2130 * NOCOW range if we find one, or when exiting this 2131 * loop. 2132 */ 2133 if (cow_start == (u64)-1) 2134 cow_start = cur_offset; 2135 cur_offset = extent_end; 2136 if (cur_offset > end) 2137 break; 2138 if (!path->nodes[0]) 2139 continue; 2140 path->slots[0]++; 2141 goto next_slot; 2142 } 2143 2144 /* 2145 * COW range from cow_start to found_key.offset - 1. As the key 2146 * will contain the beginning of the first extent that can be 2147 * NOCOW, following one which needs to be COW'ed 2148 */ 2149 if (cow_start != (u64)-1) { 2150 ret = fallback_to_cow(inode, locked_folio, cow_start, 2151 found_key.offset - 1); 2152 cow_start = (u64)-1; 2153 if (ret) { 2154 cow_end = found_key.offset - 1; 2155 btrfs_dec_nocow_writers(nocow_bg); 2156 goto error; 2157 } 2158 } 2159 2160 nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1; 2161 lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state); 2162 2163 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; 2164 if (is_prealloc) { 2165 struct extent_map *em; 2166 2167 em = btrfs_create_io_em(inode, cur_offset, 2168 &nocow_args.file_extent, 2169 BTRFS_ORDERED_PREALLOC); 2170 if (IS_ERR(em)) { 2171 unlock_extent(&inode->io_tree, cur_offset, 2172 nocow_end, &cached_state); 2173 btrfs_dec_nocow_writers(nocow_bg); 2174 ret = PTR_ERR(em); 2175 goto error; 2176 } 2177 free_extent_map(em); 2178 } 2179 2180 ordered = btrfs_alloc_ordered_extent(inode, cur_offset, 2181 &nocow_args.file_extent, 2182 is_prealloc 2183 ? (1 << BTRFS_ORDERED_PREALLOC) 2184 : (1 << BTRFS_ORDERED_NOCOW)); 2185 btrfs_dec_nocow_writers(nocow_bg); 2186 if (IS_ERR(ordered)) { 2187 if (is_prealloc) { 2188 btrfs_drop_extent_map_range(inode, cur_offset, 2189 nocow_end, false); 2190 } 2191 unlock_extent(&inode->io_tree, cur_offset, 2192 nocow_end, &cached_state); 2193 ret = PTR_ERR(ordered); 2194 goto error; 2195 } 2196 2197 if (btrfs_is_data_reloc_root(root)) 2198 /* 2199 * Error handled later, as we must prevent 2200 * extent_clear_unlock_delalloc() in error handler 2201 * from freeing metadata of created ordered extent. 2202 */ 2203 ret = btrfs_reloc_clone_csums(ordered); 2204 btrfs_put_ordered_extent(ordered); 2205 2206 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2207 locked_folio, &cached_state, 2208 EXTENT_LOCKED | EXTENT_DELALLOC | 2209 EXTENT_CLEAR_DATA_RESV, 2210 PAGE_UNLOCK | PAGE_SET_ORDERED); 2211 2212 cur_offset = extent_end; 2213 2214 /* 2215 * btrfs_reloc_clone_csums() error, now we're OK to call error 2216 * handler, as metadata for created ordered extent will only 2217 * be freed by btrfs_finish_ordered_io(). 2218 */ 2219 if (ret) 2220 goto error; 2221 } 2222 btrfs_release_path(path); 2223 2224 if (cur_offset <= end && cow_start == (u64)-1) 2225 cow_start = cur_offset; 2226 2227 if (cow_start != (u64)-1) { 2228 ret = fallback_to_cow(inode, locked_folio, cow_start, end); 2229 cow_start = (u64)-1; 2230 if (ret) { 2231 cow_end = end; 2232 goto error; 2233 } 2234 } 2235 2236 btrfs_free_path(path); 2237 return 0; 2238 2239 error: 2240 /* 2241 * There are several error cases: 2242 * 2243 * 1) Failed without falling back to COW 2244 * start cur_offset end 2245 * |/////////////| | 2246 * 2247 * For range [start, cur_offset) the folios are already unlocked (except 2248 * @locked_folio), EXTENT_DELALLOC already removed. 2249 * Only need to clear the dirty flag as they will never be submitted. 2250 * Ordered extent and extent maps are handled by 2251 * btrfs_mark_ordered_io_finished() inside run_delalloc_range(). 2252 * 2253 * 2) Failed with error from fallback_to_cow() 2254 * start cur_offset cow_end end 2255 * |/////////////|-----------| | 2256 * 2257 * For range [start, cur_offset) it's the same as case 1). 2258 * But for range [cur_offset, cow_end), the folios have dirty flag 2259 * cleared and unlocked, EXTENT_DEALLLOC cleared by cow_file_range(). 2260 * 2261 * Thus we should not call extent_clear_unlock_delalloc() on range 2262 * [cur_offset, cow_end), as the folios are already unlocked. 2263 * 2264 * So clear the folio dirty flags for [start, cur_offset) first. 2265 */ 2266 if (cur_offset > start) 2267 cleanup_dirty_folios(inode, locked_folio, start, cur_offset - 1, ret); 2268 2269 /* 2270 * If an error happened while a COW region is outstanding, cur_offset 2271 * needs to be reset to @cow_end + 1 to skip the COW range, as 2272 * cow_file_range() will do the proper cleanup at error. 2273 */ 2274 if (cow_end) 2275 cur_offset = cow_end + 1; 2276 2277 /* 2278 * We need to lock the extent here because we're clearing DELALLOC and 2279 * we're not locked at this point. 2280 */ 2281 if (cur_offset < end) { 2282 struct extent_state *cached = NULL; 2283 2284 lock_extent(&inode->io_tree, cur_offset, end, &cached); 2285 extent_clear_unlock_delalloc(inode, cur_offset, end, 2286 locked_folio, &cached, 2287 EXTENT_LOCKED | EXTENT_DELALLOC | 2288 EXTENT_DEFRAG | 2289 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2290 PAGE_START_WRITEBACK | 2291 PAGE_END_WRITEBACK); 2292 btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL); 2293 } 2294 btrfs_free_path(path); 2295 btrfs_err_rl(fs_info, 2296 "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d", 2297 __func__, btrfs_root_id(inode->root), 2298 btrfs_ino(inode), start, end + 1 - start, ret); 2299 return ret; 2300 } 2301 2302 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2303 { 2304 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2305 if (inode->defrag_bytes && 2306 test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) 2307 return false; 2308 return true; 2309 } 2310 return false; 2311 } 2312 2313 /* 2314 * Function to process delayed allocation (create CoW) for ranges which are 2315 * being touched for the first time. 2316 */ 2317 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio, 2318 u64 start, u64 end, struct writeback_control *wbc) 2319 { 2320 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2321 int ret; 2322 2323 /* 2324 * The range must cover part of the @locked_folio, or a return of 1 2325 * can confuse the caller. 2326 */ 2327 ASSERT(!(end <= folio_pos(locked_folio) || 2328 start >= folio_pos(locked_folio) + folio_size(locked_folio))); 2329 2330 if (should_nocow(inode, start, end)) { 2331 ret = run_delalloc_nocow(inode, locked_folio, start, end); 2332 goto out; 2333 } 2334 2335 if (btrfs_inode_can_compress(inode) && 2336 inode_need_compress(inode, start, end) && 2337 run_delalloc_compressed(inode, locked_folio, start, end, wbc)) 2338 return 1; 2339 2340 if (zoned) 2341 ret = run_delalloc_cow(inode, locked_folio, start, end, wbc, 2342 true); 2343 else 2344 ret = cow_file_range(inode, locked_folio, start, end, NULL, 2345 false, false); 2346 2347 out: 2348 if (ret < 0) 2349 btrfs_cleanup_ordered_extents(inode, start, end - start + 1); 2350 return ret; 2351 } 2352 2353 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2354 struct extent_state *orig, u64 split) 2355 { 2356 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2357 u64 size; 2358 2359 lockdep_assert_held(&inode->io_tree.lock); 2360 2361 /* not delalloc, ignore it */ 2362 if (!(orig->state & EXTENT_DELALLOC)) 2363 return; 2364 2365 size = orig->end - orig->start + 1; 2366 if (size > fs_info->max_extent_size) { 2367 u32 num_extents; 2368 u64 new_size; 2369 2370 /* 2371 * See the explanation in btrfs_merge_delalloc_extent, the same 2372 * applies here, just in reverse. 2373 */ 2374 new_size = orig->end - split + 1; 2375 num_extents = count_max_extents(fs_info, new_size); 2376 new_size = split - orig->start; 2377 num_extents += count_max_extents(fs_info, new_size); 2378 if (count_max_extents(fs_info, size) >= num_extents) 2379 return; 2380 } 2381 2382 spin_lock(&inode->lock); 2383 btrfs_mod_outstanding_extents(inode, 1); 2384 spin_unlock(&inode->lock); 2385 } 2386 2387 /* 2388 * Handle merged delayed allocation extents so we can keep track of new extents 2389 * that are just merged onto old extents, such as when we are doing sequential 2390 * writes, so we can properly account for the metadata space we'll need. 2391 */ 2392 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2393 struct extent_state *other) 2394 { 2395 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2396 u64 new_size, old_size; 2397 u32 num_extents; 2398 2399 lockdep_assert_held(&inode->io_tree.lock); 2400 2401 /* not delalloc, ignore it */ 2402 if (!(other->state & EXTENT_DELALLOC)) 2403 return; 2404 2405 if (new->start > other->start) 2406 new_size = new->end - other->start + 1; 2407 else 2408 new_size = other->end - new->start + 1; 2409 2410 /* we're not bigger than the max, unreserve the space and go */ 2411 if (new_size <= fs_info->max_extent_size) { 2412 spin_lock(&inode->lock); 2413 btrfs_mod_outstanding_extents(inode, -1); 2414 spin_unlock(&inode->lock); 2415 return; 2416 } 2417 2418 /* 2419 * We have to add up either side to figure out how many extents were 2420 * accounted for before we merged into one big extent. If the number of 2421 * extents we accounted for is <= the amount we need for the new range 2422 * then we can return, otherwise drop. Think of it like this 2423 * 2424 * [ 4k][MAX_SIZE] 2425 * 2426 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2427 * need 2 outstanding extents, on one side we have 1 and the other side 2428 * we have 1 so they are == and we can return. But in this case 2429 * 2430 * [MAX_SIZE+4k][MAX_SIZE+4k] 2431 * 2432 * Each range on their own accounts for 2 extents, but merged together 2433 * they are only 3 extents worth of accounting, so we need to drop in 2434 * this case. 2435 */ 2436 old_size = other->end - other->start + 1; 2437 num_extents = count_max_extents(fs_info, old_size); 2438 old_size = new->end - new->start + 1; 2439 num_extents += count_max_extents(fs_info, old_size); 2440 if (count_max_extents(fs_info, new_size) >= num_extents) 2441 return; 2442 2443 spin_lock(&inode->lock); 2444 btrfs_mod_outstanding_extents(inode, -1); 2445 spin_unlock(&inode->lock); 2446 } 2447 2448 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode) 2449 { 2450 struct btrfs_root *root = inode->root; 2451 struct btrfs_fs_info *fs_info = root->fs_info; 2452 2453 spin_lock(&root->delalloc_lock); 2454 ASSERT(list_empty(&inode->delalloc_inodes)); 2455 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2456 root->nr_delalloc_inodes++; 2457 if (root->nr_delalloc_inodes == 1) { 2458 spin_lock(&fs_info->delalloc_root_lock); 2459 ASSERT(list_empty(&root->delalloc_root)); 2460 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots); 2461 spin_unlock(&fs_info->delalloc_root_lock); 2462 } 2463 spin_unlock(&root->delalloc_lock); 2464 } 2465 2466 void btrfs_del_delalloc_inode(struct btrfs_inode *inode) 2467 { 2468 struct btrfs_root *root = inode->root; 2469 struct btrfs_fs_info *fs_info = root->fs_info; 2470 2471 lockdep_assert_held(&root->delalloc_lock); 2472 2473 /* 2474 * We may be called after the inode was already deleted from the list, 2475 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(), 2476 * and then later through btrfs_clear_delalloc_extent() while the inode 2477 * still has ->delalloc_bytes > 0. 2478 */ 2479 if (!list_empty(&inode->delalloc_inodes)) { 2480 list_del_init(&inode->delalloc_inodes); 2481 root->nr_delalloc_inodes--; 2482 if (!root->nr_delalloc_inodes) { 2483 ASSERT(list_empty(&root->delalloc_inodes)); 2484 spin_lock(&fs_info->delalloc_root_lock); 2485 ASSERT(!list_empty(&root->delalloc_root)); 2486 list_del_init(&root->delalloc_root); 2487 spin_unlock(&fs_info->delalloc_root_lock); 2488 } 2489 } 2490 } 2491 2492 /* 2493 * Properly track delayed allocation bytes in the inode and to maintain the 2494 * list of inodes that have pending delalloc work to be done. 2495 */ 2496 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2497 u32 bits) 2498 { 2499 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2500 2501 lockdep_assert_held(&inode->io_tree.lock); 2502 2503 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2504 WARN_ON(1); 2505 /* 2506 * set_bit and clear bit hooks normally require _irqsave/restore 2507 * but in this case, we are only testing for the DELALLOC 2508 * bit, which is only set or cleared with irqs on 2509 */ 2510 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2511 u64 len = state->end + 1 - state->start; 2512 u64 prev_delalloc_bytes; 2513 u32 num_extents = count_max_extents(fs_info, len); 2514 2515 spin_lock(&inode->lock); 2516 btrfs_mod_outstanding_extents(inode, num_extents); 2517 spin_unlock(&inode->lock); 2518 2519 /* For sanity tests */ 2520 if (btrfs_is_testing(fs_info)) 2521 return; 2522 2523 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2524 fs_info->delalloc_batch); 2525 spin_lock(&inode->lock); 2526 prev_delalloc_bytes = inode->delalloc_bytes; 2527 inode->delalloc_bytes += len; 2528 if (bits & EXTENT_DEFRAG) 2529 inode->defrag_bytes += len; 2530 spin_unlock(&inode->lock); 2531 2532 /* 2533 * We don't need to be under the protection of the inode's lock, 2534 * because we are called while holding the inode's io_tree lock 2535 * and are therefore protected against concurrent calls of this 2536 * function and btrfs_clear_delalloc_extent(). 2537 */ 2538 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0) 2539 btrfs_add_delalloc_inode(inode); 2540 } 2541 2542 if (!(state->state & EXTENT_DELALLOC_NEW) && 2543 (bits & EXTENT_DELALLOC_NEW)) { 2544 spin_lock(&inode->lock); 2545 inode->new_delalloc_bytes += state->end + 1 - state->start; 2546 spin_unlock(&inode->lock); 2547 } 2548 } 2549 2550 /* 2551 * Once a range is no longer delalloc this function ensures that proper 2552 * accounting happens. 2553 */ 2554 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2555 struct extent_state *state, u32 bits) 2556 { 2557 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2558 u64 len = state->end + 1 - state->start; 2559 u32 num_extents = count_max_extents(fs_info, len); 2560 2561 lockdep_assert_held(&inode->io_tree.lock); 2562 2563 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2564 spin_lock(&inode->lock); 2565 inode->defrag_bytes -= len; 2566 spin_unlock(&inode->lock); 2567 } 2568 2569 /* 2570 * set_bit and clear bit hooks normally require _irqsave/restore 2571 * but in this case, we are only testing for the DELALLOC 2572 * bit, which is only set or cleared with irqs on 2573 */ 2574 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2575 struct btrfs_root *root = inode->root; 2576 u64 new_delalloc_bytes; 2577 2578 spin_lock(&inode->lock); 2579 btrfs_mod_outstanding_extents(inode, -num_extents); 2580 spin_unlock(&inode->lock); 2581 2582 /* 2583 * We don't reserve metadata space for space cache inodes so we 2584 * don't need to call delalloc_release_metadata if there is an 2585 * error. 2586 */ 2587 if (bits & EXTENT_CLEAR_META_RESV && 2588 root != fs_info->tree_root) 2589 btrfs_delalloc_release_metadata(inode, len, true); 2590 2591 /* For sanity tests. */ 2592 if (btrfs_is_testing(fs_info)) 2593 return; 2594 2595 if (!btrfs_is_data_reloc_root(root) && 2596 !btrfs_is_free_space_inode(inode) && 2597 !(state->state & EXTENT_NORESERVE) && 2598 (bits & EXTENT_CLEAR_DATA_RESV)) 2599 btrfs_free_reserved_data_space_noquota(fs_info, len); 2600 2601 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2602 fs_info->delalloc_batch); 2603 spin_lock(&inode->lock); 2604 inode->delalloc_bytes -= len; 2605 new_delalloc_bytes = inode->delalloc_bytes; 2606 spin_unlock(&inode->lock); 2607 2608 /* 2609 * We don't need to be under the protection of the inode's lock, 2610 * because we are called while holding the inode's io_tree lock 2611 * and are therefore protected against concurrent calls of this 2612 * function and btrfs_set_delalloc_extent(). 2613 */ 2614 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) { 2615 spin_lock(&root->delalloc_lock); 2616 btrfs_del_delalloc_inode(inode); 2617 spin_unlock(&root->delalloc_lock); 2618 } 2619 } 2620 2621 if ((state->state & EXTENT_DELALLOC_NEW) && 2622 (bits & EXTENT_DELALLOC_NEW)) { 2623 spin_lock(&inode->lock); 2624 ASSERT(inode->new_delalloc_bytes >= len); 2625 inode->new_delalloc_bytes -= len; 2626 if (bits & EXTENT_ADD_INODE_BYTES) 2627 inode_add_bytes(&inode->vfs_inode, len); 2628 spin_unlock(&inode->lock); 2629 } 2630 } 2631 2632 /* 2633 * given a list of ordered sums record them in the inode. This happens 2634 * at IO completion time based on sums calculated at bio submission time. 2635 */ 2636 static int add_pending_csums(struct btrfs_trans_handle *trans, 2637 struct list_head *list) 2638 { 2639 struct btrfs_ordered_sum *sum; 2640 struct btrfs_root *csum_root = NULL; 2641 int ret; 2642 2643 list_for_each_entry(sum, list, list) { 2644 trans->adding_csums = true; 2645 if (!csum_root) 2646 csum_root = btrfs_csum_root(trans->fs_info, 2647 sum->logical); 2648 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2649 trans->adding_csums = false; 2650 if (ret) 2651 return ret; 2652 } 2653 return 0; 2654 } 2655 2656 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2657 const u64 start, 2658 const u64 len, 2659 struct extent_state **cached_state) 2660 { 2661 u64 search_start = start; 2662 const u64 end = start + len - 1; 2663 2664 while (search_start < end) { 2665 const u64 search_len = end - search_start + 1; 2666 struct extent_map *em; 2667 u64 em_len; 2668 int ret = 0; 2669 2670 em = btrfs_get_extent(inode, NULL, search_start, search_len); 2671 if (IS_ERR(em)) 2672 return PTR_ERR(em); 2673 2674 if (em->disk_bytenr != EXTENT_MAP_HOLE) 2675 goto next; 2676 2677 em_len = em->len; 2678 if (em->start < search_start) 2679 em_len -= search_start - em->start; 2680 if (em_len > search_len) 2681 em_len = search_len; 2682 2683 ret = set_extent_bit(&inode->io_tree, search_start, 2684 search_start + em_len - 1, 2685 EXTENT_DELALLOC_NEW, cached_state); 2686 next: 2687 search_start = extent_map_end(em); 2688 free_extent_map(em); 2689 if (ret) 2690 return ret; 2691 } 2692 return 0; 2693 } 2694 2695 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2696 unsigned int extra_bits, 2697 struct extent_state **cached_state) 2698 { 2699 WARN_ON(PAGE_ALIGNED(end)); 2700 2701 if (start >= i_size_read(&inode->vfs_inode) && 2702 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2703 /* 2704 * There can't be any extents following eof in this case so just 2705 * set the delalloc new bit for the range directly. 2706 */ 2707 extra_bits |= EXTENT_DELALLOC_NEW; 2708 } else { 2709 int ret; 2710 2711 ret = btrfs_find_new_delalloc_bytes(inode, start, 2712 end + 1 - start, 2713 cached_state); 2714 if (ret) 2715 return ret; 2716 } 2717 2718 return set_extent_bit(&inode->io_tree, start, end, 2719 EXTENT_DELALLOC | extra_bits, cached_state); 2720 } 2721 2722 /* see btrfs_writepage_start_hook for details on why this is required */ 2723 struct btrfs_writepage_fixup { 2724 struct folio *folio; 2725 struct btrfs_inode *inode; 2726 struct btrfs_work work; 2727 }; 2728 2729 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2730 { 2731 struct btrfs_writepage_fixup *fixup = 2732 container_of(work, struct btrfs_writepage_fixup, work); 2733 struct btrfs_ordered_extent *ordered; 2734 struct extent_state *cached_state = NULL; 2735 struct extent_changeset *data_reserved = NULL; 2736 struct folio *folio = fixup->folio; 2737 struct btrfs_inode *inode = fixup->inode; 2738 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2739 u64 page_start = folio_pos(folio); 2740 u64 page_end = folio_pos(folio) + folio_size(folio) - 1; 2741 int ret = 0; 2742 bool free_delalloc_space = true; 2743 2744 /* 2745 * This is similar to page_mkwrite, we need to reserve the space before 2746 * we take the folio lock. 2747 */ 2748 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2749 folio_size(folio)); 2750 again: 2751 folio_lock(folio); 2752 2753 /* 2754 * Before we queued this fixup, we took a reference on the folio. 2755 * folio->mapping may go NULL, but it shouldn't be moved to a different 2756 * address space. 2757 */ 2758 if (!folio->mapping || !folio_test_dirty(folio) || 2759 !folio_test_checked(folio)) { 2760 /* 2761 * Unfortunately this is a little tricky, either 2762 * 2763 * 1) We got here and our folio had already been dealt with and 2764 * we reserved our space, thus ret == 0, so we need to just 2765 * drop our space reservation and bail. This can happen the 2766 * first time we come into the fixup worker, or could happen 2767 * while waiting for the ordered extent. 2768 * 2) Our folio was already dealt with, but we happened to get an 2769 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2770 * this case we obviously don't have anything to release, but 2771 * because the folio was already dealt with we don't want to 2772 * mark the folio with an error, so make sure we're resetting 2773 * ret to 0. This is why we have this check _before_ the ret 2774 * check, because we do not want to have a surprise ENOSPC 2775 * when the folio was already properly dealt with. 2776 */ 2777 if (!ret) { 2778 btrfs_delalloc_release_extents(inode, folio_size(folio)); 2779 btrfs_delalloc_release_space(inode, data_reserved, 2780 page_start, folio_size(folio), 2781 true); 2782 } 2783 ret = 0; 2784 goto out_page; 2785 } 2786 2787 /* 2788 * We can't mess with the folio state unless it is locked, so now that 2789 * it is locked bail if we failed to make our space reservation. 2790 */ 2791 if (ret) 2792 goto out_page; 2793 2794 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2795 2796 /* already ordered? We're done */ 2797 if (folio_test_ordered(folio)) 2798 goto out_reserved; 2799 2800 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2801 if (ordered) { 2802 unlock_extent(&inode->io_tree, page_start, page_end, 2803 &cached_state); 2804 folio_unlock(folio); 2805 btrfs_start_ordered_extent(ordered); 2806 btrfs_put_ordered_extent(ordered); 2807 goto again; 2808 } 2809 2810 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2811 &cached_state); 2812 if (ret) 2813 goto out_reserved; 2814 2815 /* 2816 * Everything went as planned, we're now the owner of a dirty page with 2817 * delayed allocation bits set and space reserved for our COW 2818 * destination. 2819 * 2820 * The page was dirty when we started, nothing should have cleaned it. 2821 */ 2822 BUG_ON(!folio_test_dirty(folio)); 2823 free_delalloc_space = false; 2824 out_reserved: 2825 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2826 if (free_delalloc_space) 2827 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2828 PAGE_SIZE, true); 2829 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2830 out_page: 2831 if (ret) { 2832 /* 2833 * We hit ENOSPC or other errors. Update the mapping and page 2834 * to reflect the errors and clean the page. 2835 */ 2836 mapping_set_error(folio->mapping, ret); 2837 btrfs_mark_ordered_io_finished(inode, folio, page_start, 2838 folio_size(folio), !ret); 2839 folio_clear_dirty_for_io(folio); 2840 } 2841 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE); 2842 folio_unlock(folio); 2843 folio_put(folio); 2844 kfree(fixup); 2845 extent_changeset_free(data_reserved); 2846 /* 2847 * As a precaution, do a delayed iput in case it would be the last iput 2848 * that could need flushing space. Recursing back to fixup worker would 2849 * deadlock. 2850 */ 2851 btrfs_add_delayed_iput(inode); 2852 } 2853 2854 /* 2855 * There are a few paths in the higher layers of the kernel that directly 2856 * set the folio dirty bit without asking the filesystem if it is a 2857 * good idea. This causes problems because we want to make sure COW 2858 * properly happens and the data=ordered rules are followed. 2859 * 2860 * In our case any range that doesn't have the ORDERED bit set 2861 * hasn't been properly setup for IO. We kick off an async process 2862 * to fix it up. The async helper will wait for ordered extents, set 2863 * the delalloc bit and make it safe to write the folio. 2864 */ 2865 int btrfs_writepage_cow_fixup(struct folio *folio) 2866 { 2867 struct inode *inode = folio->mapping->host; 2868 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 2869 struct btrfs_writepage_fixup *fixup; 2870 2871 /* This folio has ordered extent covering it already */ 2872 if (folio_test_ordered(folio)) 2873 return 0; 2874 2875 /* 2876 * folio_checked is set below when we create a fixup worker for this 2877 * folio, don't try to create another one if we're already 2878 * folio_test_checked. 2879 * 2880 * The extent_io writepage code will redirty the foio if we send back 2881 * EAGAIN. 2882 */ 2883 if (folio_test_checked(folio)) 2884 return -EAGAIN; 2885 2886 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2887 if (!fixup) 2888 return -EAGAIN; 2889 2890 /* 2891 * We are already holding a reference to this inode from 2892 * write_cache_pages. We need to hold it because the space reservation 2893 * takes place outside of the folio lock, and we can't trust 2894 * page->mapping outside of the folio lock. 2895 */ 2896 ihold(inode); 2897 btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio)); 2898 folio_get(folio); 2899 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL); 2900 fixup->folio = folio; 2901 fixup->inode = BTRFS_I(inode); 2902 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2903 2904 return -EAGAIN; 2905 } 2906 2907 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2908 struct btrfs_inode *inode, u64 file_pos, 2909 struct btrfs_file_extent_item *stack_fi, 2910 const bool update_inode_bytes, 2911 u64 qgroup_reserved) 2912 { 2913 struct btrfs_root *root = inode->root; 2914 const u64 sectorsize = root->fs_info->sectorsize; 2915 struct btrfs_path *path; 2916 struct extent_buffer *leaf; 2917 struct btrfs_key ins; 2918 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2919 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2920 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2921 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2922 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2923 struct btrfs_drop_extents_args drop_args = { 0 }; 2924 int ret; 2925 2926 path = btrfs_alloc_path(); 2927 if (!path) 2928 return -ENOMEM; 2929 2930 /* 2931 * we may be replacing one extent in the tree with another. 2932 * The new extent is pinned in the extent map, and we don't want 2933 * to drop it from the cache until it is completely in the btree. 2934 * 2935 * So, tell btrfs_drop_extents to leave this extent in the cache. 2936 * the caller is expected to unpin it and allow it to be merged 2937 * with the others. 2938 */ 2939 drop_args.path = path; 2940 drop_args.start = file_pos; 2941 drop_args.end = file_pos + num_bytes; 2942 drop_args.replace_extent = true; 2943 drop_args.extent_item_size = sizeof(*stack_fi); 2944 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2945 if (ret) 2946 goto out; 2947 2948 if (!drop_args.extent_inserted) { 2949 ins.objectid = btrfs_ino(inode); 2950 ins.offset = file_pos; 2951 ins.type = BTRFS_EXTENT_DATA_KEY; 2952 2953 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2954 sizeof(*stack_fi)); 2955 if (ret) 2956 goto out; 2957 } 2958 leaf = path->nodes[0]; 2959 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2960 write_extent_buffer(leaf, stack_fi, 2961 btrfs_item_ptr_offset(leaf, path->slots[0]), 2962 sizeof(struct btrfs_file_extent_item)); 2963 2964 btrfs_release_path(path); 2965 2966 /* 2967 * If we dropped an inline extent here, we know the range where it is 2968 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the 2969 * number of bytes only for that range containing the inline extent. 2970 * The remaining of the range will be processed when clearning the 2971 * EXTENT_DELALLOC_BIT bit through the ordered extent completion. 2972 */ 2973 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 2974 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 2975 2976 inline_size = drop_args.bytes_found - inline_size; 2977 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 2978 drop_args.bytes_found -= inline_size; 2979 num_bytes -= sectorsize; 2980 } 2981 2982 if (update_inode_bytes) 2983 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 2984 2985 ins.objectid = disk_bytenr; 2986 ins.offset = disk_num_bytes; 2987 ins.type = BTRFS_EXTENT_ITEM_KEY; 2988 2989 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2990 if (ret) 2991 goto out; 2992 2993 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 2994 file_pos - offset, 2995 qgroup_reserved, &ins); 2996 out: 2997 btrfs_free_path(path); 2998 2999 return ret; 3000 } 3001 3002 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 3003 u64 start, u64 len) 3004 { 3005 struct btrfs_block_group *cache; 3006 3007 cache = btrfs_lookup_block_group(fs_info, start); 3008 ASSERT(cache); 3009 3010 spin_lock(&cache->lock); 3011 cache->delalloc_bytes -= len; 3012 spin_unlock(&cache->lock); 3013 3014 btrfs_put_block_group(cache); 3015 } 3016 3017 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 3018 struct btrfs_ordered_extent *oe) 3019 { 3020 struct btrfs_file_extent_item stack_fi; 3021 bool update_inode_bytes; 3022 u64 num_bytes = oe->num_bytes; 3023 u64 ram_bytes = oe->ram_bytes; 3024 3025 memset(&stack_fi, 0, sizeof(stack_fi)); 3026 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 3027 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 3028 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 3029 oe->disk_num_bytes); 3030 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 3031 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) 3032 num_bytes = oe->truncated_len; 3033 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 3034 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 3035 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 3036 /* Encryption and other encoding is reserved and all 0 */ 3037 3038 /* 3039 * For delalloc, when completing an ordered extent we update the inode's 3040 * bytes when clearing the range in the inode's io tree, so pass false 3041 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3042 * except if the ordered extent was truncated. 3043 */ 3044 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3045 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3046 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3047 3048 return insert_reserved_file_extent(trans, oe->inode, 3049 oe->file_offset, &stack_fi, 3050 update_inode_bytes, oe->qgroup_rsv); 3051 } 3052 3053 /* 3054 * As ordered data IO finishes, this gets called so we can finish 3055 * an ordered extent if the range of bytes in the file it covers are 3056 * fully written. 3057 */ 3058 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3059 { 3060 struct btrfs_inode *inode = ordered_extent->inode; 3061 struct btrfs_root *root = inode->root; 3062 struct btrfs_fs_info *fs_info = root->fs_info; 3063 struct btrfs_trans_handle *trans = NULL; 3064 struct extent_io_tree *io_tree = &inode->io_tree; 3065 struct extent_state *cached_state = NULL; 3066 u64 start, end; 3067 int compress_type = 0; 3068 int ret = 0; 3069 u64 logical_len = ordered_extent->num_bytes; 3070 bool freespace_inode; 3071 bool truncated = false; 3072 bool clear_reserved_extent = true; 3073 unsigned int clear_bits = EXTENT_DEFRAG; 3074 3075 start = ordered_extent->file_offset; 3076 end = start + ordered_extent->num_bytes - 1; 3077 3078 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3079 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3080 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3081 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3082 clear_bits |= EXTENT_DELALLOC_NEW; 3083 3084 freespace_inode = btrfs_is_free_space_inode(inode); 3085 if (!freespace_inode) 3086 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3087 3088 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3089 ret = -EIO; 3090 goto out; 3091 } 3092 3093 if (btrfs_is_zoned(fs_info)) 3094 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3095 ordered_extent->disk_num_bytes); 3096 3097 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3098 truncated = true; 3099 logical_len = ordered_extent->truncated_len; 3100 /* Truncated the entire extent, don't bother adding */ 3101 if (!logical_len) 3102 goto out; 3103 } 3104 3105 /* 3106 * If it's a COW write we need to lock the extent range as we will be 3107 * inserting/replacing file extent items and unpinning an extent map. 3108 * This must be taken before joining a transaction, as it's a higher 3109 * level lock (like the inode's VFS lock), otherwise we can run into an 3110 * ABBA deadlock with other tasks (transactions work like a lock, 3111 * depending on their current state). 3112 */ 3113 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3114 clear_bits |= EXTENT_LOCKED; 3115 lock_extent(io_tree, start, end, &cached_state); 3116 } 3117 3118 if (freespace_inode) 3119 trans = btrfs_join_transaction_spacecache(root); 3120 else 3121 trans = btrfs_join_transaction(root); 3122 if (IS_ERR(trans)) { 3123 ret = PTR_ERR(trans); 3124 trans = NULL; 3125 goto out; 3126 } 3127 3128 trans->block_rsv = &inode->block_rsv; 3129 3130 ret = btrfs_insert_raid_extent(trans, ordered_extent); 3131 if (ret) { 3132 btrfs_abort_transaction(trans, ret); 3133 goto out; 3134 } 3135 3136 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3137 /* Logic error */ 3138 ASSERT(list_empty(&ordered_extent->list)); 3139 if (!list_empty(&ordered_extent->list)) { 3140 ret = -EINVAL; 3141 btrfs_abort_transaction(trans, ret); 3142 goto out; 3143 } 3144 3145 btrfs_inode_safe_disk_i_size_write(inode, 0); 3146 ret = btrfs_update_inode_fallback(trans, inode); 3147 if (ret) { 3148 /* -ENOMEM or corruption */ 3149 btrfs_abort_transaction(trans, ret); 3150 } 3151 goto out; 3152 } 3153 3154 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3155 compress_type = ordered_extent->compress_type; 3156 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3157 BUG_ON(compress_type); 3158 ret = btrfs_mark_extent_written(trans, inode, 3159 ordered_extent->file_offset, 3160 ordered_extent->file_offset + 3161 logical_len); 3162 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3163 ordered_extent->disk_num_bytes); 3164 } else { 3165 BUG_ON(root == fs_info->tree_root); 3166 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3167 if (!ret) { 3168 clear_reserved_extent = false; 3169 btrfs_release_delalloc_bytes(fs_info, 3170 ordered_extent->disk_bytenr, 3171 ordered_extent->disk_num_bytes); 3172 } 3173 } 3174 if (ret < 0) { 3175 btrfs_abort_transaction(trans, ret); 3176 goto out; 3177 } 3178 3179 ret = unpin_extent_cache(inode, ordered_extent->file_offset, 3180 ordered_extent->num_bytes, trans->transid); 3181 if (ret < 0) { 3182 btrfs_abort_transaction(trans, ret); 3183 goto out; 3184 } 3185 3186 ret = add_pending_csums(trans, &ordered_extent->list); 3187 if (ret) { 3188 btrfs_abort_transaction(trans, ret); 3189 goto out; 3190 } 3191 3192 /* 3193 * If this is a new delalloc range, clear its new delalloc flag to 3194 * update the inode's number of bytes. This needs to be done first 3195 * before updating the inode item. 3196 */ 3197 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3198 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3199 clear_extent_bit(&inode->io_tree, start, end, 3200 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3201 &cached_state); 3202 3203 btrfs_inode_safe_disk_i_size_write(inode, 0); 3204 ret = btrfs_update_inode_fallback(trans, inode); 3205 if (ret) { /* -ENOMEM or corruption */ 3206 btrfs_abort_transaction(trans, ret); 3207 goto out; 3208 } 3209 out: 3210 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3211 &cached_state); 3212 3213 if (trans) 3214 btrfs_end_transaction(trans); 3215 3216 if (ret || truncated) { 3217 u64 unwritten_start = start; 3218 3219 /* 3220 * If we failed to finish this ordered extent for any reason we 3221 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3222 * extent, and mark the inode with the error if it wasn't 3223 * already set. Any error during writeback would have already 3224 * set the mapping error, so we need to set it if we're the ones 3225 * marking this ordered extent as failed. 3226 */ 3227 if (ret) 3228 btrfs_mark_ordered_extent_error(ordered_extent); 3229 3230 if (truncated) 3231 unwritten_start += logical_len; 3232 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3233 3234 /* 3235 * Drop extent maps for the part of the extent we didn't write. 3236 * 3237 * We have an exception here for the free_space_inode, this is 3238 * because when we do btrfs_get_extent() on the free space inode 3239 * we will search the commit root. If this is a new block group 3240 * we won't find anything, and we will trip over the assert in 3241 * writepage where we do ASSERT(em->block_start != 3242 * EXTENT_MAP_HOLE). 3243 * 3244 * Theoretically we could also skip this for any NOCOW extent as 3245 * we don't mess with the extent map tree in the NOCOW case, but 3246 * for now simply skip this if we are the free space inode. 3247 */ 3248 if (!btrfs_is_free_space_inode(inode)) 3249 btrfs_drop_extent_map_range(inode, unwritten_start, 3250 end, false); 3251 3252 /* 3253 * If the ordered extent had an IOERR or something else went 3254 * wrong we need to return the space for this ordered extent 3255 * back to the allocator. We only free the extent in the 3256 * truncated case if we didn't write out the extent at all. 3257 * 3258 * If we made it past insert_reserved_file_extent before we 3259 * errored out then we don't need to do this as the accounting 3260 * has already been done. 3261 */ 3262 if ((ret || !logical_len) && 3263 clear_reserved_extent && 3264 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3265 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3266 /* 3267 * Discard the range before returning it back to the 3268 * free space pool 3269 */ 3270 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3271 btrfs_discard_extent(fs_info, 3272 ordered_extent->disk_bytenr, 3273 ordered_extent->disk_num_bytes, 3274 NULL); 3275 btrfs_free_reserved_extent(fs_info, 3276 ordered_extent->disk_bytenr, 3277 ordered_extent->disk_num_bytes, 1); 3278 /* 3279 * Actually free the qgroup rsv which was released when 3280 * the ordered extent was created. 3281 */ 3282 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root), 3283 ordered_extent->qgroup_rsv, 3284 BTRFS_QGROUP_RSV_DATA); 3285 } 3286 } 3287 3288 /* 3289 * This needs to be done to make sure anybody waiting knows we are done 3290 * updating everything for this ordered extent. 3291 */ 3292 btrfs_remove_ordered_extent(inode, ordered_extent); 3293 3294 /* once for us */ 3295 btrfs_put_ordered_extent(ordered_extent); 3296 /* once for the tree */ 3297 btrfs_put_ordered_extent(ordered_extent); 3298 3299 return ret; 3300 } 3301 3302 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) 3303 { 3304 if (btrfs_is_zoned(ordered->inode->root->fs_info) && 3305 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) && 3306 list_empty(&ordered->bioc_list)) 3307 btrfs_finish_ordered_zoned(ordered); 3308 return btrfs_finish_one_ordered(ordered); 3309 } 3310 3311 /* 3312 * Verify the checksum for a single sector without any extra action that depend 3313 * on the type of I/O. 3314 */ 3315 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3316 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3317 { 3318 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3319 char *kaddr; 3320 3321 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3322 3323 shash->tfm = fs_info->csum_shash; 3324 3325 kaddr = kmap_local_page(page) + pgoff; 3326 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3327 kunmap_local(kaddr); 3328 3329 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3330 return -EIO; 3331 return 0; 3332 } 3333 3334 /* 3335 * Verify the checksum of a single data sector. 3336 * 3337 * @bbio: btrfs_io_bio which contains the csum 3338 * @dev: device the sector is on 3339 * @bio_offset: offset to the beginning of the bio (in bytes) 3340 * @bv: bio_vec to check 3341 * 3342 * Check if the checksum on a data block is valid. When a checksum mismatch is 3343 * detected, report the error and fill the corrupted range with zero. 3344 * 3345 * Return %true if the sector is ok or had no checksum to start with, else %false. 3346 */ 3347 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3348 u32 bio_offset, struct bio_vec *bv) 3349 { 3350 struct btrfs_inode *inode = bbio->inode; 3351 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3352 u64 file_offset = bbio->file_offset + bio_offset; 3353 u64 end = file_offset + bv->bv_len - 1; 3354 u8 *csum_expected; 3355 u8 csum[BTRFS_CSUM_SIZE]; 3356 3357 ASSERT(bv->bv_len == fs_info->sectorsize); 3358 3359 if (!bbio->csum) 3360 return true; 3361 3362 if (btrfs_is_data_reloc_root(inode->root) && 3363 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3364 NULL)) { 3365 /* Skip the range without csum for data reloc inode */ 3366 clear_extent_bits(&inode->io_tree, file_offset, end, 3367 EXTENT_NODATASUM); 3368 return true; 3369 } 3370 3371 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3372 fs_info->csum_size; 3373 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3374 csum_expected)) 3375 goto zeroit; 3376 return true; 3377 3378 zeroit: 3379 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3380 bbio->mirror_num); 3381 if (dev) 3382 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3383 memzero_bvec(bv); 3384 return false; 3385 } 3386 3387 /* 3388 * Perform a delayed iput on @inode. 3389 * 3390 * @inode: The inode we want to perform iput on 3391 * 3392 * This function uses the generic vfs_inode::i_count to track whether we should 3393 * just decrement it (in case it's > 1) or if this is the last iput then link 3394 * the inode to the delayed iput machinery. Delayed iputs are processed at 3395 * transaction commit time/superblock commit/cleaner kthread. 3396 */ 3397 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3398 { 3399 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3400 unsigned long flags; 3401 3402 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3403 return; 3404 3405 atomic_inc(&fs_info->nr_delayed_iputs); 3406 /* 3407 * Need to be irq safe here because we can be called from either an irq 3408 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3409 * context. 3410 */ 3411 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3412 ASSERT(list_empty(&inode->delayed_iput)); 3413 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3414 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3415 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3416 wake_up_process(fs_info->cleaner_kthread); 3417 } 3418 3419 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3420 struct btrfs_inode *inode) 3421 { 3422 list_del_init(&inode->delayed_iput); 3423 spin_unlock_irq(&fs_info->delayed_iput_lock); 3424 iput(&inode->vfs_inode); 3425 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3426 wake_up(&fs_info->delayed_iputs_wait); 3427 spin_lock_irq(&fs_info->delayed_iput_lock); 3428 } 3429 3430 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3431 struct btrfs_inode *inode) 3432 { 3433 if (!list_empty(&inode->delayed_iput)) { 3434 spin_lock_irq(&fs_info->delayed_iput_lock); 3435 if (!list_empty(&inode->delayed_iput)) 3436 run_delayed_iput_locked(fs_info, inode); 3437 spin_unlock_irq(&fs_info->delayed_iput_lock); 3438 } 3439 } 3440 3441 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3442 { 3443 /* 3444 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3445 * calls btrfs_add_delayed_iput() and that needs to lock 3446 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3447 * prevent a deadlock. 3448 */ 3449 spin_lock_irq(&fs_info->delayed_iput_lock); 3450 while (!list_empty(&fs_info->delayed_iputs)) { 3451 struct btrfs_inode *inode; 3452 3453 inode = list_first_entry(&fs_info->delayed_iputs, 3454 struct btrfs_inode, delayed_iput); 3455 run_delayed_iput_locked(fs_info, inode); 3456 if (need_resched()) { 3457 spin_unlock_irq(&fs_info->delayed_iput_lock); 3458 cond_resched(); 3459 spin_lock_irq(&fs_info->delayed_iput_lock); 3460 } 3461 } 3462 spin_unlock_irq(&fs_info->delayed_iput_lock); 3463 } 3464 3465 /* 3466 * Wait for flushing all delayed iputs 3467 * 3468 * @fs_info: the filesystem 3469 * 3470 * This will wait on any delayed iputs that are currently running with KILLABLE 3471 * set. Once they are all done running we will return, unless we are killed in 3472 * which case we return EINTR. This helps in user operations like fallocate etc 3473 * that might get blocked on the iputs. 3474 * 3475 * Return EINTR if we were killed, 0 if nothing's pending 3476 */ 3477 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3478 { 3479 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3480 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3481 if (ret) 3482 return -EINTR; 3483 return 0; 3484 } 3485 3486 /* 3487 * This creates an orphan entry for the given inode in case something goes wrong 3488 * in the middle of an unlink. 3489 */ 3490 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3491 struct btrfs_inode *inode) 3492 { 3493 int ret; 3494 3495 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3496 if (ret && ret != -EEXIST) { 3497 btrfs_abort_transaction(trans, ret); 3498 return ret; 3499 } 3500 3501 return 0; 3502 } 3503 3504 /* 3505 * We have done the delete so we can go ahead and remove the orphan item for 3506 * this particular inode. 3507 */ 3508 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3509 struct btrfs_inode *inode) 3510 { 3511 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3512 } 3513 3514 /* 3515 * this cleans up any orphans that may be left on the list from the last use 3516 * of this root. 3517 */ 3518 int btrfs_orphan_cleanup(struct btrfs_root *root) 3519 { 3520 struct btrfs_fs_info *fs_info = root->fs_info; 3521 struct btrfs_path *path; 3522 struct extent_buffer *leaf; 3523 struct btrfs_key key, found_key; 3524 struct btrfs_trans_handle *trans; 3525 struct inode *inode; 3526 u64 last_objectid = 0; 3527 int ret = 0, nr_unlink = 0; 3528 3529 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3530 return 0; 3531 3532 path = btrfs_alloc_path(); 3533 if (!path) { 3534 ret = -ENOMEM; 3535 goto out; 3536 } 3537 path->reada = READA_BACK; 3538 3539 key.objectid = BTRFS_ORPHAN_OBJECTID; 3540 key.type = BTRFS_ORPHAN_ITEM_KEY; 3541 key.offset = (u64)-1; 3542 3543 while (1) { 3544 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3545 if (ret < 0) 3546 goto out; 3547 3548 /* 3549 * if ret == 0 means we found what we were searching for, which 3550 * is weird, but possible, so only screw with path if we didn't 3551 * find the key and see if we have stuff that matches 3552 */ 3553 if (ret > 0) { 3554 ret = 0; 3555 if (path->slots[0] == 0) 3556 break; 3557 path->slots[0]--; 3558 } 3559 3560 /* pull out the item */ 3561 leaf = path->nodes[0]; 3562 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3563 3564 /* make sure the item matches what we want */ 3565 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3566 break; 3567 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3568 break; 3569 3570 /* release the path since we're done with it */ 3571 btrfs_release_path(path); 3572 3573 /* 3574 * this is where we are basically btrfs_lookup, without the 3575 * crossing root thing. we store the inode number in the 3576 * offset of the orphan item. 3577 */ 3578 3579 if (found_key.offset == last_objectid) { 3580 /* 3581 * We found the same inode as before. This means we were 3582 * not able to remove its items via eviction triggered 3583 * by an iput(). A transaction abort may have happened, 3584 * due to -ENOSPC for example, so try to grab the error 3585 * that lead to a transaction abort, if any. 3586 */ 3587 btrfs_err(fs_info, 3588 "Error removing orphan entry, stopping orphan cleanup"); 3589 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3590 goto out; 3591 } 3592 3593 last_objectid = found_key.offset; 3594 3595 found_key.objectid = found_key.offset; 3596 found_key.type = BTRFS_INODE_ITEM_KEY; 3597 found_key.offset = 0; 3598 inode = btrfs_iget(last_objectid, root); 3599 if (IS_ERR(inode)) { 3600 ret = PTR_ERR(inode); 3601 inode = NULL; 3602 if (ret != -ENOENT) 3603 goto out; 3604 } 3605 3606 if (!inode && root == fs_info->tree_root) { 3607 struct btrfs_root *dead_root; 3608 int is_dead_root = 0; 3609 3610 /* 3611 * This is an orphan in the tree root. Currently these 3612 * could come from 2 sources: 3613 * a) a root (snapshot/subvolume) deletion in progress 3614 * b) a free space cache inode 3615 * We need to distinguish those two, as the orphan item 3616 * for a root must not get deleted before the deletion 3617 * of the snapshot/subvolume's tree completes. 3618 * 3619 * btrfs_find_orphan_roots() ran before us, which has 3620 * found all deleted roots and loaded them into 3621 * fs_info->fs_roots_radix. So here we can find if an 3622 * orphan item corresponds to a deleted root by looking 3623 * up the root from that radix tree. 3624 */ 3625 3626 spin_lock(&fs_info->fs_roots_radix_lock); 3627 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3628 (unsigned long)found_key.objectid); 3629 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3630 is_dead_root = 1; 3631 spin_unlock(&fs_info->fs_roots_radix_lock); 3632 3633 if (is_dead_root) { 3634 /* prevent this orphan from being found again */ 3635 key.offset = found_key.objectid - 1; 3636 continue; 3637 } 3638 3639 } 3640 3641 /* 3642 * If we have an inode with links, there are a couple of 3643 * possibilities: 3644 * 3645 * 1. We were halfway through creating fsverity metadata for the 3646 * file. In that case, the orphan item represents incomplete 3647 * fsverity metadata which must be cleaned up with 3648 * btrfs_drop_verity_items and deleting the orphan item. 3649 3650 * 2. Old kernels (before v3.12) used to create an 3651 * orphan item for truncate indicating that there were possibly 3652 * extent items past i_size that needed to be deleted. In v3.12, 3653 * truncate was changed to update i_size in sync with the extent 3654 * items, but the (useless) orphan item was still created. Since 3655 * v4.18, we don't create the orphan item for truncate at all. 3656 * 3657 * So, this item could mean that we need to do a truncate, but 3658 * only if this filesystem was last used on a pre-v3.12 kernel 3659 * and was not cleanly unmounted. The odds of that are quite 3660 * slim, and it's a pain to do the truncate now, so just delete 3661 * the orphan item. 3662 * 3663 * It's also possible that this orphan item was supposed to be 3664 * deleted but wasn't. The inode number may have been reused, 3665 * but either way, we can delete the orphan item. 3666 */ 3667 if (!inode || inode->i_nlink) { 3668 if (inode) { 3669 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3670 iput(inode); 3671 inode = NULL; 3672 if (ret) 3673 goto out; 3674 } 3675 trans = btrfs_start_transaction(root, 1); 3676 if (IS_ERR(trans)) { 3677 ret = PTR_ERR(trans); 3678 goto out; 3679 } 3680 btrfs_debug(fs_info, "auto deleting %Lu", 3681 found_key.objectid); 3682 ret = btrfs_del_orphan_item(trans, root, 3683 found_key.objectid); 3684 btrfs_end_transaction(trans); 3685 if (ret) 3686 goto out; 3687 continue; 3688 } 3689 3690 nr_unlink++; 3691 3692 /* this will do delete_inode and everything for us */ 3693 iput(inode); 3694 } 3695 /* release the path since we're done with it */ 3696 btrfs_release_path(path); 3697 3698 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3699 trans = btrfs_join_transaction(root); 3700 if (!IS_ERR(trans)) 3701 btrfs_end_transaction(trans); 3702 } 3703 3704 if (nr_unlink) 3705 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3706 3707 out: 3708 if (ret) 3709 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3710 btrfs_free_path(path); 3711 return ret; 3712 } 3713 3714 /* 3715 * very simple check to peek ahead in the leaf looking for xattrs. If we 3716 * don't find any xattrs, we know there can't be any acls. 3717 * 3718 * slot is the slot the inode is in, objectid is the objectid of the inode 3719 */ 3720 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3721 int slot, u64 objectid, 3722 int *first_xattr_slot) 3723 { 3724 u32 nritems = btrfs_header_nritems(leaf); 3725 struct btrfs_key found_key; 3726 static u64 xattr_access = 0; 3727 static u64 xattr_default = 0; 3728 int scanned = 0; 3729 3730 if (!xattr_access) { 3731 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3732 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3733 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3734 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3735 } 3736 3737 slot++; 3738 *first_xattr_slot = -1; 3739 while (slot < nritems) { 3740 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3741 3742 /* we found a different objectid, there must not be acls */ 3743 if (found_key.objectid != objectid) 3744 return 0; 3745 3746 /* we found an xattr, assume we've got an acl */ 3747 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3748 if (*first_xattr_slot == -1) 3749 *first_xattr_slot = slot; 3750 if (found_key.offset == xattr_access || 3751 found_key.offset == xattr_default) 3752 return 1; 3753 } 3754 3755 /* 3756 * we found a key greater than an xattr key, there can't 3757 * be any acls later on 3758 */ 3759 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3760 return 0; 3761 3762 slot++; 3763 scanned++; 3764 3765 /* 3766 * it goes inode, inode backrefs, xattrs, extents, 3767 * so if there are a ton of hard links to an inode there can 3768 * be a lot of backrefs. Don't waste time searching too hard, 3769 * this is just an optimization 3770 */ 3771 if (scanned >= 8) 3772 break; 3773 } 3774 /* we hit the end of the leaf before we found an xattr or 3775 * something larger than an xattr. We have to assume the inode 3776 * has acls 3777 */ 3778 if (*first_xattr_slot == -1) 3779 *first_xattr_slot = slot; 3780 return 1; 3781 } 3782 3783 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode) 3784 { 3785 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3786 3787 if (WARN_ON_ONCE(inode->file_extent_tree)) 3788 return 0; 3789 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 3790 return 0; 3791 if (!S_ISREG(inode->vfs_inode.i_mode)) 3792 return 0; 3793 if (btrfs_is_free_space_inode(inode)) 3794 return 0; 3795 3796 inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL); 3797 if (!inode->file_extent_tree) 3798 return -ENOMEM; 3799 3800 extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT); 3801 /* Lockdep class is set only for the file extent tree. */ 3802 lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class); 3803 3804 return 0; 3805 } 3806 3807 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc) 3808 { 3809 struct btrfs_root *root = inode->root; 3810 struct btrfs_inode *existing; 3811 const u64 ino = btrfs_ino(inode); 3812 int ret; 3813 3814 if (inode_unhashed(&inode->vfs_inode)) 3815 return 0; 3816 3817 if (prealloc) { 3818 ret = xa_reserve(&root->inodes, ino, GFP_NOFS); 3819 if (ret) 3820 return ret; 3821 } 3822 3823 existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC); 3824 3825 if (xa_is_err(existing)) { 3826 ret = xa_err(existing); 3827 ASSERT(ret != -EINVAL); 3828 ASSERT(ret != -ENOMEM); 3829 return ret; 3830 } else if (existing) { 3831 WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING))); 3832 } 3833 3834 return 0; 3835 } 3836 3837 /* 3838 * Read a locked inode from the btree into the in-memory inode and add it to 3839 * its root list/tree. 3840 * 3841 * On failure clean up the inode. 3842 */ 3843 static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path) 3844 { 3845 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 3846 struct extent_buffer *leaf; 3847 struct btrfs_inode_item *inode_item; 3848 struct btrfs_root *root = BTRFS_I(inode)->root; 3849 struct btrfs_key location; 3850 unsigned long ptr; 3851 int maybe_acls; 3852 u32 rdev; 3853 int ret; 3854 bool filled = false; 3855 int first_xattr_slot; 3856 3857 ret = btrfs_init_file_extent_tree(BTRFS_I(inode)); 3858 if (ret) 3859 goto out; 3860 3861 ret = btrfs_fill_inode(inode, &rdev); 3862 if (!ret) 3863 filled = true; 3864 3865 ASSERT(path); 3866 3867 btrfs_get_inode_key(BTRFS_I(inode), &location); 3868 3869 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3870 if (ret) { 3871 /* 3872 * ret > 0 can come from btrfs_search_slot called by 3873 * btrfs_lookup_inode(), this means the inode was not found. 3874 */ 3875 if (ret > 0) 3876 ret = -ENOENT; 3877 goto out; 3878 } 3879 3880 leaf = path->nodes[0]; 3881 3882 if (filled) 3883 goto cache_index; 3884 3885 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3886 struct btrfs_inode_item); 3887 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3888 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3889 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3890 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3891 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3892 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3893 round_up(i_size_read(inode), fs_info->sectorsize)); 3894 3895 inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime), 3896 btrfs_timespec_nsec(leaf, &inode_item->atime)); 3897 3898 inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime), 3899 btrfs_timespec_nsec(leaf, &inode_item->mtime)); 3900 3901 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), 3902 btrfs_timespec_nsec(leaf, &inode_item->ctime)); 3903 3904 BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); 3905 BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime); 3906 3907 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3908 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3909 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3910 3911 inode_set_iversion_queried(inode, 3912 btrfs_inode_sequence(leaf, inode_item)); 3913 inode->i_generation = BTRFS_I(inode)->generation; 3914 inode->i_rdev = 0; 3915 rdev = btrfs_inode_rdev(leaf, inode_item); 3916 3917 if (S_ISDIR(inode->i_mode)) 3918 BTRFS_I(inode)->index_cnt = (u64)-1; 3919 3920 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3921 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3922 3923 cache_index: 3924 /* 3925 * If we were modified in the current generation and evicted from memory 3926 * and then re-read we need to do a full sync since we don't have any 3927 * idea about which extents were modified before we were evicted from 3928 * cache. 3929 * 3930 * This is required for both inode re-read from disk and delayed inode 3931 * in the delayed_nodes xarray. 3932 */ 3933 if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info)) 3934 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3935 &BTRFS_I(inode)->runtime_flags); 3936 3937 /* 3938 * We don't persist the id of the transaction where an unlink operation 3939 * against the inode was last made. So here we assume the inode might 3940 * have been evicted, and therefore the exact value of last_unlink_trans 3941 * lost, and set it to last_trans to avoid metadata inconsistencies 3942 * between the inode and its parent if the inode is fsync'ed and the log 3943 * replayed. For example, in the scenario: 3944 * 3945 * touch mydir/foo 3946 * ln mydir/foo mydir/bar 3947 * sync 3948 * unlink mydir/bar 3949 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3950 * xfs_io -c fsync mydir/foo 3951 * <power failure> 3952 * mount fs, triggers fsync log replay 3953 * 3954 * We must make sure that when we fsync our inode foo we also log its 3955 * parent inode, otherwise after log replay the parent still has the 3956 * dentry with the "bar" name but our inode foo has a link count of 1 3957 * and doesn't have an inode ref with the name "bar" anymore. 3958 * 3959 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3960 * but it guarantees correctness at the expense of occasional full 3961 * transaction commits on fsync if our inode is a directory, or if our 3962 * inode is not a directory, logging its parent unnecessarily. 3963 */ 3964 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3965 3966 /* 3967 * Same logic as for last_unlink_trans. We don't persist the generation 3968 * of the last transaction where this inode was used for a reflink 3969 * operation, so after eviction and reloading the inode we must be 3970 * pessimistic and assume the last transaction that modified the inode. 3971 */ 3972 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3973 3974 path->slots[0]++; 3975 if (inode->i_nlink != 1 || 3976 path->slots[0] >= btrfs_header_nritems(leaf)) 3977 goto cache_acl; 3978 3979 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3980 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3981 goto cache_acl; 3982 3983 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3984 if (location.type == BTRFS_INODE_REF_KEY) { 3985 struct btrfs_inode_ref *ref; 3986 3987 ref = (struct btrfs_inode_ref *)ptr; 3988 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3989 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3990 struct btrfs_inode_extref *extref; 3991 3992 extref = (struct btrfs_inode_extref *)ptr; 3993 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3994 extref); 3995 } 3996 cache_acl: 3997 /* 3998 * try to precache a NULL acl entry for files that don't have 3999 * any xattrs or acls 4000 */ 4001 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 4002 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 4003 if (first_xattr_slot != -1) { 4004 path->slots[0] = first_xattr_slot; 4005 ret = btrfs_load_inode_props(inode, path); 4006 if (ret) 4007 btrfs_err(fs_info, 4008 "error loading props for ino %llu (root %llu): %d", 4009 btrfs_ino(BTRFS_I(inode)), 4010 btrfs_root_id(root), ret); 4011 } 4012 4013 if (!maybe_acls) 4014 cache_no_acl(inode); 4015 4016 switch (inode->i_mode & S_IFMT) { 4017 case S_IFREG: 4018 inode->i_mapping->a_ops = &btrfs_aops; 4019 inode->i_fop = &btrfs_file_operations; 4020 inode->i_op = &btrfs_file_inode_operations; 4021 break; 4022 case S_IFDIR: 4023 inode->i_fop = &btrfs_dir_file_operations; 4024 inode->i_op = &btrfs_dir_inode_operations; 4025 break; 4026 case S_IFLNK: 4027 inode->i_op = &btrfs_symlink_inode_operations; 4028 inode_nohighmem(inode); 4029 inode->i_mapping->a_ops = &btrfs_aops; 4030 break; 4031 default: 4032 inode->i_op = &btrfs_special_inode_operations; 4033 init_special_inode(inode, inode->i_mode, rdev); 4034 break; 4035 } 4036 4037 btrfs_sync_inode_flags_to_i_flags(inode); 4038 4039 ret = btrfs_add_inode_to_root(BTRFS_I(inode), true); 4040 if (ret) 4041 goto out; 4042 4043 return 0; 4044 out: 4045 iget_failed(inode); 4046 return ret; 4047 } 4048 4049 /* 4050 * given a leaf and an inode, copy the inode fields into the leaf 4051 */ 4052 static void fill_inode_item(struct btrfs_trans_handle *trans, 4053 struct extent_buffer *leaf, 4054 struct btrfs_inode_item *item, 4055 struct inode *inode) 4056 { 4057 struct btrfs_map_token token; 4058 u64 flags; 4059 4060 btrfs_init_map_token(&token, leaf); 4061 4062 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4063 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4064 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4065 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4066 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4067 4068 btrfs_set_token_timespec_sec(&token, &item->atime, 4069 inode_get_atime_sec(inode)); 4070 btrfs_set_token_timespec_nsec(&token, &item->atime, 4071 inode_get_atime_nsec(inode)); 4072 4073 btrfs_set_token_timespec_sec(&token, &item->mtime, 4074 inode_get_mtime_sec(inode)); 4075 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4076 inode_get_mtime_nsec(inode)); 4077 4078 btrfs_set_token_timespec_sec(&token, &item->ctime, 4079 inode_get_ctime_sec(inode)); 4080 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4081 inode_get_ctime_nsec(inode)); 4082 4083 btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); 4084 btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec); 4085 4086 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4087 btrfs_set_token_inode_generation(&token, item, 4088 BTRFS_I(inode)->generation); 4089 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4090 btrfs_set_token_inode_transid(&token, item, trans->transid); 4091 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4092 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4093 BTRFS_I(inode)->ro_flags); 4094 btrfs_set_token_inode_flags(&token, item, flags); 4095 btrfs_set_token_inode_block_group(&token, item, 0); 4096 } 4097 4098 /* 4099 * copy everything in the in-memory inode into the btree. 4100 */ 4101 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4102 struct btrfs_inode *inode) 4103 { 4104 struct btrfs_inode_item *inode_item; 4105 struct btrfs_path *path; 4106 struct extent_buffer *leaf; 4107 struct btrfs_key key; 4108 int ret; 4109 4110 path = btrfs_alloc_path(); 4111 if (!path) 4112 return -ENOMEM; 4113 4114 btrfs_get_inode_key(inode, &key); 4115 ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1); 4116 if (ret) { 4117 if (ret > 0) 4118 ret = -ENOENT; 4119 goto failed; 4120 } 4121 4122 leaf = path->nodes[0]; 4123 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4124 struct btrfs_inode_item); 4125 4126 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4127 btrfs_set_inode_last_trans(trans, inode); 4128 ret = 0; 4129 failed: 4130 btrfs_free_path(path); 4131 return ret; 4132 } 4133 4134 /* 4135 * copy everything in the in-memory inode into the btree. 4136 */ 4137 int btrfs_update_inode(struct btrfs_trans_handle *trans, 4138 struct btrfs_inode *inode) 4139 { 4140 struct btrfs_root *root = inode->root; 4141 struct btrfs_fs_info *fs_info = root->fs_info; 4142 int ret; 4143 4144 /* 4145 * If the inode is a free space inode, we can deadlock during commit 4146 * if we put it into the delayed code. 4147 * 4148 * The data relocation inode should also be directly updated 4149 * without delay 4150 */ 4151 if (!btrfs_is_free_space_inode(inode) 4152 && !btrfs_is_data_reloc_root(root) 4153 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4154 btrfs_update_root_times(trans, root); 4155 4156 ret = btrfs_delayed_update_inode(trans, inode); 4157 if (!ret) 4158 btrfs_set_inode_last_trans(trans, inode); 4159 return ret; 4160 } 4161 4162 return btrfs_update_inode_item(trans, inode); 4163 } 4164 4165 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4166 struct btrfs_inode *inode) 4167 { 4168 int ret; 4169 4170 ret = btrfs_update_inode(trans, inode); 4171 if (ret == -ENOSPC) 4172 return btrfs_update_inode_item(trans, inode); 4173 return ret; 4174 } 4175 4176 /* 4177 * unlink helper that gets used here in inode.c and in the tree logging 4178 * recovery code. It remove a link in a directory with a given name, and 4179 * also drops the back refs in the inode to the directory 4180 */ 4181 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4182 struct btrfs_inode *dir, 4183 struct btrfs_inode *inode, 4184 const struct fscrypt_str *name, 4185 struct btrfs_rename_ctx *rename_ctx) 4186 { 4187 struct btrfs_root *root = dir->root; 4188 struct btrfs_fs_info *fs_info = root->fs_info; 4189 struct btrfs_path *path; 4190 int ret = 0; 4191 struct btrfs_dir_item *di; 4192 u64 index; 4193 u64 ino = btrfs_ino(inode); 4194 u64 dir_ino = btrfs_ino(dir); 4195 4196 path = btrfs_alloc_path(); 4197 if (!path) { 4198 ret = -ENOMEM; 4199 goto out; 4200 } 4201 4202 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4203 if (IS_ERR_OR_NULL(di)) { 4204 ret = di ? PTR_ERR(di) : -ENOENT; 4205 goto err; 4206 } 4207 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4208 if (ret) 4209 goto err; 4210 btrfs_release_path(path); 4211 4212 /* 4213 * If we don't have dir index, we have to get it by looking up 4214 * the inode ref, since we get the inode ref, remove it directly, 4215 * it is unnecessary to do delayed deletion. 4216 * 4217 * But if we have dir index, needn't search inode ref to get it. 4218 * Since the inode ref is close to the inode item, it is better 4219 * that we delay to delete it, and just do this deletion when 4220 * we update the inode item. 4221 */ 4222 if (inode->dir_index) { 4223 ret = btrfs_delayed_delete_inode_ref(inode); 4224 if (!ret) { 4225 index = inode->dir_index; 4226 goto skip_backref; 4227 } 4228 } 4229 4230 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4231 if (ret) { 4232 btrfs_info(fs_info, 4233 "failed to delete reference to %.*s, inode %llu parent %llu", 4234 name->len, name->name, ino, dir_ino); 4235 btrfs_abort_transaction(trans, ret); 4236 goto err; 4237 } 4238 skip_backref: 4239 if (rename_ctx) 4240 rename_ctx->index = index; 4241 4242 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4243 if (ret) { 4244 btrfs_abort_transaction(trans, ret); 4245 goto err; 4246 } 4247 4248 /* 4249 * If we are in a rename context, we don't need to update anything in the 4250 * log. That will be done later during the rename by btrfs_log_new_name(). 4251 * Besides that, doing it here would only cause extra unnecessary btree 4252 * operations on the log tree, increasing latency for applications. 4253 */ 4254 if (!rename_ctx) { 4255 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4256 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4257 } 4258 4259 /* 4260 * If we have a pending delayed iput we could end up with the final iput 4261 * being run in btrfs-cleaner context. If we have enough of these built 4262 * up we can end up burning a lot of time in btrfs-cleaner without any 4263 * way to throttle the unlinks. Since we're currently holding a ref on 4264 * the inode we can run the delayed iput here without any issues as the 4265 * final iput won't be done until after we drop the ref we're currently 4266 * holding. 4267 */ 4268 btrfs_run_delayed_iput(fs_info, inode); 4269 err: 4270 btrfs_free_path(path); 4271 if (ret) 4272 goto out; 4273 4274 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4275 inode_inc_iversion(&inode->vfs_inode); 4276 inode_set_ctime_current(&inode->vfs_inode); 4277 inode_inc_iversion(&dir->vfs_inode); 4278 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4279 ret = btrfs_update_inode(trans, dir); 4280 out: 4281 return ret; 4282 } 4283 4284 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4285 struct btrfs_inode *dir, struct btrfs_inode *inode, 4286 const struct fscrypt_str *name) 4287 { 4288 int ret; 4289 4290 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4291 if (!ret) { 4292 drop_nlink(&inode->vfs_inode); 4293 ret = btrfs_update_inode(trans, inode); 4294 } 4295 return ret; 4296 } 4297 4298 /* 4299 * helper to start transaction for unlink and rmdir. 4300 * 4301 * unlink and rmdir are special in btrfs, they do not always free space, so 4302 * if we cannot make our reservations the normal way try and see if there is 4303 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4304 * allow the unlink to occur. 4305 */ 4306 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4307 { 4308 struct btrfs_root *root = dir->root; 4309 4310 return btrfs_start_transaction_fallback_global_rsv(root, 4311 BTRFS_UNLINK_METADATA_UNITS); 4312 } 4313 4314 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4315 { 4316 struct btrfs_trans_handle *trans; 4317 struct inode *inode = d_inode(dentry); 4318 int ret; 4319 struct fscrypt_name fname; 4320 4321 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4322 if (ret) 4323 return ret; 4324 4325 /* This needs to handle no-key deletions later on */ 4326 4327 trans = __unlink_start_trans(BTRFS_I(dir)); 4328 if (IS_ERR(trans)) { 4329 ret = PTR_ERR(trans); 4330 goto fscrypt_free; 4331 } 4332 4333 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4334 false); 4335 4336 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4337 &fname.disk_name); 4338 if (ret) 4339 goto end_trans; 4340 4341 if (inode->i_nlink == 0) { 4342 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4343 if (ret) 4344 goto end_trans; 4345 } 4346 4347 end_trans: 4348 btrfs_end_transaction(trans); 4349 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4350 fscrypt_free: 4351 fscrypt_free_filename(&fname); 4352 return ret; 4353 } 4354 4355 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4356 struct btrfs_inode *dir, struct dentry *dentry) 4357 { 4358 struct btrfs_root *root = dir->root; 4359 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4360 struct btrfs_path *path; 4361 struct extent_buffer *leaf; 4362 struct btrfs_dir_item *di; 4363 struct btrfs_key key; 4364 u64 index; 4365 int ret; 4366 u64 objectid; 4367 u64 dir_ino = btrfs_ino(dir); 4368 struct fscrypt_name fname; 4369 4370 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4371 if (ret) 4372 return ret; 4373 4374 /* This needs to handle no-key deletions later on */ 4375 4376 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4377 objectid = btrfs_root_id(inode->root); 4378 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4379 objectid = inode->ref_root_id; 4380 } else { 4381 WARN_ON(1); 4382 fscrypt_free_filename(&fname); 4383 return -EINVAL; 4384 } 4385 4386 path = btrfs_alloc_path(); 4387 if (!path) { 4388 ret = -ENOMEM; 4389 goto out; 4390 } 4391 4392 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4393 &fname.disk_name, -1); 4394 if (IS_ERR_OR_NULL(di)) { 4395 ret = di ? PTR_ERR(di) : -ENOENT; 4396 goto out; 4397 } 4398 4399 leaf = path->nodes[0]; 4400 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4401 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4402 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4403 if (ret) { 4404 btrfs_abort_transaction(trans, ret); 4405 goto out; 4406 } 4407 btrfs_release_path(path); 4408 4409 /* 4410 * This is a placeholder inode for a subvolume we didn't have a 4411 * reference to at the time of the snapshot creation. In the meantime 4412 * we could have renamed the real subvol link into our snapshot, so 4413 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4414 * Instead simply lookup the dir_index_item for this entry so we can 4415 * remove it. Otherwise we know we have a ref to the root and we can 4416 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4417 */ 4418 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4419 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4420 if (IS_ERR(di)) { 4421 ret = PTR_ERR(di); 4422 btrfs_abort_transaction(trans, ret); 4423 goto out; 4424 } 4425 4426 leaf = path->nodes[0]; 4427 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4428 index = key.offset; 4429 btrfs_release_path(path); 4430 } else { 4431 ret = btrfs_del_root_ref(trans, objectid, 4432 btrfs_root_id(root), dir_ino, 4433 &index, &fname.disk_name); 4434 if (ret) { 4435 btrfs_abort_transaction(trans, ret); 4436 goto out; 4437 } 4438 } 4439 4440 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4441 if (ret) { 4442 btrfs_abort_transaction(trans, ret); 4443 goto out; 4444 } 4445 4446 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4447 inode_inc_iversion(&dir->vfs_inode); 4448 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4449 ret = btrfs_update_inode_fallback(trans, dir); 4450 if (ret) 4451 btrfs_abort_transaction(trans, ret); 4452 out: 4453 btrfs_free_path(path); 4454 fscrypt_free_filename(&fname); 4455 return ret; 4456 } 4457 4458 /* 4459 * Helper to check if the subvolume references other subvolumes or if it's 4460 * default. 4461 */ 4462 static noinline int may_destroy_subvol(struct btrfs_root *root) 4463 { 4464 struct btrfs_fs_info *fs_info = root->fs_info; 4465 struct btrfs_path *path; 4466 struct btrfs_dir_item *di; 4467 struct btrfs_key key; 4468 struct fscrypt_str name = FSTR_INIT("default", 7); 4469 u64 dir_id; 4470 int ret; 4471 4472 path = btrfs_alloc_path(); 4473 if (!path) 4474 return -ENOMEM; 4475 4476 /* Make sure this root isn't set as the default subvol */ 4477 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4478 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4479 dir_id, &name, 0); 4480 if (di && !IS_ERR(di)) { 4481 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4482 if (key.objectid == btrfs_root_id(root)) { 4483 ret = -EPERM; 4484 btrfs_err(fs_info, 4485 "deleting default subvolume %llu is not allowed", 4486 key.objectid); 4487 goto out; 4488 } 4489 btrfs_release_path(path); 4490 } 4491 4492 key.objectid = btrfs_root_id(root); 4493 key.type = BTRFS_ROOT_REF_KEY; 4494 key.offset = (u64)-1; 4495 4496 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4497 if (ret < 0) 4498 goto out; 4499 if (ret == 0) { 4500 /* 4501 * Key with offset -1 found, there would have to exist a root 4502 * with such id, but this is out of valid range. 4503 */ 4504 ret = -EUCLEAN; 4505 goto out; 4506 } 4507 4508 ret = 0; 4509 if (path->slots[0] > 0) { 4510 path->slots[0]--; 4511 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4512 if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY) 4513 ret = -ENOTEMPTY; 4514 } 4515 out: 4516 btrfs_free_path(path); 4517 return ret; 4518 } 4519 4520 /* Delete all dentries for inodes belonging to the root */ 4521 static void btrfs_prune_dentries(struct btrfs_root *root) 4522 { 4523 struct btrfs_fs_info *fs_info = root->fs_info; 4524 struct btrfs_inode *inode; 4525 u64 min_ino = 0; 4526 4527 if (!BTRFS_FS_ERROR(fs_info)) 4528 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4529 4530 inode = btrfs_find_first_inode(root, min_ino); 4531 while (inode) { 4532 if (atomic_read(&inode->vfs_inode.i_count) > 1) 4533 d_prune_aliases(&inode->vfs_inode); 4534 4535 min_ino = btrfs_ino(inode) + 1; 4536 /* 4537 * btrfs_drop_inode() will have it removed from the inode 4538 * cache when its usage count hits zero. 4539 */ 4540 iput(&inode->vfs_inode); 4541 cond_resched(); 4542 inode = btrfs_find_first_inode(root, min_ino); 4543 } 4544 } 4545 4546 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4547 { 4548 struct btrfs_root *root = dir->root; 4549 struct btrfs_fs_info *fs_info = root->fs_info; 4550 struct inode *inode = d_inode(dentry); 4551 struct btrfs_root *dest = BTRFS_I(inode)->root; 4552 struct btrfs_trans_handle *trans; 4553 struct btrfs_block_rsv block_rsv; 4554 u64 root_flags; 4555 u64 qgroup_reserved = 0; 4556 int ret; 4557 4558 down_write(&fs_info->subvol_sem); 4559 4560 /* 4561 * Don't allow to delete a subvolume with send in progress. This is 4562 * inside the inode lock so the error handling that has to drop the bit 4563 * again is not run concurrently. 4564 */ 4565 spin_lock(&dest->root_item_lock); 4566 if (dest->send_in_progress) { 4567 spin_unlock(&dest->root_item_lock); 4568 btrfs_warn(fs_info, 4569 "attempt to delete subvolume %llu during send", 4570 btrfs_root_id(dest)); 4571 ret = -EPERM; 4572 goto out_up_write; 4573 } 4574 if (atomic_read(&dest->nr_swapfiles)) { 4575 spin_unlock(&dest->root_item_lock); 4576 btrfs_warn(fs_info, 4577 "attempt to delete subvolume %llu with active swapfile", 4578 btrfs_root_id(root)); 4579 ret = -EPERM; 4580 goto out_up_write; 4581 } 4582 root_flags = btrfs_root_flags(&dest->root_item); 4583 btrfs_set_root_flags(&dest->root_item, 4584 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4585 spin_unlock(&dest->root_item_lock); 4586 4587 ret = may_destroy_subvol(dest); 4588 if (ret) 4589 goto out_undead; 4590 4591 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4592 /* 4593 * One for dir inode, 4594 * two for dir entries, 4595 * two for root ref/backref. 4596 */ 4597 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4598 if (ret) 4599 goto out_undead; 4600 qgroup_reserved = block_rsv.qgroup_rsv_reserved; 4601 4602 trans = btrfs_start_transaction(root, 0); 4603 if (IS_ERR(trans)) { 4604 ret = PTR_ERR(trans); 4605 goto out_release; 4606 } 4607 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); 4608 qgroup_reserved = 0; 4609 trans->block_rsv = &block_rsv; 4610 trans->bytes_reserved = block_rsv.size; 4611 4612 btrfs_record_snapshot_destroy(trans, dir); 4613 4614 ret = btrfs_unlink_subvol(trans, dir, dentry); 4615 if (ret) { 4616 btrfs_abort_transaction(trans, ret); 4617 goto out_end_trans; 4618 } 4619 4620 ret = btrfs_record_root_in_trans(trans, dest); 4621 if (ret) { 4622 btrfs_abort_transaction(trans, ret); 4623 goto out_end_trans; 4624 } 4625 4626 memset(&dest->root_item.drop_progress, 0, 4627 sizeof(dest->root_item.drop_progress)); 4628 btrfs_set_root_drop_level(&dest->root_item, 0); 4629 btrfs_set_root_refs(&dest->root_item, 0); 4630 4631 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4632 ret = btrfs_insert_orphan_item(trans, 4633 fs_info->tree_root, 4634 btrfs_root_id(dest)); 4635 if (ret) { 4636 btrfs_abort_transaction(trans, ret); 4637 goto out_end_trans; 4638 } 4639 } 4640 4641 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4642 BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest)); 4643 if (ret && ret != -ENOENT) { 4644 btrfs_abort_transaction(trans, ret); 4645 goto out_end_trans; 4646 } 4647 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4648 ret = btrfs_uuid_tree_remove(trans, 4649 dest->root_item.received_uuid, 4650 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4651 btrfs_root_id(dest)); 4652 if (ret && ret != -ENOENT) { 4653 btrfs_abort_transaction(trans, ret); 4654 goto out_end_trans; 4655 } 4656 } 4657 4658 free_anon_bdev(dest->anon_dev); 4659 dest->anon_dev = 0; 4660 out_end_trans: 4661 trans->block_rsv = NULL; 4662 trans->bytes_reserved = 0; 4663 ret = btrfs_end_transaction(trans); 4664 inode->i_flags |= S_DEAD; 4665 out_release: 4666 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); 4667 if (qgroup_reserved) 4668 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); 4669 out_undead: 4670 if (ret) { 4671 spin_lock(&dest->root_item_lock); 4672 root_flags = btrfs_root_flags(&dest->root_item); 4673 btrfs_set_root_flags(&dest->root_item, 4674 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4675 spin_unlock(&dest->root_item_lock); 4676 } 4677 out_up_write: 4678 up_write(&fs_info->subvol_sem); 4679 if (!ret) { 4680 d_invalidate(dentry); 4681 btrfs_prune_dentries(dest); 4682 ASSERT(dest->send_in_progress == 0); 4683 } 4684 4685 return ret; 4686 } 4687 4688 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4689 { 4690 struct inode *inode = d_inode(dentry); 4691 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4692 int ret = 0; 4693 struct btrfs_trans_handle *trans; 4694 u64 last_unlink_trans; 4695 struct fscrypt_name fname; 4696 4697 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4698 return -ENOTEMPTY; 4699 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4700 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4701 btrfs_err(fs_info, 4702 "extent tree v2 doesn't support snapshot deletion yet"); 4703 return -EOPNOTSUPP; 4704 } 4705 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4706 } 4707 4708 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4709 if (ret) 4710 return ret; 4711 4712 /* This needs to handle no-key deletions later on */ 4713 4714 trans = __unlink_start_trans(BTRFS_I(dir)); 4715 if (IS_ERR(trans)) { 4716 ret = PTR_ERR(trans); 4717 goto out_notrans; 4718 } 4719 4720 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4721 ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4722 goto out; 4723 } 4724 4725 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4726 if (ret) 4727 goto out; 4728 4729 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4730 4731 /* now the directory is empty */ 4732 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4733 &fname.disk_name); 4734 if (!ret) { 4735 btrfs_i_size_write(BTRFS_I(inode), 0); 4736 /* 4737 * Propagate the last_unlink_trans value of the deleted dir to 4738 * its parent directory. This is to prevent an unrecoverable 4739 * log tree in the case we do something like this: 4740 * 1) create dir foo 4741 * 2) create snapshot under dir foo 4742 * 3) delete the snapshot 4743 * 4) rmdir foo 4744 * 5) mkdir foo 4745 * 6) fsync foo or some file inside foo 4746 */ 4747 if (last_unlink_trans >= trans->transid) 4748 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4749 } 4750 out: 4751 btrfs_end_transaction(trans); 4752 out_notrans: 4753 btrfs_btree_balance_dirty(fs_info); 4754 fscrypt_free_filename(&fname); 4755 4756 return ret; 4757 } 4758 4759 /* 4760 * Read, zero a chunk and write a block. 4761 * 4762 * @inode - inode that we're zeroing 4763 * @from - the offset to start zeroing 4764 * @len - the length to zero, 0 to zero the entire range respective to the 4765 * offset 4766 * @front - zero up to the offset instead of from the offset on 4767 * 4768 * This will find the block for the "from" offset and cow the block and zero the 4769 * part we want to zero. This is used with truncate and hole punching. 4770 */ 4771 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4772 int front) 4773 { 4774 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4775 struct address_space *mapping = inode->vfs_inode.i_mapping; 4776 struct extent_io_tree *io_tree = &inode->io_tree; 4777 struct btrfs_ordered_extent *ordered; 4778 struct extent_state *cached_state = NULL; 4779 struct extent_changeset *data_reserved = NULL; 4780 bool only_release_metadata = false; 4781 u32 blocksize = fs_info->sectorsize; 4782 pgoff_t index = from >> PAGE_SHIFT; 4783 unsigned offset = from & (blocksize - 1); 4784 struct folio *folio; 4785 gfp_t mask = btrfs_alloc_write_mask(mapping); 4786 size_t write_bytes = blocksize; 4787 int ret = 0; 4788 u64 block_start; 4789 u64 block_end; 4790 4791 if (IS_ALIGNED(offset, blocksize) && 4792 (!len || IS_ALIGNED(len, blocksize))) 4793 goto out; 4794 4795 block_start = round_down(from, blocksize); 4796 block_end = block_start + blocksize - 1; 4797 4798 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4799 blocksize, false); 4800 if (ret < 0) { 4801 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4802 /* For nocow case, no need to reserve data space */ 4803 only_release_metadata = true; 4804 } else { 4805 goto out; 4806 } 4807 } 4808 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4809 if (ret < 0) { 4810 if (!only_release_metadata) 4811 btrfs_free_reserved_data_space(inode, data_reserved, 4812 block_start, blocksize); 4813 goto out; 4814 } 4815 again: 4816 folio = __filemap_get_folio(mapping, index, 4817 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); 4818 if (IS_ERR(folio)) { 4819 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4820 blocksize, true); 4821 btrfs_delalloc_release_extents(inode, blocksize); 4822 ret = -ENOMEM; 4823 goto out; 4824 } 4825 4826 if (!folio_test_uptodate(folio)) { 4827 ret = btrfs_read_folio(NULL, folio); 4828 folio_lock(folio); 4829 if (folio->mapping != mapping) { 4830 folio_unlock(folio); 4831 folio_put(folio); 4832 goto again; 4833 } 4834 if (!folio_test_uptodate(folio)) { 4835 ret = -EIO; 4836 goto out_unlock; 4837 } 4838 } 4839 4840 /* 4841 * We unlock the page after the io is completed and then re-lock it 4842 * above. release_folio() could have come in between that and cleared 4843 * folio private, but left the page in the mapping. Set the page mapped 4844 * here to make sure it's properly set for the subpage stuff. 4845 */ 4846 ret = set_folio_extent_mapped(folio); 4847 if (ret < 0) 4848 goto out_unlock; 4849 4850 folio_wait_writeback(folio); 4851 4852 lock_extent(io_tree, block_start, block_end, &cached_state); 4853 4854 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4855 if (ordered) { 4856 unlock_extent(io_tree, block_start, block_end, &cached_state); 4857 folio_unlock(folio); 4858 folio_put(folio); 4859 btrfs_start_ordered_extent(ordered); 4860 btrfs_put_ordered_extent(ordered); 4861 goto again; 4862 } 4863 4864 clear_extent_bit(&inode->io_tree, block_start, block_end, 4865 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4866 &cached_state); 4867 4868 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4869 &cached_state); 4870 if (ret) { 4871 unlock_extent(io_tree, block_start, block_end, &cached_state); 4872 goto out_unlock; 4873 } 4874 4875 if (offset != blocksize) { 4876 if (!len) 4877 len = blocksize - offset; 4878 if (front) 4879 folio_zero_range(folio, block_start - folio_pos(folio), 4880 offset); 4881 else 4882 folio_zero_range(folio, 4883 (block_start - folio_pos(folio)) + offset, 4884 len); 4885 } 4886 btrfs_folio_clear_checked(fs_info, folio, block_start, 4887 block_end + 1 - block_start); 4888 btrfs_folio_set_dirty(fs_info, folio, block_start, 4889 block_end + 1 - block_start); 4890 unlock_extent(io_tree, block_start, block_end, &cached_state); 4891 4892 if (only_release_metadata) 4893 set_extent_bit(&inode->io_tree, block_start, block_end, 4894 EXTENT_NORESERVE, NULL); 4895 4896 out_unlock: 4897 if (ret) { 4898 if (only_release_metadata) 4899 btrfs_delalloc_release_metadata(inode, blocksize, true); 4900 else 4901 btrfs_delalloc_release_space(inode, data_reserved, 4902 block_start, blocksize, true); 4903 } 4904 btrfs_delalloc_release_extents(inode, blocksize); 4905 folio_unlock(folio); 4906 folio_put(folio); 4907 out: 4908 if (only_release_metadata) 4909 btrfs_check_nocow_unlock(inode); 4910 extent_changeset_free(data_reserved); 4911 return ret; 4912 } 4913 4914 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len) 4915 { 4916 struct btrfs_root *root = inode->root; 4917 struct btrfs_fs_info *fs_info = root->fs_info; 4918 struct btrfs_trans_handle *trans; 4919 struct btrfs_drop_extents_args drop_args = { 0 }; 4920 int ret; 4921 4922 /* 4923 * If NO_HOLES is enabled, we don't need to do anything. 4924 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4925 * or btrfs_update_inode() will be called, which guarantee that the next 4926 * fsync will know this inode was changed and needs to be logged. 4927 */ 4928 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4929 return 0; 4930 4931 /* 4932 * 1 - for the one we're dropping 4933 * 1 - for the one we're adding 4934 * 1 - for updating the inode. 4935 */ 4936 trans = btrfs_start_transaction(root, 3); 4937 if (IS_ERR(trans)) 4938 return PTR_ERR(trans); 4939 4940 drop_args.start = offset; 4941 drop_args.end = offset + len; 4942 drop_args.drop_cache = true; 4943 4944 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4945 if (ret) { 4946 btrfs_abort_transaction(trans, ret); 4947 btrfs_end_transaction(trans); 4948 return ret; 4949 } 4950 4951 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4952 if (ret) { 4953 btrfs_abort_transaction(trans, ret); 4954 } else { 4955 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4956 btrfs_update_inode(trans, inode); 4957 } 4958 btrfs_end_transaction(trans); 4959 return ret; 4960 } 4961 4962 /* 4963 * This function puts in dummy file extents for the area we're creating a hole 4964 * for. So if we are truncating this file to a larger size we need to insert 4965 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4966 * the range between oldsize and size 4967 */ 4968 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4969 { 4970 struct btrfs_root *root = inode->root; 4971 struct btrfs_fs_info *fs_info = root->fs_info; 4972 struct extent_io_tree *io_tree = &inode->io_tree; 4973 struct extent_map *em = NULL; 4974 struct extent_state *cached_state = NULL; 4975 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4976 u64 block_end = ALIGN(size, fs_info->sectorsize); 4977 u64 last_byte; 4978 u64 cur_offset; 4979 u64 hole_size; 4980 int ret = 0; 4981 4982 /* 4983 * If our size started in the middle of a block we need to zero out the 4984 * rest of the block before we expand the i_size, otherwise we could 4985 * expose stale data. 4986 */ 4987 ret = btrfs_truncate_block(inode, oldsize, 0, 0); 4988 if (ret) 4989 return ret; 4990 4991 if (size <= hole_start) 4992 return 0; 4993 4994 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4995 &cached_state); 4996 cur_offset = hole_start; 4997 while (1) { 4998 em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset); 4999 if (IS_ERR(em)) { 5000 ret = PTR_ERR(em); 5001 em = NULL; 5002 break; 5003 } 5004 last_byte = min(extent_map_end(em), block_end); 5005 last_byte = ALIGN(last_byte, fs_info->sectorsize); 5006 hole_size = last_byte - cur_offset; 5007 5008 if (!(em->flags & EXTENT_FLAG_PREALLOC)) { 5009 struct extent_map *hole_em; 5010 5011 ret = maybe_insert_hole(inode, cur_offset, hole_size); 5012 if (ret) 5013 break; 5014 5015 ret = btrfs_inode_set_file_extent_range(inode, 5016 cur_offset, hole_size); 5017 if (ret) 5018 break; 5019 5020 hole_em = alloc_extent_map(); 5021 if (!hole_em) { 5022 btrfs_drop_extent_map_range(inode, cur_offset, 5023 cur_offset + hole_size - 1, 5024 false); 5025 btrfs_set_inode_full_sync(inode); 5026 goto next; 5027 } 5028 hole_em->start = cur_offset; 5029 hole_em->len = hole_size; 5030 5031 hole_em->disk_bytenr = EXTENT_MAP_HOLE; 5032 hole_em->disk_num_bytes = 0; 5033 hole_em->ram_bytes = hole_size; 5034 hole_em->generation = btrfs_get_fs_generation(fs_info); 5035 5036 ret = btrfs_replace_extent_map_range(inode, hole_em, true); 5037 free_extent_map(hole_em); 5038 } else { 5039 ret = btrfs_inode_set_file_extent_range(inode, 5040 cur_offset, hole_size); 5041 if (ret) 5042 break; 5043 } 5044 next: 5045 free_extent_map(em); 5046 em = NULL; 5047 cur_offset = last_byte; 5048 if (cur_offset >= block_end) 5049 break; 5050 } 5051 free_extent_map(em); 5052 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5053 return ret; 5054 } 5055 5056 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5057 { 5058 struct btrfs_root *root = BTRFS_I(inode)->root; 5059 struct btrfs_trans_handle *trans; 5060 loff_t oldsize = i_size_read(inode); 5061 loff_t newsize = attr->ia_size; 5062 int mask = attr->ia_valid; 5063 int ret; 5064 5065 /* 5066 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5067 * special case where we need to update the times despite not having 5068 * these flags set. For all other operations the VFS set these flags 5069 * explicitly if it wants a timestamp update. 5070 */ 5071 if (newsize != oldsize) { 5072 inode_inc_iversion(inode); 5073 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5074 inode_set_mtime_to_ts(inode, 5075 inode_set_ctime_current(inode)); 5076 } 5077 } 5078 5079 if (newsize > oldsize) { 5080 /* 5081 * Don't do an expanding truncate while snapshotting is ongoing. 5082 * This is to ensure the snapshot captures a fully consistent 5083 * state of this file - if the snapshot captures this expanding 5084 * truncation, it must capture all writes that happened before 5085 * this truncation. 5086 */ 5087 btrfs_drew_write_lock(&root->snapshot_lock); 5088 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5089 if (ret) { 5090 btrfs_drew_write_unlock(&root->snapshot_lock); 5091 return ret; 5092 } 5093 5094 trans = btrfs_start_transaction(root, 1); 5095 if (IS_ERR(trans)) { 5096 btrfs_drew_write_unlock(&root->snapshot_lock); 5097 return PTR_ERR(trans); 5098 } 5099 5100 i_size_write(inode, newsize); 5101 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5102 pagecache_isize_extended(inode, oldsize, newsize); 5103 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 5104 btrfs_drew_write_unlock(&root->snapshot_lock); 5105 btrfs_end_transaction(trans); 5106 } else { 5107 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 5108 5109 if (btrfs_is_zoned(fs_info)) { 5110 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 5111 ALIGN(newsize, fs_info->sectorsize), 5112 (u64)-1); 5113 if (ret) 5114 return ret; 5115 } 5116 5117 /* 5118 * We're truncating a file that used to have good data down to 5119 * zero. Make sure any new writes to the file get on disk 5120 * on close. 5121 */ 5122 if (newsize == 0) 5123 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5124 &BTRFS_I(inode)->runtime_flags); 5125 5126 truncate_setsize(inode, newsize); 5127 5128 inode_dio_wait(inode); 5129 5130 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5131 if (ret && inode->i_nlink) { 5132 int err; 5133 5134 /* 5135 * Truncate failed, so fix up the in-memory size. We 5136 * adjusted disk_i_size down as we removed extents, so 5137 * wait for disk_i_size to be stable and then update the 5138 * in-memory size to match. 5139 */ 5140 err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 5141 if (err) 5142 return err; 5143 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5144 } 5145 } 5146 5147 return ret; 5148 } 5149 5150 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5151 struct iattr *attr) 5152 { 5153 struct inode *inode = d_inode(dentry); 5154 struct btrfs_root *root = BTRFS_I(inode)->root; 5155 int err; 5156 5157 if (btrfs_root_readonly(root)) 5158 return -EROFS; 5159 5160 err = setattr_prepare(idmap, dentry, attr); 5161 if (err) 5162 return err; 5163 5164 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5165 err = btrfs_setsize(inode, attr); 5166 if (err) 5167 return err; 5168 } 5169 5170 if (attr->ia_valid) { 5171 setattr_copy(idmap, inode, attr); 5172 inode_inc_iversion(inode); 5173 err = btrfs_dirty_inode(BTRFS_I(inode)); 5174 5175 if (!err && attr->ia_valid & ATTR_MODE) 5176 err = posix_acl_chmod(idmap, dentry, inode->i_mode); 5177 } 5178 5179 return err; 5180 } 5181 5182 /* 5183 * While truncating the inode pages during eviction, we get the VFS 5184 * calling btrfs_invalidate_folio() against each folio of the inode. This 5185 * is slow because the calls to btrfs_invalidate_folio() result in a 5186 * huge amount of calls to lock_extent() and clear_extent_bit(), 5187 * which keep merging and splitting extent_state structures over and over, 5188 * wasting lots of time. 5189 * 5190 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5191 * skip all those expensive operations on a per folio basis and do only 5192 * the ordered io finishing, while we release here the extent_map and 5193 * extent_state structures, without the excessive merging and splitting. 5194 */ 5195 static void evict_inode_truncate_pages(struct inode *inode) 5196 { 5197 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5198 struct rb_node *node; 5199 5200 ASSERT(inode->i_state & I_FREEING); 5201 truncate_inode_pages_final(&inode->i_data); 5202 5203 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5204 5205 /* 5206 * Keep looping until we have no more ranges in the io tree. 5207 * We can have ongoing bios started by readahead that have 5208 * their endio callback (extent_io.c:end_bio_extent_readpage) 5209 * still in progress (unlocked the pages in the bio but did not yet 5210 * unlocked the ranges in the io tree). Therefore this means some 5211 * ranges can still be locked and eviction started because before 5212 * submitting those bios, which are executed by a separate task (work 5213 * queue kthread), inode references (inode->i_count) were not taken 5214 * (which would be dropped in the end io callback of each bio). 5215 * Therefore here we effectively end up waiting for those bios and 5216 * anyone else holding locked ranges without having bumped the inode's 5217 * reference count - if we don't do it, when they access the inode's 5218 * io_tree to unlock a range it may be too late, leading to an 5219 * use-after-free issue. 5220 */ 5221 spin_lock(&io_tree->lock); 5222 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5223 struct extent_state *state; 5224 struct extent_state *cached_state = NULL; 5225 u64 start; 5226 u64 end; 5227 unsigned state_flags; 5228 5229 node = rb_first(&io_tree->state); 5230 state = rb_entry(node, struct extent_state, rb_node); 5231 start = state->start; 5232 end = state->end; 5233 state_flags = state->state; 5234 spin_unlock(&io_tree->lock); 5235 5236 lock_extent(io_tree, start, end, &cached_state); 5237 5238 /* 5239 * If still has DELALLOC flag, the extent didn't reach disk, 5240 * and its reserved space won't be freed by delayed_ref. 5241 * So we need to free its reserved space here. 5242 * (Refer to comment in btrfs_invalidate_folio, case 2) 5243 * 5244 * Note, end is the bytenr of last byte, so we need + 1 here. 5245 */ 5246 if (state_flags & EXTENT_DELALLOC) 5247 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5248 end - start + 1, NULL); 5249 5250 clear_extent_bit(io_tree, start, end, 5251 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5252 &cached_state); 5253 5254 cond_resched(); 5255 spin_lock(&io_tree->lock); 5256 } 5257 spin_unlock(&io_tree->lock); 5258 } 5259 5260 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5261 struct btrfs_block_rsv *rsv) 5262 { 5263 struct btrfs_fs_info *fs_info = root->fs_info; 5264 struct btrfs_trans_handle *trans; 5265 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5266 int ret; 5267 5268 /* 5269 * Eviction should be taking place at some place safe because of our 5270 * delayed iputs. However the normal flushing code will run delayed 5271 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5272 * 5273 * We reserve the delayed_refs_extra here again because we can't use 5274 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5275 * above. We reserve our extra bit here because we generate a ton of 5276 * delayed refs activity by truncating. 5277 * 5278 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5279 * if we fail to make this reservation we can re-try without the 5280 * delayed_refs_extra so we can make some forward progress. 5281 */ 5282 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5283 BTRFS_RESERVE_FLUSH_EVICT); 5284 if (ret) { 5285 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5286 BTRFS_RESERVE_FLUSH_EVICT); 5287 if (ret) { 5288 btrfs_warn(fs_info, 5289 "could not allocate space for delete; will truncate on mount"); 5290 return ERR_PTR(-ENOSPC); 5291 } 5292 delayed_refs_extra = 0; 5293 } 5294 5295 trans = btrfs_join_transaction(root); 5296 if (IS_ERR(trans)) 5297 return trans; 5298 5299 if (delayed_refs_extra) { 5300 trans->block_rsv = &fs_info->trans_block_rsv; 5301 trans->bytes_reserved = delayed_refs_extra; 5302 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5303 delayed_refs_extra, true); 5304 } 5305 return trans; 5306 } 5307 5308 void btrfs_evict_inode(struct inode *inode) 5309 { 5310 struct btrfs_fs_info *fs_info; 5311 struct btrfs_trans_handle *trans; 5312 struct btrfs_root *root = BTRFS_I(inode)->root; 5313 struct btrfs_block_rsv *rsv = NULL; 5314 int ret; 5315 5316 trace_btrfs_inode_evict(inode); 5317 5318 if (!root) { 5319 fsverity_cleanup_inode(inode); 5320 clear_inode(inode); 5321 return; 5322 } 5323 5324 fs_info = inode_to_fs_info(inode); 5325 evict_inode_truncate_pages(inode); 5326 5327 if (inode->i_nlink && 5328 ((btrfs_root_refs(&root->root_item) != 0 && 5329 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) || 5330 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5331 goto out; 5332 5333 if (is_bad_inode(inode)) 5334 goto out; 5335 5336 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5337 goto out; 5338 5339 if (inode->i_nlink > 0) { 5340 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5341 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID); 5342 goto out; 5343 } 5344 5345 /* 5346 * This makes sure the inode item in tree is uptodate and the space for 5347 * the inode update is released. 5348 */ 5349 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5350 if (ret) 5351 goto out; 5352 5353 /* 5354 * This drops any pending insert or delete operations we have for this 5355 * inode. We could have a delayed dir index deletion queued up, but 5356 * we're removing the inode completely so that'll be taken care of in 5357 * the truncate. 5358 */ 5359 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5360 5361 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5362 if (!rsv) 5363 goto out; 5364 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5365 rsv->failfast = true; 5366 5367 btrfs_i_size_write(BTRFS_I(inode), 0); 5368 5369 while (1) { 5370 struct btrfs_truncate_control control = { 5371 .inode = BTRFS_I(inode), 5372 .ino = btrfs_ino(BTRFS_I(inode)), 5373 .new_size = 0, 5374 .min_type = 0, 5375 }; 5376 5377 trans = evict_refill_and_join(root, rsv); 5378 if (IS_ERR(trans)) 5379 goto out; 5380 5381 trans->block_rsv = rsv; 5382 5383 ret = btrfs_truncate_inode_items(trans, root, &control); 5384 trans->block_rsv = &fs_info->trans_block_rsv; 5385 btrfs_end_transaction(trans); 5386 /* 5387 * We have not added new delayed items for our inode after we 5388 * have flushed its delayed items, so no need to throttle on 5389 * delayed items. However we have modified extent buffers. 5390 */ 5391 btrfs_btree_balance_dirty_nodelay(fs_info); 5392 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5393 goto out; 5394 else if (!ret) 5395 break; 5396 } 5397 5398 /* 5399 * Errors here aren't a big deal, it just means we leave orphan items in 5400 * the tree. They will be cleaned up on the next mount. If the inode 5401 * number gets reused, cleanup deletes the orphan item without doing 5402 * anything, and unlink reuses the existing orphan item. 5403 * 5404 * If it turns out that we are dropping too many of these, we might want 5405 * to add a mechanism for retrying these after a commit. 5406 */ 5407 trans = evict_refill_and_join(root, rsv); 5408 if (!IS_ERR(trans)) { 5409 trans->block_rsv = rsv; 5410 btrfs_orphan_del(trans, BTRFS_I(inode)); 5411 trans->block_rsv = &fs_info->trans_block_rsv; 5412 btrfs_end_transaction(trans); 5413 } 5414 5415 out: 5416 btrfs_free_block_rsv(fs_info, rsv); 5417 /* 5418 * If we didn't successfully delete, the orphan item will still be in 5419 * the tree and we'll retry on the next mount. Again, we might also want 5420 * to retry these periodically in the future. 5421 */ 5422 btrfs_remove_delayed_node(BTRFS_I(inode)); 5423 fsverity_cleanup_inode(inode); 5424 clear_inode(inode); 5425 } 5426 5427 /* 5428 * Return the key found in the dir entry in the location pointer, fill @type 5429 * with BTRFS_FT_*, and return 0. 5430 * 5431 * If no dir entries were found, returns -ENOENT. 5432 * If found a corrupted location in dir entry, returns -EUCLEAN. 5433 */ 5434 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5435 struct btrfs_key *location, u8 *type) 5436 { 5437 struct btrfs_dir_item *di; 5438 struct btrfs_path *path; 5439 struct btrfs_root *root = dir->root; 5440 int ret = 0; 5441 struct fscrypt_name fname; 5442 5443 path = btrfs_alloc_path(); 5444 if (!path) 5445 return -ENOMEM; 5446 5447 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5448 if (ret < 0) 5449 goto out; 5450 /* 5451 * fscrypt_setup_filename() should never return a positive value, but 5452 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5453 */ 5454 ASSERT(ret == 0); 5455 5456 /* This needs to handle no-key deletions later on */ 5457 5458 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5459 &fname.disk_name, 0); 5460 if (IS_ERR_OR_NULL(di)) { 5461 ret = di ? PTR_ERR(di) : -ENOENT; 5462 goto out; 5463 } 5464 5465 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5466 if (location->type != BTRFS_INODE_ITEM_KEY && 5467 location->type != BTRFS_ROOT_ITEM_KEY) { 5468 ret = -EUCLEAN; 5469 btrfs_warn(root->fs_info, 5470 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5471 __func__, fname.disk_name.name, btrfs_ino(dir), 5472 location->objectid, location->type, location->offset); 5473 } 5474 if (!ret) 5475 *type = btrfs_dir_ftype(path->nodes[0], di); 5476 out: 5477 fscrypt_free_filename(&fname); 5478 btrfs_free_path(path); 5479 return ret; 5480 } 5481 5482 /* 5483 * when we hit a tree root in a directory, the btrfs part of the inode 5484 * needs to be changed to reflect the root directory of the tree root. This 5485 * is kind of like crossing a mount point. 5486 */ 5487 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5488 struct btrfs_inode *dir, 5489 struct dentry *dentry, 5490 struct btrfs_key *location, 5491 struct btrfs_root **sub_root) 5492 { 5493 struct btrfs_path *path; 5494 struct btrfs_root *new_root; 5495 struct btrfs_root_ref *ref; 5496 struct extent_buffer *leaf; 5497 struct btrfs_key key; 5498 int ret; 5499 int err = 0; 5500 struct fscrypt_name fname; 5501 5502 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5503 if (ret) 5504 return ret; 5505 5506 path = btrfs_alloc_path(); 5507 if (!path) { 5508 err = -ENOMEM; 5509 goto out; 5510 } 5511 5512 err = -ENOENT; 5513 key.objectid = btrfs_root_id(dir->root); 5514 key.type = BTRFS_ROOT_REF_KEY; 5515 key.offset = location->objectid; 5516 5517 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5518 if (ret) { 5519 if (ret < 0) 5520 err = ret; 5521 goto out; 5522 } 5523 5524 leaf = path->nodes[0]; 5525 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5526 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5527 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5528 goto out; 5529 5530 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5531 (unsigned long)(ref + 1), fname.disk_name.len); 5532 if (ret) 5533 goto out; 5534 5535 btrfs_release_path(path); 5536 5537 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5538 if (IS_ERR(new_root)) { 5539 err = PTR_ERR(new_root); 5540 goto out; 5541 } 5542 5543 *sub_root = new_root; 5544 location->objectid = btrfs_root_dirid(&new_root->root_item); 5545 location->type = BTRFS_INODE_ITEM_KEY; 5546 location->offset = 0; 5547 err = 0; 5548 out: 5549 btrfs_free_path(path); 5550 fscrypt_free_filename(&fname); 5551 return err; 5552 } 5553 5554 5555 5556 static void btrfs_del_inode_from_root(struct btrfs_inode *inode) 5557 { 5558 struct btrfs_root *root = inode->root; 5559 struct btrfs_inode *entry; 5560 bool empty = false; 5561 5562 xa_lock(&root->inodes); 5563 entry = __xa_erase(&root->inodes, btrfs_ino(inode)); 5564 if (entry == inode) 5565 empty = xa_empty(&root->inodes); 5566 xa_unlock(&root->inodes); 5567 5568 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5569 xa_lock(&root->inodes); 5570 empty = xa_empty(&root->inodes); 5571 xa_unlock(&root->inodes); 5572 if (empty) 5573 btrfs_add_dead_root(root); 5574 } 5575 } 5576 5577 5578 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5579 { 5580 struct btrfs_iget_args *args = p; 5581 5582 btrfs_set_inode_number(BTRFS_I(inode), args->ino); 5583 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5584 5585 if (args->root && args->root == args->root->fs_info->tree_root && 5586 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5587 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5588 &BTRFS_I(inode)->runtime_flags); 5589 return 0; 5590 } 5591 5592 static int btrfs_find_actor(struct inode *inode, void *opaque) 5593 { 5594 struct btrfs_iget_args *args = opaque; 5595 5596 return args->ino == btrfs_ino(BTRFS_I(inode)) && 5597 args->root == BTRFS_I(inode)->root; 5598 } 5599 5600 static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root) 5601 { 5602 struct inode *inode; 5603 struct btrfs_iget_args args; 5604 unsigned long hashval = btrfs_inode_hash(ino, root); 5605 5606 args.ino = ino; 5607 args.root = root; 5608 5609 inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor, 5610 btrfs_init_locked_inode, 5611 (void *)&args); 5612 return inode; 5613 } 5614 5615 /* 5616 * Get an inode object given its inode number and corresponding root. Path is 5617 * preallocated to prevent recursing back to iget through allocator. 5618 */ 5619 struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root, 5620 struct btrfs_path *path) 5621 { 5622 struct inode *inode; 5623 int ret; 5624 5625 inode = btrfs_iget_locked(ino, root); 5626 if (!inode) 5627 return ERR_PTR(-ENOMEM); 5628 5629 if (!(inode->i_state & I_NEW)) 5630 return inode; 5631 5632 ret = btrfs_read_locked_inode(inode, path); 5633 if (ret) 5634 return ERR_PTR(ret); 5635 5636 unlock_new_inode(inode); 5637 return inode; 5638 } 5639 5640 /* 5641 * Get an inode object given its inode number and corresponding root. 5642 */ 5643 struct inode *btrfs_iget(u64 ino, struct btrfs_root *root) 5644 { 5645 struct inode *inode; 5646 struct btrfs_path *path; 5647 int ret; 5648 5649 inode = btrfs_iget_locked(ino, root); 5650 if (!inode) 5651 return ERR_PTR(-ENOMEM); 5652 5653 if (!(inode->i_state & I_NEW)) 5654 return inode; 5655 5656 path = btrfs_alloc_path(); 5657 if (!path) 5658 return ERR_PTR(-ENOMEM); 5659 5660 ret = btrfs_read_locked_inode(inode, path); 5661 btrfs_free_path(path); 5662 if (ret) 5663 return ERR_PTR(ret); 5664 5665 unlock_new_inode(inode); 5666 return inode; 5667 } 5668 5669 static struct inode *new_simple_dir(struct inode *dir, 5670 struct btrfs_key *key, 5671 struct btrfs_root *root) 5672 { 5673 struct timespec64 ts; 5674 struct inode *inode = new_inode(dir->i_sb); 5675 5676 if (!inode) 5677 return ERR_PTR(-ENOMEM); 5678 5679 BTRFS_I(inode)->root = btrfs_grab_root(root); 5680 BTRFS_I(inode)->ref_root_id = key->objectid; 5681 set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags); 5682 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5683 5684 btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID); 5685 /* 5686 * We only need lookup, the rest is read-only and there's no inode 5687 * associated with the dentry 5688 */ 5689 inode->i_op = &simple_dir_inode_operations; 5690 inode->i_opflags &= ~IOP_XATTR; 5691 inode->i_fop = &simple_dir_operations; 5692 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5693 5694 ts = inode_set_ctime_current(inode); 5695 inode_set_mtime_to_ts(inode, ts); 5696 inode_set_atime_to_ts(inode, inode_get_atime(dir)); 5697 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 5698 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 5699 5700 inode->i_uid = dir->i_uid; 5701 inode->i_gid = dir->i_gid; 5702 5703 return inode; 5704 } 5705 5706 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5707 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5708 static_assert(BTRFS_FT_DIR == FT_DIR); 5709 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5710 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5711 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5712 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5713 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5714 5715 static inline u8 btrfs_inode_type(struct inode *inode) 5716 { 5717 return fs_umode_to_ftype(inode->i_mode); 5718 } 5719 5720 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5721 { 5722 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 5723 struct inode *inode; 5724 struct btrfs_root *root = BTRFS_I(dir)->root; 5725 struct btrfs_root *sub_root = root; 5726 struct btrfs_key location = { 0 }; 5727 u8 di_type = 0; 5728 int ret = 0; 5729 5730 if (dentry->d_name.len > BTRFS_NAME_LEN) 5731 return ERR_PTR(-ENAMETOOLONG); 5732 5733 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5734 if (ret < 0) 5735 return ERR_PTR(ret); 5736 5737 if (location.type == BTRFS_INODE_ITEM_KEY) { 5738 inode = btrfs_iget(location.objectid, root); 5739 if (IS_ERR(inode)) 5740 return inode; 5741 5742 /* Do extra check against inode mode with di_type */ 5743 if (btrfs_inode_type(inode) != di_type) { 5744 btrfs_crit(fs_info, 5745 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5746 inode->i_mode, btrfs_inode_type(inode), 5747 di_type); 5748 iput(inode); 5749 return ERR_PTR(-EUCLEAN); 5750 } 5751 return inode; 5752 } 5753 5754 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5755 &location, &sub_root); 5756 if (ret < 0) { 5757 if (ret != -ENOENT) 5758 inode = ERR_PTR(ret); 5759 else 5760 inode = new_simple_dir(dir, &location, root); 5761 } else { 5762 inode = btrfs_iget(location.objectid, sub_root); 5763 btrfs_put_root(sub_root); 5764 5765 if (IS_ERR(inode)) 5766 return inode; 5767 5768 down_read(&fs_info->cleanup_work_sem); 5769 if (!sb_rdonly(inode->i_sb)) 5770 ret = btrfs_orphan_cleanup(sub_root); 5771 up_read(&fs_info->cleanup_work_sem); 5772 if (ret) { 5773 iput(inode); 5774 inode = ERR_PTR(ret); 5775 } 5776 } 5777 5778 return inode; 5779 } 5780 5781 static int btrfs_dentry_delete(const struct dentry *dentry) 5782 { 5783 struct btrfs_root *root; 5784 struct inode *inode = d_inode(dentry); 5785 5786 if (!inode && !IS_ROOT(dentry)) 5787 inode = d_inode(dentry->d_parent); 5788 5789 if (inode) { 5790 root = BTRFS_I(inode)->root; 5791 if (btrfs_root_refs(&root->root_item) == 0) 5792 return 1; 5793 5794 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5795 return 1; 5796 } 5797 return 0; 5798 } 5799 5800 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5801 unsigned int flags) 5802 { 5803 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5804 5805 if (inode == ERR_PTR(-ENOENT)) 5806 inode = NULL; 5807 return d_splice_alias(inode, dentry); 5808 } 5809 5810 /* 5811 * Find the highest existing sequence number in a directory and then set the 5812 * in-memory index_cnt variable to the first free sequence number. 5813 */ 5814 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5815 { 5816 struct btrfs_root *root = inode->root; 5817 struct btrfs_key key, found_key; 5818 struct btrfs_path *path; 5819 struct extent_buffer *leaf; 5820 int ret; 5821 5822 key.objectid = btrfs_ino(inode); 5823 key.type = BTRFS_DIR_INDEX_KEY; 5824 key.offset = (u64)-1; 5825 5826 path = btrfs_alloc_path(); 5827 if (!path) 5828 return -ENOMEM; 5829 5830 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5831 if (ret < 0) 5832 goto out; 5833 /* FIXME: we should be able to handle this */ 5834 if (ret == 0) 5835 goto out; 5836 ret = 0; 5837 5838 if (path->slots[0] == 0) { 5839 inode->index_cnt = BTRFS_DIR_START_INDEX; 5840 goto out; 5841 } 5842 5843 path->slots[0]--; 5844 5845 leaf = path->nodes[0]; 5846 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5847 5848 if (found_key.objectid != btrfs_ino(inode) || 5849 found_key.type != BTRFS_DIR_INDEX_KEY) { 5850 inode->index_cnt = BTRFS_DIR_START_INDEX; 5851 goto out; 5852 } 5853 5854 inode->index_cnt = found_key.offset + 1; 5855 out: 5856 btrfs_free_path(path); 5857 return ret; 5858 } 5859 5860 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5861 { 5862 int ret = 0; 5863 5864 btrfs_inode_lock(dir, 0); 5865 if (dir->index_cnt == (u64)-1) { 5866 ret = btrfs_inode_delayed_dir_index_count(dir); 5867 if (ret) { 5868 ret = btrfs_set_inode_index_count(dir); 5869 if (ret) 5870 goto out; 5871 } 5872 } 5873 5874 /* index_cnt is the index number of next new entry, so decrement it. */ 5875 *index = dir->index_cnt - 1; 5876 out: 5877 btrfs_inode_unlock(dir, 0); 5878 5879 return ret; 5880 } 5881 5882 /* 5883 * All this infrastructure exists because dir_emit can fault, and we are holding 5884 * the tree lock when doing readdir. For now just allocate a buffer and copy 5885 * our information into that, and then dir_emit from the buffer. This is 5886 * similar to what NFS does, only we don't keep the buffer around in pagecache 5887 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5888 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5889 * tree lock. 5890 */ 5891 static int btrfs_opendir(struct inode *inode, struct file *file) 5892 { 5893 struct btrfs_file_private *private; 5894 u64 last_index; 5895 int ret; 5896 5897 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5898 if (ret) 5899 return ret; 5900 5901 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5902 if (!private) 5903 return -ENOMEM; 5904 private->last_index = last_index; 5905 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5906 if (!private->filldir_buf) { 5907 kfree(private); 5908 return -ENOMEM; 5909 } 5910 file->private_data = private; 5911 return 0; 5912 } 5913 5914 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence) 5915 { 5916 struct btrfs_file_private *private = file->private_data; 5917 int ret; 5918 5919 ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)), 5920 &private->last_index); 5921 if (ret) 5922 return ret; 5923 5924 return generic_file_llseek(file, offset, whence); 5925 } 5926 5927 struct dir_entry { 5928 u64 ino; 5929 u64 offset; 5930 unsigned type; 5931 int name_len; 5932 }; 5933 5934 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5935 { 5936 while (entries--) { 5937 struct dir_entry *entry = addr; 5938 char *name = (char *)(entry + 1); 5939 5940 ctx->pos = get_unaligned(&entry->offset); 5941 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5942 get_unaligned(&entry->ino), 5943 get_unaligned(&entry->type))) 5944 return 1; 5945 addr += sizeof(struct dir_entry) + 5946 get_unaligned(&entry->name_len); 5947 ctx->pos++; 5948 } 5949 return 0; 5950 } 5951 5952 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5953 { 5954 struct inode *inode = file_inode(file); 5955 struct btrfs_root *root = BTRFS_I(inode)->root; 5956 struct btrfs_file_private *private = file->private_data; 5957 struct btrfs_dir_item *di; 5958 struct btrfs_key key; 5959 struct btrfs_key found_key; 5960 struct btrfs_path *path; 5961 void *addr; 5962 LIST_HEAD(ins_list); 5963 LIST_HEAD(del_list); 5964 int ret; 5965 char *name_ptr; 5966 int name_len; 5967 int entries = 0; 5968 int total_len = 0; 5969 bool put = false; 5970 struct btrfs_key location; 5971 5972 if (!dir_emit_dots(file, ctx)) 5973 return 0; 5974 5975 path = btrfs_alloc_path(); 5976 if (!path) 5977 return -ENOMEM; 5978 5979 addr = private->filldir_buf; 5980 path->reada = READA_FORWARD; 5981 5982 put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index, 5983 &ins_list, &del_list); 5984 5985 again: 5986 key.type = BTRFS_DIR_INDEX_KEY; 5987 key.offset = ctx->pos; 5988 key.objectid = btrfs_ino(BTRFS_I(inode)); 5989 5990 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5991 struct dir_entry *entry; 5992 struct extent_buffer *leaf = path->nodes[0]; 5993 u8 ftype; 5994 5995 if (found_key.objectid != key.objectid) 5996 break; 5997 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5998 break; 5999 if (found_key.offset < ctx->pos) 6000 continue; 6001 if (found_key.offset > private->last_index) 6002 break; 6003 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6004 continue; 6005 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 6006 name_len = btrfs_dir_name_len(leaf, di); 6007 if ((total_len + sizeof(struct dir_entry) + name_len) >= 6008 PAGE_SIZE) { 6009 btrfs_release_path(path); 6010 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6011 if (ret) 6012 goto nopos; 6013 addr = private->filldir_buf; 6014 entries = 0; 6015 total_len = 0; 6016 goto again; 6017 } 6018 6019 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 6020 entry = addr; 6021 name_ptr = (char *)(entry + 1); 6022 read_extent_buffer(leaf, name_ptr, 6023 (unsigned long)(di + 1), name_len); 6024 put_unaligned(name_len, &entry->name_len); 6025 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 6026 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6027 put_unaligned(location.objectid, &entry->ino); 6028 put_unaligned(found_key.offset, &entry->offset); 6029 entries++; 6030 addr += sizeof(struct dir_entry) + name_len; 6031 total_len += sizeof(struct dir_entry) + name_len; 6032 } 6033 /* Catch error encountered during iteration */ 6034 if (ret < 0) 6035 goto err; 6036 6037 btrfs_release_path(path); 6038 6039 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6040 if (ret) 6041 goto nopos; 6042 6043 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 6044 if (ret) 6045 goto nopos; 6046 6047 /* 6048 * Stop new entries from being returned after we return the last 6049 * entry. 6050 * 6051 * New directory entries are assigned a strictly increasing 6052 * offset. This means that new entries created during readdir 6053 * are *guaranteed* to be seen in the future by that readdir. 6054 * This has broken buggy programs which operate on names as 6055 * they're returned by readdir. Until we reuse freed offsets 6056 * we have this hack to stop new entries from being returned 6057 * under the assumption that they'll never reach this huge 6058 * offset. 6059 * 6060 * This is being careful not to overflow 32bit loff_t unless the 6061 * last entry requires it because doing so has broken 32bit apps 6062 * in the past. 6063 */ 6064 if (ctx->pos >= INT_MAX) 6065 ctx->pos = LLONG_MAX; 6066 else 6067 ctx->pos = INT_MAX; 6068 nopos: 6069 ret = 0; 6070 err: 6071 if (put) 6072 btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list); 6073 btrfs_free_path(path); 6074 return ret; 6075 } 6076 6077 /* 6078 * This is somewhat expensive, updating the tree every time the 6079 * inode changes. But, it is most likely to find the inode in cache. 6080 * FIXME, needs more benchmarking...there are no reasons other than performance 6081 * to keep or drop this code. 6082 */ 6083 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6084 { 6085 struct btrfs_root *root = inode->root; 6086 struct btrfs_fs_info *fs_info = root->fs_info; 6087 struct btrfs_trans_handle *trans; 6088 int ret; 6089 6090 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6091 return 0; 6092 6093 trans = btrfs_join_transaction(root); 6094 if (IS_ERR(trans)) 6095 return PTR_ERR(trans); 6096 6097 ret = btrfs_update_inode(trans, inode); 6098 if (ret == -ENOSPC || ret == -EDQUOT) { 6099 /* whoops, lets try again with the full transaction */ 6100 btrfs_end_transaction(trans); 6101 trans = btrfs_start_transaction(root, 1); 6102 if (IS_ERR(trans)) 6103 return PTR_ERR(trans); 6104 6105 ret = btrfs_update_inode(trans, inode); 6106 } 6107 btrfs_end_transaction(trans); 6108 if (inode->delayed_node) 6109 btrfs_balance_delayed_items(fs_info); 6110 6111 return ret; 6112 } 6113 6114 /* 6115 * This is a copy of file_update_time. We need this so we can return error on 6116 * ENOSPC for updating the inode in the case of file write and mmap writes. 6117 */ 6118 static int btrfs_update_time(struct inode *inode, int flags) 6119 { 6120 struct btrfs_root *root = BTRFS_I(inode)->root; 6121 bool dirty; 6122 6123 if (btrfs_root_readonly(root)) 6124 return -EROFS; 6125 6126 dirty = inode_update_timestamps(inode, flags); 6127 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6128 } 6129 6130 /* 6131 * helper to find a free sequence number in a given directory. This current 6132 * code is very simple, later versions will do smarter things in the btree 6133 */ 6134 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6135 { 6136 int ret = 0; 6137 6138 if (dir->index_cnt == (u64)-1) { 6139 ret = btrfs_inode_delayed_dir_index_count(dir); 6140 if (ret) { 6141 ret = btrfs_set_inode_index_count(dir); 6142 if (ret) 6143 return ret; 6144 } 6145 } 6146 6147 *index = dir->index_cnt; 6148 dir->index_cnt++; 6149 6150 return ret; 6151 } 6152 6153 static int btrfs_insert_inode_locked(struct inode *inode) 6154 { 6155 struct btrfs_iget_args args; 6156 6157 args.ino = btrfs_ino(BTRFS_I(inode)); 6158 args.root = BTRFS_I(inode)->root; 6159 6160 return insert_inode_locked4(inode, 6161 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6162 btrfs_find_actor, &args); 6163 } 6164 6165 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6166 unsigned int *trans_num_items) 6167 { 6168 struct inode *dir = args->dir; 6169 struct inode *inode = args->inode; 6170 int ret; 6171 6172 if (!args->orphan) { 6173 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6174 &args->fname); 6175 if (ret) 6176 return ret; 6177 } 6178 6179 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6180 if (ret) { 6181 fscrypt_free_filename(&args->fname); 6182 return ret; 6183 } 6184 6185 /* 1 to add inode item */ 6186 *trans_num_items = 1; 6187 /* 1 to add compression property */ 6188 if (BTRFS_I(dir)->prop_compress) 6189 (*trans_num_items)++; 6190 /* 1 to add default ACL xattr */ 6191 if (args->default_acl) 6192 (*trans_num_items)++; 6193 /* 1 to add access ACL xattr */ 6194 if (args->acl) 6195 (*trans_num_items)++; 6196 #ifdef CONFIG_SECURITY 6197 /* 1 to add LSM xattr */ 6198 if (dir->i_security) 6199 (*trans_num_items)++; 6200 #endif 6201 if (args->orphan) { 6202 /* 1 to add orphan item */ 6203 (*trans_num_items)++; 6204 } else { 6205 /* 6206 * 1 to add dir item 6207 * 1 to add dir index 6208 * 1 to update parent inode item 6209 * 6210 * No need for 1 unit for the inode ref item because it is 6211 * inserted in a batch together with the inode item at 6212 * btrfs_create_new_inode(). 6213 */ 6214 *trans_num_items += 3; 6215 } 6216 return 0; 6217 } 6218 6219 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6220 { 6221 posix_acl_release(args->acl); 6222 posix_acl_release(args->default_acl); 6223 fscrypt_free_filename(&args->fname); 6224 } 6225 6226 /* 6227 * Inherit flags from the parent inode. 6228 * 6229 * Currently only the compression flags and the cow flags are inherited. 6230 */ 6231 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6232 { 6233 unsigned int flags; 6234 6235 flags = dir->flags; 6236 6237 if (flags & BTRFS_INODE_NOCOMPRESS) { 6238 inode->flags &= ~BTRFS_INODE_COMPRESS; 6239 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6240 } else if (flags & BTRFS_INODE_COMPRESS) { 6241 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6242 inode->flags |= BTRFS_INODE_COMPRESS; 6243 } 6244 6245 if (flags & BTRFS_INODE_NODATACOW) { 6246 inode->flags |= BTRFS_INODE_NODATACOW; 6247 if (S_ISREG(inode->vfs_inode.i_mode)) 6248 inode->flags |= BTRFS_INODE_NODATASUM; 6249 } 6250 6251 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6252 } 6253 6254 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6255 struct btrfs_new_inode_args *args) 6256 { 6257 struct timespec64 ts; 6258 struct inode *dir = args->dir; 6259 struct inode *inode = args->inode; 6260 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6261 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6262 struct btrfs_root *root; 6263 struct btrfs_inode_item *inode_item; 6264 struct btrfs_path *path; 6265 u64 objectid; 6266 struct btrfs_inode_ref *ref; 6267 struct btrfs_key key[2]; 6268 u32 sizes[2]; 6269 struct btrfs_item_batch batch; 6270 unsigned long ptr; 6271 int ret; 6272 bool xa_reserved = false; 6273 6274 path = btrfs_alloc_path(); 6275 if (!path) 6276 return -ENOMEM; 6277 6278 if (!args->subvol) 6279 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6280 root = BTRFS_I(inode)->root; 6281 6282 ret = btrfs_init_file_extent_tree(BTRFS_I(inode)); 6283 if (ret) 6284 goto out; 6285 6286 ret = btrfs_get_free_objectid(root, &objectid); 6287 if (ret) 6288 goto out; 6289 btrfs_set_inode_number(BTRFS_I(inode), objectid); 6290 6291 ret = xa_reserve(&root->inodes, objectid, GFP_NOFS); 6292 if (ret) 6293 goto out; 6294 xa_reserved = true; 6295 6296 if (args->orphan) { 6297 /* 6298 * O_TMPFILE, set link count to 0, so that after this point, we 6299 * fill in an inode item with the correct link count. 6300 */ 6301 set_nlink(inode, 0); 6302 } else { 6303 trace_btrfs_inode_request(dir); 6304 6305 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6306 if (ret) 6307 goto out; 6308 } 6309 6310 if (S_ISDIR(inode->i_mode)) 6311 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6312 6313 BTRFS_I(inode)->generation = trans->transid; 6314 inode->i_generation = BTRFS_I(inode)->generation; 6315 6316 /* 6317 * We don't have any capability xattrs set here yet, shortcut any 6318 * queries for the xattrs here. If we add them later via the inode 6319 * security init path or any other path this flag will be cleared. 6320 */ 6321 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags); 6322 6323 /* 6324 * Subvolumes don't inherit flags from their parent directory. 6325 * Originally this was probably by accident, but we probably can't 6326 * change it now without compatibility issues. 6327 */ 6328 if (!args->subvol) 6329 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6330 6331 if (S_ISREG(inode->i_mode)) { 6332 if (btrfs_test_opt(fs_info, NODATASUM)) 6333 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6334 if (btrfs_test_opt(fs_info, NODATACOW)) 6335 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6336 BTRFS_INODE_NODATASUM; 6337 } 6338 6339 ret = btrfs_insert_inode_locked(inode); 6340 if (ret < 0) { 6341 if (!args->orphan) 6342 BTRFS_I(dir)->index_cnt--; 6343 goto out; 6344 } 6345 6346 /* 6347 * We could have gotten an inode number from somebody who was fsynced 6348 * and then removed in this same transaction, so let's just set full 6349 * sync since it will be a full sync anyway and this will blow away the 6350 * old info in the log. 6351 */ 6352 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6353 6354 key[0].objectid = objectid; 6355 key[0].type = BTRFS_INODE_ITEM_KEY; 6356 key[0].offset = 0; 6357 6358 sizes[0] = sizeof(struct btrfs_inode_item); 6359 6360 if (!args->orphan) { 6361 /* 6362 * Start new inodes with an inode_ref. This is slightly more 6363 * efficient for small numbers of hard links since they will 6364 * be packed into one item. Extended refs will kick in if we 6365 * add more hard links than can fit in the ref item. 6366 */ 6367 key[1].objectid = objectid; 6368 key[1].type = BTRFS_INODE_REF_KEY; 6369 if (args->subvol) { 6370 key[1].offset = objectid; 6371 sizes[1] = 2 + sizeof(*ref); 6372 } else { 6373 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6374 sizes[1] = name->len + sizeof(*ref); 6375 } 6376 } 6377 6378 batch.keys = &key[0]; 6379 batch.data_sizes = &sizes[0]; 6380 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6381 batch.nr = args->orphan ? 1 : 2; 6382 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6383 if (ret != 0) { 6384 btrfs_abort_transaction(trans, ret); 6385 goto discard; 6386 } 6387 6388 ts = simple_inode_init_ts(inode); 6389 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 6390 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 6391 6392 /* 6393 * We're going to fill the inode item now, so at this point the inode 6394 * must be fully initialized. 6395 */ 6396 6397 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6398 struct btrfs_inode_item); 6399 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6400 sizeof(*inode_item)); 6401 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6402 6403 if (!args->orphan) { 6404 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6405 struct btrfs_inode_ref); 6406 ptr = (unsigned long)(ref + 1); 6407 if (args->subvol) { 6408 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6409 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6410 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6411 } else { 6412 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6413 name->len); 6414 btrfs_set_inode_ref_index(path->nodes[0], ref, 6415 BTRFS_I(inode)->dir_index); 6416 write_extent_buffer(path->nodes[0], name->name, ptr, 6417 name->len); 6418 } 6419 } 6420 6421 /* 6422 * We don't need the path anymore, plus inheriting properties, adding 6423 * ACLs, security xattrs, orphan item or adding the link, will result in 6424 * allocating yet another path. So just free our path. 6425 */ 6426 btrfs_free_path(path); 6427 path = NULL; 6428 6429 if (args->subvol) { 6430 struct inode *parent; 6431 6432 /* 6433 * Subvolumes inherit properties from their parent subvolume, 6434 * not the directory they were created in. 6435 */ 6436 parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root); 6437 if (IS_ERR(parent)) { 6438 ret = PTR_ERR(parent); 6439 } else { 6440 ret = btrfs_inode_inherit_props(trans, inode, parent); 6441 iput(parent); 6442 } 6443 } else { 6444 ret = btrfs_inode_inherit_props(trans, inode, dir); 6445 } 6446 if (ret) { 6447 btrfs_err(fs_info, 6448 "error inheriting props for ino %llu (root %llu): %d", 6449 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret); 6450 } 6451 6452 /* 6453 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6454 * probably a bug. 6455 */ 6456 if (!args->subvol) { 6457 ret = btrfs_init_inode_security(trans, args); 6458 if (ret) { 6459 btrfs_abort_transaction(trans, ret); 6460 goto discard; 6461 } 6462 } 6463 6464 ret = btrfs_add_inode_to_root(BTRFS_I(inode), false); 6465 if (WARN_ON(ret)) { 6466 /* Shouldn't happen, we used xa_reserve() before. */ 6467 btrfs_abort_transaction(trans, ret); 6468 goto discard; 6469 } 6470 6471 trace_btrfs_inode_new(inode); 6472 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6473 6474 btrfs_update_root_times(trans, root); 6475 6476 if (args->orphan) { 6477 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6478 } else { 6479 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6480 0, BTRFS_I(inode)->dir_index); 6481 } 6482 if (ret) { 6483 btrfs_abort_transaction(trans, ret); 6484 goto discard; 6485 } 6486 6487 return 0; 6488 6489 discard: 6490 /* 6491 * discard_new_inode() calls iput(), but the caller owns the reference 6492 * to the inode. 6493 */ 6494 ihold(inode); 6495 discard_new_inode(inode); 6496 out: 6497 if (xa_reserved) 6498 xa_release(&root->inodes, objectid); 6499 6500 btrfs_free_path(path); 6501 return ret; 6502 } 6503 6504 /* 6505 * utility function to add 'inode' into 'parent_inode' with 6506 * a give name and a given sequence number. 6507 * if 'add_backref' is true, also insert a backref from the 6508 * inode to the parent directory. 6509 */ 6510 int btrfs_add_link(struct btrfs_trans_handle *trans, 6511 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6512 const struct fscrypt_str *name, int add_backref, u64 index) 6513 { 6514 int ret = 0; 6515 struct btrfs_key key; 6516 struct btrfs_root *root = parent_inode->root; 6517 u64 ino = btrfs_ino(inode); 6518 u64 parent_ino = btrfs_ino(parent_inode); 6519 6520 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6521 memcpy(&key, &inode->root->root_key, sizeof(key)); 6522 } else { 6523 key.objectid = ino; 6524 key.type = BTRFS_INODE_ITEM_KEY; 6525 key.offset = 0; 6526 } 6527 6528 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6529 ret = btrfs_add_root_ref(trans, key.objectid, 6530 btrfs_root_id(root), parent_ino, 6531 index, name); 6532 } else if (add_backref) { 6533 ret = btrfs_insert_inode_ref(trans, root, name, 6534 ino, parent_ino, index); 6535 } 6536 6537 /* Nothing to clean up yet */ 6538 if (ret) 6539 return ret; 6540 6541 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6542 btrfs_inode_type(&inode->vfs_inode), index); 6543 if (ret == -EEXIST || ret == -EOVERFLOW) 6544 goto fail_dir_item; 6545 else if (ret) { 6546 btrfs_abort_transaction(trans, ret); 6547 return ret; 6548 } 6549 6550 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6551 name->len * 2); 6552 inode_inc_iversion(&parent_inode->vfs_inode); 6553 /* 6554 * If we are replaying a log tree, we do not want to update the mtime 6555 * and ctime of the parent directory with the current time, since the 6556 * log replay procedure is responsible for setting them to their correct 6557 * values (the ones it had when the fsync was done). 6558 */ 6559 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) 6560 inode_set_mtime_to_ts(&parent_inode->vfs_inode, 6561 inode_set_ctime_current(&parent_inode->vfs_inode)); 6562 6563 ret = btrfs_update_inode(trans, parent_inode); 6564 if (ret) 6565 btrfs_abort_transaction(trans, ret); 6566 return ret; 6567 6568 fail_dir_item: 6569 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6570 u64 local_index; 6571 int err; 6572 err = btrfs_del_root_ref(trans, key.objectid, 6573 btrfs_root_id(root), parent_ino, 6574 &local_index, name); 6575 if (err) 6576 btrfs_abort_transaction(trans, err); 6577 } else if (add_backref) { 6578 u64 local_index; 6579 int err; 6580 6581 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6582 &local_index); 6583 if (err) 6584 btrfs_abort_transaction(trans, err); 6585 } 6586 6587 /* Return the original error code */ 6588 return ret; 6589 } 6590 6591 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6592 struct inode *inode) 6593 { 6594 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6595 struct btrfs_root *root = BTRFS_I(dir)->root; 6596 struct btrfs_new_inode_args new_inode_args = { 6597 .dir = dir, 6598 .dentry = dentry, 6599 .inode = inode, 6600 }; 6601 unsigned int trans_num_items; 6602 struct btrfs_trans_handle *trans; 6603 int err; 6604 6605 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6606 if (err) 6607 goto out_inode; 6608 6609 trans = btrfs_start_transaction(root, trans_num_items); 6610 if (IS_ERR(trans)) { 6611 err = PTR_ERR(trans); 6612 goto out_new_inode_args; 6613 } 6614 6615 err = btrfs_create_new_inode(trans, &new_inode_args); 6616 if (!err) 6617 d_instantiate_new(dentry, inode); 6618 6619 btrfs_end_transaction(trans); 6620 btrfs_btree_balance_dirty(fs_info); 6621 out_new_inode_args: 6622 btrfs_new_inode_args_destroy(&new_inode_args); 6623 out_inode: 6624 if (err) 6625 iput(inode); 6626 return err; 6627 } 6628 6629 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6630 struct dentry *dentry, umode_t mode, dev_t rdev) 6631 { 6632 struct inode *inode; 6633 6634 inode = new_inode(dir->i_sb); 6635 if (!inode) 6636 return -ENOMEM; 6637 inode_init_owner(idmap, inode, dir, mode); 6638 inode->i_op = &btrfs_special_inode_operations; 6639 init_special_inode(inode, inode->i_mode, rdev); 6640 return btrfs_create_common(dir, dentry, inode); 6641 } 6642 6643 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6644 struct dentry *dentry, umode_t mode, bool excl) 6645 { 6646 struct inode *inode; 6647 6648 inode = new_inode(dir->i_sb); 6649 if (!inode) 6650 return -ENOMEM; 6651 inode_init_owner(idmap, inode, dir, mode); 6652 inode->i_fop = &btrfs_file_operations; 6653 inode->i_op = &btrfs_file_inode_operations; 6654 inode->i_mapping->a_ops = &btrfs_aops; 6655 return btrfs_create_common(dir, dentry, inode); 6656 } 6657 6658 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6659 struct dentry *dentry) 6660 { 6661 struct btrfs_trans_handle *trans = NULL; 6662 struct btrfs_root *root = BTRFS_I(dir)->root; 6663 struct inode *inode = d_inode(old_dentry); 6664 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 6665 struct fscrypt_name fname; 6666 u64 index; 6667 int err; 6668 int drop_inode = 0; 6669 6670 /* do not allow sys_link's with other subvols of the same device */ 6671 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root)) 6672 return -EXDEV; 6673 6674 if (inode->i_nlink >= BTRFS_LINK_MAX) 6675 return -EMLINK; 6676 6677 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6678 if (err) 6679 goto fail; 6680 6681 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6682 if (err) 6683 goto fail; 6684 6685 /* 6686 * 2 items for inode and inode ref 6687 * 2 items for dir items 6688 * 1 item for parent inode 6689 * 1 item for orphan item deletion if O_TMPFILE 6690 */ 6691 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6692 if (IS_ERR(trans)) { 6693 err = PTR_ERR(trans); 6694 trans = NULL; 6695 goto fail; 6696 } 6697 6698 /* There are several dir indexes for this inode, clear the cache. */ 6699 BTRFS_I(inode)->dir_index = 0ULL; 6700 inc_nlink(inode); 6701 inode_inc_iversion(inode); 6702 inode_set_ctime_current(inode); 6703 ihold(inode); 6704 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6705 6706 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6707 &fname.disk_name, 1, index); 6708 6709 if (err) { 6710 drop_inode = 1; 6711 } else { 6712 struct dentry *parent = dentry->d_parent; 6713 6714 err = btrfs_update_inode(trans, BTRFS_I(inode)); 6715 if (err) 6716 goto fail; 6717 if (inode->i_nlink == 1) { 6718 /* 6719 * If new hard link count is 1, it's a file created 6720 * with open(2) O_TMPFILE flag. 6721 */ 6722 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6723 if (err) 6724 goto fail; 6725 } 6726 d_instantiate(dentry, inode); 6727 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6728 } 6729 6730 fail: 6731 fscrypt_free_filename(&fname); 6732 if (trans) 6733 btrfs_end_transaction(trans); 6734 if (drop_inode) { 6735 inode_dec_link_count(inode); 6736 iput(inode); 6737 } 6738 btrfs_btree_balance_dirty(fs_info); 6739 return err; 6740 } 6741 6742 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6743 struct dentry *dentry, umode_t mode) 6744 { 6745 struct inode *inode; 6746 6747 inode = new_inode(dir->i_sb); 6748 if (!inode) 6749 return -ENOMEM; 6750 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6751 inode->i_op = &btrfs_dir_inode_operations; 6752 inode->i_fop = &btrfs_dir_file_operations; 6753 return btrfs_create_common(dir, dentry, inode); 6754 } 6755 6756 static noinline int uncompress_inline(struct btrfs_path *path, 6757 struct folio *folio, 6758 struct btrfs_file_extent_item *item) 6759 { 6760 int ret; 6761 struct extent_buffer *leaf = path->nodes[0]; 6762 char *tmp; 6763 size_t max_size; 6764 unsigned long inline_size; 6765 unsigned long ptr; 6766 int compress_type; 6767 6768 compress_type = btrfs_file_extent_compression(leaf, item); 6769 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6770 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6771 tmp = kmalloc(inline_size, GFP_NOFS); 6772 if (!tmp) 6773 return -ENOMEM; 6774 ptr = btrfs_file_extent_inline_start(item); 6775 6776 read_extent_buffer(leaf, tmp, ptr, inline_size); 6777 6778 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6779 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size, 6780 max_size); 6781 6782 /* 6783 * decompression code contains a memset to fill in any space between the end 6784 * of the uncompressed data and the end of max_size in case the decompressed 6785 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6786 * the end of an inline extent and the beginning of the next block, so we 6787 * cover that region here. 6788 */ 6789 6790 if (max_size < PAGE_SIZE) 6791 folio_zero_range(folio, max_size, PAGE_SIZE - max_size); 6792 kfree(tmp); 6793 return ret; 6794 } 6795 6796 static int read_inline_extent(struct btrfs_path *path, struct folio *folio) 6797 { 6798 struct btrfs_file_extent_item *fi; 6799 void *kaddr; 6800 size_t copy_size; 6801 6802 if (!folio || folio_test_uptodate(folio)) 6803 return 0; 6804 6805 ASSERT(folio_pos(folio) == 0); 6806 6807 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6808 struct btrfs_file_extent_item); 6809 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6810 return uncompress_inline(path, folio, fi); 6811 6812 copy_size = min_t(u64, PAGE_SIZE, 6813 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6814 kaddr = kmap_local_folio(folio, 0); 6815 read_extent_buffer(path->nodes[0], kaddr, 6816 btrfs_file_extent_inline_start(fi), copy_size); 6817 kunmap_local(kaddr); 6818 if (copy_size < PAGE_SIZE) 6819 folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size); 6820 return 0; 6821 } 6822 6823 /* 6824 * Lookup the first extent overlapping a range in a file. 6825 * 6826 * @inode: file to search in 6827 * @page: page to read extent data into if the extent is inline 6828 * @start: file offset 6829 * @len: length of range starting at @start 6830 * 6831 * Return the first &struct extent_map which overlaps the given range, reading 6832 * it from the B-tree and caching it if necessary. Note that there may be more 6833 * extents which overlap the given range after the returned extent_map. 6834 * 6835 * If @page is not NULL and the extent is inline, this also reads the extent 6836 * data directly into the page and marks the extent up to date in the io_tree. 6837 * 6838 * Return: ERR_PTR on error, non-NULL extent_map on success. 6839 */ 6840 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6841 struct folio *folio, u64 start, u64 len) 6842 { 6843 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6844 int ret = 0; 6845 u64 extent_start = 0; 6846 u64 extent_end = 0; 6847 u64 objectid = btrfs_ino(inode); 6848 int extent_type = -1; 6849 struct btrfs_path *path = NULL; 6850 struct btrfs_root *root = inode->root; 6851 struct btrfs_file_extent_item *item; 6852 struct extent_buffer *leaf; 6853 struct btrfs_key found_key; 6854 struct extent_map *em = NULL; 6855 struct extent_map_tree *em_tree = &inode->extent_tree; 6856 6857 read_lock(&em_tree->lock); 6858 em = lookup_extent_mapping(em_tree, start, len); 6859 read_unlock(&em_tree->lock); 6860 6861 if (em) { 6862 if (em->start > start || em->start + em->len <= start) 6863 free_extent_map(em); 6864 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio) 6865 free_extent_map(em); 6866 else 6867 goto out; 6868 } 6869 em = alloc_extent_map(); 6870 if (!em) { 6871 ret = -ENOMEM; 6872 goto out; 6873 } 6874 em->start = EXTENT_MAP_HOLE; 6875 em->disk_bytenr = EXTENT_MAP_HOLE; 6876 em->len = (u64)-1; 6877 6878 path = btrfs_alloc_path(); 6879 if (!path) { 6880 ret = -ENOMEM; 6881 goto out; 6882 } 6883 6884 /* Chances are we'll be called again, so go ahead and do readahead */ 6885 path->reada = READA_FORWARD; 6886 6887 /* 6888 * The same explanation in load_free_space_cache applies here as well, 6889 * we only read when we're loading the free space cache, and at that 6890 * point the commit_root has everything we need. 6891 */ 6892 if (btrfs_is_free_space_inode(inode)) { 6893 path->search_commit_root = 1; 6894 path->skip_locking = 1; 6895 } 6896 6897 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6898 if (ret < 0) { 6899 goto out; 6900 } else if (ret > 0) { 6901 if (path->slots[0] == 0) 6902 goto not_found; 6903 path->slots[0]--; 6904 ret = 0; 6905 } 6906 6907 leaf = path->nodes[0]; 6908 item = btrfs_item_ptr(leaf, path->slots[0], 6909 struct btrfs_file_extent_item); 6910 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6911 if (found_key.objectid != objectid || 6912 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6913 /* 6914 * If we backup past the first extent we want to move forward 6915 * and see if there is an extent in front of us, otherwise we'll 6916 * say there is a hole for our whole search range which can 6917 * cause problems. 6918 */ 6919 extent_end = start; 6920 goto next; 6921 } 6922 6923 extent_type = btrfs_file_extent_type(leaf, item); 6924 extent_start = found_key.offset; 6925 extent_end = btrfs_file_extent_end(path); 6926 if (extent_type == BTRFS_FILE_EXTENT_REG || 6927 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6928 /* Only regular file could have regular/prealloc extent */ 6929 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6930 ret = -EUCLEAN; 6931 btrfs_crit(fs_info, 6932 "regular/prealloc extent found for non-regular inode %llu", 6933 btrfs_ino(inode)); 6934 goto out; 6935 } 6936 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6937 extent_start); 6938 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6939 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6940 path->slots[0], 6941 extent_start); 6942 } 6943 next: 6944 if (start >= extent_end) { 6945 path->slots[0]++; 6946 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6947 ret = btrfs_next_leaf(root, path); 6948 if (ret < 0) 6949 goto out; 6950 else if (ret > 0) 6951 goto not_found; 6952 6953 leaf = path->nodes[0]; 6954 } 6955 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6956 if (found_key.objectid != objectid || 6957 found_key.type != BTRFS_EXTENT_DATA_KEY) 6958 goto not_found; 6959 if (start + len <= found_key.offset) 6960 goto not_found; 6961 if (start > found_key.offset) 6962 goto next; 6963 6964 /* New extent overlaps with existing one */ 6965 em->start = start; 6966 em->len = found_key.offset - start; 6967 em->disk_bytenr = EXTENT_MAP_HOLE; 6968 goto insert; 6969 } 6970 6971 btrfs_extent_item_to_extent_map(inode, path, item, em); 6972 6973 if (extent_type == BTRFS_FILE_EXTENT_REG || 6974 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6975 goto insert; 6976 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6977 /* 6978 * Inline extent can only exist at file offset 0. This is 6979 * ensured by tree-checker and inline extent creation path. 6980 * Thus all members representing file offsets should be zero. 6981 */ 6982 ASSERT(extent_start == 0); 6983 ASSERT(em->start == 0); 6984 6985 /* 6986 * btrfs_extent_item_to_extent_map() should have properly 6987 * initialized em members already. 6988 * 6989 * Other members are not utilized for inline extents. 6990 */ 6991 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE); 6992 ASSERT(em->len == fs_info->sectorsize); 6993 6994 ret = read_inline_extent(path, folio); 6995 if (ret < 0) 6996 goto out; 6997 goto insert; 6998 } 6999 not_found: 7000 em->start = start; 7001 em->len = len; 7002 em->disk_bytenr = EXTENT_MAP_HOLE; 7003 insert: 7004 ret = 0; 7005 btrfs_release_path(path); 7006 if (em->start > start || extent_map_end(em) <= start) { 7007 btrfs_err(fs_info, 7008 "bad extent! em: [%llu %llu] passed [%llu %llu]", 7009 em->start, em->len, start, len); 7010 ret = -EIO; 7011 goto out; 7012 } 7013 7014 write_lock(&em_tree->lock); 7015 ret = btrfs_add_extent_mapping(inode, &em, start, len); 7016 write_unlock(&em_tree->lock); 7017 out: 7018 btrfs_free_path(path); 7019 7020 trace_btrfs_get_extent(root, inode, em); 7021 7022 if (ret) { 7023 free_extent_map(em); 7024 return ERR_PTR(ret); 7025 } 7026 return em; 7027 } 7028 7029 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 7030 { 7031 struct btrfs_block_group *block_group; 7032 bool readonly = false; 7033 7034 block_group = btrfs_lookup_block_group(fs_info, bytenr); 7035 if (!block_group || block_group->ro) 7036 readonly = true; 7037 if (block_group) 7038 btrfs_put_block_group(block_group); 7039 return readonly; 7040 } 7041 7042 /* 7043 * Check if we can do nocow write into the range [@offset, @offset + @len) 7044 * 7045 * @offset: File offset 7046 * @len: The length to write, will be updated to the nocow writeable 7047 * range 7048 * @orig_start: (optional) Return the original file offset of the file extent 7049 * @orig_len: (optional) Return the original on-disk length of the file extent 7050 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7051 * 7052 * Return: 7053 * >0 and update @len if we can do nocow write 7054 * 0 if we can't do nocow write 7055 * <0 if error happened 7056 * 7057 * NOTE: This only checks the file extents, caller is responsible to wait for 7058 * any ordered extents. 7059 */ 7060 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7061 struct btrfs_file_extent *file_extent, 7062 bool nowait) 7063 { 7064 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 7065 struct can_nocow_file_extent_args nocow_args = { 0 }; 7066 struct btrfs_path *path; 7067 int ret; 7068 struct extent_buffer *leaf; 7069 struct btrfs_root *root = BTRFS_I(inode)->root; 7070 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7071 struct btrfs_file_extent_item *fi; 7072 struct btrfs_key key; 7073 int found_type; 7074 7075 path = btrfs_alloc_path(); 7076 if (!path) 7077 return -ENOMEM; 7078 path->nowait = nowait; 7079 7080 ret = btrfs_lookup_file_extent(NULL, root, path, 7081 btrfs_ino(BTRFS_I(inode)), offset, 0); 7082 if (ret < 0) 7083 goto out; 7084 7085 if (ret == 1) { 7086 if (path->slots[0] == 0) { 7087 /* can't find the item, must cow */ 7088 ret = 0; 7089 goto out; 7090 } 7091 path->slots[0]--; 7092 } 7093 ret = 0; 7094 leaf = path->nodes[0]; 7095 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7096 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7097 key.type != BTRFS_EXTENT_DATA_KEY) { 7098 /* not our file or wrong item type, must cow */ 7099 goto out; 7100 } 7101 7102 if (key.offset > offset) { 7103 /* Wrong offset, must cow */ 7104 goto out; 7105 } 7106 7107 if (btrfs_file_extent_end(path) <= offset) 7108 goto out; 7109 7110 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7111 found_type = btrfs_file_extent_type(leaf, fi); 7112 7113 nocow_args.start = offset; 7114 nocow_args.end = offset + *len - 1; 7115 nocow_args.free_path = true; 7116 7117 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7118 /* can_nocow_file_extent() has freed the path. */ 7119 path = NULL; 7120 7121 if (ret != 1) { 7122 /* Treat errors as not being able to NOCOW. */ 7123 ret = 0; 7124 goto out; 7125 } 7126 7127 ret = 0; 7128 if (btrfs_extent_readonly(fs_info, 7129 nocow_args.file_extent.disk_bytenr + 7130 nocow_args.file_extent.offset)) 7131 goto out; 7132 7133 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7134 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7135 u64 range_end; 7136 7137 range_end = round_up(offset + nocow_args.file_extent.num_bytes, 7138 root->fs_info->sectorsize) - 1; 7139 ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC); 7140 if (ret) { 7141 ret = -EAGAIN; 7142 goto out; 7143 } 7144 } 7145 7146 if (file_extent) 7147 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent)); 7148 7149 *len = nocow_args.file_extent.num_bytes; 7150 ret = 1; 7151 out: 7152 btrfs_free_path(path); 7153 return ret; 7154 } 7155 7156 /* The callers of this must take lock_extent() */ 7157 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start, 7158 const struct btrfs_file_extent *file_extent, 7159 int type) 7160 { 7161 struct extent_map *em; 7162 int ret; 7163 7164 /* 7165 * Note the missing NOCOW type. 7166 * 7167 * For pure NOCOW writes, we should not create an io extent map, but 7168 * just reusing the existing one. 7169 * Only PREALLOC writes (NOCOW write into preallocated range) can 7170 * create an io extent map. 7171 */ 7172 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7173 type == BTRFS_ORDERED_COMPRESSED || 7174 type == BTRFS_ORDERED_REGULAR); 7175 7176 switch (type) { 7177 case BTRFS_ORDERED_PREALLOC: 7178 /* We're only referring part of a larger preallocated extent. */ 7179 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes); 7180 break; 7181 case BTRFS_ORDERED_REGULAR: 7182 /* COW results a new extent matching our file extent size. */ 7183 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes); 7184 ASSERT(file_extent->ram_bytes == file_extent->num_bytes); 7185 7186 /* Since it's a new extent, we should not have any offset. */ 7187 ASSERT(file_extent->offset == 0); 7188 break; 7189 case BTRFS_ORDERED_COMPRESSED: 7190 /* Must be compressed. */ 7191 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE); 7192 7193 /* 7194 * Encoded write can make us to refer to part of the 7195 * uncompressed extent. 7196 */ 7197 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes); 7198 break; 7199 } 7200 7201 em = alloc_extent_map(); 7202 if (!em) 7203 return ERR_PTR(-ENOMEM); 7204 7205 em->start = start; 7206 em->len = file_extent->num_bytes; 7207 em->disk_bytenr = file_extent->disk_bytenr; 7208 em->disk_num_bytes = file_extent->disk_num_bytes; 7209 em->ram_bytes = file_extent->ram_bytes; 7210 em->generation = -1; 7211 em->offset = file_extent->offset; 7212 em->flags |= EXTENT_FLAG_PINNED; 7213 if (type == BTRFS_ORDERED_COMPRESSED) 7214 extent_map_set_compression(em, file_extent->compression); 7215 7216 ret = btrfs_replace_extent_map_range(inode, em, true); 7217 if (ret) { 7218 free_extent_map(em); 7219 return ERR_PTR(ret); 7220 } 7221 7222 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7223 return em; 7224 } 7225 7226 /* 7227 * For release_folio() and invalidate_folio() we have a race window where 7228 * folio_end_writeback() is called but the subpage spinlock is not yet released. 7229 * If we continue to release/invalidate the page, we could cause use-after-free 7230 * for subpage spinlock. So this function is to spin and wait for subpage 7231 * spinlock. 7232 */ 7233 static void wait_subpage_spinlock(struct folio *folio) 7234 { 7235 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); 7236 struct btrfs_subpage *subpage; 7237 7238 if (!btrfs_is_subpage(fs_info, folio->mapping)) 7239 return; 7240 7241 ASSERT(folio_test_private(folio) && folio_get_private(folio)); 7242 subpage = folio_get_private(folio); 7243 7244 /* 7245 * This may look insane as we just acquire the spinlock and release it, 7246 * without doing anything. But we just want to make sure no one is 7247 * still holding the subpage spinlock. 7248 * And since the page is not dirty nor writeback, and we have page 7249 * locked, the only possible way to hold a spinlock is from the endio 7250 * function to clear page writeback. 7251 * 7252 * Here we just acquire the spinlock so that all existing callers 7253 * should exit and we're safe to release/invalidate the page. 7254 */ 7255 spin_lock_irq(&subpage->lock); 7256 spin_unlock_irq(&subpage->lock); 7257 } 7258 7259 static int btrfs_launder_folio(struct folio *folio) 7260 { 7261 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio), 7262 PAGE_SIZE, NULL); 7263 } 7264 7265 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7266 { 7267 if (try_release_extent_mapping(folio, gfp_flags)) { 7268 wait_subpage_spinlock(folio); 7269 clear_folio_extent_mapped(folio); 7270 return true; 7271 } 7272 return false; 7273 } 7274 7275 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7276 { 7277 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 7278 return false; 7279 return __btrfs_release_folio(folio, gfp_flags); 7280 } 7281 7282 #ifdef CONFIG_MIGRATION 7283 static int btrfs_migrate_folio(struct address_space *mapping, 7284 struct folio *dst, struct folio *src, 7285 enum migrate_mode mode) 7286 { 7287 int ret = filemap_migrate_folio(mapping, dst, src, mode); 7288 7289 if (ret != MIGRATEPAGE_SUCCESS) 7290 return ret; 7291 7292 if (folio_test_ordered(src)) { 7293 folio_clear_ordered(src); 7294 folio_set_ordered(dst); 7295 } 7296 7297 return MIGRATEPAGE_SUCCESS; 7298 } 7299 #else 7300 #define btrfs_migrate_folio NULL 7301 #endif 7302 7303 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 7304 size_t length) 7305 { 7306 struct btrfs_inode *inode = folio_to_inode(folio); 7307 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7308 struct extent_io_tree *tree = &inode->io_tree; 7309 struct extent_state *cached_state = NULL; 7310 u64 page_start = folio_pos(folio); 7311 u64 page_end = page_start + folio_size(folio) - 1; 7312 u64 cur; 7313 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 7314 7315 /* 7316 * We have folio locked so no new ordered extent can be created on this 7317 * page, nor bio can be submitted for this folio. 7318 * 7319 * But already submitted bio can still be finished on this folio. 7320 * Furthermore, endio function won't skip folio which has Ordered 7321 * already cleared, so it's possible for endio and 7322 * invalidate_folio to do the same ordered extent accounting twice 7323 * on one folio. 7324 * 7325 * So here we wait for any submitted bios to finish, so that we won't 7326 * do double ordered extent accounting on the same folio. 7327 */ 7328 folio_wait_writeback(folio); 7329 wait_subpage_spinlock(folio); 7330 7331 /* 7332 * For subpage case, we have call sites like 7333 * btrfs_punch_hole_lock_range() which passes range not aligned to 7334 * sectorsize. 7335 * If the range doesn't cover the full folio, we don't need to and 7336 * shouldn't clear page extent mapped, as folio->private can still 7337 * record subpage dirty bits for other part of the range. 7338 * 7339 * For cases that invalidate the full folio even the range doesn't 7340 * cover the full folio, like invalidating the last folio, we're 7341 * still safe to wait for ordered extent to finish. 7342 */ 7343 if (!(offset == 0 && length == folio_size(folio))) { 7344 btrfs_release_folio(folio, GFP_NOFS); 7345 return; 7346 } 7347 7348 if (!inode_evicting) 7349 lock_extent(tree, page_start, page_end, &cached_state); 7350 7351 cur = page_start; 7352 while (cur < page_end) { 7353 struct btrfs_ordered_extent *ordered; 7354 u64 range_end; 7355 u32 range_len; 7356 u32 extra_flags = 0; 7357 7358 ordered = btrfs_lookup_first_ordered_range(inode, cur, 7359 page_end + 1 - cur); 7360 if (!ordered) { 7361 range_end = page_end; 7362 /* 7363 * No ordered extent covering this range, we are safe 7364 * to delete all extent states in the range. 7365 */ 7366 extra_flags = EXTENT_CLEAR_ALL_BITS; 7367 goto next; 7368 } 7369 if (ordered->file_offset > cur) { 7370 /* 7371 * There is a range between [cur, oe->file_offset) not 7372 * covered by any ordered extent. 7373 * We are safe to delete all extent states, and handle 7374 * the ordered extent in the next iteration. 7375 */ 7376 range_end = ordered->file_offset - 1; 7377 extra_flags = EXTENT_CLEAR_ALL_BITS; 7378 goto next; 7379 } 7380 7381 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 7382 page_end); 7383 ASSERT(range_end + 1 - cur < U32_MAX); 7384 range_len = range_end + 1 - cur; 7385 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) { 7386 /* 7387 * If Ordered is cleared, it means endio has 7388 * already been executed for the range. 7389 * We can't delete the extent states as 7390 * btrfs_finish_ordered_io() may still use some of them. 7391 */ 7392 goto next; 7393 } 7394 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len); 7395 7396 /* 7397 * IO on this page will never be started, so we need to account 7398 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 7399 * here, must leave that up for the ordered extent completion. 7400 * 7401 * This will also unlock the range for incoming 7402 * btrfs_finish_ordered_io(). 7403 */ 7404 if (!inode_evicting) 7405 clear_extent_bit(tree, cur, range_end, 7406 EXTENT_DELALLOC | 7407 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 7408 EXTENT_DEFRAG, &cached_state); 7409 7410 spin_lock_irq(&inode->ordered_tree_lock); 7411 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 7412 ordered->truncated_len = min(ordered->truncated_len, 7413 cur - ordered->file_offset); 7414 spin_unlock_irq(&inode->ordered_tree_lock); 7415 7416 /* 7417 * If the ordered extent has finished, we're safe to delete all 7418 * the extent states of the range, otherwise 7419 * btrfs_finish_ordered_io() will get executed by endio for 7420 * other pages, so we can't delete extent states. 7421 */ 7422 if (btrfs_dec_test_ordered_pending(inode, &ordered, 7423 cur, range_end + 1 - cur)) { 7424 btrfs_finish_ordered_io(ordered); 7425 /* 7426 * The ordered extent has finished, now we're again 7427 * safe to delete all extent states of the range. 7428 */ 7429 extra_flags = EXTENT_CLEAR_ALL_BITS; 7430 } 7431 next: 7432 if (ordered) 7433 btrfs_put_ordered_extent(ordered); 7434 /* 7435 * Qgroup reserved space handler 7436 * Sector(s) here will be either: 7437 * 7438 * 1) Already written to disk or bio already finished 7439 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 7440 * Qgroup will be handled by its qgroup_record then. 7441 * btrfs_qgroup_free_data() call will do nothing here. 7442 * 7443 * 2) Not written to disk yet 7444 * Then btrfs_qgroup_free_data() call will clear the 7445 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 7446 * reserved data space. 7447 * Since the IO will never happen for this page. 7448 */ 7449 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); 7450 if (!inode_evicting) { 7451 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 7452 EXTENT_DELALLOC | EXTENT_UPTODATE | 7453 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 7454 extra_flags, &cached_state); 7455 } 7456 cur = range_end + 1; 7457 } 7458 /* 7459 * We have iterated through all ordered extents of the page, the page 7460 * should not have Ordered anymore, or the above iteration 7461 * did something wrong. 7462 */ 7463 ASSERT(!folio_test_ordered(folio)); 7464 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio)); 7465 if (!inode_evicting) 7466 __btrfs_release_folio(folio, GFP_NOFS); 7467 clear_folio_extent_mapped(folio); 7468 } 7469 7470 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) 7471 { 7472 struct btrfs_truncate_control control = { 7473 .inode = inode, 7474 .ino = btrfs_ino(inode), 7475 .min_type = BTRFS_EXTENT_DATA_KEY, 7476 .clear_extent_range = true, 7477 }; 7478 struct btrfs_root *root = inode->root; 7479 struct btrfs_fs_info *fs_info = root->fs_info; 7480 struct btrfs_block_rsv *rsv; 7481 int ret; 7482 struct btrfs_trans_handle *trans; 7483 u64 mask = fs_info->sectorsize - 1; 7484 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 7485 7486 if (!skip_writeback) { 7487 ret = btrfs_wait_ordered_range(inode, 7488 inode->vfs_inode.i_size & (~mask), 7489 (u64)-1); 7490 if (ret) 7491 return ret; 7492 } 7493 7494 /* 7495 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 7496 * things going on here: 7497 * 7498 * 1) We need to reserve space to update our inode. 7499 * 7500 * 2) We need to have something to cache all the space that is going to 7501 * be free'd up by the truncate operation, but also have some slack 7502 * space reserved in case it uses space during the truncate (thank you 7503 * very much snapshotting). 7504 * 7505 * And we need these to be separate. The fact is we can use a lot of 7506 * space doing the truncate, and we have no earthly idea how much space 7507 * we will use, so we need the truncate reservation to be separate so it 7508 * doesn't end up using space reserved for updating the inode. We also 7509 * need to be able to stop the transaction and start a new one, which 7510 * means we need to be able to update the inode several times, and we 7511 * have no idea of knowing how many times that will be, so we can't just 7512 * reserve 1 item for the entirety of the operation, so that has to be 7513 * done separately as well. 7514 * 7515 * So that leaves us with 7516 * 7517 * 1) rsv - for the truncate reservation, which we will steal from the 7518 * transaction reservation. 7519 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 7520 * updating the inode. 7521 */ 7522 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 7523 if (!rsv) 7524 return -ENOMEM; 7525 rsv->size = min_size; 7526 rsv->failfast = true; 7527 7528 /* 7529 * 1 for the truncate slack space 7530 * 1 for updating the inode. 7531 */ 7532 trans = btrfs_start_transaction(root, 2); 7533 if (IS_ERR(trans)) { 7534 ret = PTR_ERR(trans); 7535 goto out; 7536 } 7537 7538 /* Migrate the slack space for the truncate to our reserve */ 7539 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 7540 min_size, false); 7541 /* 7542 * We have reserved 2 metadata units when we started the transaction and 7543 * min_size matches 1 unit, so this should never fail, but if it does, 7544 * it's not critical we just fail truncation. 7545 */ 7546 if (WARN_ON(ret)) { 7547 btrfs_end_transaction(trans); 7548 goto out; 7549 } 7550 7551 trans->block_rsv = rsv; 7552 7553 while (1) { 7554 struct extent_state *cached_state = NULL; 7555 const u64 new_size = inode->vfs_inode.i_size; 7556 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 7557 7558 control.new_size = new_size; 7559 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 7560 /* 7561 * We want to drop from the next block forward in case this new 7562 * size is not block aligned since we will be keeping the last 7563 * block of the extent just the way it is. 7564 */ 7565 btrfs_drop_extent_map_range(inode, 7566 ALIGN(new_size, fs_info->sectorsize), 7567 (u64)-1, false); 7568 7569 ret = btrfs_truncate_inode_items(trans, root, &control); 7570 7571 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 7572 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 7573 7574 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 7575 7576 trans->block_rsv = &fs_info->trans_block_rsv; 7577 if (ret != -ENOSPC && ret != -EAGAIN) 7578 break; 7579 7580 ret = btrfs_update_inode(trans, inode); 7581 if (ret) 7582 break; 7583 7584 btrfs_end_transaction(trans); 7585 btrfs_btree_balance_dirty(fs_info); 7586 7587 trans = btrfs_start_transaction(root, 2); 7588 if (IS_ERR(trans)) { 7589 ret = PTR_ERR(trans); 7590 trans = NULL; 7591 break; 7592 } 7593 7594 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 7595 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 7596 rsv, min_size, false); 7597 /* 7598 * We have reserved 2 metadata units when we started the 7599 * transaction and min_size matches 1 unit, so this should never 7600 * fail, but if it does, it's not critical we just fail truncation. 7601 */ 7602 if (WARN_ON(ret)) 7603 break; 7604 7605 trans->block_rsv = rsv; 7606 } 7607 7608 /* 7609 * We can't call btrfs_truncate_block inside a trans handle as we could 7610 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 7611 * know we've truncated everything except the last little bit, and can 7612 * do btrfs_truncate_block and then update the disk_i_size. 7613 */ 7614 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 7615 btrfs_end_transaction(trans); 7616 btrfs_btree_balance_dirty(fs_info); 7617 7618 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 7619 if (ret) 7620 goto out; 7621 trans = btrfs_start_transaction(root, 1); 7622 if (IS_ERR(trans)) { 7623 ret = PTR_ERR(trans); 7624 goto out; 7625 } 7626 btrfs_inode_safe_disk_i_size_write(inode, 0); 7627 } 7628 7629 if (trans) { 7630 int ret2; 7631 7632 trans->block_rsv = &fs_info->trans_block_rsv; 7633 ret2 = btrfs_update_inode(trans, inode); 7634 if (ret2 && !ret) 7635 ret = ret2; 7636 7637 ret2 = btrfs_end_transaction(trans); 7638 if (ret2 && !ret) 7639 ret = ret2; 7640 btrfs_btree_balance_dirty(fs_info); 7641 } 7642 out: 7643 btrfs_free_block_rsv(fs_info, rsv); 7644 /* 7645 * So if we truncate and then write and fsync we normally would just 7646 * write the extents that changed, which is a problem if we need to 7647 * first truncate that entire inode. So set this flag so we write out 7648 * all of the extents in the inode to the sync log so we're completely 7649 * safe. 7650 * 7651 * If no extents were dropped or trimmed we don't need to force the next 7652 * fsync to truncate all the inode's items from the log and re-log them 7653 * all. This means the truncate operation did not change the file size, 7654 * or changed it to a smaller size but there was only an implicit hole 7655 * between the old i_size and the new i_size, and there were no prealloc 7656 * extents beyond i_size to drop. 7657 */ 7658 if (control.extents_found > 0) 7659 btrfs_set_inode_full_sync(inode); 7660 7661 return ret; 7662 } 7663 7664 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 7665 struct inode *dir) 7666 { 7667 struct inode *inode; 7668 7669 inode = new_inode(dir->i_sb); 7670 if (inode) { 7671 /* 7672 * Subvolumes don't inherit the sgid bit or the parent's gid if 7673 * the parent's sgid bit is set. This is probably a bug. 7674 */ 7675 inode_init_owner(idmap, inode, NULL, 7676 S_IFDIR | (~current_umask() & S_IRWXUGO)); 7677 inode->i_op = &btrfs_dir_inode_operations; 7678 inode->i_fop = &btrfs_dir_file_operations; 7679 } 7680 return inode; 7681 } 7682 7683 struct inode *btrfs_alloc_inode(struct super_block *sb) 7684 { 7685 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 7686 struct btrfs_inode *ei; 7687 struct inode *inode; 7688 7689 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 7690 if (!ei) 7691 return NULL; 7692 7693 ei->root = NULL; 7694 ei->generation = 0; 7695 ei->last_trans = 0; 7696 ei->last_sub_trans = 0; 7697 ei->logged_trans = 0; 7698 ei->delalloc_bytes = 0; 7699 ei->new_delalloc_bytes = 0; 7700 ei->defrag_bytes = 0; 7701 ei->disk_i_size = 0; 7702 ei->flags = 0; 7703 ei->ro_flags = 0; 7704 /* 7705 * ->index_cnt will be properly initialized later when creating a new 7706 * inode (btrfs_create_new_inode()) or when reading an existing inode 7707 * from disk (btrfs_read_locked_inode()). 7708 */ 7709 ei->csum_bytes = 0; 7710 ei->dir_index = 0; 7711 ei->last_unlink_trans = 0; 7712 ei->last_reflink_trans = 0; 7713 ei->last_log_commit = 0; 7714 7715 spin_lock_init(&ei->lock); 7716 ei->outstanding_extents = 0; 7717 if (sb->s_magic != BTRFS_TEST_MAGIC) 7718 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 7719 BTRFS_BLOCK_RSV_DELALLOC); 7720 ei->runtime_flags = 0; 7721 ei->prop_compress = BTRFS_COMPRESS_NONE; 7722 ei->defrag_compress = BTRFS_COMPRESS_NONE; 7723 7724 ei->delayed_node = NULL; 7725 7726 ei->i_otime_sec = 0; 7727 ei->i_otime_nsec = 0; 7728 7729 inode = &ei->vfs_inode; 7730 extent_map_tree_init(&ei->extent_tree); 7731 7732 /* This io tree sets the valid inode. */ 7733 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 7734 ei->io_tree.inode = ei; 7735 7736 ei->file_extent_tree = NULL; 7737 7738 mutex_init(&ei->log_mutex); 7739 spin_lock_init(&ei->ordered_tree_lock); 7740 ei->ordered_tree = RB_ROOT; 7741 ei->ordered_tree_last = NULL; 7742 INIT_LIST_HEAD(&ei->delalloc_inodes); 7743 INIT_LIST_HEAD(&ei->delayed_iput); 7744 init_rwsem(&ei->i_mmap_lock); 7745 7746 return inode; 7747 } 7748 7749 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 7750 void btrfs_test_destroy_inode(struct inode *inode) 7751 { 7752 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 7753 kfree(BTRFS_I(inode)->file_extent_tree); 7754 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7755 } 7756 #endif 7757 7758 void btrfs_free_inode(struct inode *inode) 7759 { 7760 kfree(BTRFS_I(inode)->file_extent_tree); 7761 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7762 } 7763 7764 void btrfs_destroy_inode(struct inode *vfs_inode) 7765 { 7766 struct btrfs_ordered_extent *ordered; 7767 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 7768 struct btrfs_root *root = inode->root; 7769 bool freespace_inode; 7770 7771 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 7772 WARN_ON(vfs_inode->i_data.nrpages); 7773 WARN_ON(inode->block_rsv.reserved); 7774 WARN_ON(inode->block_rsv.size); 7775 WARN_ON(inode->outstanding_extents); 7776 if (!S_ISDIR(vfs_inode->i_mode)) { 7777 WARN_ON(inode->delalloc_bytes); 7778 WARN_ON(inode->new_delalloc_bytes); 7779 WARN_ON(inode->csum_bytes); 7780 } 7781 if (!root || !btrfs_is_data_reloc_root(root)) 7782 WARN_ON(inode->defrag_bytes); 7783 7784 /* 7785 * This can happen where we create an inode, but somebody else also 7786 * created the same inode and we need to destroy the one we already 7787 * created. 7788 */ 7789 if (!root) 7790 return; 7791 7792 /* 7793 * If this is a free space inode do not take the ordered extents lockdep 7794 * map. 7795 */ 7796 freespace_inode = btrfs_is_free_space_inode(inode); 7797 7798 while (1) { 7799 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 7800 if (!ordered) 7801 break; 7802 else { 7803 btrfs_err(root->fs_info, 7804 "found ordered extent %llu %llu on inode cleanup", 7805 ordered->file_offset, ordered->num_bytes); 7806 7807 if (!freespace_inode) 7808 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); 7809 7810 btrfs_remove_ordered_extent(inode, ordered); 7811 btrfs_put_ordered_extent(ordered); 7812 btrfs_put_ordered_extent(ordered); 7813 } 7814 } 7815 btrfs_qgroup_check_reserved_leak(inode); 7816 btrfs_del_inode_from_root(inode); 7817 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 7818 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 7819 btrfs_put_root(inode->root); 7820 } 7821 7822 int btrfs_drop_inode(struct inode *inode) 7823 { 7824 struct btrfs_root *root = BTRFS_I(inode)->root; 7825 7826 if (root == NULL) 7827 return 1; 7828 7829 /* the snap/subvol tree is on deleting */ 7830 if (btrfs_root_refs(&root->root_item) == 0) 7831 return 1; 7832 else 7833 return generic_drop_inode(inode); 7834 } 7835 7836 static void init_once(void *foo) 7837 { 7838 struct btrfs_inode *ei = foo; 7839 7840 inode_init_once(&ei->vfs_inode); 7841 } 7842 7843 void __cold btrfs_destroy_cachep(void) 7844 { 7845 /* 7846 * Make sure all delayed rcu free inodes are flushed before we 7847 * destroy cache. 7848 */ 7849 rcu_barrier(); 7850 kmem_cache_destroy(btrfs_inode_cachep); 7851 } 7852 7853 int __init btrfs_init_cachep(void) 7854 { 7855 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 7856 sizeof(struct btrfs_inode), 0, 7857 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, 7858 init_once); 7859 if (!btrfs_inode_cachep) 7860 return -ENOMEM; 7861 7862 return 0; 7863 } 7864 7865 static int btrfs_getattr(struct mnt_idmap *idmap, 7866 const struct path *path, struct kstat *stat, 7867 u32 request_mask, unsigned int flags) 7868 { 7869 u64 delalloc_bytes; 7870 u64 inode_bytes; 7871 struct inode *inode = d_inode(path->dentry); 7872 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; 7873 u32 bi_flags = BTRFS_I(inode)->flags; 7874 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 7875 7876 stat->result_mask |= STATX_BTIME; 7877 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec; 7878 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec; 7879 if (bi_flags & BTRFS_INODE_APPEND) 7880 stat->attributes |= STATX_ATTR_APPEND; 7881 if (bi_flags & BTRFS_INODE_COMPRESS) 7882 stat->attributes |= STATX_ATTR_COMPRESSED; 7883 if (bi_flags & BTRFS_INODE_IMMUTABLE) 7884 stat->attributes |= STATX_ATTR_IMMUTABLE; 7885 if (bi_flags & BTRFS_INODE_NODUMP) 7886 stat->attributes |= STATX_ATTR_NODUMP; 7887 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 7888 stat->attributes |= STATX_ATTR_VERITY; 7889 7890 stat->attributes_mask |= (STATX_ATTR_APPEND | 7891 STATX_ATTR_COMPRESSED | 7892 STATX_ATTR_IMMUTABLE | 7893 STATX_ATTR_NODUMP); 7894 7895 generic_fillattr(idmap, request_mask, inode, stat); 7896 stat->dev = BTRFS_I(inode)->root->anon_dev; 7897 7898 stat->subvol = BTRFS_I(inode)->root->root_key.objectid; 7899 stat->result_mask |= STATX_SUBVOL; 7900 7901 spin_lock(&BTRFS_I(inode)->lock); 7902 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 7903 inode_bytes = inode_get_bytes(inode); 7904 spin_unlock(&BTRFS_I(inode)->lock); 7905 stat->blocks = (ALIGN(inode_bytes, blocksize) + 7906 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT; 7907 return 0; 7908 } 7909 7910 static int btrfs_rename_exchange(struct inode *old_dir, 7911 struct dentry *old_dentry, 7912 struct inode *new_dir, 7913 struct dentry *new_dentry) 7914 { 7915 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 7916 struct btrfs_trans_handle *trans; 7917 unsigned int trans_num_items; 7918 struct btrfs_root *root = BTRFS_I(old_dir)->root; 7919 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 7920 struct inode *new_inode = new_dentry->d_inode; 7921 struct inode *old_inode = old_dentry->d_inode; 7922 struct btrfs_rename_ctx old_rename_ctx; 7923 struct btrfs_rename_ctx new_rename_ctx; 7924 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 7925 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 7926 u64 old_idx = 0; 7927 u64 new_idx = 0; 7928 int ret; 7929 int ret2; 7930 bool need_abort = false; 7931 struct fscrypt_name old_fname, new_fname; 7932 struct fscrypt_str *old_name, *new_name; 7933 7934 /* 7935 * For non-subvolumes allow exchange only within one subvolume, in the 7936 * same inode namespace. Two subvolumes (represented as directory) can 7937 * be exchanged as they're a logical link and have a fixed inode number. 7938 */ 7939 if (root != dest && 7940 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 7941 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 7942 return -EXDEV; 7943 7944 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 7945 if (ret) 7946 return ret; 7947 7948 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 7949 if (ret) { 7950 fscrypt_free_filename(&old_fname); 7951 return ret; 7952 } 7953 7954 old_name = &old_fname.disk_name; 7955 new_name = &new_fname.disk_name; 7956 7957 /* close the race window with snapshot create/destroy ioctl */ 7958 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 7959 new_ino == BTRFS_FIRST_FREE_OBJECTID) 7960 down_read(&fs_info->subvol_sem); 7961 7962 /* 7963 * For each inode: 7964 * 1 to remove old dir item 7965 * 1 to remove old dir index 7966 * 1 to add new dir item 7967 * 1 to add new dir index 7968 * 1 to update parent inode 7969 * 7970 * If the parents are the same, we only need to account for one 7971 */ 7972 trans_num_items = (old_dir == new_dir ? 9 : 10); 7973 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 7974 /* 7975 * 1 to remove old root ref 7976 * 1 to remove old root backref 7977 * 1 to add new root ref 7978 * 1 to add new root backref 7979 */ 7980 trans_num_items += 4; 7981 } else { 7982 /* 7983 * 1 to update inode item 7984 * 1 to remove old inode ref 7985 * 1 to add new inode ref 7986 */ 7987 trans_num_items += 3; 7988 } 7989 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 7990 trans_num_items += 4; 7991 else 7992 trans_num_items += 3; 7993 trans = btrfs_start_transaction(root, trans_num_items); 7994 if (IS_ERR(trans)) { 7995 ret = PTR_ERR(trans); 7996 goto out_notrans; 7997 } 7998 7999 if (dest != root) { 8000 ret = btrfs_record_root_in_trans(trans, dest); 8001 if (ret) 8002 goto out_fail; 8003 } 8004 8005 /* 8006 * We need to find a free sequence number both in the source and 8007 * in the destination directory for the exchange. 8008 */ 8009 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8010 if (ret) 8011 goto out_fail; 8012 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8013 if (ret) 8014 goto out_fail; 8015 8016 BTRFS_I(old_inode)->dir_index = 0ULL; 8017 BTRFS_I(new_inode)->dir_index = 0ULL; 8018 8019 /* Reference for the source. */ 8020 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8021 /* force full log commit if subvolume involved. */ 8022 btrfs_set_log_full_commit(trans); 8023 } else { 8024 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 8025 btrfs_ino(BTRFS_I(new_dir)), 8026 old_idx); 8027 if (ret) 8028 goto out_fail; 8029 need_abort = true; 8030 } 8031 8032 /* And now for the dest. */ 8033 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8034 /* force full log commit if subvolume involved. */ 8035 btrfs_set_log_full_commit(trans); 8036 } else { 8037 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 8038 btrfs_ino(BTRFS_I(old_dir)), 8039 new_idx); 8040 if (ret) { 8041 if (need_abort) 8042 btrfs_abort_transaction(trans, ret); 8043 goto out_fail; 8044 } 8045 } 8046 8047 /* Update inode version and ctime/mtime. */ 8048 inode_inc_iversion(old_dir); 8049 inode_inc_iversion(new_dir); 8050 inode_inc_iversion(old_inode); 8051 inode_inc_iversion(new_inode); 8052 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 8053 8054 if (old_dentry->d_parent != new_dentry->d_parent) { 8055 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8056 BTRFS_I(old_inode), true); 8057 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8058 BTRFS_I(new_inode), true); 8059 } 8060 8061 /* src is a subvolume */ 8062 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8063 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8064 if (ret) { 8065 btrfs_abort_transaction(trans, ret); 8066 goto out_fail; 8067 } 8068 } else { /* src is an inode */ 8069 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8070 BTRFS_I(old_dentry->d_inode), 8071 old_name, &old_rename_ctx); 8072 if (ret) { 8073 btrfs_abort_transaction(trans, ret); 8074 goto out_fail; 8075 } 8076 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8077 if (ret) { 8078 btrfs_abort_transaction(trans, ret); 8079 goto out_fail; 8080 } 8081 } 8082 8083 /* dest is a subvolume */ 8084 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8085 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8086 if (ret) { 8087 btrfs_abort_transaction(trans, ret); 8088 goto out_fail; 8089 } 8090 } else { /* dest is an inode */ 8091 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8092 BTRFS_I(new_dentry->d_inode), 8093 new_name, &new_rename_ctx); 8094 if (ret) { 8095 btrfs_abort_transaction(trans, ret); 8096 goto out_fail; 8097 } 8098 ret = btrfs_update_inode(trans, BTRFS_I(new_inode)); 8099 if (ret) { 8100 btrfs_abort_transaction(trans, ret); 8101 goto out_fail; 8102 } 8103 } 8104 8105 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8106 new_name, 0, old_idx); 8107 if (ret) { 8108 btrfs_abort_transaction(trans, ret); 8109 goto out_fail; 8110 } 8111 8112 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 8113 old_name, 0, new_idx); 8114 if (ret) { 8115 btrfs_abort_transaction(trans, ret); 8116 goto out_fail; 8117 } 8118 8119 if (old_inode->i_nlink == 1) 8120 BTRFS_I(old_inode)->dir_index = old_idx; 8121 if (new_inode->i_nlink == 1) 8122 BTRFS_I(new_inode)->dir_index = new_idx; 8123 8124 /* 8125 * Now pin the logs of the roots. We do it to ensure that no other task 8126 * can sync the logs while we are in progress with the rename, because 8127 * that could result in an inconsistency in case any of the inodes that 8128 * are part of this rename operation were logged before. 8129 */ 8130 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8131 btrfs_pin_log_trans(root); 8132 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8133 btrfs_pin_log_trans(dest); 8134 8135 /* Do the log updates for all inodes. */ 8136 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8137 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8138 old_rename_ctx.index, new_dentry->d_parent); 8139 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8140 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 8141 new_rename_ctx.index, old_dentry->d_parent); 8142 8143 /* Now unpin the logs. */ 8144 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8145 btrfs_end_log_trans(root); 8146 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8147 btrfs_end_log_trans(dest); 8148 out_fail: 8149 ret2 = btrfs_end_transaction(trans); 8150 ret = ret ? ret : ret2; 8151 out_notrans: 8152 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 8153 old_ino == BTRFS_FIRST_FREE_OBJECTID) 8154 up_read(&fs_info->subvol_sem); 8155 8156 fscrypt_free_filename(&new_fname); 8157 fscrypt_free_filename(&old_fname); 8158 return ret; 8159 } 8160 8161 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap, 8162 struct inode *dir) 8163 { 8164 struct inode *inode; 8165 8166 inode = new_inode(dir->i_sb); 8167 if (inode) { 8168 inode_init_owner(idmap, inode, dir, 8169 S_IFCHR | WHITEOUT_MODE); 8170 inode->i_op = &btrfs_special_inode_operations; 8171 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 8172 } 8173 return inode; 8174 } 8175 8176 static int btrfs_rename(struct mnt_idmap *idmap, 8177 struct inode *old_dir, struct dentry *old_dentry, 8178 struct inode *new_dir, struct dentry *new_dentry, 8179 unsigned int flags) 8180 { 8181 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 8182 struct btrfs_new_inode_args whiteout_args = { 8183 .dir = old_dir, 8184 .dentry = old_dentry, 8185 }; 8186 struct btrfs_trans_handle *trans; 8187 unsigned int trans_num_items; 8188 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8189 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8190 struct inode *new_inode = d_inode(new_dentry); 8191 struct inode *old_inode = d_inode(old_dentry); 8192 struct btrfs_rename_ctx rename_ctx; 8193 u64 index = 0; 8194 int ret; 8195 int ret2; 8196 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8197 struct fscrypt_name old_fname, new_fname; 8198 8199 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 8200 return -EPERM; 8201 8202 /* we only allow rename subvolume link between subvolumes */ 8203 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8204 return -EXDEV; 8205 8206 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 8207 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 8208 return -ENOTEMPTY; 8209 8210 if (S_ISDIR(old_inode->i_mode) && new_inode && 8211 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 8212 return -ENOTEMPTY; 8213 8214 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8215 if (ret) 8216 return ret; 8217 8218 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8219 if (ret) { 8220 fscrypt_free_filename(&old_fname); 8221 return ret; 8222 } 8223 8224 /* check for collisions, even if the name isn't there */ 8225 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 8226 if (ret) { 8227 if (ret == -EEXIST) { 8228 /* we shouldn't get 8229 * eexist without a new_inode */ 8230 if (WARN_ON(!new_inode)) { 8231 goto out_fscrypt_names; 8232 } 8233 } else { 8234 /* maybe -EOVERFLOW */ 8235 goto out_fscrypt_names; 8236 } 8237 } 8238 ret = 0; 8239 8240 /* 8241 * we're using rename to replace one file with another. Start IO on it 8242 * now so we don't add too much work to the end of the transaction 8243 */ 8244 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 8245 filemap_flush(old_inode->i_mapping); 8246 8247 if (flags & RENAME_WHITEOUT) { 8248 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 8249 if (!whiteout_args.inode) { 8250 ret = -ENOMEM; 8251 goto out_fscrypt_names; 8252 } 8253 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 8254 if (ret) 8255 goto out_whiteout_inode; 8256 } else { 8257 /* 1 to update the old parent inode. */ 8258 trans_num_items = 1; 8259 } 8260 8261 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8262 /* Close the race window with snapshot create/destroy ioctl */ 8263 down_read(&fs_info->subvol_sem); 8264 /* 8265 * 1 to remove old root ref 8266 * 1 to remove old root backref 8267 * 1 to add new root ref 8268 * 1 to add new root backref 8269 */ 8270 trans_num_items += 4; 8271 } else { 8272 /* 8273 * 1 to update inode 8274 * 1 to remove old inode ref 8275 * 1 to add new inode ref 8276 */ 8277 trans_num_items += 3; 8278 } 8279 /* 8280 * 1 to remove old dir item 8281 * 1 to remove old dir index 8282 * 1 to add new dir item 8283 * 1 to add new dir index 8284 */ 8285 trans_num_items += 4; 8286 /* 1 to update new parent inode if it's not the same as the old parent */ 8287 if (new_dir != old_dir) 8288 trans_num_items++; 8289 if (new_inode) { 8290 /* 8291 * 1 to update inode 8292 * 1 to remove inode ref 8293 * 1 to remove dir item 8294 * 1 to remove dir index 8295 * 1 to possibly add orphan item 8296 */ 8297 trans_num_items += 5; 8298 } 8299 trans = btrfs_start_transaction(root, trans_num_items); 8300 if (IS_ERR(trans)) { 8301 ret = PTR_ERR(trans); 8302 goto out_notrans; 8303 } 8304 8305 if (dest != root) { 8306 ret = btrfs_record_root_in_trans(trans, dest); 8307 if (ret) 8308 goto out_fail; 8309 } 8310 8311 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 8312 if (ret) 8313 goto out_fail; 8314 8315 BTRFS_I(old_inode)->dir_index = 0ULL; 8316 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 8317 /* force full log commit if subvolume involved. */ 8318 btrfs_set_log_full_commit(trans); 8319 } else { 8320 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 8321 old_ino, btrfs_ino(BTRFS_I(new_dir)), 8322 index); 8323 if (ret) 8324 goto out_fail; 8325 } 8326 8327 inode_inc_iversion(old_dir); 8328 inode_inc_iversion(new_dir); 8329 inode_inc_iversion(old_inode); 8330 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 8331 8332 if (old_dentry->d_parent != new_dentry->d_parent) 8333 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8334 BTRFS_I(old_inode), true); 8335 8336 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 8337 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8338 if (ret) { 8339 btrfs_abort_transaction(trans, ret); 8340 goto out_fail; 8341 } 8342 } else { 8343 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8344 BTRFS_I(d_inode(old_dentry)), 8345 &old_fname.disk_name, &rename_ctx); 8346 if (ret) { 8347 btrfs_abort_transaction(trans, ret); 8348 goto out_fail; 8349 } 8350 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8351 if (ret) { 8352 btrfs_abort_transaction(trans, ret); 8353 goto out_fail; 8354 } 8355 } 8356 8357 if (new_inode) { 8358 inode_inc_iversion(new_inode); 8359 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 8360 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 8361 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8362 if (ret) { 8363 btrfs_abort_transaction(trans, ret); 8364 goto out_fail; 8365 } 8366 BUG_ON(new_inode->i_nlink == 0); 8367 } else { 8368 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8369 BTRFS_I(d_inode(new_dentry)), 8370 &new_fname.disk_name); 8371 if (ret) { 8372 btrfs_abort_transaction(trans, ret); 8373 goto out_fail; 8374 } 8375 } 8376 if (new_inode->i_nlink == 0) { 8377 ret = btrfs_orphan_add(trans, 8378 BTRFS_I(d_inode(new_dentry))); 8379 if (ret) { 8380 btrfs_abort_transaction(trans, ret); 8381 goto out_fail; 8382 } 8383 } 8384 } 8385 8386 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8387 &new_fname.disk_name, 0, index); 8388 if (ret) { 8389 btrfs_abort_transaction(trans, ret); 8390 goto out_fail; 8391 } 8392 8393 if (old_inode->i_nlink == 1) 8394 BTRFS_I(old_inode)->dir_index = index; 8395 8396 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8397 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8398 rename_ctx.index, new_dentry->d_parent); 8399 8400 if (flags & RENAME_WHITEOUT) { 8401 ret = btrfs_create_new_inode(trans, &whiteout_args); 8402 if (ret) { 8403 btrfs_abort_transaction(trans, ret); 8404 goto out_fail; 8405 } else { 8406 unlock_new_inode(whiteout_args.inode); 8407 iput(whiteout_args.inode); 8408 whiteout_args.inode = NULL; 8409 } 8410 } 8411 out_fail: 8412 ret2 = btrfs_end_transaction(trans); 8413 ret = ret ? ret : ret2; 8414 out_notrans: 8415 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 8416 up_read(&fs_info->subvol_sem); 8417 if (flags & RENAME_WHITEOUT) 8418 btrfs_new_inode_args_destroy(&whiteout_args); 8419 out_whiteout_inode: 8420 if (flags & RENAME_WHITEOUT) 8421 iput(whiteout_args.inode); 8422 out_fscrypt_names: 8423 fscrypt_free_filename(&old_fname); 8424 fscrypt_free_filename(&new_fname); 8425 return ret; 8426 } 8427 8428 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 8429 struct dentry *old_dentry, struct inode *new_dir, 8430 struct dentry *new_dentry, unsigned int flags) 8431 { 8432 int ret; 8433 8434 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 8435 return -EINVAL; 8436 8437 if (flags & RENAME_EXCHANGE) 8438 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 8439 new_dentry); 8440 else 8441 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 8442 new_dentry, flags); 8443 8444 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 8445 8446 return ret; 8447 } 8448 8449 struct btrfs_delalloc_work { 8450 struct inode *inode; 8451 struct completion completion; 8452 struct list_head list; 8453 struct btrfs_work work; 8454 }; 8455 8456 static void btrfs_run_delalloc_work(struct btrfs_work *work) 8457 { 8458 struct btrfs_delalloc_work *delalloc_work; 8459 struct inode *inode; 8460 8461 delalloc_work = container_of(work, struct btrfs_delalloc_work, 8462 work); 8463 inode = delalloc_work->inode; 8464 filemap_flush(inode->i_mapping); 8465 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8466 &BTRFS_I(inode)->runtime_flags)) 8467 filemap_flush(inode->i_mapping); 8468 8469 iput(inode); 8470 complete(&delalloc_work->completion); 8471 } 8472 8473 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 8474 { 8475 struct btrfs_delalloc_work *work; 8476 8477 work = kmalloc(sizeof(*work), GFP_NOFS); 8478 if (!work) 8479 return NULL; 8480 8481 init_completion(&work->completion); 8482 INIT_LIST_HEAD(&work->list); 8483 work->inode = inode; 8484 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL); 8485 8486 return work; 8487 } 8488 8489 /* 8490 * some fairly slow code that needs optimization. This walks the list 8491 * of all the inodes with pending delalloc and forces them to disk. 8492 */ 8493 static int start_delalloc_inodes(struct btrfs_root *root, 8494 struct writeback_control *wbc, bool snapshot, 8495 bool in_reclaim_context) 8496 { 8497 struct btrfs_inode *binode; 8498 struct inode *inode; 8499 struct btrfs_delalloc_work *work, *next; 8500 LIST_HEAD(works); 8501 LIST_HEAD(splice); 8502 int ret = 0; 8503 bool full_flush = wbc->nr_to_write == LONG_MAX; 8504 8505 mutex_lock(&root->delalloc_mutex); 8506 spin_lock(&root->delalloc_lock); 8507 list_splice_init(&root->delalloc_inodes, &splice); 8508 while (!list_empty(&splice)) { 8509 binode = list_entry(splice.next, struct btrfs_inode, 8510 delalloc_inodes); 8511 8512 list_move_tail(&binode->delalloc_inodes, 8513 &root->delalloc_inodes); 8514 8515 if (in_reclaim_context && 8516 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 8517 continue; 8518 8519 inode = igrab(&binode->vfs_inode); 8520 if (!inode) { 8521 cond_resched_lock(&root->delalloc_lock); 8522 continue; 8523 } 8524 spin_unlock(&root->delalloc_lock); 8525 8526 if (snapshot) 8527 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 8528 &binode->runtime_flags); 8529 if (full_flush) { 8530 work = btrfs_alloc_delalloc_work(inode); 8531 if (!work) { 8532 iput(inode); 8533 ret = -ENOMEM; 8534 goto out; 8535 } 8536 list_add_tail(&work->list, &works); 8537 btrfs_queue_work(root->fs_info->flush_workers, 8538 &work->work); 8539 } else { 8540 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 8541 btrfs_add_delayed_iput(BTRFS_I(inode)); 8542 if (ret || wbc->nr_to_write <= 0) 8543 goto out; 8544 } 8545 cond_resched(); 8546 spin_lock(&root->delalloc_lock); 8547 } 8548 spin_unlock(&root->delalloc_lock); 8549 8550 out: 8551 list_for_each_entry_safe(work, next, &works, list) { 8552 list_del_init(&work->list); 8553 wait_for_completion(&work->completion); 8554 kfree(work); 8555 } 8556 8557 if (!list_empty(&splice)) { 8558 spin_lock(&root->delalloc_lock); 8559 list_splice_tail(&splice, &root->delalloc_inodes); 8560 spin_unlock(&root->delalloc_lock); 8561 } 8562 mutex_unlock(&root->delalloc_mutex); 8563 return ret; 8564 } 8565 8566 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 8567 { 8568 struct writeback_control wbc = { 8569 .nr_to_write = LONG_MAX, 8570 .sync_mode = WB_SYNC_NONE, 8571 .range_start = 0, 8572 .range_end = LLONG_MAX, 8573 }; 8574 struct btrfs_fs_info *fs_info = root->fs_info; 8575 8576 if (BTRFS_FS_ERROR(fs_info)) 8577 return -EROFS; 8578 8579 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 8580 } 8581 8582 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 8583 bool in_reclaim_context) 8584 { 8585 struct writeback_control wbc = { 8586 .nr_to_write = nr, 8587 .sync_mode = WB_SYNC_NONE, 8588 .range_start = 0, 8589 .range_end = LLONG_MAX, 8590 }; 8591 struct btrfs_root *root; 8592 LIST_HEAD(splice); 8593 int ret; 8594 8595 if (BTRFS_FS_ERROR(fs_info)) 8596 return -EROFS; 8597 8598 mutex_lock(&fs_info->delalloc_root_mutex); 8599 spin_lock(&fs_info->delalloc_root_lock); 8600 list_splice_init(&fs_info->delalloc_roots, &splice); 8601 while (!list_empty(&splice)) { 8602 /* 8603 * Reset nr_to_write here so we know that we're doing a full 8604 * flush. 8605 */ 8606 if (nr == LONG_MAX) 8607 wbc.nr_to_write = LONG_MAX; 8608 8609 root = list_first_entry(&splice, struct btrfs_root, 8610 delalloc_root); 8611 root = btrfs_grab_root(root); 8612 BUG_ON(!root); 8613 list_move_tail(&root->delalloc_root, 8614 &fs_info->delalloc_roots); 8615 spin_unlock(&fs_info->delalloc_root_lock); 8616 8617 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 8618 btrfs_put_root(root); 8619 if (ret < 0 || wbc.nr_to_write <= 0) 8620 goto out; 8621 spin_lock(&fs_info->delalloc_root_lock); 8622 } 8623 spin_unlock(&fs_info->delalloc_root_lock); 8624 8625 ret = 0; 8626 out: 8627 if (!list_empty(&splice)) { 8628 spin_lock(&fs_info->delalloc_root_lock); 8629 list_splice_tail(&splice, &fs_info->delalloc_roots); 8630 spin_unlock(&fs_info->delalloc_root_lock); 8631 } 8632 mutex_unlock(&fs_info->delalloc_root_mutex); 8633 return ret; 8634 } 8635 8636 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 8637 struct dentry *dentry, const char *symname) 8638 { 8639 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 8640 struct btrfs_trans_handle *trans; 8641 struct btrfs_root *root = BTRFS_I(dir)->root; 8642 struct btrfs_path *path; 8643 struct btrfs_key key; 8644 struct inode *inode; 8645 struct btrfs_new_inode_args new_inode_args = { 8646 .dir = dir, 8647 .dentry = dentry, 8648 }; 8649 unsigned int trans_num_items; 8650 int err; 8651 int name_len; 8652 int datasize; 8653 unsigned long ptr; 8654 struct btrfs_file_extent_item *ei; 8655 struct extent_buffer *leaf; 8656 8657 name_len = strlen(symname); 8658 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 8659 return -ENAMETOOLONG; 8660 8661 inode = new_inode(dir->i_sb); 8662 if (!inode) 8663 return -ENOMEM; 8664 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 8665 inode->i_op = &btrfs_symlink_inode_operations; 8666 inode_nohighmem(inode); 8667 inode->i_mapping->a_ops = &btrfs_aops; 8668 btrfs_i_size_write(BTRFS_I(inode), name_len); 8669 inode_set_bytes(inode, name_len); 8670 8671 new_inode_args.inode = inode; 8672 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 8673 if (err) 8674 goto out_inode; 8675 /* 1 additional item for the inline extent */ 8676 trans_num_items++; 8677 8678 trans = btrfs_start_transaction(root, trans_num_items); 8679 if (IS_ERR(trans)) { 8680 err = PTR_ERR(trans); 8681 goto out_new_inode_args; 8682 } 8683 8684 err = btrfs_create_new_inode(trans, &new_inode_args); 8685 if (err) 8686 goto out; 8687 8688 path = btrfs_alloc_path(); 8689 if (!path) { 8690 err = -ENOMEM; 8691 btrfs_abort_transaction(trans, err); 8692 discard_new_inode(inode); 8693 inode = NULL; 8694 goto out; 8695 } 8696 key.objectid = btrfs_ino(BTRFS_I(inode)); 8697 key.offset = 0; 8698 key.type = BTRFS_EXTENT_DATA_KEY; 8699 datasize = btrfs_file_extent_calc_inline_size(name_len); 8700 err = btrfs_insert_empty_item(trans, root, path, &key, 8701 datasize); 8702 if (err) { 8703 btrfs_abort_transaction(trans, err); 8704 btrfs_free_path(path); 8705 discard_new_inode(inode); 8706 inode = NULL; 8707 goto out; 8708 } 8709 leaf = path->nodes[0]; 8710 ei = btrfs_item_ptr(leaf, path->slots[0], 8711 struct btrfs_file_extent_item); 8712 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 8713 btrfs_set_file_extent_type(leaf, ei, 8714 BTRFS_FILE_EXTENT_INLINE); 8715 btrfs_set_file_extent_encryption(leaf, ei, 0); 8716 btrfs_set_file_extent_compression(leaf, ei, 0); 8717 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 8718 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 8719 8720 ptr = btrfs_file_extent_inline_start(ei); 8721 write_extent_buffer(leaf, symname, ptr, name_len); 8722 btrfs_free_path(path); 8723 8724 d_instantiate_new(dentry, inode); 8725 err = 0; 8726 out: 8727 btrfs_end_transaction(trans); 8728 btrfs_btree_balance_dirty(fs_info); 8729 out_new_inode_args: 8730 btrfs_new_inode_args_destroy(&new_inode_args); 8731 out_inode: 8732 if (err) 8733 iput(inode); 8734 return err; 8735 } 8736 8737 static struct btrfs_trans_handle *insert_prealloc_file_extent( 8738 struct btrfs_trans_handle *trans_in, 8739 struct btrfs_inode *inode, 8740 struct btrfs_key *ins, 8741 u64 file_offset) 8742 { 8743 struct btrfs_file_extent_item stack_fi; 8744 struct btrfs_replace_extent_info extent_info; 8745 struct btrfs_trans_handle *trans = trans_in; 8746 struct btrfs_path *path; 8747 u64 start = ins->objectid; 8748 u64 len = ins->offset; 8749 u64 qgroup_released = 0; 8750 int ret; 8751 8752 memset(&stack_fi, 0, sizeof(stack_fi)); 8753 8754 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 8755 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 8756 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 8757 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 8758 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 8759 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 8760 /* Encryption and other encoding is reserved and all 0 */ 8761 8762 ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released); 8763 if (ret < 0) 8764 return ERR_PTR(ret); 8765 8766 if (trans) { 8767 ret = insert_reserved_file_extent(trans, inode, 8768 file_offset, &stack_fi, 8769 true, qgroup_released); 8770 if (ret) 8771 goto free_qgroup; 8772 return trans; 8773 } 8774 8775 extent_info.disk_offset = start; 8776 extent_info.disk_len = len; 8777 extent_info.data_offset = 0; 8778 extent_info.data_len = len; 8779 extent_info.file_offset = file_offset; 8780 extent_info.extent_buf = (char *)&stack_fi; 8781 extent_info.is_new_extent = true; 8782 extent_info.update_times = true; 8783 extent_info.qgroup_reserved = qgroup_released; 8784 extent_info.insertions = 0; 8785 8786 path = btrfs_alloc_path(); 8787 if (!path) { 8788 ret = -ENOMEM; 8789 goto free_qgroup; 8790 } 8791 8792 ret = btrfs_replace_file_extents(inode, path, file_offset, 8793 file_offset + len - 1, &extent_info, 8794 &trans); 8795 btrfs_free_path(path); 8796 if (ret) 8797 goto free_qgroup; 8798 return trans; 8799 8800 free_qgroup: 8801 /* 8802 * We have released qgroup data range at the beginning of the function, 8803 * and normally qgroup_released bytes will be freed when committing 8804 * transaction. 8805 * But if we error out early, we have to free what we have released 8806 * or we leak qgroup data reservation. 8807 */ 8808 btrfs_qgroup_free_refroot(inode->root->fs_info, 8809 btrfs_root_id(inode->root), qgroup_released, 8810 BTRFS_QGROUP_RSV_DATA); 8811 return ERR_PTR(ret); 8812 } 8813 8814 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 8815 u64 start, u64 num_bytes, u64 min_size, 8816 loff_t actual_len, u64 *alloc_hint, 8817 struct btrfs_trans_handle *trans) 8818 { 8819 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 8820 struct extent_map *em; 8821 struct btrfs_root *root = BTRFS_I(inode)->root; 8822 struct btrfs_key ins; 8823 u64 cur_offset = start; 8824 u64 clear_offset = start; 8825 u64 i_size; 8826 u64 cur_bytes; 8827 u64 last_alloc = (u64)-1; 8828 int ret = 0; 8829 bool own_trans = true; 8830 u64 end = start + num_bytes - 1; 8831 8832 if (trans) 8833 own_trans = false; 8834 while (num_bytes > 0) { 8835 cur_bytes = min_t(u64, num_bytes, SZ_256M); 8836 cur_bytes = max(cur_bytes, min_size); 8837 /* 8838 * If we are severely fragmented we could end up with really 8839 * small allocations, so if the allocator is returning small 8840 * chunks lets make its job easier by only searching for those 8841 * sized chunks. 8842 */ 8843 cur_bytes = min(cur_bytes, last_alloc); 8844 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 8845 min_size, 0, *alloc_hint, &ins, 1, 0); 8846 if (ret) 8847 break; 8848 8849 /* 8850 * We've reserved this space, and thus converted it from 8851 * ->bytes_may_use to ->bytes_reserved. Any error that happens 8852 * from here on out we will only need to clear our reservation 8853 * for the remaining unreserved area, so advance our 8854 * clear_offset by our extent size. 8855 */ 8856 clear_offset += ins.offset; 8857 8858 last_alloc = ins.offset; 8859 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 8860 &ins, cur_offset); 8861 /* 8862 * Now that we inserted the prealloc extent we can finally 8863 * decrement the number of reservations in the block group. 8864 * If we did it before, we could race with relocation and have 8865 * relocation miss the reserved extent, making it fail later. 8866 */ 8867 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 8868 if (IS_ERR(trans)) { 8869 ret = PTR_ERR(trans); 8870 btrfs_free_reserved_extent(fs_info, ins.objectid, 8871 ins.offset, 0); 8872 break; 8873 } 8874 8875 em = alloc_extent_map(); 8876 if (!em) { 8877 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 8878 cur_offset + ins.offset - 1, false); 8879 btrfs_set_inode_full_sync(BTRFS_I(inode)); 8880 goto next; 8881 } 8882 8883 em->start = cur_offset; 8884 em->len = ins.offset; 8885 em->disk_bytenr = ins.objectid; 8886 em->offset = 0; 8887 em->disk_num_bytes = ins.offset; 8888 em->ram_bytes = ins.offset; 8889 em->flags |= EXTENT_FLAG_PREALLOC; 8890 em->generation = trans->transid; 8891 8892 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 8893 free_extent_map(em); 8894 next: 8895 num_bytes -= ins.offset; 8896 cur_offset += ins.offset; 8897 *alloc_hint = ins.objectid + ins.offset; 8898 8899 inode_inc_iversion(inode); 8900 inode_set_ctime_current(inode); 8901 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 8902 if (!(mode & FALLOC_FL_KEEP_SIZE) && 8903 (actual_len > inode->i_size) && 8904 (cur_offset > inode->i_size)) { 8905 if (cur_offset > actual_len) 8906 i_size = actual_len; 8907 else 8908 i_size = cur_offset; 8909 i_size_write(inode, i_size); 8910 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 8911 } 8912 8913 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 8914 8915 if (ret) { 8916 btrfs_abort_transaction(trans, ret); 8917 if (own_trans) 8918 btrfs_end_transaction(trans); 8919 break; 8920 } 8921 8922 if (own_trans) { 8923 btrfs_end_transaction(trans); 8924 trans = NULL; 8925 } 8926 } 8927 if (clear_offset < end) 8928 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 8929 end - clear_offset + 1); 8930 return ret; 8931 } 8932 8933 int btrfs_prealloc_file_range(struct inode *inode, int mode, 8934 u64 start, u64 num_bytes, u64 min_size, 8935 loff_t actual_len, u64 *alloc_hint) 8936 { 8937 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 8938 min_size, actual_len, alloc_hint, 8939 NULL); 8940 } 8941 8942 int btrfs_prealloc_file_range_trans(struct inode *inode, 8943 struct btrfs_trans_handle *trans, int mode, 8944 u64 start, u64 num_bytes, u64 min_size, 8945 loff_t actual_len, u64 *alloc_hint) 8946 { 8947 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 8948 min_size, actual_len, alloc_hint, trans); 8949 } 8950 8951 static int btrfs_permission(struct mnt_idmap *idmap, 8952 struct inode *inode, int mask) 8953 { 8954 struct btrfs_root *root = BTRFS_I(inode)->root; 8955 umode_t mode = inode->i_mode; 8956 8957 if (mask & MAY_WRITE && 8958 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 8959 if (btrfs_root_readonly(root)) 8960 return -EROFS; 8961 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 8962 return -EACCES; 8963 } 8964 return generic_permission(idmap, inode, mask); 8965 } 8966 8967 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 8968 struct file *file, umode_t mode) 8969 { 8970 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 8971 struct btrfs_trans_handle *trans; 8972 struct btrfs_root *root = BTRFS_I(dir)->root; 8973 struct inode *inode; 8974 struct btrfs_new_inode_args new_inode_args = { 8975 .dir = dir, 8976 .dentry = file->f_path.dentry, 8977 .orphan = true, 8978 }; 8979 unsigned int trans_num_items; 8980 int ret; 8981 8982 inode = new_inode(dir->i_sb); 8983 if (!inode) 8984 return -ENOMEM; 8985 inode_init_owner(idmap, inode, dir, mode); 8986 inode->i_fop = &btrfs_file_operations; 8987 inode->i_op = &btrfs_file_inode_operations; 8988 inode->i_mapping->a_ops = &btrfs_aops; 8989 8990 new_inode_args.inode = inode; 8991 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 8992 if (ret) 8993 goto out_inode; 8994 8995 trans = btrfs_start_transaction(root, trans_num_items); 8996 if (IS_ERR(trans)) { 8997 ret = PTR_ERR(trans); 8998 goto out_new_inode_args; 8999 } 9000 9001 ret = btrfs_create_new_inode(trans, &new_inode_args); 9002 9003 /* 9004 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9005 * set it to 1 because d_tmpfile() will issue a warning if the count is 9006 * 0, through: 9007 * 9008 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9009 */ 9010 set_nlink(inode, 1); 9011 9012 if (!ret) { 9013 d_tmpfile(file, inode); 9014 unlock_new_inode(inode); 9015 mark_inode_dirty(inode); 9016 } 9017 9018 btrfs_end_transaction(trans); 9019 btrfs_btree_balance_dirty(fs_info); 9020 out_new_inode_args: 9021 btrfs_new_inode_args_destroy(&new_inode_args); 9022 out_inode: 9023 if (ret) 9024 iput(inode); 9025 return finish_open_simple(file, ret); 9026 } 9027 9028 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9029 int compress_type) 9030 { 9031 switch (compress_type) { 9032 case BTRFS_COMPRESS_NONE: 9033 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9034 case BTRFS_COMPRESS_ZLIB: 9035 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9036 case BTRFS_COMPRESS_LZO: 9037 /* 9038 * The LZO format depends on the sector size. 64K is the maximum 9039 * sector size that we support. 9040 */ 9041 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9042 return -EINVAL; 9043 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9044 (fs_info->sectorsize_bits - 12); 9045 case BTRFS_COMPRESS_ZSTD: 9046 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9047 default: 9048 return -EUCLEAN; 9049 } 9050 } 9051 9052 static ssize_t btrfs_encoded_read_inline( 9053 struct kiocb *iocb, 9054 struct iov_iter *iter, u64 start, 9055 u64 lockend, 9056 struct extent_state **cached_state, 9057 u64 extent_start, size_t count, 9058 struct btrfs_ioctl_encoded_io_args *encoded, 9059 bool *unlocked) 9060 { 9061 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9062 struct btrfs_root *root = inode->root; 9063 struct btrfs_fs_info *fs_info = root->fs_info; 9064 struct extent_io_tree *io_tree = &inode->io_tree; 9065 struct btrfs_path *path; 9066 struct extent_buffer *leaf; 9067 struct btrfs_file_extent_item *item; 9068 u64 ram_bytes; 9069 unsigned long ptr; 9070 void *tmp; 9071 ssize_t ret; 9072 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9073 9074 path = btrfs_alloc_path(); 9075 if (!path) { 9076 ret = -ENOMEM; 9077 goto out; 9078 } 9079 9080 path->nowait = nowait; 9081 9082 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9083 extent_start, 0); 9084 if (ret) { 9085 if (ret > 0) { 9086 /* The extent item disappeared? */ 9087 ret = -EIO; 9088 } 9089 goto out; 9090 } 9091 leaf = path->nodes[0]; 9092 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9093 9094 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9095 ptr = btrfs_file_extent_inline_start(item); 9096 9097 encoded->len = min_t(u64, extent_start + ram_bytes, 9098 inode->vfs_inode.i_size) - iocb->ki_pos; 9099 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9100 btrfs_file_extent_compression(leaf, item)); 9101 if (ret < 0) 9102 goto out; 9103 encoded->compression = ret; 9104 if (encoded->compression) { 9105 size_t inline_size; 9106 9107 inline_size = btrfs_file_extent_inline_item_len(leaf, 9108 path->slots[0]); 9109 if (inline_size > count) { 9110 ret = -ENOBUFS; 9111 goto out; 9112 } 9113 count = inline_size; 9114 encoded->unencoded_len = ram_bytes; 9115 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9116 } else { 9117 count = min_t(u64, count, encoded->len); 9118 encoded->len = count; 9119 encoded->unencoded_len = count; 9120 ptr += iocb->ki_pos - extent_start; 9121 } 9122 9123 tmp = kmalloc(count, GFP_NOFS); 9124 if (!tmp) { 9125 ret = -ENOMEM; 9126 goto out; 9127 } 9128 read_extent_buffer(leaf, tmp, ptr, count); 9129 btrfs_release_path(path); 9130 unlock_extent(io_tree, start, lockend, cached_state); 9131 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9132 *unlocked = true; 9133 9134 ret = copy_to_iter(tmp, count, iter); 9135 if (ret != count) 9136 ret = -EFAULT; 9137 kfree(tmp); 9138 out: 9139 btrfs_free_path(path); 9140 return ret; 9141 } 9142 9143 struct btrfs_encoded_read_private { 9144 struct completion done; 9145 void *uring_ctx; 9146 refcount_t pending_refs; 9147 blk_status_t status; 9148 }; 9149 9150 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9151 { 9152 struct btrfs_encoded_read_private *priv = bbio->private; 9153 9154 if (bbio->bio.bi_status) { 9155 /* 9156 * The memory barrier implied by the atomic_dec_return() here 9157 * pairs with the memory barrier implied by the 9158 * atomic_dec_return() or io_wait_event() in 9159 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9160 * write is observed before the load of status in 9161 * btrfs_encoded_read_regular_fill_pages(). 9162 */ 9163 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9164 } 9165 if (refcount_dec_and_test(&priv->pending_refs)) { 9166 int err = blk_status_to_errno(READ_ONCE(priv->status)); 9167 9168 if (priv->uring_ctx) { 9169 btrfs_uring_read_extent_endio(priv->uring_ctx, err); 9170 kfree(priv); 9171 } else { 9172 complete(&priv->done); 9173 } 9174 } 9175 bio_put(&bbio->bio); 9176 } 9177 9178 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9179 u64 disk_bytenr, u64 disk_io_size, 9180 struct page **pages, void *uring_ctx) 9181 { 9182 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9183 struct btrfs_encoded_read_private *priv; 9184 unsigned long i = 0; 9185 struct btrfs_bio *bbio; 9186 int ret; 9187 9188 priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS); 9189 if (!priv) 9190 return -ENOMEM; 9191 9192 init_completion(&priv->done); 9193 refcount_set(&priv->pending_refs, 1); 9194 priv->status = 0; 9195 priv->uring_ctx = uring_ctx; 9196 9197 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9198 btrfs_encoded_read_endio, priv); 9199 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9200 bbio->inode = inode; 9201 9202 do { 9203 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 9204 9205 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 9206 refcount_inc(&priv->pending_refs); 9207 btrfs_submit_bbio(bbio, 0); 9208 9209 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9210 btrfs_encoded_read_endio, priv); 9211 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9212 bbio->inode = inode; 9213 continue; 9214 } 9215 9216 i++; 9217 disk_bytenr += bytes; 9218 disk_io_size -= bytes; 9219 } while (disk_io_size); 9220 9221 refcount_inc(&priv->pending_refs); 9222 btrfs_submit_bbio(bbio, 0); 9223 9224 if (uring_ctx) { 9225 if (refcount_dec_and_test(&priv->pending_refs)) { 9226 ret = blk_status_to_errno(READ_ONCE(priv->status)); 9227 btrfs_uring_read_extent_endio(uring_ctx, ret); 9228 kfree(priv); 9229 return ret; 9230 } 9231 9232 return -EIOCBQUEUED; 9233 } else { 9234 if (!refcount_dec_and_test(&priv->pending_refs)) 9235 wait_for_completion_io(&priv->done); 9236 /* See btrfs_encoded_read_endio() for ordering. */ 9237 ret = blk_status_to_errno(READ_ONCE(priv->status)); 9238 kfree(priv); 9239 return ret; 9240 } 9241 } 9242 9243 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter, 9244 u64 start, u64 lockend, 9245 struct extent_state **cached_state, 9246 u64 disk_bytenr, u64 disk_io_size, 9247 size_t count, bool compressed, bool *unlocked) 9248 { 9249 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9250 struct extent_io_tree *io_tree = &inode->io_tree; 9251 struct page **pages; 9252 unsigned long nr_pages, i; 9253 u64 cur; 9254 size_t page_offset; 9255 ssize_t ret; 9256 9257 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9258 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9259 if (!pages) 9260 return -ENOMEM; 9261 ret = btrfs_alloc_page_array(nr_pages, pages, false); 9262 if (ret) { 9263 ret = -ENOMEM; 9264 goto out; 9265 } 9266 9267 ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr, 9268 disk_io_size, pages, NULL); 9269 if (ret) 9270 goto out; 9271 9272 unlock_extent(io_tree, start, lockend, cached_state); 9273 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9274 *unlocked = true; 9275 9276 if (compressed) { 9277 i = 0; 9278 page_offset = 0; 9279 } else { 9280 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 9281 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 9282 } 9283 cur = 0; 9284 while (cur < count) { 9285 size_t bytes = min_t(size_t, count - cur, 9286 PAGE_SIZE - page_offset); 9287 9288 if (copy_page_to_iter(pages[i], page_offset, bytes, 9289 iter) != bytes) { 9290 ret = -EFAULT; 9291 goto out; 9292 } 9293 i++; 9294 cur += bytes; 9295 page_offset = 0; 9296 } 9297 ret = count; 9298 out: 9299 for (i = 0; i < nr_pages; i++) { 9300 if (pages[i]) 9301 __free_page(pages[i]); 9302 } 9303 kfree(pages); 9304 return ret; 9305 } 9306 9307 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 9308 struct btrfs_ioctl_encoded_io_args *encoded, 9309 struct extent_state **cached_state, 9310 u64 *disk_bytenr, u64 *disk_io_size) 9311 { 9312 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9313 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9314 struct extent_io_tree *io_tree = &inode->io_tree; 9315 ssize_t ret; 9316 size_t count = iov_iter_count(iter); 9317 u64 start, lockend; 9318 struct extent_map *em; 9319 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9320 bool unlocked = false; 9321 9322 file_accessed(iocb->ki_filp); 9323 9324 ret = btrfs_inode_lock(inode, 9325 BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0)); 9326 if (ret) 9327 return ret; 9328 9329 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 9330 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9331 return 0; 9332 } 9333 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 9334 /* 9335 * We don't know how long the extent containing iocb->ki_pos is, but if 9336 * it's compressed we know that it won't be longer than this. 9337 */ 9338 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 9339 9340 if (nowait) { 9341 struct btrfs_ordered_extent *ordered; 9342 9343 if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping, 9344 start, lockend)) { 9345 ret = -EAGAIN; 9346 goto out_unlock_inode; 9347 } 9348 9349 if (!try_lock_extent(io_tree, start, lockend, cached_state)) { 9350 ret = -EAGAIN; 9351 goto out_unlock_inode; 9352 } 9353 9354 ordered = btrfs_lookup_ordered_range(inode, start, 9355 lockend - start + 1); 9356 if (ordered) { 9357 btrfs_put_ordered_extent(ordered); 9358 unlock_extent(io_tree, start, lockend, cached_state); 9359 ret = -EAGAIN; 9360 goto out_unlock_inode; 9361 } 9362 } else { 9363 for (;;) { 9364 struct btrfs_ordered_extent *ordered; 9365 9366 ret = btrfs_wait_ordered_range(inode, start, 9367 lockend - start + 1); 9368 if (ret) 9369 goto out_unlock_inode; 9370 9371 lock_extent(io_tree, start, lockend, cached_state); 9372 ordered = btrfs_lookup_ordered_range(inode, start, 9373 lockend - start + 1); 9374 if (!ordered) 9375 break; 9376 btrfs_put_ordered_extent(ordered); 9377 unlock_extent(io_tree, start, lockend, cached_state); 9378 cond_resched(); 9379 } 9380 } 9381 9382 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1); 9383 if (IS_ERR(em)) { 9384 ret = PTR_ERR(em); 9385 goto out_unlock_extent; 9386 } 9387 9388 if (em->disk_bytenr == EXTENT_MAP_INLINE) { 9389 u64 extent_start = em->start; 9390 9391 /* 9392 * For inline extents we get everything we need out of the 9393 * extent item. 9394 */ 9395 free_extent_map(em); 9396 em = NULL; 9397 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 9398 cached_state, extent_start, 9399 count, encoded, &unlocked); 9400 goto out_unlock_extent; 9401 } 9402 9403 /* 9404 * We only want to return up to EOF even if the extent extends beyond 9405 * that. 9406 */ 9407 encoded->len = min_t(u64, extent_map_end(em), 9408 inode->vfs_inode.i_size) - iocb->ki_pos; 9409 if (em->disk_bytenr == EXTENT_MAP_HOLE || 9410 (em->flags & EXTENT_FLAG_PREALLOC)) { 9411 *disk_bytenr = EXTENT_MAP_HOLE; 9412 count = min_t(u64, count, encoded->len); 9413 encoded->len = count; 9414 encoded->unencoded_len = count; 9415 } else if (extent_map_is_compressed(em)) { 9416 *disk_bytenr = em->disk_bytenr; 9417 /* 9418 * Bail if the buffer isn't large enough to return the whole 9419 * compressed extent. 9420 */ 9421 if (em->disk_num_bytes > count) { 9422 ret = -ENOBUFS; 9423 goto out_em; 9424 } 9425 *disk_io_size = em->disk_num_bytes; 9426 count = em->disk_num_bytes; 9427 encoded->unencoded_len = em->ram_bytes; 9428 encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset); 9429 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9430 extent_map_compression(em)); 9431 if (ret < 0) 9432 goto out_em; 9433 encoded->compression = ret; 9434 } else { 9435 *disk_bytenr = extent_map_block_start(em) + (start - em->start); 9436 if (encoded->len > count) 9437 encoded->len = count; 9438 /* 9439 * Don't read beyond what we locked. This also limits the page 9440 * allocations that we'll do. 9441 */ 9442 *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 9443 count = start + *disk_io_size - iocb->ki_pos; 9444 encoded->len = count; 9445 encoded->unencoded_len = count; 9446 *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize); 9447 } 9448 free_extent_map(em); 9449 em = NULL; 9450 9451 if (*disk_bytenr == EXTENT_MAP_HOLE) { 9452 unlock_extent(io_tree, start, lockend, cached_state); 9453 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9454 unlocked = true; 9455 ret = iov_iter_zero(count, iter); 9456 if (ret != count) 9457 ret = -EFAULT; 9458 } else { 9459 ret = -EIOCBQUEUED; 9460 goto out_unlock_extent; 9461 } 9462 9463 out_em: 9464 free_extent_map(em); 9465 out_unlock_extent: 9466 /* Leave inode and extent locked if we need to do a read. */ 9467 if (!unlocked && ret != -EIOCBQUEUED) 9468 unlock_extent(io_tree, start, lockend, cached_state); 9469 out_unlock_inode: 9470 if (!unlocked && ret != -EIOCBQUEUED) 9471 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9472 return ret; 9473 } 9474 9475 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 9476 const struct btrfs_ioctl_encoded_io_args *encoded) 9477 { 9478 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9479 struct btrfs_root *root = inode->root; 9480 struct btrfs_fs_info *fs_info = root->fs_info; 9481 struct extent_io_tree *io_tree = &inode->io_tree; 9482 struct extent_changeset *data_reserved = NULL; 9483 struct extent_state *cached_state = NULL; 9484 struct btrfs_ordered_extent *ordered; 9485 struct btrfs_file_extent file_extent; 9486 int compression; 9487 size_t orig_count; 9488 u64 start, end; 9489 u64 num_bytes, ram_bytes, disk_num_bytes; 9490 unsigned long nr_folios, i; 9491 struct folio **folios; 9492 struct btrfs_key ins; 9493 bool extent_reserved = false; 9494 struct extent_map *em; 9495 ssize_t ret; 9496 9497 switch (encoded->compression) { 9498 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 9499 compression = BTRFS_COMPRESS_ZLIB; 9500 break; 9501 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 9502 compression = BTRFS_COMPRESS_ZSTD; 9503 break; 9504 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 9505 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 9506 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 9507 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 9508 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 9509 /* The sector size must match for LZO. */ 9510 if (encoded->compression - 9511 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 9512 fs_info->sectorsize_bits) 9513 return -EINVAL; 9514 compression = BTRFS_COMPRESS_LZO; 9515 break; 9516 default: 9517 return -EINVAL; 9518 } 9519 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 9520 return -EINVAL; 9521 9522 /* 9523 * Compressed extents should always have checksums, so error out if we 9524 * have a NOCOW file or inode was created while mounted with NODATASUM. 9525 */ 9526 if (inode->flags & BTRFS_INODE_NODATASUM) 9527 return -EINVAL; 9528 9529 orig_count = iov_iter_count(from); 9530 9531 /* The extent size must be sane. */ 9532 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 9533 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 9534 return -EINVAL; 9535 9536 /* 9537 * The compressed data must be smaller than the decompressed data. 9538 * 9539 * It's of course possible for data to compress to larger or the same 9540 * size, but the buffered I/O path falls back to no compression for such 9541 * data, and we don't want to break any assumptions by creating these 9542 * extents. 9543 * 9544 * Note that this is less strict than the current check we have that the 9545 * compressed data must be at least one sector smaller than the 9546 * decompressed data. We only want to enforce the weaker requirement 9547 * from old kernels that it is at least one byte smaller. 9548 */ 9549 if (orig_count >= encoded->unencoded_len) 9550 return -EINVAL; 9551 9552 /* The extent must start on a sector boundary. */ 9553 start = iocb->ki_pos; 9554 if (!IS_ALIGNED(start, fs_info->sectorsize)) 9555 return -EINVAL; 9556 9557 /* 9558 * The extent must end on a sector boundary. However, we allow a write 9559 * which ends at or extends i_size to have an unaligned length; we round 9560 * up the extent size and set i_size to the unaligned end. 9561 */ 9562 if (start + encoded->len < inode->vfs_inode.i_size && 9563 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 9564 return -EINVAL; 9565 9566 /* Finally, the offset in the unencoded data must be sector-aligned. */ 9567 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 9568 return -EINVAL; 9569 9570 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 9571 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 9572 end = start + num_bytes - 1; 9573 9574 /* 9575 * If the extent cannot be inline, the compressed data on disk must be 9576 * sector-aligned. For convenience, we extend it with zeroes if it 9577 * isn't. 9578 */ 9579 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 9580 nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 9581 folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT); 9582 if (!folios) 9583 return -ENOMEM; 9584 for (i = 0; i < nr_folios; i++) { 9585 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 9586 char *kaddr; 9587 9588 folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0); 9589 if (!folios[i]) { 9590 ret = -ENOMEM; 9591 goto out_folios; 9592 } 9593 kaddr = kmap_local_folio(folios[i], 0); 9594 if (copy_from_iter(kaddr, bytes, from) != bytes) { 9595 kunmap_local(kaddr); 9596 ret = -EFAULT; 9597 goto out_folios; 9598 } 9599 if (bytes < PAGE_SIZE) 9600 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 9601 kunmap_local(kaddr); 9602 } 9603 9604 for (;;) { 9605 struct btrfs_ordered_extent *ordered; 9606 9607 ret = btrfs_wait_ordered_range(inode, start, num_bytes); 9608 if (ret) 9609 goto out_folios; 9610 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 9611 start >> PAGE_SHIFT, 9612 end >> PAGE_SHIFT); 9613 if (ret) 9614 goto out_folios; 9615 lock_extent(io_tree, start, end, &cached_state); 9616 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 9617 if (!ordered && 9618 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 9619 break; 9620 if (ordered) 9621 btrfs_put_ordered_extent(ordered); 9622 unlock_extent(io_tree, start, end, &cached_state); 9623 cond_resched(); 9624 } 9625 9626 /* 9627 * We don't use the higher-level delalloc space functions because our 9628 * num_bytes and disk_num_bytes are different. 9629 */ 9630 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 9631 if (ret) 9632 goto out_unlock; 9633 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 9634 if (ret) 9635 goto out_free_data_space; 9636 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 9637 false); 9638 if (ret) 9639 goto out_qgroup_free_data; 9640 9641 /* Try an inline extent first. */ 9642 if (encoded->unencoded_len == encoded->len && 9643 encoded->unencoded_offset == 0 && 9644 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) { 9645 ret = __cow_file_range_inline(inode, encoded->len, 9646 orig_count, compression, folios[0], 9647 true); 9648 if (ret <= 0) { 9649 if (ret == 0) 9650 ret = orig_count; 9651 goto out_delalloc_release; 9652 } 9653 } 9654 9655 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 9656 disk_num_bytes, 0, 0, &ins, 1, 1); 9657 if (ret) 9658 goto out_delalloc_release; 9659 extent_reserved = true; 9660 9661 file_extent.disk_bytenr = ins.objectid; 9662 file_extent.disk_num_bytes = ins.offset; 9663 file_extent.num_bytes = num_bytes; 9664 file_extent.ram_bytes = ram_bytes; 9665 file_extent.offset = encoded->unencoded_offset; 9666 file_extent.compression = compression; 9667 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED); 9668 if (IS_ERR(em)) { 9669 ret = PTR_ERR(em); 9670 goto out_free_reserved; 9671 } 9672 free_extent_map(em); 9673 9674 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 9675 (1 << BTRFS_ORDERED_ENCODED) | 9676 (1 << BTRFS_ORDERED_COMPRESSED)); 9677 if (IS_ERR(ordered)) { 9678 btrfs_drop_extent_map_range(inode, start, end, false); 9679 ret = PTR_ERR(ordered); 9680 goto out_free_reserved; 9681 } 9682 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9683 9684 if (start + encoded->len > inode->vfs_inode.i_size) 9685 i_size_write(&inode->vfs_inode, start + encoded->len); 9686 9687 unlock_extent(io_tree, start, end, &cached_state); 9688 9689 btrfs_delalloc_release_extents(inode, num_bytes); 9690 9691 btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false); 9692 ret = orig_count; 9693 goto out; 9694 9695 out_free_reserved: 9696 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9697 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 9698 out_delalloc_release: 9699 btrfs_delalloc_release_extents(inode, num_bytes); 9700 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 9701 out_qgroup_free_data: 9702 if (ret < 0) 9703 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL); 9704 out_free_data_space: 9705 /* 9706 * If btrfs_reserve_extent() succeeded, then we already decremented 9707 * bytes_may_use. 9708 */ 9709 if (!extent_reserved) 9710 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 9711 out_unlock: 9712 unlock_extent(io_tree, start, end, &cached_state); 9713 out_folios: 9714 for (i = 0; i < nr_folios; i++) { 9715 if (folios[i]) 9716 folio_put(folios[i]); 9717 } 9718 kvfree(folios); 9719 out: 9720 if (ret >= 0) 9721 iocb->ki_pos += encoded->len; 9722 return ret; 9723 } 9724 9725 #ifdef CONFIG_SWAP 9726 /* 9727 * Add an entry indicating a block group or device which is pinned by a 9728 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 9729 * negative errno on failure. 9730 */ 9731 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 9732 bool is_block_group) 9733 { 9734 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9735 struct btrfs_swapfile_pin *sp, *entry; 9736 struct rb_node **p; 9737 struct rb_node *parent = NULL; 9738 9739 sp = kmalloc(sizeof(*sp), GFP_NOFS); 9740 if (!sp) 9741 return -ENOMEM; 9742 sp->ptr = ptr; 9743 sp->inode = inode; 9744 sp->is_block_group = is_block_group; 9745 sp->bg_extent_count = 1; 9746 9747 spin_lock(&fs_info->swapfile_pins_lock); 9748 p = &fs_info->swapfile_pins.rb_node; 9749 while (*p) { 9750 parent = *p; 9751 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 9752 if (sp->ptr < entry->ptr || 9753 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 9754 p = &(*p)->rb_left; 9755 } else if (sp->ptr > entry->ptr || 9756 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 9757 p = &(*p)->rb_right; 9758 } else { 9759 if (is_block_group) 9760 entry->bg_extent_count++; 9761 spin_unlock(&fs_info->swapfile_pins_lock); 9762 kfree(sp); 9763 return 1; 9764 } 9765 } 9766 rb_link_node(&sp->node, parent, p); 9767 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 9768 spin_unlock(&fs_info->swapfile_pins_lock); 9769 return 0; 9770 } 9771 9772 /* Free all of the entries pinned by this swapfile. */ 9773 static void btrfs_free_swapfile_pins(struct inode *inode) 9774 { 9775 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9776 struct btrfs_swapfile_pin *sp; 9777 struct rb_node *node, *next; 9778 9779 spin_lock(&fs_info->swapfile_pins_lock); 9780 node = rb_first(&fs_info->swapfile_pins); 9781 while (node) { 9782 next = rb_next(node); 9783 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 9784 if (sp->inode == inode) { 9785 rb_erase(&sp->node, &fs_info->swapfile_pins); 9786 if (sp->is_block_group) { 9787 btrfs_dec_block_group_swap_extents(sp->ptr, 9788 sp->bg_extent_count); 9789 btrfs_put_block_group(sp->ptr); 9790 } 9791 kfree(sp); 9792 } 9793 node = next; 9794 } 9795 spin_unlock(&fs_info->swapfile_pins_lock); 9796 } 9797 9798 struct btrfs_swap_info { 9799 u64 start; 9800 u64 block_start; 9801 u64 block_len; 9802 u64 lowest_ppage; 9803 u64 highest_ppage; 9804 unsigned long nr_pages; 9805 int nr_extents; 9806 }; 9807 9808 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 9809 struct btrfs_swap_info *bsi) 9810 { 9811 unsigned long nr_pages; 9812 unsigned long max_pages; 9813 u64 first_ppage, first_ppage_reported, next_ppage; 9814 int ret; 9815 9816 /* 9817 * Our swapfile may have had its size extended after the swap header was 9818 * written. In that case activating the swapfile should not go beyond 9819 * the max size set in the swap header. 9820 */ 9821 if (bsi->nr_pages >= sis->max) 9822 return 0; 9823 9824 max_pages = sis->max - bsi->nr_pages; 9825 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 9826 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 9827 9828 if (first_ppage >= next_ppage) 9829 return 0; 9830 nr_pages = next_ppage - first_ppage; 9831 nr_pages = min(nr_pages, max_pages); 9832 9833 first_ppage_reported = first_ppage; 9834 if (bsi->start == 0) 9835 first_ppage_reported++; 9836 if (bsi->lowest_ppage > first_ppage_reported) 9837 bsi->lowest_ppage = first_ppage_reported; 9838 if (bsi->highest_ppage < (next_ppage - 1)) 9839 bsi->highest_ppage = next_ppage - 1; 9840 9841 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 9842 if (ret < 0) 9843 return ret; 9844 bsi->nr_extents += ret; 9845 bsi->nr_pages += nr_pages; 9846 return 0; 9847 } 9848 9849 static void btrfs_swap_deactivate(struct file *file) 9850 { 9851 struct inode *inode = file_inode(file); 9852 9853 btrfs_free_swapfile_pins(inode); 9854 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 9855 } 9856 9857 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 9858 sector_t *span) 9859 { 9860 struct inode *inode = file_inode(file); 9861 struct btrfs_root *root = BTRFS_I(inode)->root; 9862 struct btrfs_fs_info *fs_info = root->fs_info; 9863 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 9864 struct extent_state *cached_state = NULL; 9865 struct btrfs_chunk_map *map = NULL; 9866 struct btrfs_device *device = NULL; 9867 struct btrfs_swap_info bsi = { 9868 .lowest_ppage = (sector_t)-1ULL, 9869 }; 9870 struct btrfs_backref_share_check_ctx *backref_ctx = NULL; 9871 struct btrfs_path *path = NULL; 9872 int ret = 0; 9873 u64 isize; 9874 u64 prev_extent_end = 0; 9875 9876 /* 9877 * Acquire the inode's mmap lock to prevent races with memory mapped 9878 * writes, as they could happen after we flush delalloc below and before 9879 * we lock the extent range further below. The inode was already locked 9880 * up in the call chain. 9881 */ 9882 btrfs_assert_inode_locked(BTRFS_I(inode)); 9883 down_write(&BTRFS_I(inode)->i_mmap_lock); 9884 9885 /* 9886 * If the swap file was just created, make sure delalloc is done. If the 9887 * file changes again after this, the user is doing something stupid and 9888 * we don't really care. 9889 */ 9890 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 9891 if (ret) 9892 goto out_unlock_mmap; 9893 9894 /* 9895 * The inode is locked, so these flags won't change after we check them. 9896 */ 9897 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 9898 btrfs_warn(fs_info, "swapfile must not be compressed"); 9899 ret = -EINVAL; 9900 goto out_unlock_mmap; 9901 } 9902 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 9903 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 9904 ret = -EINVAL; 9905 goto out_unlock_mmap; 9906 } 9907 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 9908 btrfs_warn(fs_info, "swapfile must not be checksummed"); 9909 ret = -EINVAL; 9910 goto out_unlock_mmap; 9911 } 9912 9913 path = btrfs_alloc_path(); 9914 backref_ctx = btrfs_alloc_backref_share_check_ctx(); 9915 if (!path || !backref_ctx) { 9916 ret = -ENOMEM; 9917 goto out_unlock_mmap; 9918 } 9919 9920 /* 9921 * Balance or device remove/replace/resize can move stuff around from 9922 * under us. The exclop protection makes sure they aren't running/won't 9923 * run concurrently while we are mapping the swap extents, and 9924 * fs_info->swapfile_pins prevents them from running while the swap 9925 * file is active and moving the extents. Note that this also prevents 9926 * a concurrent device add which isn't actually necessary, but it's not 9927 * really worth the trouble to allow it. 9928 */ 9929 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 9930 btrfs_warn(fs_info, 9931 "cannot activate swapfile while exclusive operation is running"); 9932 ret = -EBUSY; 9933 goto out_unlock_mmap; 9934 } 9935 9936 /* 9937 * Prevent snapshot creation while we are activating the swap file. 9938 * We do not want to race with snapshot creation. If snapshot creation 9939 * already started before we bumped nr_swapfiles from 0 to 1 and 9940 * completes before the first write into the swap file after it is 9941 * activated, than that write would fallback to COW. 9942 */ 9943 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { 9944 btrfs_exclop_finish(fs_info); 9945 btrfs_warn(fs_info, 9946 "cannot activate swapfile because snapshot creation is in progress"); 9947 ret = -EINVAL; 9948 goto out_unlock_mmap; 9949 } 9950 /* 9951 * Snapshots can create extents which require COW even if NODATACOW is 9952 * set. We use this counter to prevent snapshots. We must increment it 9953 * before walking the extents because we don't want a concurrent 9954 * snapshot to run after we've already checked the extents. 9955 * 9956 * It is possible that subvolume is marked for deletion but still not 9957 * removed yet. To prevent this race, we check the root status before 9958 * activating the swapfile. 9959 */ 9960 spin_lock(&root->root_item_lock); 9961 if (btrfs_root_dead(root)) { 9962 spin_unlock(&root->root_item_lock); 9963 9964 btrfs_drew_write_unlock(&root->snapshot_lock); 9965 btrfs_exclop_finish(fs_info); 9966 btrfs_warn(fs_info, 9967 "cannot activate swapfile because subvolume %llu is being deleted", 9968 btrfs_root_id(root)); 9969 ret = -EPERM; 9970 goto out_unlock_mmap; 9971 } 9972 atomic_inc(&root->nr_swapfiles); 9973 spin_unlock(&root->root_item_lock); 9974 9975 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); 9976 9977 lock_extent(io_tree, 0, isize - 1, &cached_state); 9978 while (prev_extent_end < isize) { 9979 struct btrfs_key key; 9980 struct extent_buffer *leaf; 9981 struct btrfs_file_extent_item *ei; 9982 struct btrfs_block_group *bg; 9983 u64 logical_block_start; 9984 u64 physical_block_start; 9985 u64 extent_gen; 9986 u64 disk_bytenr; 9987 u64 len; 9988 9989 key.objectid = btrfs_ino(BTRFS_I(inode)); 9990 key.type = BTRFS_EXTENT_DATA_KEY; 9991 key.offset = prev_extent_end; 9992 9993 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 9994 if (ret < 0) 9995 goto out; 9996 9997 /* 9998 * If key not found it means we have an implicit hole (NO_HOLES 9999 * is enabled). 10000 */ 10001 if (ret > 0) { 10002 btrfs_warn(fs_info, "swapfile must not have holes"); 10003 ret = -EINVAL; 10004 goto out; 10005 } 10006 10007 leaf = path->nodes[0]; 10008 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 10009 10010 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) { 10011 /* 10012 * It's unlikely we'll ever actually find ourselves 10013 * here, as a file small enough to fit inline won't be 10014 * big enough to store more than the swap header, but in 10015 * case something changes in the future, let's catch it 10016 * here rather than later. 10017 */ 10018 btrfs_warn(fs_info, "swapfile must not be inline"); 10019 ret = -EINVAL; 10020 goto out; 10021 } 10022 10023 if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) { 10024 btrfs_warn(fs_info, "swapfile must not be compressed"); 10025 ret = -EINVAL; 10026 goto out; 10027 } 10028 10029 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); 10030 if (disk_bytenr == 0) { 10031 btrfs_warn(fs_info, "swapfile must not have holes"); 10032 ret = -EINVAL; 10033 goto out; 10034 } 10035 10036 logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei); 10037 extent_gen = btrfs_file_extent_generation(leaf, ei); 10038 prev_extent_end = btrfs_file_extent_end(path); 10039 10040 if (prev_extent_end > isize) 10041 len = isize - key.offset; 10042 else 10043 len = btrfs_file_extent_num_bytes(leaf, ei); 10044 10045 backref_ctx->curr_leaf_bytenr = leaf->start; 10046 10047 /* 10048 * Don't need the path anymore, release to avoid deadlocks when 10049 * calling btrfs_is_data_extent_shared() because when joining a 10050 * transaction it can block waiting for the current one's commit 10051 * which in turn may be trying to lock the same leaf to flush 10052 * delayed items for example. 10053 */ 10054 btrfs_release_path(path); 10055 10056 ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr, 10057 extent_gen, backref_ctx); 10058 if (ret < 0) { 10059 goto out; 10060 } else if (ret > 0) { 10061 btrfs_warn(fs_info, 10062 "swapfile must not be copy-on-write"); 10063 ret = -EINVAL; 10064 goto out; 10065 } 10066 10067 map = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10068 if (IS_ERR(map)) { 10069 ret = PTR_ERR(map); 10070 goto out; 10071 } 10072 10073 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10074 btrfs_warn(fs_info, 10075 "swapfile must have single data profile"); 10076 ret = -EINVAL; 10077 goto out; 10078 } 10079 10080 if (device == NULL) { 10081 device = map->stripes[0].dev; 10082 ret = btrfs_add_swapfile_pin(inode, device, false); 10083 if (ret == 1) 10084 ret = 0; 10085 else if (ret) 10086 goto out; 10087 } else if (device != map->stripes[0].dev) { 10088 btrfs_warn(fs_info, "swapfile must be on one device"); 10089 ret = -EINVAL; 10090 goto out; 10091 } 10092 10093 physical_block_start = (map->stripes[0].physical + 10094 (logical_block_start - map->start)); 10095 btrfs_free_chunk_map(map); 10096 map = NULL; 10097 10098 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10099 if (!bg) { 10100 btrfs_warn(fs_info, 10101 "could not find block group containing swapfile"); 10102 ret = -EINVAL; 10103 goto out; 10104 } 10105 10106 if (!btrfs_inc_block_group_swap_extents(bg)) { 10107 btrfs_warn(fs_info, 10108 "block group for swapfile at %llu is read-only%s", 10109 bg->start, 10110 atomic_read(&fs_info->scrubs_running) ? 10111 " (scrub running)" : ""); 10112 btrfs_put_block_group(bg); 10113 ret = -EINVAL; 10114 goto out; 10115 } 10116 10117 ret = btrfs_add_swapfile_pin(inode, bg, true); 10118 if (ret) { 10119 btrfs_put_block_group(bg); 10120 if (ret == 1) 10121 ret = 0; 10122 else 10123 goto out; 10124 } 10125 10126 if (bsi.block_len && 10127 bsi.block_start + bsi.block_len == physical_block_start) { 10128 bsi.block_len += len; 10129 } else { 10130 if (bsi.block_len) { 10131 ret = btrfs_add_swap_extent(sis, &bsi); 10132 if (ret) 10133 goto out; 10134 } 10135 bsi.start = key.offset; 10136 bsi.block_start = physical_block_start; 10137 bsi.block_len = len; 10138 } 10139 10140 if (fatal_signal_pending(current)) { 10141 ret = -EINTR; 10142 goto out; 10143 } 10144 10145 cond_resched(); 10146 } 10147 10148 if (bsi.block_len) 10149 ret = btrfs_add_swap_extent(sis, &bsi); 10150 10151 out: 10152 if (!IS_ERR_OR_NULL(map)) 10153 btrfs_free_chunk_map(map); 10154 10155 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10156 10157 if (ret) 10158 btrfs_swap_deactivate(file); 10159 10160 btrfs_drew_write_unlock(&root->snapshot_lock); 10161 10162 btrfs_exclop_finish(fs_info); 10163 10164 out_unlock_mmap: 10165 up_write(&BTRFS_I(inode)->i_mmap_lock); 10166 btrfs_free_backref_share_ctx(backref_ctx); 10167 btrfs_free_path(path); 10168 if (ret) 10169 return ret; 10170 10171 if (device) 10172 sis->bdev = device->bdev; 10173 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10174 sis->max = bsi.nr_pages; 10175 sis->pages = bsi.nr_pages - 1; 10176 sis->highest_bit = bsi.nr_pages - 1; 10177 return bsi.nr_extents; 10178 } 10179 #else 10180 static void btrfs_swap_deactivate(struct file *file) 10181 { 10182 } 10183 10184 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10185 sector_t *span) 10186 { 10187 return -EOPNOTSUPP; 10188 } 10189 #endif 10190 10191 /* 10192 * Update the number of bytes used in the VFS' inode. When we replace extents in 10193 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10194 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10195 * always get a correct value. 10196 */ 10197 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10198 const u64 add_bytes, 10199 const u64 del_bytes) 10200 { 10201 if (add_bytes == del_bytes) 10202 return; 10203 10204 spin_lock(&inode->lock); 10205 if (del_bytes > 0) 10206 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10207 if (add_bytes > 0) 10208 inode_add_bytes(&inode->vfs_inode, add_bytes); 10209 spin_unlock(&inode->lock); 10210 } 10211 10212 /* 10213 * Verify that there are no ordered extents for a given file range. 10214 * 10215 * @inode: The target inode. 10216 * @start: Start offset of the file range, should be sector size aligned. 10217 * @end: End offset (inclusive) of the file range, its value +1 should be 10218 * sector size aligned. 10219 * 10220 * This should typically be used for cases where we locked an inode's VFS lock in 10221 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10222 * we have flushed all delalloc in the range, we have waited for all ordered 10223 * extents in the range to complete and finally we have locked the file range in 10224 * the inode's io_tree. 10225 */ 10226 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) 10227 { 10228 struct btrfs_root *root = inode->root; 10229 struct btrfs_ordered_extent *ordered; 10230 10231 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 10232 return; 10233 10234 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); 10235 if (ordered) { 10236 btrfs_err(root->fs_info, 10237 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", 10238 start, end, btrfs_ino(inode), btrfs_root_id(root), 10239 ordered->file_offset, 10240 ordered->file_offset + ordered->num_bytes - 1); 10241 btrfs_put_ordered_extent(ordered); 10242 } 10243 10244 ASSERT(ordered == NULL); 10245 } 10246 10247 /* 10248 * Find the first inode with a minimum number. 10249 * 10250 * @root: The root to search for. 10251 * @min_ino: The minimum inode number. 10252 * 10253 * Find the first inode in the @root with a number >= @min_ino and return it. 10254 * Returns NULL if no such inode found. 10255 */ 10256 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino) 10257 { 10258 struct btrfs_inode *inode; 10259 unsigned long from = min_ino; 10260 10261 xa_lock(&root->inodes); 10262 while (true) { 10263 inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); 10264 if (!inode) 10265 break; 10266 if (igrab(&inode->vfs_inode)) 10267 break; 10268 10269 from = btrfs_ino(inode) + 1; 10270 cond_resched_lock(&root->inodes.xa_lock); 10271 } 10272 xa_unlock(&root->inodes); 10273 10274 return inode; 10275 } 10276 10277 static const struct inode_operations btrfs_dir_inode_operations = { 10278 .getattr = btrfs_getattr, 10279 .lookup = btrfs_lookup, 10280 .create = btrfs_create, 10281 .unlink = btrfs_unlink, 10282 .link = btrfs_link, 10283 .mkdir = btrfs_mkdir, 10284 .rmdir = btrfs_rmdir, 10285 .rename = btrfs_rename2, 10286 .symlink = btrfs_symlink, 10287 .setattr = btrfs_setattr, 10288 .mknod = btrfs_mknod, 10289 .listxattr = btrfs_listxattr, 10290 .permission = btrfs_permission, 10291 .get_inode_acl = btrfs_get_acl, 10292 .set_acl = btrfs_set_acl, 10293 .update_time = btrfs_update_time, 10294 .tmpfile = btrfs_tmpfile, 10295 .fileattr_get = btrfs_fileattr_get, 10296 .fileattr_set = btrfs_fileattr_set, 10297 }; 10298 10299 static const struct file_operations btrfs_dir_file_operations = { 10300 .llseek = btrfs_dir_llseek, 10301 .read = generic_read_dir, 10302 .iterate_shared = btrfs_real_readdir, 10303 .open = btrfs_opendir, 10304 .unlocked_ioctl = btrfs_ioctl, 10305 #ifdef CONFIG_COMPAT 10306 .compat_ioctl = btrfs_compat_ioctl, 10307 #endif 10308 .release = btrfs_release_file, 10309 .fsync = btrfs_sync_file, 10310 }; 10311 10312 /* 10313 * btrfs doesn't support the bmap operation because swapfiles 10314 * use bmap to make a mapping of extents in the file. They assume 10315 * these extents won't change over the life of the file and they 10316 * use the bmap result to do IO directly to the drive. 10317 * 10318 * the btrfs bmap call would return logical addresses that aren't 10319 * suitable for IO and they also will change frequently as COW 10320 * operations happen. So, swapfile + btrfs == corruption. 10321 * 10322 * For now we're avoiding this by dropping bmap. 10323 */ 10324 static const struct address_space_operations btrfs_aops = { 10325 .read_folio = btrfs_read_folio, 10326 .writepages = btrfs_writepages, 10327 .readahead = btrfs_readahead, 10328 .invalidate_folio = btrfs_invalidate_folio, 10329 .launder_folio = btrfs_launder_folio, 10330 .release_folio = btrfs_release_folio, 10331 .migrate_folio = btrfs_migrate_folio, 10332 .dirty_folio = filemap_dirty_folio, 10333 .error_remove_folio = generic_error_remove_folio, 10334 .swap_activate = btrfs_swap_activate, 10335 .swap_deactivate = btrfs_swap_deactivate, 10336 }; 10337 10338 static const struct inode_operations btrfs_file_inode_operations = { 10339 .getattr = btrfs_getattr, 10340 .setattr = btrfs_setattr, 10341 .listxattr = btrfs_listxattr, 10342 .permission = btrfs_permission, 10343 .fiemap = btrfs_fiemap, 10344 .get_inode_acl = btrfs_get_acl, 10345 .set_acl = btrfs_set_acl, 10346 .update_time = btrfs_update_time, 10347 .fileattr_get = btrfs_fileattr_get, 10348 .fileattr_set = btrfs_fileattr_set, 10349 }; 10350 static const struct inode_operations btrfs_special_inode_operations = { 10351 .getattr = btrfs_getattr, 10352 .setattr = btrfs_setattr, 10353 .permission = btrfs_permission, 10354 .listxattr = btrfs_listxattr, 10355 .get_inode_acl = btrfs_get_acl, 10356 .set_acl = btrfs_set_acl, 10357 .update_time = btrfs_update_time, 10358 }; 10359 static const struct inode_operations btrfs_symlink_inode_operations = { 10360 .get_link = page_get_link, 10361 .getattr = btrfs_getattr, 10362 .setattr = btrfs_setattr, 10363 .permission = btrfs_permission, 10364 .listxattr = btrfs_listxattr, 10365 .update_time = btrfs_update_time, 10366 }; 10367 10368 const struct dentry_operations btrfs_dentry_operations = { 10369 .d_delete = btrfs_dentry_delete, 10370 }; 10371