// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"
#include "delayed-inode.h"

#define COW_FILE_RANGE_KEEP_LOCKED	(1UL << 0)
#define COW_FILE_RANGE_NO_INLINE	(1UL << 1)

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because with our io_tree
 * we hold the tree lock and then take the inode lock when setting delalloc.
 * These two things are unrelated, so make a class for the file_extent_tree so
 * we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without filename.
		 */
		btrfs_warn(fs_info,
		"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0) {
		btrfs_put_root(local_root);
		goto err;
	}

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	return ret;
}

/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * If the backref lookup fails, fall back to a plain error message without
 * filenames.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
		      btrfs_root_id(inode->root),
		      btrfs_ino(inode), file_off, logical,
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
		      mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		btrfs_release_path(&path);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				      logical, mirror_num,
				      (ref_level ? "node" : "leaf"),
				      ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_is_data_reloc_root(root))
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
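
/*
 * Typical pairing of the ilock helpers above (an illustrative sketch, not a
 * caller that exists in this file; error handling elided):
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret == -EAGAIN)
 *		return ret;	(lock contended, caller may retry later)
 *	... read-only work under the shared i_rwsem ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *
 * The same ilock_flags must be passed to btrfs_inode_unlock() so the unlock
 * matches the shared/exclusive mode that was acquired.
 */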

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 u64 offset, u64 bytes)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			index++;
			continue;
		}

		index = folio_next_index(folio);
		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
						offset, bytes);
		folio_put(folio);
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int ret;

	if (args->default_acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (ret)
			return ret;
	}
	if (args->acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (ret)
			return ret;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector. Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a sector.
	 * That's also why we only need one page as the parameter.
	 */
	if (compressed_folio)
		ASSERT(compressed_size <= sectorsize);
	else
		ASSERT(compressed_size == 0);

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = 0;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		struct folio *folio;

		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
		ASSERT(!IS_ERR(folio));
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_folio(folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		folio_put(folio);
	}
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}

static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We do not allow a non-compressed extent to be as large as block size. */
	if (data_len >= fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	/* Encrypted file cannot be inlined. */
	if (IS_ENCRYPTED(&inode->vfs_inode))
		return false;

	return true;
}
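
/*
 * Worked example of the checks above (illustrative only, assuming a 4K
 * sectorsize and a max_inline of 2048 bytes): a 1000-byte file written at
 * offset 0 with compressed_size == 0 passes every check (data_len == 1000 is
 * below both limits and size covers the whole file), while a full 4096-byte
 * block fails the "data_len >= sectorsize" check and any write at a non-zero
 * offset fails immediately. The real limits come from fs_info at runtime.
 */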

/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 *
 * If being used directly, you must have already checked we're allowed to cow
 * the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space; an inline extent doesn't
	 * count as a data extent, so its reservation must be freed directly
	 * here. And at reserve time it's always aligned to page size, so just
	 * free one page here.
	 *
	 * If we fall back to non-inline (ret == 1) due to -ENOSPC, then we
	 * need to keep the data reservation.
	 */
	if (ret <= 0)
		btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
	btrfs_free_path(path);
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}

static noinline int cow_file_range_inline(struct btrfs_inode *inode,
					  struct folio *locked_folio,
					  u64 offset, u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	/*
	 * In the successful case (ret == 0 here), cow_file_range will return 1.
	 *
	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
	 * is treated as a short circuited success and does not unlock the folio,
	 * so we must do it here.
	 *
	 * In the failure case, the locked_folio does get unlocked by
	 * btrfs_folio_end_all_writers, which asserts that it is still locked
	 * at that point, so we must *not* unlock it here.
	 *
	 * The other two callsites in compress_file_range do not have a
	 * locked_folio, so they are not relevant to this logic.
	 */
	if (ret == 0)
		locked_folio = NULL;

	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
				     clear_flags, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct folio **folios;
	unsigned long nr_folios;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct folio *locked_folio;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct folio **folios,
				     unsigned long nr_folios,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->folios = folios;
	async_extent->nr_folios = nr_folios;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
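
/*
 * The decision made by inode_need_compress() below boils down to, in order
 * (a condensed restatement of the code, not an independent specification):
 *
 *   1) inode cannot compress at all                 -> no (and warn)
 *   2) defrag ioctl disabled compression            -> no
 *   3) defrag ioctl requested a compression type    -> yes
 *   4) mount -o compress-force                      -> yes
 *   5) inode flagged NOCOMPRESS (bad ratios seen)   -> no
 *   6) compress mount option / inode flag/property  -> ask the heuristic
 *   7) otherwise                                    -> no
 */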

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
		return 0;
	}

	/* Defrag ioctl takes precedence over mount options and properties. */
	if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
		return 0;
	if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
	    inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
		return 1;
	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(inode, small_write);
}

static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
{
	const pgoff_t end_index = end >> PAGE_SHIFT;
	struct folio *folio;
	int ret = 0;

	for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			if (!ret)
				ret = PTR_ERR(folio);
			continue;
		}
		btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
					      end + 1 - start);
		folio_put(folio);
	}
	return ret;
}

/*
 * Work queue callback to start compression on a file's pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct folio **folios = NULL;
	unsigned long nr_folios;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int loff;
	int i;
	int compress_type = fs_info->compress_type;
	int compress_level = fs_info->compress_level;

	if (unlikely(btrfs_is_shutdown(fs_info)))
		goto cleanup_and_bail_uncompressed;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	ret = extent_range_clear_dirty_for_io(inode, start, end);

	/*
	 * All the folios should have been locked thus no failure.
	 *
	 * And even if some folios are missing, btrfs_compress_folios()
	 * would handle them correctly, so here just do an ASSERT() check for
	 * early logic errors.
	 */
	ASSERT(ret == 0);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	folios = NULL;
	nr_folios = (end >> min_folio_shift) - (start >> min_folio_shift) + 1;
	nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED >> min_folio_shift);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
	if (!folios) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
		compress_type = inode->defrag_compress;
		compress_level = inode->defrag_compress_level;
	} else if (inode->prop_compress) {
		compress_type = inode->prop_compress;
	}

	/* Compression level is applied here. */
	ret = btrfs_compress_folios(compress_type, compress_level,
				    inode, start, folios, &nr_folios, &total_in,
				    &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last folio, as we might be sending it down
	 * to disk.
	 */
	loff = (total_compressed & (min_folio_size - 1));
	if (loff)
		folio_zero_range(folios[nr_folios - 1], loff, min_folio_size - loff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, NULL, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
					    compress_type, folios[0], false);
	if (ret <= 0) {
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		goto free_pages;
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
			       nr_folios, compress_type);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			       BTRFS_COMPRESS_NONE);
	BUG_ON(ret);
free_pages:
	if (folios) {
		for (i = 0; i < nr_folios; i++) {
			WARN_ON(folios[i]->mapping);
			btrfs_free_compr_folio(folios[i]);
		}
		kfree(folios);
	}
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->folios)
		return;

	for (i = 0; i < async_extent->nr_folios; i++) {
		WARN_ON(async_extent->folios[i]->mapping);
		btrfs_free_compr_folio(async_extent->folios[i]);
	}
	kfree(async_extent->folios);
	async_extent->nr_folios = 0;
	async_extent->folios = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct folio *locked_folio)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_folio, start, end,
			       &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		if (locked_folio)
			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
					     start, async_extent->ram_size);
		btrfs_err_rl(inode->root->fs_info,
			"%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
			     __func__, btrfs_root_id(inode->root),
			     btrfs_ino(inode), start, async_extent->ram_size, ret);
	}
}

static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct btrfs_key ins;
	struct folio *locked_folio = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	bool free_pages = false;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_folio is in the async_extent range, we need to
	 * handle it.
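	 *
	 * For example (illustrative numbers, not from the original file):
	 * with 4K folios, a locked folio covering file range [8192, 12287]
	 * overlaps an async extent for [4096, 16383], so this path must take
	 * over unlocking it instead of the caller.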
	 */
	if (async_chunk->locked_folio) {
		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
		u64 locked_folio_end = locked_folio_start +
			folio_size(async_chunk->locked_folio) - 1;

		if (!(start >= locked_folio_end || end <= locked_folio_start))
			locked_folio = async_chunk->locked_folio;
	}

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		ASSERT(!async_extent->folios);
		ASSERT(async_extent->nr_folios == 0);
		submit_uncompressed_range(inode, async_extent, locked_folio);
		free_pages = true;
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, true, true);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead. So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_folio);
		free_pages = true;
		goto done;
	}

	btrfs_lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.ram_bytes = async_extent->ram_size;
	file_extent.num_bytes = async_extent->ram_size;
	file_extent.offset = 0;
	file_extent.compression = async_extent->compress_type;

	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     1U << BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
				      async_extent->folios,	/* compressed_folios */
				      async_extent->nr_folios,
				      async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	if (free_pages)
		free_async_extent_pages(async_extent);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
			btrfs_free_extent_map(em);
			em = btrfs_search_extent_mapping(em_tree, 0, 0);
			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				alloc_hint = btrfs_extent_map_block_start(em);
			if (em)
				btrfs_free_extent_map(em);
		} else {
			alloc_hint = btrfs_extent_map_block_start(em);
			btrfs_free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
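
/*
 * Example of the hint logic above (an illustrative restatement): if
 * [start, start + num_bytes) overlaps an extent map whose disk_bytenr is a
 * real block number, that block start becomes the allocation hint, placing
 * the new extent near existing data. If the overlapping map is a hole or
 * inline extent (disk_bytenr >= EXTENT_MAP_LAST_BYTE), the file's first
 * mapping is tried instead, and failing that the hint stays 0 (no placement
 * preference).
 */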

/*
 * When extent_io.c finds a delayed allocation range in the file, the callbacks
 * end up in this code. The basic idea is to allocate extents on disk for the
 * range, and create ordered data structs in ram to track those extents.
 *
 * locked_folio is the folio that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all folios except @locked_folio.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all folios including locked_folio and starts I/O on them.
 * (In reality inline extents are limited to a single block, so locked_folio is
 * the only folio handled anyway).
 *
 * When this function succeeds and creates a normal extent, the folio locking
 * status depends on the passed in flags:
 *
 * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
 * - Else all folios except for @locked_folio are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are cleaned up.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct folio *locked_folio, u64 start,
				   u64 end, u64 *done_offset,
				   unsigned long flags)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached = NULL;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	int ret = 0;

	if (unlikely(btrfs_is_shutdown(fs_info))) {
		ret = -EIO;
		goto out_unlock;
	}

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret <= 0) {
			/*
			 * We succeeded, return 1 so the caller knows we're done
			 * with this page and already handled the IO.
			 *
			 * If there was an error then cow_file_range_inline() has
			 * already done the cleanup.
			 */
			if (ret == 0)
				ret = 1;
			goto done;
		}
	}

	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * We're not doing compressed IO, don't unlock the first page (which
	 * the caller expects to stay locked), don't clear any dirty bits and
	 * don't set any writeback bits.
	 *
	 * Do set the Ordered (Private2) bit so we know this page was properly
	 * setup for writepage.
	 */
	page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
	page_ops |= PAGE_SET_ORDERED;

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;
		struct btrfs_file_extent file_extent;

		ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
					   min_alloc_size, 0, alloc_hint,
					   &ins, true, true);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation. Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				/*
				 * Move @end to the end of the processed range,
				 * and exit the loop to unlock the processed extents.
				 */
				end = start - 1;
				ret = 0;
				break;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;

		file_extent.disk_bytenr = ins.objectid;
		file_extent.disk_num_bytes = ins.offset;
		file_extent.num_bytes = ins.offset;
		file_extent.ram_bytes = ins.offset;
		file_extent.offset = 0;
		file_extent.compression = BTRFS_COMPRESS_NONE;

		/*
		 * Locked range will be released either during error clean up or
		 * after the whole range is finished.
		 */
		btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
				  &cached);

		em = btrfs_create_io_em(inode, start, &file_extent,
					BTRFS_ORDERED_REGULAR);
		if (IS_ERR(em)) {
			btrfs_unlock_extent(&inode->io_tree, start,
					    start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		btrfs_free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
						     1U << BTRFS_ORDERED_REGULAR);
		if (IS_ERR(ordered)) {
			btrfs_unlock_extent(&inode->io_tree, start,
					    start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + cur_alloc_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		cur_alloc_size = 0;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region, thus we need to cleanup those ordered extents.
	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
	 * are also handled by the ordered extents cleanup.
	 *
	 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and
	 * finish the writeback of the involved folios, which will never be
	 * submitted.
	 */
	if (orig_start < start) {
		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

		if (!locked_folio)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);

		btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_folio, NULL, clear_bits, page_ops);
	}

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (cur_alloc_size) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_folio, &cached, clear_bits,
					     page_ops);
		btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start + cur_alloc_size < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
					     end, locked_folio,
					     &cached, clear_bits, page_ops);
		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
				       end - start - cur_alloc_size + 1, NULL);
	}
	btrfs_err(fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%llu: %d",
		  __func__, btrfs_root_id(inode->root),
		  btrfs_ino(inode), orig_start, end + 1 - orig_start,
		  start, cur_alloc_size, ret);
	return ret;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_cow *async_cow;

		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		   PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_first_entry(&async_chunk->extents,
						struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct folio *locked_folio, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
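		/*
		 * Restating the chunking math above for clarity: each
		 * async_chunk covers at most SZ_512K bytes, i.e. chunk i
		 * handles [start, min(end, start + SZ_512K - 1)], matching
		 * how num_chunks was computed with DIV_ROUND_UP().
		 */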
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_folio comes all the way from writepage and it's
		 * the original folio we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_folio.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_folio) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_folio,
						 cur_end - start);
			async_chunk[i].locked_folio = locked_folio;
			locked_folio = NULL;
		} else {
			async_chunk[i].locked_folio = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}

/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_folio, start, end,
				     &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_folio,
					  start, done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}
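
/*
 * A note on run_delalloc_cow() above, restating behavior visible in the code:
 * on zoned filesystems cow_file_range() may stop early (when it runs out of
 * active zones) and report the end of the range it actually processed through
 * @done_offset. The loop then writes back what was allocated so far and
 * resumes at done_offset + 1 until the whole range is covered.
 */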
1755 * If COW succeeds, it allocates a new data extent and after doing 1756 * that it decrements the space info's bytes_may_use counter and 1757 * increments its bytes_reserved counter by the same amount (we do 1758 * this at btrfs_add_reserved_bytes()). So we need to increment the 1759 * bytes_may_use counter to compensate (when space is reserved at 1760 * buffered write time, the bytes_may_use counter is incremented); 1761 * 1762 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1763 * that if the COW path fails for any reason, it decrements (through 1764 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1765 * data space info, which we incremented in the step above. 1766 * 1767 * If we need to fallback to cow and the inode corresponds to a free 1768 * space cache inode or an inode of the data relocation tree, we must 1769 * also increment bytes_may_use of the data space_info for the same 1770 * reason. Space caches and relocated data extents always get a prealloc 1771 * extent for them, however scrub or balance may have set the block 1772 * group that contains that extent to RO mode and therefore force COW 1773 * when starting writeback. 1774 */ 1775 btrfs_lock_extent(io_tree, start, end, &cached_state); 1776 count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes, 1777 EXTENT_NORESERVE, 0, NULL); 1778 if (count > 0 || is_space_ino || is_reloc_ino) { 1779 u64 bytes = count; 1780 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1781 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1782 1783 if (is_space_ino || is_reloc_ino) 1784 bytes = range_bytes; 1785 1786 spin_lock(&sinfo->lock); 1787 btrfs_space_info_update_bytes_may_use(sinfo, bytes); 1788 spin_unlock(&sinfo->lock); 1789 1790 if (count > 0) 1791 btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1792 &cached_state); 1793 } 1794 btrfs_unlock_extent(io_tree, start, end, &cached_state); 1795 1796 /* 1797 * Don't try to create inline extents, as a mix of inline extent that 1798 * is written out and unlocked directly and a normal NOCOW extent 1799 * doesn't work. 1800 * 1801 * And here we do not unlock the folio after a successful run. 1802 * The folios will be unlocked after everything is finished, or by error handling. 1803 * 1804 * This is to ensure error handling won't need to clear dirty/ordered flags without 1805 * a locked folio, which can race with writeback. 1806 */ 1807 ret = cow_file_range(inode, locked_folio, start, end, NULL, 1808 COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED); 1809 ASSERT(ret != 1); 1810 return ret; 1811 } 1812 1813 struct can_nocow_file_extent_args { 1814 /* Input fields. */ 1815 1816 /* Start file offset of the range we want to NOCOW. */ 1817 u64 start; 1818 /* End file offset (inclusive) of the range we want to NOCOW. */ 1819 u64 end; 1820 bool writeback_path; 1821 /* 1822 * Free the path passed to can_nocow_file_extent() once it's not needed 1823 * anymore. 1824 */ 1825 bool free_path; 1826 1827 /* 1828 * Output fields. Only set when can_nocow_file_extent() returns 1. 1829 * The expected file extent for the NOCOW write. 1830 */ 1831 struct btrfs_file_extent file_extent; 1832 }; 1833 1834 /* 1835 * Check if we can NOCOW the file extent that the path points to. 1836 * This function may return with the path released, so the caller should check 1837 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 
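*
* (Editor's note: within this file the caller is run_delalloc_nocow(), which
* positions @path at a file extent item covering @args->start and, on a
* return of 1, performs the NOCOW write via nocow_one_range(), otherwise
* falls back to fallback_to_cow().)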
1838 * 1839 * Returns: < 0 on error 1840 * 0 if we can not NOCOW 1841 * 1 if we can NOCOW 1842 */ 1843 static int can_nocow_file_extent(struct btrfs_path *path, 1844 struct btrfs_key *key, 1845 struct btrfs_inode *inode, 1846 struct can_nocow_file_extent_args *args) 1847 { 1848 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1849 struct extent_buffer *leaf = path->nodes[0]; 1850 struct btrfs_root *root = inode->root; 1851 struct btrfs_file_extent_item *fi; 1852 struct btrfs_root *csum_root; 1853 u64 io_start; 1854 u64 extent_end; 1855 u8 extent_type; 1856 int can_nocow = 0; 1857 int ret = 0; 1858 bool nowait = path->nowait; 1859 1860 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1861 extent_type = btrfs_file_extent_type(leaf, fi); 1862 1863 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1864 goto out; 1865 1866 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1867 extent_type == BTRFS_FILE_EXTENT_REG) 1868 goto out; 1869 1870 /* 1871 * If the extent was created before the generation where the last snapshot 1872 * for its subvolume was created, then this implies the extent is shared, 1873 * hence we must COW. 1874 */ 1875 if (btrfs_file_extent_generation(leaf, fi) <= 1876 btrfs_root_last_snapshot(&root->root_item)) 1877 goto out; 1878 1879 /* An explicit hole, must COW. */ 1880 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 1881 goto out; 1882 1883 /* Compressed/encrypted/encoded extents must be COWed. */ 1884 if (btrfs_file_extent_compression(leaf, fi) || 1885 btrfs_file_extent_encryption(leaf, fi) || 1886 btrfs_file_extent_other_encoding(leaf, fi)) 1887 goto out; 1888 1889 extent_end = btrfs_file_extent_end(path); 1890 1891 args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1892 args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1893 args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1894 args->file_extent.offset = btrfs_file_extent_offset(leaf, fi); 1895 args->file_extent.compression = btrfs_file_extent_compression(leaf, fi); 1896 1897 /* 1898 * The following checks can be expensive, as they need to take other 1899 * locks and do btree or rbtree searches, so release the path to avoid 1900 * blocking other tasks for too long. 1901 */ 1902 btrfs_release_path(path); 1903 1904 ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset, 1905 args->file_extent.disk_bytenr, path); 1906 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1907 if (ret != 0) 1908 goto out; 1909 1910 if (args->free_path) { 1911 /* 1912 * We don't need the path anymore, plus through the 1913 * btrfs_lookup_csums_list() call below we will end up allocating 1914 * another path. So free the path to avoid unnecessary extra 1915 * memory usage. 1916 */ 1917 btrfs_free_path(path); 1918 path = NULL; 1919 } 1920 1921 /* If there are pending snapshots for this root, we must COW. */ 1922 if (args->writeback_path && !is_freespace_inode && 1923 atomic_read(&root->snapshot_force_cow)) 1924 goto out; 1925 1926 args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start; 1927 args->file_extent.offset += args->start - key->offset; 1928 io_start = args->file_extent.disk_bytenr + args->file_extent.offset; 1929 1930 /* 1931 * Force COW if csums exist in the range. This ensures that csums for a 1932 * given extent are either valid or do not exist. 
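*
* (Editor's hedged note: otherwise a NOCOW overwrite could leave some blocks
* of the extent with csums that no longer match the data on disk, and a
* later read or scrub could not tell which blocks were meant to be
* verified.)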
1933 */
1934
1935 csum_root = btrfs_csum_root(root->fs_info, io_start);
1936 ret = btrfs_lookup_csums_list(csum_root, io_start,
1937 io_start + args->file_extent.num_bytes - 1,
1938 NULL, nowait);
1939 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1940 if (ret != 0)
1941 goto out;
1942
1943 can_nocow = 1;
1944 out:
1945 if (args->free_path && path)
1946 btrfs_free_path(path);
1947
1948 return ret < 0 ? ret : can_nocow;
1949 }
1950
1951 static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
1952 struct extent_state **cached,
1953 struct can_nocow_file_extent_args *nocow_args,
1954 u64 file_pos, bool is_prealloc)
1955 {
1956 struct btrfs_ordered_extent *ordered;
1957 const u64 len = nocow_args->file_extent.num_bytes;
1958 const u64 end = file_pos + len - 1;
1959 int ret = 0;
1960
1961 btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
1962
1963 if (is_prealloc) {
1964 struct extent_map *em;
1965
1966 em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
1967 BTRFS_ORDERED_PREALLOC);
1968 if (IS_ERR(em)) {
1969 ret = PTR_ERR(em);
1970 goto error;
1971 }
1972 btrfs_free_extent_map(em);
1973 }
1974
1975 ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
1976 is_prealloc
1977 ? (1U << BTRFS_ORDERED_PREALLOC)
1978 : (1U << BTRFS_ORDERED_NOCOW));
1979 if (IS_ERR(ordered)) {
1980 if (is_prealloc)
1981 btrfs_drop_extent_map_range(inode, file_pos, end, false);
1982 ret = PTR_ERR(ordered);
1983 goto error;
1984 }
1985
1986 if (btrfs_is_data_reloc_root(inode->root))
1987 /*
1988 * Errors are handled later, as we must prevent
1989 * extent_clear_unlock_delalloc() in error handler from freeing
1990 * metadata of the created ordered extent.
1991 */
1992 ret = btrfs_reloc_clone_csums(ordered);
1993 btrfs_put_ordered_extent(ordered);
1994
1995 if (ret < 0)
1996 goto error;
1997 extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
1998 EXTENT_LOCKED | EXTENT_DELALLOC |
1999 EXTENT_CLEAR_DATA_RESV,
2000 PAGE_SET_ORDERED);
2001 return ret;
2002
2003 error:
2004 btrfs_cleanup_ordered_extents(inode, file_pos, len);
2005 extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2006 EXTENT_LOCKED | EXTENT_DELALLOC |
2007 EXTENT_CLEAR_DATA_RESV,
2008 PAGE_UNLOCK | PAGE_START_WRITEBACK |
2009 PAGE_END_WRITEBACK);
2010 btrfs_err(inode->root->fs_info,
2011 "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
2012 __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2013 file_pos, len, ret);
2014 return ret;
2015 }
2016
2017 /*
2018 * NOCOW writeback entry point. This checks for snapshots or COW copies
2019 * of the extents that exist in the file, and COWs the file as required.
2020 *
2021 * If no COW copies or snapshots exist, we write directly to the existing
2022 * blocks on disk.
2023 */
2024 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
2025 struct folio *locked_folio,
2026 const u64 start, const u64 end)
2027 {
2028 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2029 struct btrfs_root *root = inode->root;
2030 struct btrfs_path *path = NULL;
2031 u64 cow_start = (u64)-1;
2032 /*
2033 * If not 0, represents the inclusive end of the last fallback_to_cow()
2034 * range. Only for error handling.
2035 *
2036 * The same for nocow_end, it's to avoid double cleaning up the range
2037 * already cleaned by nocow_one_range().
2038 */
2039 u64 cow_end = 0;
2040 u64 nocow_end = 0;
2041 u64 cur_offset = start;
2042 int ret;
2043 bool check_prev = true;
2044 u64 ino = btrfs_ino(inode);
2045 struct can_nocow_file_extent_args nocow_args = { 0 };
2046 /* The range that has ordered extent(s). */
2047 u64 oe_cleanup_start;
2048 u64 oe_cleanup_len = 0;
2049 /* The range that is untouched. */
2050 u64 untouched_start;
2051 u64 untouched_len = 0;
2052
2053 /*
2054 * Normally on a zoned device we're only doing COW writes, but relocation
2055 * on a zoned filesystem serializes I/O so that we're only writing
2056 * sequentially and can end up here as well.
2057 */
2058 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2059
2060 if (unlikely(btrfs_is_shutdown(fs_info))) {
2061 ret = -EIO;
2062 goto error;
2063 }
2064 path = btrfs_alloc_path();
2065 if (!path) {
2066 ret = -ENOMEM;
2067 goto error;
2068 }
2069
2070 nocow_args.end = end;
2071 nocow_args.writeback_path = true;
2072
2073 while (cur_offset <= end) {
2074 struct btrfs_block_group *nocow_bg = NULL;
2075 struct btrfs_key found_key;
2076 struct btrfs_file_extent_item *fi;
2077 struct extent_buffer *leaf;
2078 struct extent_state *cached_state = NULL;
2079 u64 extent_end;
2080 int extent_type;
2081
2082 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2083 cur_offset, 0);
2084 if (ret < 0)
2085 goto error;
2086
2087 /*
2088 * If there is no extent for our range when doing the initial
2089 * search, then go back to the previous slot as it will be the
2090 * one containing the search offset.
2091 */
2092 if (ret > 0 && path->slots[0] > 0 && check_prev) {
2093 leaf = path->nodes[0];
2094 btrfs_item_key_to_cpu(leaf, &found_key,
2095 path->slots[0] - 1);
2096 if (found_key.objectid == ino &&
2097 found_key.type == BTRFS_EXTENT_DATA_KEY)
2098 path->slots[0]--;
2099 }
2100 check_prev = false;
2101 next_slot:
2102 /* Go to next leaf if we have exhausted the current one */
2103 leaf = path->nodes[0];
2104 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2105 ret = btrfs_next_leaf(root, path);
2106 if (ret < 0)
2107 goto error;
2108 if (ret > 0)
2109 break;
2110 leaf = path->nodes[0];
2111 }
2112
2113 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2114
2115 /* Didn't find anything for our INO */
2116 if (found_key.objectid > ino)
2117 break;
2118 /*
2119 * Keep searching until we find an EXTENT_DATA item or there are
2120 * no more extents for this inode
2121 */
2122 if (WARN_ON_ONCE(found_key.objectid < ino) ||
2123 found_key.type < BTRFS_EXTENT_DATA_KEY) {
2124 path->slots[0]++;
2125 goto next_slot;
2126 }
2127
2128 /* Found key is not EXTENT_DATA_KEY or starts after the requested range */
2129 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2130 found_key.offset > end)
2131 break;
2132
2133 /*
2134 * If the found extent starts after requested offset, then
2135 * adjust cur_offset to be right before this extent begins.
2136 */
2137 if (found_key.offset > cur_offset) {
2138 if (cow_start == (u64)-1)
2139 cow_start = cur_offset;
2140 cur_offset = found_key.offset;
2141 goto next_slot;
2142 }
2143
2144 /*
2145 * Found extent which begins before our range and potentially
2146 * intersects it
2147 */
2148 fi = btrfs_item_ptr(leaf, path->slots[0],
2149 struct btrfs_file_extent_item);
2150 extent_type = btrfs_file_extent_type(leaf, fi);
2151 /* If this is triggered then we have a memory corruption.
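(Editor's note: extent_type was already validated by the tree checker when the leaf was read, so an out-of-range value here indicates in-memory corruption; the WARN_ON() plus -EUCLEAN below keeps the failure non-fatal.)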
*/ 2152 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2153 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2154 ret = -EUCLEAN; 2155 goto error; 2156 } 2157 extent_end = btrfs_file_extent_end(path); 2158 2159 /* 2160 * If the extent we got ends before our current offset, skip to 2161 * the next extent. 2162 */ 2163 if (extent_end <= cur_offset) { 2164 path->slots[0]++; 2165 goto next_slot; 2166 } 2167 2168 nocow_args.start = cur_offset; 2169 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2170 if (ret < 0) 2171 goto error; 2172 if (ret == 0) 2173 goto must_cow; 2174 2175 ret = 0; 2176 nocow_bg = btrfs_inc_nocow_writers(fs_info, 2177 nocow_args.file_extent.disk_bytenr + 2178 nocow_args.file_extent.offset); 2179 if (!nocow_bg) { 2180 must_cow: 2181 /* 2182 * If we can't perform NOCOW writeback for the range, 2183 * then record the beginning of the range that needs to 2184 * be COWed. It will be written out before the next 2185 * NOCOW range if we find one, or when exiting this 2186 * loop. 2187 */ 2188 if (cow_start == (u64)-1) 2189 cow_start = cur_offset; 2190 cur_offset = extent_end; 2191 if (cur_offset > end) 2192 break; 2193 if (!path->nodes[0]) 2194 continue; 2195 path->slots[0]++; 2196 goto next_slot; 2197 } 2198 2199 /* 2200 * COW range from cow_start to found_key.offset - 1. As the key 2201 * will contain the beginning of the first extent that can be 2202 * NOCOW, following one which needs to be COW'ed 2203 */ 2204 if (cow_start != (u64)-1) { 2205 ret = fallback_to_cow(inode, locked_folio, cow_start, 2206 found_key.offset - 1); 2207 if (ret) { 2208 cow_end = found_key.offset - 1; 2209 btrfs_dec_nocow_writers(nocow_bg); 2210 goto error; 2211 } 2212 cow_start = (u64)-1; 2213 } 2214 2215 ret = nocow_one_range(inode, locked_folio, &cached_state, 2216 &nocow_args, cur_offset, 2217 extent_type == BTRFS_FILE_EXTENT_PREALLOC); 2218 btrfs_dec_nocow_writers(nocow_bg); 2219 if (ret < 0) { 2220 nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1; 2221 goto error; 2222 } 2223 cur_offset = extent_end; 2224 } 2225 btrfs_release_path(path); 2226 2227 if (cur_offset <= end && cow_start == (u64)-1) 2228 cow_start = cur_offset; 2229 2230 if (cow_start != (u64)-1) { 2231 ret = fallback_to_cow(inode, locked_folio, cow_start, end); 2232 if (ret) { 2233 cow_end = end; 2234 goto error; 2235 } 2236 cow_start = (u64)-1; 2237 } 2238 2239 /* 2240 * Everything is finished without an error, can unlock the folios now. 2241 * 2242 * No need to touch the io tree range nor set folio ordered flag, as 2243 * fallback_to_cow() and nocow_one_range() have already handled them. 2244 */ 2245 extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK); 2246 2247 btrfs_free_path(path); 2248 return 0; 2249 2250 error: 2251 if (cow_start == (u64)-1) { 2252 /* 2253 * case a) 2254 * start cur_offset end 2255 * | OE cleanup | Untouched | 2256 * 2257 * We finished a fallback_to_cow() or nocow_one_range() call, 2258 * but failed to check the next range. 2259 * 2260 * or 2261 * start cur_offset nocow_end end 2262 * | OE cleanup | Skip | Untouched | 2263 * 2264 * nocow_one_range() failed, the range [cur_offset, nocow_end] is 2265 * already cleaned up. 
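*
* (Editorial worked example with hypothetical offsets: start=0, end=1M-1;
* if nocow_one_range() fails on a 256K extent at cur_offset=256K, then
* nocow_end=512K-1, ordered extent cleanup covers [0, 256K-1], the failed
* range is skipped, and [512K, 1M-1] is released as untouched.)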
2266 */ 2267 oe_cleanup_start = start; 2268 oe_cleanup_len = cur_offset - start; 2269 if (nocow_end) 2270 untouched_start = nocow_end + 1; 2271 else 2272 untouched_start = cur_offset; 2273 untouched_len = end + 1 - untouched_start; 2274 } else if (cow_start != (u64)-1 && cow_end == 0) { 2275 /* 2276 * case b) 2277 * start cow_start cur_offset end 2278 * | OE cleanup | Untouched | 2279 * 2280 * We got a range that needs COW, but before we hit the next NOCOW range, 2281 * thus [cow_start, cur_offset) doesn't yet have any OE. 2282 */ 2283 oe_cleanup_start = start; 2284 oe_cleanup_len = cow_start - start; 2285 untouched_start = cow_start; 2286 untouched_len = end + 1 - untouched_start; 2287 } else { 2288 /* 2289 * case c) 2290 * start cow_start cow_end end 2291 * | OE cleanup | Skip | Untouched | 2292 * 2293 * fallback_to_cow() failed, and fallback_to_cow() will do the 2294 * cleanup for its range, we shouldn't touch the range 2295 * [cow_start, cow_end]. 2296 */ 2297 ASSERT(cow_start != (u64)-1 && cow_end != 0); 2298 oe_cleanup_start = start; 2299 oe_cleanup_len = cow_start - start; 2300 untouched_start = cow_end + 1; 2301 untouched_len = end + 1 - untouched_start; 2302 } 2303 2304 if (oe_cleanup_len) { 2305 const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1; 2306 btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len); 2307 extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end, 2308 locked_folio, NULL, 2309 EXTENT_LOCKED | EXTENT_DELALLOC, 2310 PAGE_UNLOCK | PAGE_START_WRITEBACK | 2311 PAGE_END_WRITEBACK); 2312 } 2313 2314 if (untouched_len) { 2315 struct extent_state *cached = NULL; 2316 const u64 untouched_end = untouched_start + untouched_len - 1; 2317 2318 /* 2319 * We need to lock the extent here because we're clearing DELALLOC and 2320 * we're not locked at this point. 2321 */ 2322 btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached); 2323 extent_clear_unlock_delalloc(inode, untouched_start, untouched_end, 2324 locked_folio, &cached, 2325 EXTENT_LOCKED | EXTENT_DELALLOC | 2326 EXTENT_DEFRAG | 2327 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2328 PAGE_START_WRITEBACK | 2329 PAGE_END_WRITEBACK); 2330 btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL); 2331 } 2332 btrfs_free_path(path); 2333 btrfs_err(fs_info, 2334 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d", 2335 __func__, btrfs_root_id(inode->root), btrfs_ino(inode), 2336 start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len, 2337 untouched_start, untouched_len, ret); 2338 return ret; 2339 } 2340 2341 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2342 { 2343 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2344 if (inode->defrag_bytes && 2345 btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) 2346 return false; 2347 return true; 2348 } 2349 return false; 2350 } 2351 2352 /* 2353 * Function to process delayed allocation (create CoW) for ranges which are 2354 * being touched for the first time. 2355 */ 2356 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio, 2357 u64 start, u64 end, struct writeback_control *wbc) 2358 { 2359 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2360 int ret; 2361 2362 /* 2363 * The range must cover part of the @locked_folio, or a return of 1 2364 * can confuse the caller. 
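*
* (Editor's note: a return of 1 tells the caller the whole range, including
* the locked folio, was already handed to writeback, as done by
* run_delalloc_compressed() and run_delalloc_cow(), so the folio must not
* be touched again.)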
2365 */ 2366 ASSERT(!(end <= folio_pos(locked_folio) || 2367 start >= folio_next_pos(locked_folio))); 2368 2369 if (should_nocow(inode, start, end)) { 2370 ret = run_delalloc_nocow(inode, locked_folio, start, end); 2371 return ret; 2372 } 2373 2374 if (btrfs_inode_can_compress(inode) && 2375 inode_need_compress(inode, start, end) && 2376 run_delalloc_compressed(inode, locked_folio, start, end, wbc)) 2377 return 1; 2378 2379 if (zoned) 2380 ret = run_delalloc_cow(inode, locked_folio, start, end, wbc, 2381 true); 2382 else 2383 ret = cow_file_range(inode, locked_folio, start, end, NULL, 0); 2384 return ret; 2385 } 2386 2387 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2388 struct extent_state *orig, u64 split) 2389 { 2390 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2391 u64 size; 2392 2393 lockdep_assert_held(&inode->io_tree.lock); 2394 2395 /* not delalloc, ignore it */ 2396 if (!(orig->state & EXTENT_DELALLOC)) 2397 return; 2398 2399 size = orig->end - orig->start + 1; 2400 if (size > fs_info->max_extent_size) { 2401 u32 num_extents; 2402 u64 new_size; 2403 2404 /* 2405 * See the explanation in btrfs_merge_delalloc_extent, the same 2406 * applies here, just in reverse. 2407 */ 2408 new_size = orig->end - split + 1; 2409 num_extents = count_max_extents(fs_info, new_size); 2410 new_size = split - orig->start; 2411 num_extents += count_max_extents(fs_info, new_size); 2412 if (count_max_extents(fs_info, size) >= num_extents) 2413 return; 2414 } 2415 2416 spin_lock(&inode->lock); 2417 btrfs_mod_outstanding_extents(inode, 1); 2418 spin_unlock(&inode->lock); 2419 } 2420 2421 /* 2422 * Handle merged delayed allocation extents so we can keep track of new extents 2423 * that are just merged onto old extents, such as when we are doing sequential 2424 * writes, so we can properly account for the metadata space we'll need. 2425 */ 2426 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2427 struct extent_state *other) 2428 { 2429 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2430 u64 new_size, old_size; 2431 u32 num_extents; 2432 2433 lockdep_assert_held(&inode->io_tree.lock); 2434 2435 /* not delalloc, ignore it */ 2436 if (!(other->state & EXTENT_DELALLOC)) 2437 return; 2438 2439 if (new->start > other->start) 2440 new_size = new->end - other->start + 1; 2441 else 2442 new_size = other->end - new->start + 1; 2443 2444 /* we're not bigger than the max, unreserve the space and go */ 2445 if (new_size <= fs_info->max_extent_size) { 2446 spin_lock(&inode->lock); 2447 btrfs_mod_outstanding_extents(inode, -1); 2448 spin_unlock(&inode->lock); 2449 return; 2450 } 2451 2452 /* 2453 * We have to add up either side to figure out how many extents were 2454 * accounted for before we merged into one big extent. If the number of 2455 * extents we accounted for is <= the amount we need for the new range 2456 * then we can return, otherwise drop. Think of it like this 2457 * 2458 * [ 4k][MAX_SIZE] 2459 * 2460 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2461 * need 2 outstanding extents, on one side we have 1 and the other side 2462 * we have 1 so they are == and we can return. But in this case 2463 * 2464 * [MAX_SIZE+4k][MAX_SIZE+4k] 2465 * 2466 * Each range on their own accounts for 2 extents, but merged together 2467 * they are only 3 extents worth of accounting, so we need to drop in 2468 * this case. 
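*
* (Editorial worked example, assuming a max_extent_size of 128M: two
* adjacent 128M+4K ranges each account for 2 outstanding extents, 4 in
* total, while the merged 256M+8K range only needs 3, so one extent of
* accounting is dropped below.)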
2469 */ 2470 old_size = other->end - other->start + 1; 2471 num_extents = count_max_extents(fs_info, old_size); 2472 old_size = new->end - new->start + 1; 2473 num_extents += count_max_extents(fs_info, old_size); 2474 if (count_max_extents(fs_info, new_size) >= num_extents) 2475 return; 2476 2477 spin_lock(&inode->lock); 2478 btrfs_mod_outstanding_extents(inode, -1); 2479 spin_unlock(&inode->lock); 2480 } 2481 2482 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode) 2483 { 2484 struct btrfs_root *root = inode->root; 2485 struct btrfs_fs_info *fs_info = root->fs_info; 2486 2487 spin_lock(&root->delalloc_lock); 2488 ASSERT(list_empty(&inode->delalloc_inodes)); 2489 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2490 root->nr_delalloc_inodes++; 2491 if (root->nr_delalloc_inodes == 1) { 2492 spin_lock(&fs_info->delalloc_root_lock); 2493 ASSERT(list_empty(&root->delalloc_root)); 2494 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots); 2495 spin_unlock(&fs_info->delalloc_root_lock); 2496 } 2497 spin_unlock(&root->delalloc_lock); 2498 } 2499 2500 void btrfs_del_delalloc_inode(struct btrfs_inode *inode) 2501 { 2502 struct btrfs_root *root = inode->root; 2503 struct btrfs_fs_info *fs_info = root->fs_info; 2504 2505 lockdep_assert_held(&root->delalloc_lock); 2506 2507 /* 2508 * We may be called after the inode was already deleted from the list, 2509 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(), 2510 * and then later through btrfs_clear_delalloc_extent() while the inode 2511 * still has ->delalloc_bytes > 0. 2512 */ 2513 if (!list_empty(&inode->delalloc_inodes)) { 2514 list_del_init(&inode->delalloc_inodes); 2515 root->nr_delalloc_inodes--; 2516 if (!root->nr_delalloc_inodes) { 2517 ASSERT(list_empty(&root->delalloc_inodes)); 2518 spin_lock(&fs_info->delalloc_root_lock); 2519 ASSERT(!list_empty(&root->delalloc_root)); 2520 list_del_init(&root->delalloc_root); 2521 spin_unlock(&fs_info->delalloc_root_lock); 2522 } 2523 } 2524 } 2525 2526 /* 2527 * Properly track delayed allocation bytes in the inode and to maintain the 2528 * list of inodes that have pending delalloc work to be done. 
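*
* (Editor's note: this is the set-bit side of the io_tree hooks, called
* with io_tree.lock held, see the lockdep_assert_held() below; the
* accounting added here is undone by btrfs_clear_delalloc_extent().)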
2529 */ 2530 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2531 u32 bits) 2532 { 2533 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2534 2535 lockdep_assert_held(&inode->io_tree.lock); 2536 2537 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2538 WARN_ON(1); 2539 /* 2540 * set_bit and clear bit hooks normally require _irqsave/restore 2541 * but in this case, we are only testing for the DELALLOC 2542 * bit, which is only set or cleared with irqs on 2543 */ 2544 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2545 u64 len = state->end + 1 - state->start; 2546 u64 prev_delalloc_bytes; 2547 u32 num_extents = count_max_extents(fs_info, len); 2548 2549 spin_lock(&inode->lock); 2550 btrfs_mod_outstanding_extents(inode, num_extents); 2551 spin_unlock(&inode->lock); 2552 2553 /* For sanity tests */ 2554 if (btrfs_is_testing(fs_info)) 2555 return; 2556 2557 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2558 fs_info->delalloc_batch); 2559 spin_lock(&inode->lock); 2560 prev_delalloc_bytes = inode->delalloc_bytes; 2561 inode->delalloc_bytes += len; 2562 if (bits & EXTENT_DEFRAG) 2563 inode->defrag_bytes += len; 2564 spin_unlock(&inode->lock); 2565 2566 /* 2567 * We don't need to be under the protection of the inode's lock, 2568 * because we are called while holding the inode's io_tree lock 2569 * and are therefore protected against concurrent calls of this 2570 * function and btrfs_clear_delalloc_extent(). 2571 */ 2572 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0) 2573 btrfs_add_delalloc_inode(inode); 2574 } 2575 2576 if (!(state->state & EXTENT_DELALLOC_NEW) && 2577 (bits & EXTENT_DELALLOC_NEW)) { 2578 spin_lock(&inode->lock); 2579 inode->new_delalloc_bytes += state->end + 1 - state->start; 2580 spin_unlock(&inode->lock); 2581 } 2582 } 2583 2584 /* 2585 * Once a range is no longer delalloc this function ensures that proper 2586 * accounting happens. 2587 */ 2588 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2589 struct extent_state *state, u32 bits) 2590 { 2591 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2592 u64 len = state->end + 1 - state->start; 2593 u32 num_extents = count_max_extents(fs_info, len); 2594 2595 lockdep_assert_held(&inode->io_tree.lock); 2596 2597 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2598 spin_lock(&inode->lock); 2599 inode->defrag_bytes -= len; 2600 spin_unlock(&inode->lock); 2601 } 2602 2603 /* 2604 * set_bit and clear bit hooks normally require _irqsave/restore 2605 * but in this case, we are only testing for the DELALLOC 2606 * bit, which is only set or cleared with irqs on 2607 */ 2608 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2609 struct btrfs_root *root = inode->root; 2610 u64 new_delalloc_bytes; 2611 2612 spin_lock(&inode->lock); 2613 btrfs_mod_outstanding_extents(inode, -num_extents); 2614 spin_unlock(&inode->lock); 2615 2616 /* 2617 * We don't reserve metadata space for space cache inodes so we 2618 * don't need to call delalloc_release_metadata if there is an 2619 * error. 2620 */ 2621 if (bits & EXTENT_CLEAR_META_RESV && 2622 root != fs_info->tree_root) 2623 btrfs_delalloc_release_metadata(inode, len, true); 2624 2625 /* For sanity tests. 
*/ 2626 if (btrfs_is_testing(fs_info)) 2627 return; 2628 2629 if (!btrfs_is_data_reloc_root(root) && 2630 !btrfs_is_free_space_inode(inode) && 2631 !(state->state & EXTENT_NORESERVE) && 2632 (bits & EXTENT_CLEAR_DATA_RESV)) 2633 btrfs_free_reserved_data_space_noquota(inode, len); 2634 2635 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2636 fs_info->delalloc_batch); 2637 spin_lock(&inode->lock); 2638 inode->delalloc_bytes -= len; 2639 new_delalloc_bytes = inode->delalloc_bytes; 2640 spin_unlock(&inode->lock); 2641 2642 /* 2643 * We don't need to be under the protection of the inode's lock, 2644 * because we are called while holding the inode's io_tree lock 2645 * and are therefore protected against concurrent calls of this 2646 * function and btrfs_set_delalloc_extent(). 2647 */ 2648 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) { 2649 spin_lock(&root->delalloc_lock); 2650 btrfs_del_delalloc_inode(inode); 2651 spin_unlock(&root->delalloc_lock); 2652 } 2653 } 2654 2655 if ((state->state & EXTENT_DELALLOC_NEW) && 2656 (bits & EXTENT_DELALLOC_NEW)) { 2657 spin_lock(&inode->lock); 2658 ASSERT(inode->new_delalloc_bytes >= len); 2659 inode->new_delalloc_bytes -= len; 2660 if (bits & EXTENT_ADD_INODE_BYTES) 2661 inode_add_bytes(&inode->vfs_inode, len); 2662 spin_unlock(&inode->lock); 2663 } 2664 } 2665 2666 /* 2667 * given a list of ordered sums record them in the inode. This happens 2668 * at IO completion time based on sums calculated at bio submission time. 2669 */ 2670 static int add_pending_csums(struct btrfs_trans_handle *trans, 2671 struct list_head *list) 2672 { 2673 struct btrfs_ordered_sum *sum; 2674 struct btrfs_root *csum_root = NULL; 2675 int ret; 2676 2677 list_for_each_entry(sum, list, list) { 2678 trans->adding_csums = true; 2679 if (!csum_root) 2680 csum_root = btrfs_csum_root(trans->fs_info, 2681 sum->logical); 2682 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2683 trans->adding_csums = false; 2684 if (ret) 2685 return ret; 2686 } 2687 return 0; 2688 } 2689 2690 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2691 const u64 start, 2692 const u64 len, 2693 struct extent_state **cached_state) 2694 { 2695 u64 search_start = start; 2696 const u64 end = start + len - 1; 2697 2698 while (search_start < end) { 2699 const u64 search_len = end - search_start + 1; 2700 struct extent_map *em; 2701 u64 em_len; 2702 int ret = 0; 2703 2704 em = btrfs_get_extent(inode, NULL, search_start, search_len); 2705 if (IS_ERR(em)) 2706 return PTR_ERR(em); 2707 2708 if (em->disk_bytenr != EXTENT_MAP_HOLE) 2709 goto next; 2710 2711 em_len = em->len; 2712 if (em->start < search_start) 2713 em_len -= search_start - em->start; 2714 if (em_len > search_len) 2715 em_len = search_len; 2716 2717 ret = btrfs_set_extent_bit(&inode->io_tree, search_start, 2718 search_start + em_len - 1, 2719 EXTENT_DELALLOC_NEW, cached_state); 2720 next: 2721 search_start = btrfs_extent_map_end(em); 2722 btrfs_free_extent_map(em); 2723 if (ret) 2724 return ret; 2725 } 2726 return 0; 2727 } 2728 2729 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2730 unsigned int extra_bits, 2731 struct extent_state **cached_state) 2732 { 2733 WARN_ON(PAGE_ALIGNED(end)); 2734 2735 if (start >= i_size_read(&inode->vfs_inode) && 2736 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2737 /* 2738 * There can't be any extents following eof in this case so just 2739 * set the delalloc new bit for the range directly. 
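*
* (Editor's note: without BTRFS_INODE_PREALLOC there can be no extents
* beyond eof, so a range starting at or after i_size cannot overlap an
* existing extent and the btrfs_get_extent() search done by
* btrfs_find_new_delalloc_bytes() can be skipped.)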
2740 */ 2741 extra_bits |= EXTENT_DELALLOC_NEW; 2742 } else { 2743 int ret; 2744 2745 ret = btrfs_find_new_delalloc_bytes(inode, start, 2746 end + 1 - start, 2747 cached_state); 2748 if (ret) 2749 return ret; 2750 } 2751 2752 return btrfs_set_extent_bit(&inode->io_tree, start, end, 2753 EXTENT_DELALLOC | extra_bits, cached_state); 2754 } 2755 2756 /* see btrfs_writepage_start_hook for details on why this is required */ 2757 struct btrfs_writepage_fixup { 2758 struct folio *folio; 2759 struct btrfs_inode *inode; 2760 struct btrfs_work work; 2761 }; 2762 2763 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2764 { 2765 struct btrfs_writepage_fixup *fixup = 2766 container_of(work, struct btrfs_writepage_fixup, work); 2767 struct btrfs_ordered_extent *ordered; 2768 struct extent_state *cached_state = NULL; 2769 struct extent_changeset *data_reserved = NULL; 2770 struct folio *folio = fixup->folio; 2771 struct btrfs_inode *inode = fixup->inode; 2772 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2773 u64 page_start = folio_pos(folio); 2774 u64 page_end = folio_next_pos(folio) - 1; 2775 int ret = 0; 2776 bool free_delalloc_space = true; 2777 2778 /* 2779 * This is similar to page_mkwrite, we need to reserve the space before 2780 * we take the folio lock. 2781 */ 2782 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2783 folio_size(folio)); 2784 again: 2785 folio_lock(folio); 2786 2787 /* 2788 * Before we queued this fixup, we took a reference on the folio. 2789 * folio->mapping may go NULL, but it shouldn't be moved to a different 2790 * address space. 2791 */ 2792 if (!folio->mapping || !folio_test_dirty(folio) || 2793 !folio_test_checked(folio)) { 2794 /* 2795 * Unfortunately this is a little tricky, either 2796 * 2797 * 1) We got here and our folio had already been dealt with and 2798 * we reserved our space, thus ret == 0, so we need to just 2799 * drop our space reservation and bail. This can happen the 2800 * first time we come into the fixup worker, or could happen 2801 * while waiting for the ordered extent. 2802 * 2) Our folio was already dealt with, but we happened to get an 2803 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2804 * this case we obviously don't have anything to release, but 2805 * because the folio was already dealt with we don't want to 2806 * mark the folio with an error, so make sure we're resetting 2807 * ret to 0. This is why we have this check _before_ the ret 2808 * check, because we do not want to have a surprise ENOSPC 2809 * when the folio was already properly dealt with. 2810 */ 2811 if (!ret) { 2812 btrfs_delalloc_release_extents(inode, folio_size(folio)); 2813 btrfs_delalloc_release_space(inode, data_reserved, 2814 page_start, folio_size(folio), 2815 true); 2816 } 2817 ret = 0; 2818 goto out_page; 2819 } 2820 2821 /* 2822 * We can't mess with the folio state unless it is locked, so now that 2823 * it is locked bail if we failed to make our space reservation. 2824 */ 2825 if (ret) 2826 goto out_page; 2827 2828 btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2829 2830 /* already ordered? 
We're done */
2831 if (folio_test_ordered(folio))
2832 goto out_reserved;
2833
2834 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2835 if (ordered) {
2836 btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
2837 &cached_state);
2838 folio_unlock(folio);
2839 btrfs_start_ordered_extent(ordered);
2840 btrfs_put_ordered_extent(ordered);
2841 goto again;
2842 }
2843
2844 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2845 &cached_state);
2846 if (ret)
2847 goto out_reserved;
2848
2849 /*
2850 * Everything went as planned, we're now the owner of a dirty page with
2851 * delayed allocation bits set and space reserved for our COW
2852 * destination.
2853 *
2854 * The page was dirty when we started, nothing should have cleaned it.
2855 */
2856 BUG_ON(!folio_test_dirty(folio));
2857 free_delalloc_space = false;
2858 out_reserved:
2859 btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2860 if (free_delalloc_space)
2861 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2862 PAGE_SIZE, true);
2863 btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2864 out_page:
2865 if (ret) {
2866 /*
2867 * We hit ENOSPC or other errors. Update the mapping and page
2868 * to reflect the errors and clean the page.
2869 */
2870 mapping_set_error(folio->mapping, ret);
2871 btrfs_mark_ordered_io_finished(inode, folio, page_start,
2872 folio_size(folio), !ret);
2873 folio_clear_dirty_for_io(folio);
2874 }
2875 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2876 folio_unlock(folio);
2877 folio_put(folio);
2878 kfree(fixup);
2879 extent_changeset_free(data_reserved);
2880 /*
2881 * As a precaution, do a delayed iput in case it would be the last iput
2882 * that could need flushing space. Recursing back to fixup worker would
2883 * deadlock.
2884 */
2885 btrfs_add_delayed_iput(inode);
2886 }
2887
2888 /*
2889 * There are a few paths in the higher layers of the kernel that directly
2890 * set the folio dirty bit without asking the filesystem if it is a
2891 * good idea. This causes problems because we want to make sure COW
2892 * properly happens and the data=ordered rules are followed.
2893 *
2894 * In our case any range that doesn't have the ORDERED bit set
2895 * hasn't been properly set up for IO. We kick off an async process
2896 * to fix it up. The async helper will wait for ordered extents, set
2897 * the delalloc bit and make it safe to write the folio.
2898 */
2899 int btrfs_writepage_cow_fixup(struct folio *folio)
2900 {
2901 struct inode *inode = folio->mapping->host;
2902 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2903 struct btrfs_writepage_fixup *fixup;
2904
2905 /* This folio has an ordered extent covering it already */
2906 if (folio_test_ordered(folio))
2907 return 0;
2908
2909 /*
2910 * For experimental builds, we error out instead of returning EAGAIN.
2911 *
2912 * We should not hit such out-of-band dirty folios anymore.
2913 */
2914 if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
2915 DEBUG_WARN();
2916 btrfs_err_rl(fs_info,
2917 "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
2918 btrfs_root_id(BTRFS_I(inode)->root),
2919 btrfs_ino(BTRFS_I(inode)),
2920 folio_pos(folio));
2921 return -EUCLEAN;
2922 }
2923
2924 /*
2925 * folio_checked is set below when we create a fixup worker for this
2926 * folio; don't try to create another one if we're already
2927 * folio_test_checked.
2928 *
2929 * The extent_io writepage code will redirty the folio if we send back
2930 * EAGAIN.
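*
* (Editor's note: the -EAGAIN below is therefore not a hard error; the
* folio stays dirty and writeback retries it once
* btrfs_writepage_fixup_worker() has set the delalloc bit and cleared
* the checked flag.)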
2931 */
2932 if (folio_test_checked(folio))
2933 return -EAGAIN;
2934
2935 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2936 if (!fixup)
2937 return -EAGAIN;
2938
2939 /*
2940 * We are already holding a reference to this inode from
2941 * write_cache_pages. We need to hold it because the space reservation
2942 * takes place outside of the folio lock, and we can't trust
2943 * folio->mapping outside of the folio lock.
2944 */
2945 ihold(inode);
2946 btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
2947 folio_get(folio);
2948 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2949 fixup->folio = folio;
2950 fixup->inode = BTRFS_I(inode);
2951 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2952
2953 return -EAGAIN;
2954 }
2955
2956 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2957 struct btrfs_inode *inode, u64 file_pos,
2958 struct btrfs_file_extent_item *stack_fi,
2959 const bool update_inode_bytes,
2960 u64 qgroup_reserved)
2961 {
2962 struct btrfs_root *root = inode->root;
2963 const u64 sectorsize = root->fs_info->sectorsize;
2964 BTRFS_PATH_AUTO_FREE(path);
2965 struct extent_buffer *leaf;
2966 struct btrfs_key ins;
2967 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2968 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2969 u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2970 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2971 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2972 struct btrfs_drop_extents_args drop_args = { 0 };
2973 int ret;
2974
2975 path = btrfs_alloc_path();
2976 if (!path)
2977 return -ENOMEM;
2978
2979 /*
2980 * We may be replacing one extent in the tree with another.
2981 * The new extent is pinned in the extent map, and we don't want
2982 * to drop it from the cache until it is completely in the btree.
2983 *
2984 * So, tell btrfs_drop_extents to leave this extent in the cache.
2985 * The caller is expected to unpin it and allow it to be merged
2986 * with the others.
2987 */
2988 drop_args.path = path;
2989 drop_args.start = file_pos;
2990 drop_args.end = file_pos + num_bytes;
2991 drop_args.replace_extent = true;
2992 drop_args.extent_item_size = sizeof(*stack_fi);
2993 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2994 if (ret)
2995 goto out;
2996
2997 if (!drop_args.extent_inserted) {
2998 ins.objectid = btrfs_ino(inode);
2999 ins.type = BTRFS_EXTENT_DATA_KEY;
3000 ins.offset = file_pos;
3001
3002 ret = btrfs_insert_empty_item(trans, root, path, &ins,
3003 sizeof(*stack_fi));
3004 if (ret)
3005 goto out;
3006 }
3007 leaf = path->nodes[0];
3008 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3009 write_extent_buffer(leaf, stack_fi,
3010 btrfs_item_ptr_offset(leaf, path->slots[0]),
3011 sizeof(struct btrfs_file_extent_item));
3012
3013 btrfs_release_path(path);
3014
3015 /*
3016 * If we dropped an inline extent here, we know the range where it is
3017 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3018 * number of bytes only for that range containing the inline extent.
3019 * The remainder of the range will be processed when clearing the
3020 * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
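*
* (Editorial worked example, hypothetical sizes with a 4K sectorsize: if a
* 1K inline extent was dropped, bytes_found is 1K and inline_size below
* becomes 1K - round_down(1K, 4K) = 1K; the inode's bytes are then updated
* for that single sector and num_bytes is trimmed by one sectorsize so the
* sector is not counted twice later.)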
3021 */
3022 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3023 u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3024
3025 inline_size = drop_args.bytes_found - inline_size;
3026 btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3027 drop_args.bytes_found -= inline_size;
3028 num_bytes -= sectorsize;
3029 }
3030
3031 if (update_inode_bytes)
3032 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3033
3034 ins.objectid = disk_bytenr;
3035 ins.type = BTRFS_EXTENT_ITEM_KEY;
3036 ins.offset = disk_num_bytes;
3037
3038 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3039 if (ret)
3040 goto out;
3041
3042 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3043 file_pos - offset,
3044 qgroup_reserved, &ins);
3045 out:
3046 return ret;
3047 }
3048
3049 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3050 u64 start, u64 len)
3051 {
3052 struct btrfs_block_group *cache;
3053
3054 cache = btrfs_lookup_block_group(fs_info, start);
3055 ASSERT(cache);
3056
3057 spin_lock(&cache->lock);
3058 cache->delalloc_bytes -= len;
3059 spin_unlock(&cache->lock);
3060
3061 btrfs_put_block_group(cache);
3062 }
3063
3064 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3065 struct btrfs_ordered_extent *oe)
3066 {
3067 struct btrfs_file_extent_item stack_fi;
3068 bool update_inode_bytes;
3069 u64 num_bytes = oe->num_bytes;
3070 u64 ram_bytes = oe->ram_bytes;
3071
3072 memset(&stack_fi, 0, sizeof(stack_fi));
3073 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3074 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3075 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3076 oe->disk_num_bytes);
3077 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3078 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3079 num_bytes = oe->truncated_len;
3080 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3081 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3082 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3083 /* Encryption and other encoding are reserved and always 0 */
3084
3085 /*
3086 * For delalloc, when completing an ordered extent we update the inode's
3087 * bytes when clearing the range in the inode's io tree, so pass false
3088 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3089 * except if the ordered extent was truncated.
3090 */
3091 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3092 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3093 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3094
3095 return insert_reserved_file_extent(trans, oe->inode,
3096 oe->file_offset, &stack_fi,
3097 update_inode_bytes, oe->qgroup_rsv);
3098 }
3099
3100 /*
3101 * As ordered data IO finishes, this gets called so we can finish
3102 * an ordered extent if the range of bytes in the file it covers is
3103 * fully written.
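*
* (Editorial sketch of the happy path below: lock the extent range for COW
* writes, join a transaction, insert or update the file extent item, unpin
* the extent map, record the pending csums, then update disk_i_size and
* the inode item; failures abort the transaction and the error path
* returns the reserved extent and qgroup reservation.)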
3104 */ 3105 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3106 { 3107 struct btrfs_inode *inode = ordered_extent->inode; 3108 struct btrfs_root *root = inode->root; 3109 struct btrfs_fs_info *fs_info = root->fs_info; 3110 struct btrfs_trans_handle *trans = NULL; 3111 struct extent_io_tree *io_tree = &inode->io_tree; 3112 struct extent_state *cached_state = NULL; 3113 u64 start, end; 3114 int compress_type = 0; 3115 int ret = 0; 3116 u64 logical_len = ordered_extent->num_bytes; 3117 bool freespace_inode; 3118 bool truncated = false; 3119 bool clear_reserved_extent = true; 3120 unsigned int clear_bits = EXTENT_DEFRAG; 3121 3122 start = ordered_extent->file_offset; 3123 end = start + ordered_extent->num_bytes - 1; 3124 3125 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3126 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3127 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3128 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3129 clear_bits |= EXTENT_DELALLOC_NEW; 3130 3131 freespace_inode = btrfs_is_free_space_inode(inode); 3132 if (!freespace_inode) 3133 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3134 3135 if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) { 3136 ret = -EIO; 3137 goto out; 3138 } 3139 3140 ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3141 ordered_extent->disk_num_bytes); 3142 if (ret) 3143 goto out; 3144 3145 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3146 truncated = true; 3147 logical_len = ordered_extent->truncated_len; 3148 /* Truncated the entire extent, don't bother adding */ 3149 if (!logical_len) 3150 goto out; 3151 } 3152 3153 /* 3154 * If it's a COW write we need to lock the extent range as we will be 3155 * inserting/replacing file extent items and unpinning an extent map. 3156 * This must be taken before joining a transaction, as it's a higher 3157 * level lock (like the inode's VFS lock), otherwise we can run into an 3158 * ABBA deadlock with other tasks (transactions work like a lock, 3159 * depending on their current state). 
3160 */ 3161 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3162 clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED; 3163 btrfs_lock_extent_bits(io_tree, start, end, 3164 EXTENT_LOCKED | EXTENT_FINISHING_ORDERED, 3165 &cached_state); 3166 } 3167 3168 if (freespace_inode) 3169 trans = btrfs_join_transaction_spacecache(root); 3170 else 3171 trans = btrfs_join_transaction(root); 3172 if (IS_ERR(trans)) { 3173 ret = PTR_ERR(trans); 3174 trans = NULL; 3175 goto out; 3176 } 3177 3178 trans->block_rsv = &inode->block_rsv; 3179 3180 ret = btrfs_insert_raid_extent(trans, ordered_extent); 3181 if (unlikely(ret)) { 3182 btrfs_abort_transaction(trans, ret); 3183 goto out; 3184 } 3185 3186 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3187 /* Logic error */ 3188 ASSERT(list_empty(&ordered_extent->list)); 3189 if (unlikely(!list_empty(&ordered_extent->list))) { 3190 ret = -EINVAL; 3191 btrfs_abort_transaction(trans, ret); 3192 goto out; 3193 } 3194 3195 btrfs_inode_safe_disk_i_size_write(inode, 0); 3196 ret = btrfs_update_inode_fallback(trans, inode); 3197 if (unlikely(ret)) { 3198 /* -ENOMEM or corruption */ 3199 btrfs_abort_transaction(trans, ret); 3200 } 3201 goto out; 3202 } 3203 3204 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3205 compress_type = ordered_extent->compress_type; 3206 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3207 BUG_ON(compress_type); 3208 ret = btrfs_mark_extent_written(trans, inode, 3209 ordered_extent->file_offset, 3210 ordered_extent->file_offset + 3211 logical_len); 3212 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3213 ordered_extent->disk_num_bytes); 3214 } else { 3215 BUG_ON(root == fs_info->tree_root); 3216 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3217 if (!ret) { 3218 clear_reserved_extent = false; 3219 btrfs_release_delalloc_bytes(fs_info, 3220 ordered_extent->disk_bytenr, 3221 ordered_extent->disk_num_bytes); 3222 } 3223 } 3224 if (unlikely(ret < 0)) { 3225 btrfs_abort_transaction(trans, ret); 3226 goto out; 3227 } 3228 3229 ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset, 3230 ordered_extent->num_bytes, trans->transid); 3231 if (unlikely(ret < 0)) { 3232 btrfs_abort_transaction(trans, ret); 3233 goto out; 3234 } 3235 3236 ret = add_pending_csums(trans, &ordered_extent->list); 3237 if (unlikely(ret)) { 3238 btrfs_abort_transaction(trans, ret); 3239 goto out; 3240 } 3241 3242 /* 3243 * If this is a new delalloc range, clear its new delalloc flag to 3244 * update the inode's number of bytes. This needs to be done first 3245 * before updating the inode item. 
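*
* (Editor's note: clearing EXTENT_DELALLOC_NEW together with
* EXTENT_ADD_INODE_BYTES makes btrfs_clear_delalloc_extent() call
* inode_add_bytes() for the range, which the following
* btrfs_update_inode_fallback() then persists.)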
3246 */ 3247 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3248 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3249 btrfs_clear_extent_bit(&inode->io_tree, start, end, 3250 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3251 &cached_state); 3252 3253 btrfs_inode_safe_disk_i_size_write(inode, 0); 3254 ret = btrfs_update_inode_fallback(trans, inode); 3255 if (unlikely(ret)) { /* -ENOMEM or corruption */ 3256 btrfs_abort_transaction(trans, ret); 3257 goto out; 3258 } 3259 out: 3260 btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3261 &cached_state); 3262 3263 if (trans) 3264 btrfs_end_transaction(trans); 3265 3266 if (ret || truncated) { 3267 /* 3268 * If we failed to finish this ordered extent for any reason we 3269 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3270 * extent, and mark the inode with the error if it wasn't 3271 * already set. Any error during writeback would have already 3272 * set the mapping error, so we need to set it if we're the ones 3273 * marking this ordered extent as failed. 3274 */ 3275 if (ret) 3276 btrfs_mark_ordered_extent_error(ordered_extent); 3277 3278 /* 3279 * Drop extent maps for the part of the extent we didn't write. 3280 * 3281 * We have an exception here for the free_space_inode, this is 3282 * because when we do btrfs_get_extent() on the free space inode 3283 * we will search the commit root. If this is a new block group 3284 * we won't find anything, and we will trip over the assert in 3285 * writepage where we do ASSERT(em->block_start != 3286 * EXTENT_MAP_HOLE). 3287 * 3288 * Theoretically we could also skip this for any NOCOW extent as 3289 * we don't mess with the extent map tree in the NOCOW case, but 3290 * for now simply skip this if we are the free space inode. 3291 */ 3292 if (!btrfs_is_free_space_inode(inode)) { 3293 u64 unwritten_start = start; 3294 3295 if (truncated) 3296 unwritten_start += logical_len; 3297 3298 btrfs_drop_extent_map_range(inode, unwritten_start, 3299 end, false); 3300 } 3301 3302 /* 3303 * If the ordered extent had an IOERR or something else went 3304 * wrong we need to return the space for this ordered extent 3305 * back to the allocator. We only free the extent in the 3306 * truncated case if we didn't write out the extent at all. 3307 * 3308 * If we made it past insert_reserved_file_extent before we 3309 * errored out then we don't need to do this as the accounting 3310 * has already been done. 3311 */ 3312 if ((ret || !logical_len) && 3313 clear_reserved_extent && 3314 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3315 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3316 /* 3317 * Discard the range before returning it back to the 3318 * free space pool 3319 */ 3320 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3321 btrfs_discard_extent(fs_info, 3322 ordered_extent->disk_bytenr, 3323 ordered_extent->disk_num_bytes, 3324 NULL); 3325 btrfs_free_reserved_extent(fs_info, 3326 ordered_extent->disk_bytenr, 3327 ordered_extent->disk_num_bytes, true); 3328 /* 3329 * Actually free the qgroup rsv which was released when 3330 * the ordered extent was created. 3331 */ 3332 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root), 3333 ordered_extent->qgroup_rsv, 3334 BTRFS_QGROUP_RSV_DATA); 3335 } 3336 } 3337 3338 /* 3339 * This needs to be done to make sure anybody waiting knows we are done 3340 * updating everything for this ordered extent. 
3341 */
3342 btrfs_remove_ordered_extent(inode, ordered_extent);
3343
3344 /* once for us */
3345 btrfs_put_ordered_extent(ordered_extent);
3346 /* once for the tree */
3347 btrfs_put_ordered_extent(ordered_extent);
3348
3349 return ret;
3350 }
3351
3352 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3353 {
3354 if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
3355 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3356 list_empty(&ordered->bioc_list))
3357 btrfs_finish_ordered_zoned(ordered);
3358 return btrfs_finish_one_ordered(ordered);
3359 }
3360
3361 /*
3362 * Calculate the checksum of an fs block at physical memory address @paddr,
3363 * and save the result to @dest.
3364 *
3365 * The folio containing @paddr must be large enough to contain a full fs block.
3366 */
3367 void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
3368 const phys_addr_t paddr, u8 *dest)
3369 {
3370 struct folio *folio = page_folio(phys_to_page(paddr));
3371 const u32 blocksize = fs_info->sectorsize;
3372 const u32 step = min(blocksize, PAGE_SIZE);
3373 const u32 nr_steps = blocksize / step;
3374 phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
3375
3376 /* The full block must be inside the folio. */
3377 ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
3378
3379 for (int i = 0; i < nr_steps; i++) {
3380 u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
3381
3382 /*
3383 * For bs <= ps cases, we will only run the loop once, so the offset
3384 * inside the page is only added to paddrs[0].
3385 *
3386 * For bs > ps cases, the block must be page aligned, thus the offset
3387 * inside the page will always be 0.
3388 */
3389 paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
3390 }
3391 return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
3392 }
3393
3394 /*
3395 * Calculate the checksum of a fs block backed by multiple noncontiguous pages
3396 * at @paddrs[] and save the result to @dest.
3397 *
3398 * Each @paddrs[] entry must cover at least min(blocksize, PAGE_SIZE) bytes within its page.
3399 */
3400 void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
3401 const phys_addr_t paddrs[], u8 *dest)
3402 {
3403 const u32 blocksize = fs_info->sectorsize;
3404 const u32 step = min(blocksize, PAGE_SIZE);
3405 const u32 nr_steps = blocksize / step;
3406 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3407
3408 shash->tfm = fs_info->csum_shash;
3409 crypto_shash_init(shash);
3410 for (int i = 0; i < nr_steps; i++) {
3411 const phys_addr_t paddr = paddrs[i];
3412 void *kaddr;
3413
3414 ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
3415 kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
3416 crypto_shash_update(shash, kaddr, step);
3417 kunmap_local(kaddr);
3418 }
3419 crypto_shash_final(shash, dest);
3420 }
3421
3422 /*
3423 * Verify the checksum for a single sector without any extra action that depends
3424 * on the type of I/O.
3425 *
3426 * @paddr must point inside a folio large enough to contain a full fs block.
3427 */
3428 int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
3429 const u8 * const csum_expected)
3430 {
3431 btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
3432 if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3433 return -EIO;
3434 return 0;
3435 }
3436
3437 /*
3438 * Verify the checksum of a single data sector, which can be scattered across
3439 * different noncontiguous pages.
3437 /* 3438 * Verify the checksum of a single data sector, which can be scattered 3439 * across different noncontiguous pages. 3440 * 3441 * @bbio: btrfs_bio which contains the csum 3442 * @dev: device the sector is on 3443 * @bio_offset: offset to the beginning of the bio (in bytes) 3444 * @paddrs: physical addresses which back the fs block 3445 * 3446 * Check if the checksum on a data block is valid. When a checksum mismatch is 3447 * detected, report the error and fill the corrupted range with zero. 3448 * 3449 * Return %true if the sector is ok or had no checksum to start with, else %false. 3450 */ 3451 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3452 u32 bio_offset, const phys_addr_t paddrs[]) 3453 { 3454 struct btrfs_inode *inode = bbio->inode; 3455 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3456 const u32 blocksize = fs_info->sectorsize; 3457 const u32 step = min(blocksize, PAGE_SIZE); 3458 const u32 nr_steps = blocksize / step; 3459 u64 file_offset = bbio->file_offset + bio_offset; 3460 u64 end = file_offset + blocksize - 1; 3461 u8 *csum_expected; 3462 u8 csum[BTRFS_CSUM_SIZE]; 3463 3464 if (!bbio->csum) 3465 return true; 3466 3467 if (btrfs_is_data_reloc_root(inode->root) && 3468 btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3469 NULL)) { 3470 /* Skip the range without csum for data reloc inode */ 3471 btrfs_clear_extent_bit(&inode->io_tree, file_offset, end, 3472 EXTENT_NODATASUM, NULL); 3473 return true; 3474 } 3475 3476 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3477 fs_info->csum_size; 3478 btrfs_calculate_block_csum_pages(fs_info, paddrs, csum); 3479 if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0)) 3480 goto zeroit; 3481 return true; 3482 3483 zeroit: 3484 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3485 bbio->mirror_num); 3486 if (dev) 3487 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3488 for (int i = 0; i < nr_steps; i++) 3489 memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step); 3490 return false; 3491 } 3492 3493 /* 3494 * Perform a delayed iput on @inode. 3495 * 3496 * @inode: The inode we want to perform iput on 3497 * 3498 * This function uses the generic vfs_inode::i_count to decide what to do: if 3499 * the count is > 1 we just decrement it, while for the last iput we instead 3500 * link the inode into the delayed iput machinery. Delayed iputs are processed 3501 * at transaction commit time, at superblock commit and by the cleaner kthread. 3502 */ 3503 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3504 { 3505 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3506 unsigned long flags; 3507 3508 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3509 return; 3510 3511 WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state)); 3512 atomic_inc(&fs_info->nr_delayed_iputs); 3513 /* 3514 * We need to be irq safe here because we can be called from either an irq 3515 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3516 * context.
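 *
 * Because of that, this function must take the lock with
 * spin_lock_irqsave(), while the process context consumers below (e.g.
 * run_delayed_iput_locked()) can use the plain spin_lock_irq() variants.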
3517 */ 3518 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3519 ASSERT(list_empty(&inode->delayed_iput)); 3520 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3521 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3522 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3523 wake_up_process(fs_info->cleaner_kthread); 3524 } 3525 3526 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3527 struct btrfs_inode *inode) 3528 { 3529 list_del_init(&inode->delayed_iput); 3530 spin_unlock_irq(&fs_info->delayed_iput_lock); 3531 iput(&inode->vfs_inode); 3532 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3533 wake_up(&fs_info->delayed_iputs_wait); 3534 spin_lock_irq(&fs_info->delayed_iput_lock); 3535 } 3536 3537 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3538 struct btrfs_inode *inode) 3539 { 3540 if (!list_empty(&inode->delayed_iput)) { 3541 spin_lock_irq(&fs_info->delayed_iput_lock); 3542 if (!list_empty(&inode->delayed_iput)) 3543 run_delayed_iput_locked(fs_info, inode); 3544 spin_unlock_irq(&fs_info->delayed_iput_lock); 3545 } 3546 } 3547 3548 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3549 { 3550 /* 3551 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3552 * calls btrfs_add_delayed_iput() and that needs to lock 3553 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3554 * prevent a deadlock. 3555 */ 3556 spin_lock_irq(&fs_info->delayed_iput_lock); 3557 while (!list_empty(&fs_info->delayed_iputs)) { 3558 struct btrfs_inode *inode; 3559 3560 inode = list_first_entry(&fs_info->delayed_iputs, 3561 struct btrfs_inode, delayed_iput); 3562 run_delayed_iput_locked(fs_info, inode); 3563 if (need_resched()) { 3564 spin_unlock_irq(&fs_info->delayed_iput_lock); 3565 cond_resched(); 3566 spin_lock_irq(&fs_info->delayed_iput_lock); 3567 } 3568 } 3569 spin_unlock_irq(&fs_info->delayed_iput_lock); 3570 } 3571 3572 /* 3573 * Wait for all pending delayed iputs to be flushed. 3574 * 3575 * @fs_info: the filesystem 3576 * 3577 * This waits, in killable mode, until all currently pending delayed iputs 3578 * have finished running. If we are killed while waiting we return -EINTR. 3579 * This helps user operations like fallocate that would otherwise block 3580 * on pending iputs. 3581 * 3582 * Return -EINTR if we were killed, 0 once nothing is pending. 3583 */ 3584 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3585 { 3586 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3587 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3588 if (ret) 3589 return -EINTR; 3590 return 0; 3591 } 3592 3593 /* 3594 * This creates an orphan entry for the given inode in case something goes wrong 3595 * in the middle of an unlink. 3596 */ 3597 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3598 struct btrfs_inode *inode) 3599 { 3600 int ret; 3601 3602 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3603 if (unlikely(ret && ret != -EEXIST)) { 3604 btrfs_abort_transaction(trans, ret); 3605 return ret; 3606 } 3607 3608 return 0; 3609 } 3610 3611 /* 3612 * We have done the delete so we can go ahead and remove the orphan item for 3613 * this particular inode.
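 *
 * (Orphan items live in the inode's subvolume tree, keyed as
 * (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, inode number); this is
 * the key pattern btrfs_orphan_cleanup() below searches for.)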
3614 */ 3615 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3616 struct btrfs_inode *inode) 3617 { 3618 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3619 } 3620 3621 /* 3622 * This cleans up any orphans that may be left on the list from the last use 3623 * of this root. 3624 */ 3625 int btrfs_orphan_cleanup(struct btrfs_root *root) 3626 { 3627 struct btrfs_fs_info *fs_info = root->fs_info; 3628 BTRFS_PATH_AUTO_FREE(path); 3629 struct extent_buffer *leaf; 3630 struct btrfs_key key, found_key; 3631 struct btrfs_trans_handle *trans; 3632 u64 last_objectid = 0; 3633 int ret = 0, nr_unlink = 0; 3634 3635 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3636 return 0; 3637 3638 path = btrfs_alloc_path(); 3639 if (!path) { 3640 ret = -ENOMEM; 3641 goto out; 3642 } 3643 path->reada = READA_BACK; 3644 3645 key.objectid = BTRFS_ORPHAN_OBJECTID; 3646 key.type = BTRFS_ORPHAN_ITEM_KEY; 3647 key.offset = (u64)-1; 3648 3649 while (1) { 3650 struct btrfs_inode *inode; 3651 3652 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3653 if (ret < 0) 3654 goto out; 3655 3656 /* 3657 * If ret == 0 it means we found what we were searching for, which 3658 * is weird, but possible, so only screw with the path if we didn't 3659 * find the key, and see if we have stuff that matches. 3660 */ 3661 if (ret > 0) { 3662 ret = 0; 3663 if (path->slots[0] == 0) 3664 break; 3665 path->slots[0]--; 3666 } 3667 3668 /* pull out the item */ 3669 leaf = path->nodes[0]; 3670 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3671 3672 /* make sure the item matches what we want */ 3673 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3674 break; 3675 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3676 break; 3677 3678 /* release the path since we're done with it */ 3679 btrfs_release_path(path); 3680 3681 /* 3682 * This is where we basically do a btrfs_lookup(), without the 3683 * root crossing. We store the inode number in the 3684 * offset of the orphan item. 3685 */ 3686 3687 if (found_key.offset == last_objectid) { 3688 /* 3689 * We found the same inode as before. This means we were 3690 * not able to remove its items via eviction triggered 3691 * by an iput(). A transaction abort may have happened, 3692 * due to -ENOSPC for example, so try to grab the error 3693 * that led to a transaction abort, if any. 3694 */ 3695 btrfs_err(fs_info, 3696 "Error removing orphan entry, stopping orphan cleanup"); 3697 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3698 goto out; 3699 } 3700 3701 last_objectid = found_key.offset; 3702 3703 found_key.objectid = found_key.offset; 3704 found_key.type = BTRFS_INODE_ITEM_KEY; 3705 found_key.offset = 0; 3706 inode = btrfs_iget(last_objectid, root); 3707 if (IS_ERR(inode)) { 3708 ret = PTR_ERR(inode); 3709 inode = NULL; 3710 if (ret != -ENOENT) 3711 goto out; 3712 } 3713 3714 if (!inode && root == fs_info->tree_root) { 3715 struct btrfs_root *dead_root; 3716 int is_dead_root = 0; 3717 3718 /* 3719 * This is an orphan in the tree root. Currently these 3720 * could come from 2 sources: 3721 * a) a root (snapshot/subvolume) deletion in progress 3722 * b) a free space cache inode 3723 * We need to distinguish those two, as the orphan item 3724 * for a root must not get deleted before the deletion 3725 * of the snapshot/subvolume's tree completes. 3726 * 3727 * btrfs_find_orphan_roots() ran before us, which has 3728 * found all deleted roots and loaded them into 3729 * fs_info->fs_roots_radix.
So here we can find if an 3730 * orphan item corresponds to a deleted root by looking 3731 * up the root from that radix tree. 3732 */ 3733 3734 spin_lock(&fs_info->fs_roots_radix_lock); 3735 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3736 (unsigned long)found_key.objectid); 3737 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3738 is_dead_root = 1; 3739 spin_unlock(&fs_info->fs_roots_radix_lock); 3740 3741 if (is_dead_root) { 3742 /* prevent this orphan from being found again */ 3743 key.offset = found_key.objectid - 1; 3744 continue; 3745 } 3746 3747 } 3748 3749 /* 3750 * If we have an inode with links, there are a couple of 3751 * possibilities: 3752 * 3753 * 1. We were halfway through creating fsverity metadata for the 3754 * file. In that case, the orphan item represents incomplete 3755 * fsverity metadata which must be cleaned up with 3756 * btrfs_drop_verity_items and deleting the orphan item. 3757 * 3758 * 2. Old kernels (before v3.12) used to create an 3759 * orphan item for truncate indicating that there were possibly 3760 * extent items past i_size that needed to be deleted. In v3.12, 3761 * truncate was changed to update i_size in sync with the extent 3762 * items, but the (useless) orphan item was still created. Since 3763 * v4.18, we don't create the orphan item for truncate at all. 3764 * 3765 * So, this item could mean that we need to do a truncate, but 3766 * only if this filesystem was last used on a pre-v3.12 kernel 3767 * and was not cleanly unmounted. The odds of that are quite 3768 * slim, and it's a pain to do the truncate now, so just delete 3769 * the orphan item. 3770 * 3771 * It's also possible that this orphan item was supposed to be 3772 * deleted but wasn't. The inode number may have been reused, 3773 * but either way, we can delete the orphan item. 3774 */ 3775 if (!inode || inode->vfs_inode.i_nlink) { 3776 if (inode) { 3777 ret = btrfs_drop_verity_items(inode); 3778 iput(&inode->vfs_inode); 3779 inode = NULL; 3780 if (ret) 3781 goto out; 3782 } 3783 trans = btrfs_start_transaction(root, 1); 3784 if (IS_ERR(trans)) { 3785 ret = PTR_ERR(trans); 3786 goto out; 3787 } 3788 btrfs_debug(fs_info, "auto deleting %Lu", 3789 found_key.objectid); 3790 ret = btrfs_del_orphan_item(trans, root, 3791 found_key.objectid); 3792 btrfs_end_transaction(trans); 3793 if (ret) 3794 goto out; 3795 continue; 3796 } 3797 3798 nr_unlink++; 3799 3800 /* this will do delete_inode and everything for us */ 3801 iput(&inode->vfs_inode); 3802 } 3803 /* release the path since we're done with it */ 3804 btrfs_release_path(path); 3805 3806 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3807 trans = btrfs_join_transaction(root); 3808 if (!IS_ERR(trans)) 3809 btrfs_end_transaction(trans); 3810 } 3811 3812 if (nr_unlink) 3813 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3814 3815 out: 3816 if (ret) 3817 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3818 return ret; 3819 } 3820 3821 /* 3822 * Look ahead in the leaf for xattrs. If we don't find any then we know there 3823 * can't be any ACLs. 3824 * 3825 * @leaf: the extent buffer leaf to search 3826 * @slot: the slot the inode is in 3827 * @objectid: the objectid of the inode 3828 * 3829 * Return true if there is an xattr/ACL, false otherwise.
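 *
 * As a side effect *first_xattr_slot is set to the slot of the first xattr
 * item seen, or to the slot where the scan ended for the "assume ACLs"
 * cases; it is left at -1 only when the keys prove there are no xattrs at
 * all. The caller uses it to load the inode properties without repeating
 * the search.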
3830 */ 3831 static noinline bool acls_after_inode_item(struct extent_buffer *leaf, 3832 int slot, u64 objectid, 3833 int *first_xattr_slot) 3834 { 3835 u32 nritems = btrfs_header_nritems(leaf); 3836 struct btrfs_key found_key; 3837 static u64 xattr_access = 0; 3838 static u64 xattr_default = 0; 3839 int scanned = 0; 3840 3841 if (!xattr_access) { 3842 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3843 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3844 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3845 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3846 } 3847 3848 slot++; 3849 *first_xattr_slot = -1; 3850 while (slot < nritems) { 3851 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3852 3853 /* We found a different objectid, there must be no ACLs. */ 3854 if (found_key.objectid != objectid) 3855 return false; 3856 3857 /* We found an xattr, assume we've got an ACL. */ 3858 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3859 if (*first_xattr_slot == -1) 3860 *first_xattr_slot = slot; 3861 if (found_key.offset == xattr_access || 3862 found_key.offset == xattr_default) 3863 return true; 3864 } 3865 3866 /* 3867 * We found a key greater than an xattr key, there can't be any 3868 * ACLs later on. 3869 */ 3870 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3871 return false; 3872 3873 slot++; 3874 scanned++; 3875 3876 /* 3877 * The item order goes like: 3878 * - inode 3879 * - inode backrefs 3880 * - xattrs 3881 * - extents, 3882 * 3883 * so if there are lots of hard links to an inode there can be 3884 * a lot of backrefs. Don't waste time searching too hard, 3885 * this is just an optimization. 3886 */ 3887 if (scanned >= 8) 3888 break; 3889 } 3890 /* 3891 * We hit the end of the leaf before we found an xattr or something 3892 * larger than an xattr. We have to assume the inode has ACLs. 3893 */ 3894 if (*first_xattr_slot == -1) 3895 *first_xattr_slot = slot; 3896 return true; 3897 } 3898 3899 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode) 3900 { 3901 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3902 3903 if (WARN_ON_ONCE(inode->file_extent_tree)) 3904 return 0; 3905 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 3906 return 0; 3907 if (!S_ISREG(inode->vfs_inode.i_mode)) 3908 return 0; 3909 if (btrfs_is_free_space_inode(inode)) 3910 return 0; 3911 3912 inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL); 3913 if (!inode->file_extent_tree) 3914 return -ENOMEM; 3915 3916 btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree, 3917 IO_TREE_INODE_FILE_EXTENT); 3918 /* Lockdep class is set only for the file extent tree. 
*/ 3919 lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class); 3920 3921 return 0; 3922 } 3923 3924 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc) 3925 { 3926 struct btrfs_root *root = inode->root; 3927 struct btrfs_inode *existing; 3928 const u64 ino = btrfs_ino(inode); 3929 int ret; 3930 3931 if (inode_unhashed(&inode->vfs_inode)) 3932 return 0; 3933 3934 if (prealloc) { 3935 ret = xa_reserve(&root->inodes, ino, GFP_NOFS); 3936 if (ret) 3937 return ret; 3938 } 3939 3940 existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC); 3941 3942 if (xa_is_err(existing)) { 3943 ret = xa_err(existing); 3944 ASSERT(ret != -EINVAL); 3945 ASSERT(ret != -ENOMEM); 3946 return ret; 3947 } else if (existing) { 3948 WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING))); 3949 } 3950 3951 return 0; 3952 } 3953 3954 /* 3955 * Read a locked inode from the btree into the in-memory inode and add it to 3956 * its root list/tree. 3957 * 3958 * On failure clean up the inode. 3959 */ 3960 static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path) 3961 { 3962 struct btrfs_root *root = inode->root; 3963 struct btrfs_fs_info *fs_info = root->fs_info; 3964 struct extent_buffer *leaf; 3965 struct btrfs_inode_item *inode_item; 3966 struct inode *vfs_inode = &inode->vfs_inode; 3967 struct btrfs_key location; 3968 unsigned long ptr; 3969 int maybe_acls; 3970 u32 rdev; 3971 int ret; 3972 bool filled = false; 3973 int first_xattr_slot; 3974 3975 ret = btrfs_fill_inode(inode, &rdev); 3976 if (!ret) 3977 filled = true; 3978 3979 ASSERT(path); 3980 3981 btrfs_get_inode_key(inode, &location); 3982 3983 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3984 if (ret) { 3985 /* 3986 * ret > 0 can come from btrfs_search_slot() called by 3987 * btrfs_lookup_inode(); it means the inode was not found.
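 *
 * We convert that to -ENOENT below; callers such as the orphan
 * cleanup code rely on getting -ENOENT for a missing inode.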
3988 */ 3989 if (ret > 0) 3990 ret = -ENOENT; 3991 goto out; 3992 } 3993 3994 leaf = path->nodes[0]; 3995 3996 if (filled) 3997 goto cache_index; 3998 3999 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4000 struct btrfs_inode_item); 4001 vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item); 4002 set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item)); 4003 i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item)); 4004 i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item)); 4005 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 4006 4007 inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime), 4008 btrfs_timespec_nsec(leaf, &inode_item->atime)); 4009 4010 inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime), 4011 btrfs_timespec_nsec(leaf, &inode_item->mtime)); 4012 4013 inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime), 4014 btrfs_timespec_nsec(leaf, &inode_item->ctime)); 4015 4016 inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); 4017 inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime); 4018 4019 inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item)); 4020 inode->generation = btrfs_inode_generation(leaf, inode_item); 4021 inode->last_trans = btrfs_inode_transid(leaf, inode_item); 4022 4023 inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item)); 4024 vfs_inode->i_generation = inode->generation; 4025 vfs_inode->i_rdev = 0; 4026 rdev = btrfs_inode_rdev(leaf, inode_item); 4027 4028 if (S_ISDIR(vfs_inode->i_mode)) 4029 inode->index_cnt = (u64)-1; 4030 4031 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 4032 &inode->flags, &inode->ro_flags); 4033 btrfs_update_inode_mapping_flags(inode); 4034 btrfs_set_inode_mapping_order(inode); 4035 4036 cache_index: 4037 ret = btrfs_init_file_extent_tree(inode); 4038 if (ret) 4039 goto out; 4040 btrfs_inode_set_file_extent_range(inode, 0, 4041 round_up(i_size_read(vfs_inode), fs_info->sectorsize)); 4042 /* 4043 * If we were modified in the current generation and evicted from memory 4044 * and then re-read we need to do a full sync since we don't have any 4045 * idea about which extents were modified before we were evicted from 4046 * cache. 4047 * 4048 * This is required for both inode re-read from disk and delayed inode 4049 * in the delayed_nodes xarray. 4050 */ 4051 if (inode->last_trans == btrfs_get_fs_generation(fs_info)) 4052 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); 4053 4054 /* 4055 * We don't persist the id of the transaction where an unlink operation 4056 * against the inode was last made. So here we assume the inode might 4057 * have been evicted, and therefore the exact value of last_unlink_trans 4058 * was lost, and set it to last_trans to avoid metadata inconsistencies 4059 * between the inode and its parent if the inode is fsync'ed and the log 4060 * replayed. For example, in the scenario: 4061 * 4062 * touch mydir/foo 4063 * ln mydir/foo mydir/bar 4064 * sync 4065 * unlink mydir/bar 4066 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 4067 * xfs_io -c fsync mydir/foo 4068 * <power failure> 4069 * mount fs, triggers fsync log replay 4070 * 4071 * We must make sure that when we fsync our inode foo we also log its 4072 * parent inode, otherwise after log replay the parent still has the 4073 * dentry with the "bar" name but our inode foo has a link count of 1 4074 * and doesn't have an inode ref with the name "bar" anymore.
4075 * 4076 * Setting last_unlink_trans to last_trans is a pessimistic approach, 4077 * but it guarantees correctness at the expense of occasional full 4078 * transaction commits on fsync if our inode is a directory, or if our 4079 * inode is not a directory, logging its parent unnecessarily. 4080 */ 4081 inode->last_unlink_trans = inode->last_trans; 4082 4083 /* 4084 * Same logic as for last_unlink_trans. We don't persist the generation 4085 * of the last transaction where this inode was used for a reflink 4086 * operation, so after eviction and reloading the inode we must be 4087 * pessimistic and assume the last transaction that modified the inode. 4088 */ 4089 inode->last_reflink_trans = inode->last_trans; 4090 4091 path->slots[0]++; 4092 if (vfs_inode->i_nlink != 1 || 4093 path->slots[0] >= btrfs_header_nritems(leaf)) 4094 goto cache_acl; 4095 4096 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 4097 if (location.objectid != btrfs_ino(inode)) 4098 goto cache_acl; 4099 4100 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4101 if (location.type == BTRFS_INODE_REF_KEY) { 4102 struct btrfs_inode_ref *ref; 4103 4104 ref = (struct btrfs_inode_ref *)ptr; 4105 inode->dir_index = btrfs_inode_ref_index(leaf, ref); 4106 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 4107 struct btrfs_inode_extref *extref; 4108 4109 extref = (struct btrfs_inode_extref *)ptr; 4110 inode->dir_index = btrfs_inode_extref_index(leaf, extref); 4111 } 4112 cache_acl: 4113 /* 4114 * try to precache a NULL acl entry for files that don't have 4115 * any xattrs or acls 4116 */ 4117 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 4118 btrfs_ino(inode), &first_xattr_slot); 4119 if (first_xattr_slot != -1) { 4120 path->slots[0] = first_xattr_slot; 4121 ret = btrfs_load_inode_props(inode, path); 4122 if (ret) 4123 btrfs_err(fs_info, 4124 "error loading props for ino %llu (root %llu): %d", 4125 btrfs_ino(inode), btrfs_root_id(root), ret); 4126 } 4127 4128 if (!maybe_acls) 4129 cache_no_acl(vfs_inode); 4130 4131 switch (vfs_inode->i_mode & S_IFMT) { 4132 case S_IFREG: 4133 vfs_inode->i_mapping->a_ops = &btrfs_aops; 4134 vfs_inode->i_fop = &btrfs_file_operations; 4135 vfs_inode->i_op = &btrfs_file_inode_operations; 4136 break; 4137 case S_IFDIR: 4138 vfs_inode->i_fop = &btrfs_dir_file_operations; 4139 vfs_inode->i_op = &btrfs_dir_inode_operations; 4140 break; 4141 case S_IFLNK: 4142 vfs_inode->i_op = &btrfs_symlink_inode_operations; 4143 inode_nohighmem(vfs_inode); 4144 vfs_inode->i_mapping->a_ops = &btrfs_aops; 4145 break; 4146 default: 4147 vfs_inode->i_op = &btrfs_special_inode_operations; 4148 init_special_inode(vfs_inode, vfs_inode->i_mode, rdev); 4149 break; 4150 } 4151 4152 btrfs_sync_inode_flags_to_i_flags(inode); 4153 4154 ret = btrfs_add_inode_to_root(inode, true); 4155 if (ret) 4156 goto out; 4157 4158 return 0; 4159 out: 4160 iget_failed(vfs_inode); 4161 return ret; 4162 } 4163 4164 /* 4165 * given a leaf and an inode, copy the inode fields into the leaf 4166 */ 4167 static void fill_inode_item(struct btrfs_trans_handle *trans, 4168 struct extent_buffer *leaf, 4169 struct btrfs_inode_item *item, 4170 struct inode *inode) 4171 { 4172 u64 flags; 4173 4174 btrfs_set_inode_uid(leaf, item, i_uid_read(inode)); 4175 btrfs_set_inode_gid(leaf, item, i_gid_read(inode)); 4176 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 4177 btrfs_set_inode_mode(leaf, item, inode->i_mode); 4178 btrfs_set_inode_nlink(leaf, item, inode->i_nlink); 4179 4180 btrfs_set_timespec_sec(leaf, &item->atime, 
inode_get_atime_sec(inode)); 4181 btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode)); 4182 4183 btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode)); 4184 btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode)); 4185 4186 btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode)); 4187 btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode)); 4188 4189 btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec); 4190 btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec); 4191 4192 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); 4193 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); 4194 btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode)); 4195 btrfs_set_inode_transid(leaf, item, trans->transid); 4196 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 4197 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4198 BTRFS_I(inode)->ro_flags); 4199 btrfs_set_inode_flags(leaf, item, flags); 4200 btrfs_set_inode_block_group(leaf, item, 0); 4201 } 4202 4203 /* 4204 * copy everything in the in-memory inode into the btree. 4205 */ 4206 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4207 struct btrfs_inode *inode) 4208 { 4209 struct btrfs_inode_item *inode_item; 4210 BTRFS_PATH_AUTO_FREE(path); 4211 struct extent_buffer *leaf; 4212 struct btrfs_key key; 4213 int ret; 4214 4215 path = btrfs_alloc_path(); 4216 if (!path) 4217 return -ENOMEM; 4218 4219 btrfs_get_inode_key(inode, &key); 4220 ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1); 4221 if (ret) { 4222 if (ret > 0) 4223 ret = -ENOENT; 4224 return ret; 4225 } 4226 4227 leaf = path->nodes[0]; 4228 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4229 struct btrfs_inode_item); 4230 4231 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4232 btrfs_set_inode_last_trans(trans, inode); 4233 return 0; 4234 } 4235 4236 /* 4237 * copy everything in the in-memory inode into the btree. 4238 */ 4239 int btrfs_update_inode(struct btrfs_trans_handle *trans, 4240 struct btrfs_inode *inode) 4241 { 4242 struct btrfs_root *root = inode->root; 4243 struct btrfs_fs_info *fs_info = root->fs_info; 4244 int ret; 4245 4246 /* 4247 * If the inode is a free space inode, we can deadlock during commit 4248 * if we put it into the delayed code. 
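 * (Roughly: delayed inode items are run at transaction commit time,
 * which is also when the free space inode itself is written out.)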
4249 * 4250 * The data relocation inode should also be directly updated 4251 * without delay. 4252 */ 4253 if (!btrfs_is_free_space_inode(inode) 4254 && !btrfs_is_data_reloc_root(root) 4255 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4256 btrfs_update_root_times(trans, root); 4257 4258 ret = btrfs_delayed_update_inode(trans, inode); 4259 if (!ret) 4260 btrfs_set_inode_last_trans(trans, inode); 4261 return ret; 4262 } 4263 4264 return btrfs_update_inode_item(trans, inode); 4265 } 4266 4267 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4268 struct btrfs_inode *inode) 4269 { 4270 int ret; 4271 4272 ret = btrfs_update_inode(trans, inode); 4273 if (ret == -ENOSPC) 4274 return btrfs_update_inode_item(trans, inode); 4275 return ret; 4276 } 4277 4278 static void update_time_after_link_or_unlink(struct btrfs_inode *dir) 4279 { 4280 struct timespec64 now; 4281 4282 /* 4283 * If we are replaying a log tree, we do not want to update the mtime 4284 * and ctime of the parent directory with the current time, since the 4285 * log replay procedure is responsible for setting them to their correct 4286 * values (the ones it had when the fsync was done). 4287 */ 4288 if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags)) 4289 return; 4290 4291 now = inode_set_ctime_current(&dir->vfs_inode); 4292 inode_set_mtime_to_ts(&dir->vfs_inode, now); 4293 } 4294 4295 /* 4296 * Unlink helper that gets used here in inode.c and in the tree logging 4297 * recovery code. It removes a link in a directory with a given name, and 4298 * also drops the back refs from the inode to the directory. 4299 */ 4300 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4301 struct btrfs_inode *dir, 4302 struct btrfs_inode *inode, 4303 const struct fscrypt_str *name, 4304 struct btrfs_rename_ctx *rename_ctx) 4305 { 4306 struct btrfs_root *root = dir->root; 4307 struct btrfs_fs_info *fs_info = root->fs_info; 4308 struct btrfs_path *path; 4309 int ret = 0; 4310 struct btrfs_dir_item *di; 4311 u64 index; 4312 u64 ino = btrfs_ino(inode); 4313 u64 dir_ino = btrfs_ino(dir); 4314 4315 path = btrfs_alloc_path(); 4316 if (!path) 4317 return -ENOMEM; 4318 4319 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4320 if (IS_ERR_OR_NULL(di)) { 4321 btrfs_free_path(path); 4322 return di ? PTR_ERR(di) : -ENOENT; 4323 } 4324 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4325 /* 4326 * Down the call chains below we'll also need to allocate a path, so no 4327 * need to hold on to this one for longer than necessary. 4328 */ 4329 btrfs_free_path(path); 4330 if (ret) 4331 return ret; 4332 4333 /* 4334 * If we don't have the dir index cached, we have to get it by looking up 4335 * the inode ref; and since we then have the inode ref at hand anyway, we 4336 * remove it directly instead of doing a delayed deletion. 4337 * 4338 * But if we do have the dir index cached, there is no need to search for 4339 * the inode ref. Since the inode ref is close to the inode item, it is 4340 * better to delete it later, when 4341 * we update the inode item.
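 *
 * Either way @index ends up holding the dir index number, which we need
 * below to delete the matching delayed dir index item.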
4342 */ 4343 if (inode->dir_index) { 4344 ret = btrfs_delayed_delete_inode_ref(inode); 4345 if (!ret) { 4346 index = inode->dir_index; 4347 goto skip_backref; 4348 } 4349 } 4350 4351 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4352 if (unlikely(ret)) { 4353 btrfs_crit(fs_info, 4354 "failed to delete reference to %.*s, root %llu inode %llu parent %llu", 4355 name->len, name->name, btrfs_root_id(root), ino, dir_ino); 4356 btrfs_abort_transaction(trans, ret); 4357 return ret; 4358 } 4359 skip_backref: 4360 if (rename_ctx) 4361 rename_ctx->index = index; 4362 4363 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4364 if (unlikely(ret)) { 4365 btrfs_abort_transaction(trans, ret); 4366 return ret; 4367 } 4368 4369 /* 4370 * If we are in a rename context, we don't need to update anything in the 4371 * log. That will be done later during the rename by btrfs_log_new_name(). 4372 * Besides that, doing it here would only cause extra unnecessary btree 4373 * operations on the log tree, increasing latency for applications. 4374 */ 4375 if (!rename_ctx) { 4376 btrfs_del_inode_ref_in_log(trans, name, inode, dir); 4377 btrfs_del_dir_entries_in_log(trans, name, dir, index); 4378 } 4379 4380 /* 4381 * If we have a pending delayed iput we could end up with the final iput 4382 * being run in btrfs-cleaner context. If we have enough of these built 4383 * up we can end up burning a lot of time in btrfs-cleaner without any 4384 * way to throttle the unlinks. Since we're currently holding a ref on 4385 * the inode we can run the delayed iput here without any issues as the 4386 * final iput won't be done until after we drop the ref we're currently 4387 * holding. 4388 */ 4389 btrfs_run_delayed_iput(fs_info, inode); 4390 4391 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4392 inode_inc_iversion(&inode->vfs_inode); 4393 inode_set_ctime_current(&inode->vfs_inode); 4394 inode_inc_iversion(&dir->vfs_inode); 4395 update_time_after_link_or_unlink(dir); 4396 4397 return btrfs_update_inode(trans, dir); 4398 } 4399 4400 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4401 struct btrfs_inode *dir, struct btrfs_inode *inode, 4402 const struct fscrypt_str *name) 4403 { 4404 int ret; 4405 4406 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4407 if (!ret) { 4408 drop_nlink(&inode->vfs_inode); 4409 ret = btrfs_update_inode(trans, inode); 4410 } 4411 return ret; 4412 } 4413 4414 /* 4415 * Helper to start a transaction for unlink and rmdir. 4416 * 4417 * unlink and rmdir are special in btrfs: they do not always free space, so 4418 * if we cannot make our reservations the normal way, try and see if there is 4419 * enough slack room in the global reserve to migrate; otherwise we cannot 4420 * allow the unlink to occur.
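 *
 * The reservation requested below is BTRFS_UNLINK_METADATA_UNITS, i.e.
 * just enough metadata units to cover the unlink itself.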
4421 */ 4422 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4423 { 4424 struct btrfs_root *root = dir->root; 4425 4426 return btrfs_start_transaction_fallback_global_rsv(root, 4427 BTRFS_UNLINK_METADATA_UNITS); 4428 } 4429 4430 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4431 { 4432 struct btrfs_trans_handle *trans; 4433 struct inode *inode = d_inode(dentry); 4434 int ret; 4435 struct fscrypt_name fname; 4436 4437 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4438 if (ret) 4439 return ret; 4440 4441 /* This needs to handle no-key deletions later on */ 4442 4443 trans = __unlink_start_trans(BTRFS_I(dir)); 4444 if (IS_ERR(trans)) { 4445 ret = PTR_ERR(trans); 4446 goto fscrypt_free; 4447 } 4448 4449 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4450 false); 4451 4452 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4453 &fname.disk_name); 4454 if (ret) 4455 goto end_trans; 4456 4457 if (inode->i_nlink == 0) { 4458 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4459 if (ret) 4460 goto end_trans; 4461 } 4462 4463 end_trans: 4464 btrfs_end_transaction(trans); 4465 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4466 fscrypt_free: 4467 fscrypt_free_filename(&fname); 4468 return ret; 4469 } 4470 4471 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4472 struct btrfs_inode *dir, struct dentry *dentry) 4473 { 4474 struct btrfs_root *root = dir->root; 4475 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4476 BTRFS_PATH_AUTO_FREE(path); 4477 struct extent_buffer *leaf; 4478 struct btrfs_dir_item *di; 4479 struct btrfs_key key; 4480 u64 index; 4481 int ret; 4482 u64 objectid; 4483 u64 dir_ino = btrfs_ino(dir); 4484 struct fscrypt_name fname; 4485 4486 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4487 if (ret) 4488 return ret; 4489 4490 /* This needs to handle no-key deletions later on */ 4491 4492 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4493 objectid = btrfs_root_id(inode->root); 4494 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4495 objectid = inode->ref_root_id; 4496 } else { 4497 WARN_ON(1); 4498 fscrypt_free_filename(&fname); 4499 return -EINVAL; 4500 } 4501 4502 path = btrfs_alloc_path(); 4503 if (!path) { 4504 ret = -ENOMEM; 4505 goto out; 4506 } 4507 4508 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4509 &fname.disk_name, -1); 4510 if (IS_ERR_OR_NULL(di)) { 4511 ret = di ? PTR_ERR(di) : -ENOENT; 4512 goto out; 4513 } 4514 4515 leaf = path->nodes[0]; 4516 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4517 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4518 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4519 if (unlikely(ret)) { 4520 btrfs_abort_transaction(trans, ret); 4521 goto out; 4522 } 4523 btrfs_release_path(path); 4524 4525 /* 4526 * This is a placeholder inode for a subvolume we didn't have a 4527 * reference to at the time of the snapshot creation. In the meantime 4528 * we could have renamed the real subvol link into our snapshot, so 4529 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4530 * Instead simply lookup the dir_index_item for this entry so we can 4531 * remove it. Otherwise we know we have a ref to the root and we can 4532 * call btrfs_del_root_ref, and it _shouldn't_ fail. 
4533 */ 4534 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4535 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4536 if (IS_ERR(di)) { 4537 ret = PTR_ERR(di); 4538 btrfs_abort_transaction(trans, ret); 4539 goto out; 4540 } 4541 4542 leaf = path->nodes[0]; 4543 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4544 index = key.offset; 4545 btrfs_release_path(path); 4546 } else { 4547 ret = btrfs_del_root_ref(trans, objectid, 4548 btrfs_root_id(root), dir_ino, 4549 &index, &fname.disk_name); 4550 if (unlikely(ret)) { 4551 btrfs_abort_transaction(trans, ret); 4552 goto out; 4553 } 4554 } 4555 4556 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4557 if (unlikely(ret)) { 4558 btrfs_abort_transaction(trans, ret); 4559 goto out; 4560 } 4561 4562 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4563 inode_inc_iversion(&dir->vfs_inode); 4564 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4565 ret = btrfs_update_inode_fallback(trans, dir); 4566 if (ret) 4567 btrfs_abort_transaction(trans, ret); 4568 out: 4569 fscrypt_free_filename(&fname); 4570 return ret; 4571 } 4572 4573 /* 4574 * Helper to check if the subvolume references other subvolumes or if it's 4575 * the default subvolume. 4576 */ 4577 static noinline int may_destroy_subvol(struct btrfs_root *root) 4578 { 4579 struct btrfs_fs_info *fs_info = root->fs_info; 4580 BTRFS_PATH_AUTO_FREE(path); 4581 struct btrfs_dir_item *di; 4582 struct btrfs_key key; 4583 struct fscrypt_str name = FSTR_INIT("default", 7); 4584 u64 dir_id; 4585 int ret; 4586 4587 path = btrfs_alloc_path(); 4588 if (!path) 4589 return -ENOMEM; 4590 4591 /* Make sure this root isn't set as the default subvol */ 4592 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4593 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4594 dir_id, &name, 0); 4595 if (di && !IS_ERR(di)) { 4596 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4597 if (key.objectid == btrfs_root_id(root)) { 4598 ret = -EPERM; 4599 btrfs_err(fs_info, 4600 "deleting default subvolume %llu is not allowed", 4601 key.objectid); 4602 return ret; 4603 } 4604 btrfs_release_path(path); 4605 } 4606 4607 key.objectid = btrfs_root_id(root); 4608 key.type = BTRFS_ROOT_REF_KEY; 4609 key.offset = (u64)-1; 4610 4611 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4612 if (ret < 0) 4613 return ret; 4614 if (unlikely(ret == 0)) { 4615 /* 4616 * A key with offset -1 was found: there would have to exist a root 4617 * with such an id, but that is out of the valid range.
4618 */ 4619 return -EUCLEAN; 4620 } 4621 4622 ret = 0; 4623 if (path->slots[0] > 0) { 4624 path->slots[0]--; 4625 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4626 if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY) 4627 ret = -ENOTEMPTY; 4628 } 4629 4630 return ret; 4631 } 4632 4633 /* Delete all dentries for inodes belonging to the root */ 4634 static void btrfs_prune_dentries(struct btrfs_root *root) 4635 { 4636 struct btrfs_fs_info *fs_info = root->fs_info; 4637 struct btrfs_inode *inode; 4638 u64 min_ino = 0; 4639 4640 if (!BTRFS_FS_ERROR(fs_info)) 4641 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4642 4643 inode = btrfs_find_first_inode(root, min_ino); 4644 while (inode) { 4645 if (icount_read(&inode->vfs_inode) > 1) 4646 d_prune_aliases(&inode->vfs_inode); 4647 4648 min_ino = btrfs_ino(inode) + 1; 4649 /* 4650 * btrfs_drop_inode() will have it removed from the inode 4651 * cache when its usage count hits zero. 4652 */ 4653 iput(&inode->vfs_inode); 4654 cond_resched(); 4655 inode = btrfs_find_first_inode(root, min_ino); 4656 } 4657 } 4658 4659 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4660 { 4661 struct btrfs_root *root = dir->root; 4662 struct btrfs_fs_info *fs_info = root->fs_info; 4663 struct inode *inode = d_inode(dentry); 4664 struct btrfs_root *dest = BTRFS_I(inode)->root; 4665 struct btrfs_trans_handle *trans; 4666 struct btrfs_block_rsv block_rsv; 4667 u64 root_flags; 4668 u64 qgroup_reserved = 0; 4669 int ret; 4670 4671 down_write(&fs_info->subvol_sem); 4672 4673 /* 4674 * Don't allow deleting a subvolume with a send in progress. This is 4675 * inside the inode lock so the error handling that has to drop the bit 4676 * again is not run concurrently. 4677 */ 4678 spin_lock(&dest->root_item_lock); 4679 if (dest->send_in_progress) { 4680 spin_unlock(&dest->root_item_lock); 4681 btrfs_warn(fs_info, 4682 "attempt to delete subvolume %llu during send", 4683 btrfs_root_id(dest)); 4684 ret = -EPERM; 4685 goto out_up_write; 4686 } 4687 if (atomic_read(&dest->nr_swapfiles)) { 4688 spin_unlock(&dest->root_item_lock); 4689 btrfs_warn(fs_info, 4690 "attempt to delete subvolume %llu with active swapfile", 4691 btrfs_root_id(dest)); 4692 ret = -EPERM; 4693 goto out_up_write; 4694 } 4695 root_flags = btrfs_root_flags(&dest->root_item); 4696 btrfs_set_root_flags(&dest->root_item, 4697 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4698 spin_unlock(&dest->root_item_lock); 4699 4700 ret = may_destroy_subvol(dest); 4701 if (ret) 4702 goto out_undead; 4703 4704 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4705 /* 4706 * One for dir inode, 4707 * two for dir entries, 4708 * two for root ref/backref.
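 *
 * That adds up to the 5 metadata units reserved below.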
4709 */ 4710 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4711 if (ret) 4712 goto out_undead; 4713 qgroup_reserved = block_rsv.qgroup_rsv_reserved; 4714 4715 trans = btrfs_start_transaction(root, 0); 4716 if (IS_ERR(trans)) { 4717 ret = PTR_ERR(trans); 4718 goto out_release; 4719 } 4720 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); 4721 qgroup_reserved = 0; 4722 trans->block_rsv = &block_rsv; 4723 trans->bytes_reserved = block_rsv.size; 4724 4725 btrfs_record_snapshot_destroy(trans, dir); 4726 4727 ret = btrfs_unlink_subvol(trans, dir, dentry); 4728 if (unlikely(ret)) { 4729 btrfs_abort_transaction(trans, ret); 4730 goto out_end_trans; 4731 } 4732 4733 ret = btrfs_record_root_in_trans(trans, dest); 4734 if (unlikely(ret)) { 4735 btrfs_abort_transaction(trans, ret); 4736 goto out_end_trans; 4737 } 4738 4739 memset(&dest->root_item.drop_progress, 0, 4740 sizeof(dest->root_item.drop_progress)); 4741 btrfs_set_root_drop_level(&dest->root_item, 0); 4742 btrfs_set_root_refs(&dest->root_item, 0); 4743 4744 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4745 ret = btrfs_insert_orphan_item(trans, 4746 fs_info->tree_root, 4747 btrfs_root_id(dest)); 4748 if (unlikely(ret)) { 4749 btrfs_abort_transaction(trans, ret); 4750 goto out_end_trans; 4751 } 4752 } 4753 4754 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4755 BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest)); 4756 if (unlikely(ret && ret != -ENOENT)) { 4757 btrfs_abort_transaction(trans, ret); 4758 goto out_end_trans; 4759 } 4760 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4761 ret = btrfs_uuid_tree_remove(trans, 4762 dest->root_item.received_uuid, 4763 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4764 btrfs_root_id(dest)); 4765 if (unlikely(ret && ret != -ENOENT)) { 4766 btrfs_abort_transaction(trans, ret); 4767 goto out_end_trans; 4768 } 4769 } 4770 4771 free_anon_bdev(dest->anon_dev); 4772 dest->anon_dev = 0; 4773 out_end_trans: 4774 trans->block_rsv = NULL; 4775 trans->bytes_reserved = 0; 4776 ret = btrfs_end_transaction(trans); 4777 inode->i_flags |= S_DEAD; 4778 out_release: 4779 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); 4780 if (qgroup_reserved) 4781 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); 4782 out_undead: 4783 if (ret) { 4784 spin_lock(&dest->root_item_lock); 4785 root_flags = btrfs_root_flags(&dest->root_item); 4786 btrfs_set_root_flags(&dest->root_item, 4787 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4788 spin_unlock(&dest->root_item_lock); 4789 } 4790 out_up_write: 4791 up_write(&fs_info->subvol_sem); 4792 if (!ret) { 4793 d_invalidate(dentry); 4794 btrfs_prune_dentries(dest); 4795 ASSERT(dest->send_in_progress == 0); 4796 } 4797 4798 return ret; 4799 } 4800 4801 static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry) 4802 { 4803 struct btrfs_inode *dir = BTRFS_I(vfs_dir); 4804 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4805 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4806 int ret = 0; 4807 struct btrfs_trans_handle *trans; 4808 struct fscrypt_name fname; 4809 4810 if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE) 4811 return -ENOTEMPTY; 4812 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4813 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4814 btrfs_err(fs_info, 4815 "extent tree v2 doesn't support snapshot deletion yet"); 4816 return -EOPNOTSUPP; 4817 } 4818 return btrfs_delete_subvolume(dir, dentry); 4819 } 4820 4821 ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, 
&fname); 4822 if (ret) 4823 return ret; 4824 4825 /* This needs to handle no-key deletions later on */ 4826 4827 trans = __unlink_start_trans(dir); 4828 if (IS_ERR(trans)) { 4829 ret = PTR_ERR(trans); 4830 goto out_notrans; 4831 } 4832 4833 /* 4834 * Propagate the last_unlink_trans value of the deleted dir to its 4835 * parent directory. This is to prevent an unrecoverable log tree in the 4836 * case we do something like this: 4837 * 1) create dir foo 4838 * 2) create snapshot under dir foo 4839 * 3) delete the snapshot 4840 * 4) rmdir foo 4841 * 5) mkdir foo 4842 * 6) fsync foo or some file inside foo 4843 * 4844 * This is because we can't unlink other roots when replaying the dir 4845 * deletes for directory foo. 4846 */ 4847 if (inode->last_unlink_trans >= trans->transid) 4848 btrfs_record_snapshot_destroy(trans, dir); 4849 4850 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4851 ret = btrfs_unlink_subvol(trans, dir, dentry); 4852 goto out; 4853 } 4854 4855 ret = btrfs_orphan_add(trans, inode); 4856 if (ret) 4857 goto out; 4858 4859 /* now the directory is empty */ 4860 ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name); 4861 if (!ret) 4862 btrfs_i_size_write(inode, 0); 4863 out: 4864 btrfs_end_transaction(trans); 4865 out_notrans: 4866 btrfs_btree_balance_dirty(fs_info); 4867 fscrypt_free_filename(&fname); 4868 4869 return ret; 4870 } 4871 4872 static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize) 4873 { 4874 ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u", 4875 blockstart, blocksize); 4876 4877 if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1) 4878 return true; 4879 return false; 4880 } 4881 4882 static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start) 4883 { 4884 const pgoff_t index = (start >> PAGE_SHIFT); 4885 struct address_space *mapping = inode->vfs_inode.i_mapping; 4886 struct folio *folio; 4887 u64 zero_start; 4888 u64 zero_end; 4889 int ret = 0; 4890 4891 again: 4892 folio = filemap_lock_folio(mapping, index); 4893 /* No folio present. */ 4894 if (IS_ERR(folio)) 4895 return 0; 4896 4897 if (!folio_test_uptodate(folio)) { 4898 ret = btrfs_read_folio(NULL, folio); 4899 folio_lock(folio); 4900 if (folio->mapping != mapping) { 4901 folio_unlock(folio); 4902 folio_put(folio); 4903 goto again; 4904 } 4905 if (unlikely(!folio_test_uptodate(folio))) { 4906 ret = -EIO; 4907 goto out_unlock; 4908 } 4909 } 4910 folio_wait_writeback(folio); 4911 4912 /* 4913 * We do not need to lock extents nor wait for ordered extents here, as 4914 * the range is already beyond EOF. 4915 */ 4916 4917 zero_start = max_t(u64, folio_pos(folio), start); 4918 zero_end = folio_next_pos(folio); 4919 folio_zero_range(folio, zero_start - folio_pos(folio), 4920 zero_end - zero_start); 4921 4922 out_unlock: 4923 folio_unlock(folio); 4924 folio_put(folio); 4925 return ret; 4926 } 4927 4928 /* 4929 * Handle the truncation of an fs block. 4930 * 4931 * @inode - inode that we're zeroing 4932 * @offset - the file offset of the block to truncate 4933 * The value must be inside [@start, @end], and the function will do 4934 * extra checks if the block that covers @offset needs to be zeroed. 4935 * @start - the start file offset of the range we want to zero 4936 * @end - the end (inclusive) file offset of the range we want to zero. 4937 * 4938 * If the range is not block aligned, read out the folio that covers @offset, 4939 * and if needed zero blocks that are inside the folio and covered by [@start, @end].
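 *
 * (For example, hole punching an unaligned range would call this twice:
 * once with @offset == @start to zero the head block, and once with
 * @offset == @end to zero the tail block.)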
4940 * If @start or @end + 1 lands inside a block, that block will be marked dirty 4941 * for writeback. 4942 * 4943 * This is utilized by hole punch, zero range, file expansion. 4944 */ 4945 int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end) 4946 { 4947 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4948 struct address_space *mapping = inode->vfs_inode.i_mapping; 4949 struct extent_io_tree *io_tree = &inode->io_tree; 4950 struct btrfs_ordered_extent *ordered; 4951 struct extent_state *cached_state = NULL; 4952 struct extent_changeset *data_reserved = NULL; 4953 bool only_release_metadata = false; 4954 u32 blocksize = fs_info->sectorsize; 4955 pgoff_t index = (offset >> PAGE_SHIFT); 4956 struct folio *folio; 4957 gfp_t mask = btrfs_alloc_write_mask(mapping); 4958 int ret = 0; 4959 const bool in_head_block = is_inside_block(offset, round_down(start, blocksize), 4960 blocksize); 4961 const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize), 4962 blocksize); 4963 bool need_truncate_head = false; 4964 bool need_truncate_tail = false; 4965 u64 zero_start; 4966 u64 zero_end; 4967 u64 block_start; 4968 u64 block_end; 4969 4970 /* @offset should be inside the range. */ 4971 ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu", 4972 offset, start, end); 4973 4974 /* The range is aligned at both ends. */ 4975 if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) { 4976 /* 4977 * For block size < page size case, we may have polluted blocks 4978 * beyond EOF. So we also need to zero them out. 4979 */ 4980 if (end == (u64)-1 && blocksize < PAGE_SIZE) 4981 ret = truncate_block_zero_beyond_eof(inode, start); 4982 goto out; 4983 } 4984 4985 /* 4986 * @offset may not be inside the head nor tail block. In that case we 4987 * don't need to do anything. 4988 */ 4989 if (!in_head_block && !in_tail_block) 4990 goto out; 4991 4992 /* 4993 * Skip the truncation if the range in the target block is already aligned. 4994 * The seemingly complex check will also handle the same block case. 4995 */ 4996 if (in_head_block && !IS_ALIGNED(start, blocksize)) 4997 need_truncate_head = true; 4998 if (in_tail_block && !IS_ALIGNED(end + 1, blocksize)) 4999 need_truncate_tail = true; 5000 if (!need_truncate_head && !need_truncate_tail) 5001 goto out; 5002 5003 block_start = round_down(offset, blocksize); 5004 block_end = block_start + blocksize - 1; 5005 5006 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 5007 blocksize, false); 5008 if (ret < 0) { 5009 size_t write_bytes = blocksize; 5010 5011 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 5012 /* For nocow case, no need to reserve data space. 
*/ 5013 ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u", 5014 write_bytes, blocksize); 5015 only_release_metadata = true; 5016 } else { 5017 goto out; 5018 } 5019 } 5020 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 5021 if (ret < 0) { 5022 if (!only_release_metadata) 5023 btrfs_free_reserved_data_space(inode, data_reserved, 5024 block_start, blocksize); 5025 goto out; 5026 } 5027 again: 5028 folio = __filemap_get_folio(mapping, index, 5029 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); 5030 if (IS_ERR(folio)) { 5031 if (only_release_metadata) 5032 btrfs_delalloc_release_metadata(inode, blocksize, true); 5033 else 5034 btrfs_delalloc_release_space(inode, data_reserved, 5035 block_start, blocksize, true); 5036 btrfs_delalloc_release_extents(inode, blocksize); 5037 ret = PTR_ERR(folio); 5038 goto out; 5039 } 5040 5041 if (!folio_test_uptodate(folio)) { 5042 ret = btrfs_read_folio(NULL, folio); 5043 folio_lock(folio); 5044 if (folio->mapping != mapping) { 5045 folio_unlock(folio); 5046 folio_put(folio); 5047 goto again; 5048 } 5049 if (unlikely(!folio_test_uptodate(folio))) { 5050 ret = -EIO; 5051 goto out_unlock; 5052 } 5053 } 5054 5055 /* 5056 * We unlock the page after the io is completed and then re-lock it 5057 * above. release_folio() could have come in between that and cleared 5058 * folio private, but left the page in the mapping. Set the page mapped 5059 * here to make sure it's properly set for the subpage stuff. 5060 */ 5061 ret = set_folio_extent_mapped(folio); 5062 if (ret < 0) 5063 goto out_unlock; 5064 5065 folio_wait_writeback(folio); 5066 5067 btrfs_lock_extent(io_tree, block_start, block_end, &cached_state); 5068 5069 ordered = btrfs_lookup_ordered_extent(inode, block_start); 5070 if (ordered) { 5071 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); 5072 folio_unlock(folio); 5073 folio_put(folio); 5074 btrfs_start_ordered_extent(ordered); 5075 btrfs_put_ordered_extent(ordered); 5076 goto again; 5077 } 5078 5079 btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end, 5080 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 5081 &cached_state); 5082 5083 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 5084 &cached_state); 5085 if (ret) { 5086 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); 5087 goto out_unlock; 5088 } 5089 5090 if (end == (u64)-1) { 5091 /* 5092 * We're truncating beyond EOF; the remaining blocks are normally 5093 * already holes, thus there is no need to zero them again. But for 5094 * fs block size < page size cases, memory mapped writes may have 5095 * polluted ranges beyond EOF. 5096 * 5097 * In that case, although such polluted blocks beyond EOF will 5098 * not reach disk, they still affect our page cache.
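 *
 * That is why we zero up to the end of the folio here, instead of
 * only up to the end of the block.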
5099 */ 5100 zero_start = max_t(u64, folio_pos(folio), start); 5101 zero_end = min_t(u64, folio_next_pos(folio) - 1, end); 5102 } else { 5103 zero_start = max_t(u64, block_start, start); 5104 zero_end = min_t(u64, block_end, end); 5105 } 5106 folio_zero_range(folio, zero_start - folio_pos(folio), 5107 zero_end - zero_start + 1); 5108 5109 btrfs_folio_clear_checked(fs_info, folio, block_start, 5110 block_end + 1 - block_start); 5111 btrfs_folio_set_dirty(fs_info, folio, block_start, 5112 block_end + 1 - block_start); 5113 5114 if (only_release_metadata) 5115 btrfs_set_extent_bit(&inode->io_tree, block_start, block_end, 5116 EXTENT_NORESERVE, &cached_state); 5117 5118 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); 5119 5120 out_unlock: 5121 if (ret) { 5122 if (only_release_metadata) 5123 btrfs_delalloc_release_metadata(inode, blocksize, true); 5124 else 5125 btrfs_delalloc_release_space(inode, data_reserved, 5126 block_start, blocksize, true); 5127 } 5128 btrfs_delalloc_release_extents(inode, blocksize); 5129 folio_unlock(folio); 5130 folio_put(folio); 5131 out: 5132 if (only_release_metadata) 5133 btrfs_check_nocow_unlock(inode); 5134 extent_changeset_free(data_reserved); 5135 return ret; 5136 } 5137 5138 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len) 5139 { 5140 struct btrfs_root *root = inode->root; 5141 struct btrfs_fs_info *fs_info = root->fs_info; 5142 struct btrfs_trans_handle *trans; 5143 struct btrfs_drop_extents_args drop_args = { 0 }; 5144 int ret; 5145 5146 /* 5147 * If NO_HOLES is enabled, we don't need to do anything. 5148 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 5149 * or btrfs_update_inode() will be called, which guarantee that the next 5150 * fsync will know this inode was changed and needs to be logged. 5151 */ 5152 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 5153 return 0; 5154 5155 /* 5156 * 1 - for the one we're dropping 5157 * 1 - for the one we're adding 5158 * 1 - for updating the inode. 5159 */ 5160 trans = btrfs_start_transaction(root, 3); 5161 if (IS_ERR(trans)) 5162 return PTR_ERR(trans); 5163 5164 drop_args.start = offset; 5165 drop_args.end = offset + len; 5166 drop_args.drop_cache = true; 5167 5168 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 5169 if (unlikely(ret)) { 5170 btrfs_abort_transaction(trans, ret); 5171 btrfs_end_transaction(trans); 5172 return ret; 5173 } 5174 5175 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 5176 if (ret) { 5177 btrfs_abort_transaction(trans, ret); 5178 } else { 5179 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 5180 btrfs_update_inode(trans, inode); 5181 } 5182 btrfs_end_transaction(trans); 5183 return ret; 5184 } 5185 5186 /* 5187 * This function puts in dummy file extents for the area we're creating a hole 5188 * for. 
So if we are truncating this file to a larger size we need to insert 5189 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for 5190 * the range between oldsize and size. 5191 */ 5192 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 5193 { 5194 struct btrfs_root *root = inode->root; 5195 struct btrfs_fs_info *fs_info = root->fs_info; 5196 struct extent_io_tree *io_tree = &inode->io_tree; 5197 struct extent_map *em = NULL; 5198 struct extent_state *cached_state = NULL; 5199 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 5200 u64 block_end = ALIGN(size, fs_info->sectorsize); 5201 u64 last_byte; 5202 u64 cur_offset; 5203 u64 hole_size; 5204 int ret = 0; 5205 5206 /* 5207 * If our size started in the middle of a block we need to zero out the 5208 * rest of the block before we expand the i_size, otherwise we could 5209 * expose stale data. 5210 */ 5211 ret = btrfs_truncate_block(inode, oldsize, oldsize, -1); 5212 if (ret) 5213 return ret; 5214 5215 if (size <= hole_start) 5216 return 0; 5217 5218 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 5219 &cached_state); 5220 cur_offset = hole_start; 5221 while (1) { 5222 em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset); 5223 if (IS_ERR(em)) { 5224 ret = PTR_ERR(em); 5225 em = NULL; 5226 break; 5227 } 5228 last_byte = min(btrfs_extent_map_end(em), block_end); 5229 last_byte = ALIGN(last_byte, fs_info->sectorsize); 5230 hole_size = last_byte - cur_offset; 5231 5232 if (!(em->flags & EXTENT_FLAG_PREALLOC)) { 5233 struct extent_map *hole_em; 5234 5235 ret = maybe_insert_hole(inode, cur_offset, hole_size); 5236 if (ret) 5237 break; 5238 5239 ret = btrfs_inode_set_file_extent_range(inode, 5240 cur_offset, hole_size); 5241 if (ret) 5242 break; 5243 5244 hole_em = btrfs_alloc_extent_map(); 5245 if (!hole_em) { 5246 btrfs_drop_extent_map_range(inode, cur_offset, 5247 cur_offset + hole_size - 1, 5248 false); 5249 btrfs_set_inode_full_sync(inode); 5250 goto next; 5251 } 5252 hole_em->start = cur_offset; 5253 hole_em->len = hole_size; 5254 5255 hole_em->disk_bytenr = EXTENT_MAP_HOLE; 5256 hole_em->disk_num_bytes = 0; 5257 hole_em->ram_bytes = hole_size; 5258 hole_em->generation = btrfs_get_fs_generation(fs_info); 5259 5260 ret = btrfs_replace_extent_map_range(inode, hole_em, true); 5261 btrfs_free_extent_map(hole_em); 5262 } else { 5263 ret = btrfs_inode_set_file_extent_range(inode, 5264 cur_offset, hole_size); 5265 if (ret) 5266 break; 5267 } 5268 next: 5269 btrfs_free_extent_map(em); 5270 em = NULL; 5271 cur_offset = last_byte; 5272 if (cur_offset >= block_end) 5273 break; 5274 } 5275 btrfs_free_extent_map(em); 5276 btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5277 return ret; 5278 } 5279 5280 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5281 { 5282 struct btrfs_root *root = BTRFS_I(inode)->root; 5283 struct btrfs_trans_handle *trans; 5284 loff_t oldsize = i_size_read(inode); 5285 loff_t newsize = attr->ia_size; 5286 int mask = attr->ia_valid; 5287 int ret; 5288 5289 /* 5290 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5291 * special case where we need to update the times despite not having 5292 * these flags set. For all other operations the VFS sets these flags 5293 * explicitly if it wants a timestamp update.
5294 */ 5295 if (newsize != oldsize) { 5296 inode_inc_iversion(inode); 5297 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5298 inode_set_mtime_to_ts(inode, 5299 inode_set_ctime_current(inode)); 5300 } 5301 } 5302 5303 if (newsize > oldsize) { 5304 /* 5305 * Don't do an expanding truncate while snapshotting is ongoing. 5306 * This is to ensure the snapshot captures a fully consistent 5307 * state of this file - if the snapshot captures this expanding 5308 * truncation, it must capture all writes that happened before 5309 * this truncation. 5310 */ 5311 btrfs_drew_write_lock(&root->snapshot_lock); 5312 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5313 if (ret) { 5314 btrfs_drew_write_unlock(&root->snapshot_lock); 5315 return ret; 5316 } 5317 5318 trans = btrfs_start_transaction(root, 1); 5319 if (IS_ERR(trans)) { 5320 btrfs_drew_write_unlock(&root->snapshot_lock); 5321 return PTR_ERR(trans); 5322 } 5323 5324 i_size_write(inode, newsize); 5325 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5326 pagecache_isize_extended(inode, oldsize, newsize); 5327 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 5328 btrfs_drew_write_unlock(&root->snapshot_lock); 5329 btrfs_end_transaction(trans); 5330 } else { 5331 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 5332 5333 if (btrfs_is_zoned(fs_info)) { 5334 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 5335 ALIGN(newsize, fs_info->sectorsize), 5336 (u64)-1); 5337 if (ret) 5338 return ret; 5339 } 5340 5341 /* 5342 * We're truncating a file that used to have good data down to 5343 * zero. Make sure any new writes to the file get on disk 5344 * on close. 5345 */ 5346 if (newsize == 0) 5347 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5348 &BTRFS_I(inode)->runtime_flags); 5349 5350 truncate_setsize(inode, newsize); 5351 5352 inode_dio_wait(inode); 5353 5354 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5355 if (ret && inode->i_nlink) { 5356 int ret2; 5357 5358 /* 5359 * Truncate failed, so fix up the in-memory size. We 5360 * adjusted disk_i_size down as we removed extents, so 5361 * wait for disk_i_size to be stable and then update the 5362 * in-memory size to match. 5363 */ 5364 ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 5365 if (ret2) 5366 return ret2; 5367 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5368 } 5369 } 5370 5371 return ret; 5372 } 5373 5374 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5375 struct iattr *attr) 5376 { 5377 struct inode *inode = d_inode(dentry); 5378 struct btrfs_root *root = BTRFS_I(inode)->root; 5379 int ret; 5380 5381 if (btrfs_root_readonly(root)) 5382 return -EROFS; 5383 5384 ret = setattr_prepare(idmap, dentry, attr); 5385 if (ret) 5386 return ret; 5387 5388 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5389 ret = btrfs_setsize(inode, attr); 5390 if (ret) 5391 return ret; 5392 } 5393 5394 if (attr->ia_valid) { 5395 setattr_copy(idmap, inode, attr); 5396 inode_inc_iversion(inode); 5397 ret = btrfs_dirty_inode(BTRFS_I(inode)); 5398 5399 if (!ret && attr->ia_valid & ATTR_MODE) 5400 ret = posix_acl_chmod(idmap, dentry, inode->i_mode); 5401 } 5402 5403 return ret; 5404 } 5405 5406 /* 5407 * While truncating the inode pages during eviction, we get the VFS 5408 * calling btrfs_invalidate_folio() against each folio of the inode. 
This 5409 * is slow because the calls to btrfs_invalidate_folio() result in a 5410 * huge number of calls to lock_extent() and clear_extent_bit(), 5411 * which keep merging and splitting extent_state structures over and over, 5412 * wasting lots of time. 5413 * 5414 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5415 * skip all those expensive operations on a per folio basis and do only 5416 * the ordered io finishing, while we release here the extent_map and 5417 * extent_state structures, without the excessive merging and splitting. 5418 */ 5419 static void evict_inode_truncate_pages(struct inode *inode) 5420 { 5421 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5422 struct rb_node *node; 5423 5424 ASSERT(inode_state_read_once(inode) & I_FREEING); 5425 truncate_inode_pages_final(&inode->i_data); 5426 5427 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5428 5429 /* 5430 * Keep looping until we have no more ranges in the io tree. 5431 * We can have ongoing bios started by readahead whose endio callback 5432 * (extent_io.c:end_bio_extent_readpage) is still in progress (it has 5433 * unlocked the pages in the bio but has not yet unlocked the ranges in 5434 * the io tree). This means some ranges can still be locked when 5435 * eviction starts, because those bios are executed by a separate task 5436 * (a work queue kthread) and no inode reference (inode->i_count) was 5437 * taken before submitting them (such a reference would be dropped in 5438 * the end io callback of each bio). 5439 * Therefore here we effectively end up waiting for those bios and 5440 * anyone else holding locked ranges without having bumped the inode's 5441 * reference count - if we don't do it, when they access the inode's 5442 * io_tree to unlock a range it may be too late, leading to a 5443 * use-after-free issue. 5444 */ 5445 spin_lock(&io_tree->lock); 5446 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5447 struct extent_state *state; 5448 struct extent_state *cached_state = NULL; 5449 u64 start; 5450 u64 end; 5451 unsigned state_flags; 5452 5453 node = rb_first(&io_tree->state); 5454 state = rb_entry(node, struct extent_state, rb_node); 5455 start = state->start; 5456 end = state->end; 5457 state_flags = state->state; 5458 spin_unlock(&io_tree->lock); 5459 5460 btrfs_lock_extent(io_tree, start, end, &cached_state); 5461 5462 /* 5463 * If the range still has the DELALLOC flag, the extent didn't 5464 * reach disk, and its reserved space won't be freed by 5465 * delayed_ref. So we need to free its reserved space here. 5466 * (Refer to the comment in btrfs_invalidate_folio, case 2.) 5467 * 5468 * Note, end is the bytenr of the last byte, so we need + 1 here. 5469 */ 5470 if (state_flags & EXTENT_DELALLOC) 5471 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5472 end - start + 1, NULL); 5473 5474 btrfs_clear_extent_bit(io_tree, start, end, 5475 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5476 &cached_state); 5477 5478 cond_resched(); 5479 spin_lock(&io_tree->lock); 5480 } 5481 spin_unlock(&io_tree->lock); 5482 } 5483 5484 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5485 struct btrfs_block_rsv *rsv) 5486 { 5487 struct btrfs_fs_info *fs_info = root->fs_info; 5488 struct btrfs_trans_handle *trans; 5489 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5490 int ret; 5491 5492 /* 5493 * Eviction should be taking place somewhere safe because of our 5494 * delayed iputs.
However the normal flushing code will run delayed 5495 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5496 * 5497 * We reserve the delayed_refs_extra here again because we can't use 5498 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5499 * above. We reserve our extra bit here because we generate a ton of 5500 * delayed refs activity by truncating. 5501 * 5502 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5503 * if we fail to make this reservation we can re-try without the 5504 * delayed_refs_extra so we can make some forward progress. 5505 */ 5506 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5507 BTRFS_RESERVE_FLUSH_EVICT); 5508 if (ret) { 5509 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5510 BTRFS_RESERVE_FLUSH_EVICT); 5511 if (ret) { 5512 btrfs_warn(fs_info, 5513 "could not allocate space for delete; will truncate on mount"); 5514 return ERR_PTR(-ENOSPC); 5515 } 5516 delayed_refs_extra = 0; 5517 } 5518 5519 trans = btrfs_join_transaction(root); 5520 if (IS_ERR(trans)) 5521 return trans; 5522 5523 if (delayed_refs_extra) { 5524 trans->block_rsv = &fs_info->trans_block_rsv; 5525 trans->bytes_reserved = delayed_refs_extra; 5526 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5527 delayed_refs_extra, true); 5528 } 5529 return trans; 5530 } 5531 5532 void btrfs_evict_inode(struct inode *inode) 5533 { 5534 struct btrfs_fs_info *fs_info; 5535 struct btrfs_trans_handle *trans; 5536 struct btrfs_root *root = BTRFS_I(inode)->root; 5537 struct btrfs_block_rsv rsv; 5538 int ret; 5539 5540 trace_btrfs_inode_evict(inode); 5541 5542 if (!root) { 5543 fsverity_cleanup_inode(inode); 5544 clear_inode(inode); 5545 return; 5546 } 5547 5548 fs_info = inode_to_fs_info(inode); 5549 evict_inode_truncate_pages(inode); 5550 5551 if (inode->i_nlink && 5552 ((btrfs_root_refs(&root->root_item) != 0 && 5553 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) || 5554 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5555 goto out; 5556 5557 if (is_bad_inode(inode)) 5558 goto out; 5559 5560 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5561 goto out; 5562 5563 if (inode->i_nlink > 0) { 5564 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5565 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID); 5566 goto out; 5567 } 5568 5569 /* 5570 * This makes sure the inode item in tree is uptodate and the space for 5571 * the inode update is released. 5572 */ 5573 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5574 if (ret) 5575 goto out; 5576 5577 /* 5578 * This drops any pending insert or delete operations we have for this 5579 * inode. We could have a delayed dir index deletion queued up, but 5580 * we're removing the inode completely so that'll be taken care of in 5581 * the truncate. 
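*
* (Restating the code below: the eviction loop runs
* btrfs_truncate_inode_items() with new_size == 0 and min_type == 0,
* i.e. it deletes every item belonging to this inode, so a queued
* delayed dir index deletion would only be redundant work.)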
5582 */ 5583 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5584 5585 btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP); 5586 rsv.size = btrfs_calc_metadata_size(fs_info, 1); 5587 rsv.failfast = true; 5588 5589 btrfs_i_size_write(BTRFS_I(inode), 0); 5590 5591 while (1) { 5592 struct btrfs_truncate_control control = { 5593 .inode = BTRFS_I(inode), 5594 .ino = btrfs_ino(BTRFS_I(inode)), 5595 .new_size = 0, 5596 .min_type = 0, 5597 }; 5598 5599 trans = evict_refill_and_join(root, &rsv); 5600 if (IS_ERR(trans)) 5601 goto out_release; 5602 5603 trans->block_rsv = &rsv; 5604 5605 ret = btrfs_truncate_inode_items(trans, root, &control); 5606 trans->block_rsv = &fs_info->trans_block_rsv; 5607 btrfs_end_transaction(trans); 5608 /* 5609 * We have not added new delayed items for our inode after we 5610 * have flushed its delayed items, so no need to throttle on 5611 * delayed items. However we have modified extent buffers. 5612 */ 5613 btrfs_btree_balance_dirty_nodelay(fs_info); 5614 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5615 goto out_release; 5616 else if (!ret) 5617 break; 5618 } 5619 5620 /* 5621 * Errors here aren't a big deal, it just means we leave orphan items in 5622 * the tree. They will be cleaned up on the next mount. If the inode 5623 * number gets reused, cleanup deletes the orphan item without doing 5624 * anything, and unlink reuses the existing orphan item. 5625 * 5626 * If it turns out that we are dropping too many of these, we might want 5627 * to add a mechanism for retrying these after a commit. 5628 */ 5629 trans = evict_refill_and_join(root, &rsv); 5630 if (!IS_ERR(trans)) { 5631 trans->block_rsv = &rsv; 5632 btrfs_orphan_del(trans, BTRFS_I(inode)); 5633 trans->block_rsv = &fs_info->trans_block_rsv; 5634 btrfs_end_transaction(trans); 5635 } 5636 5637 out_release: 5638 btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL); 5639 out: 5640 /* 5641 * If we didn't successfully delete, the orphan item will still be in 5642 * the tree and we'll retry on the next mount. Again, we might also want 5643 * to retry these periodically in the future. 5644 */ 5645 btrfs_remove_delayed_node(BTRFS_I(inode)); 5646 fsverity_cleanup_inode(inode); 5647 clear_inode(inode); 5648 } 5649 5650 /* 5651 * Return the key found in the dir entry in the location pointer, fill @type 5652 * with BTRFS_FT_*, and return 0. 5653 * 5654 * If no dir entries were found, returns -ENOENT. 5655 * If found a corrupted location in dir entry, returns -EUCLEAN. 5656 */ 5657 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5658 struct btrfs_key *location, u8 *type) 5659 { 5660 struct btrfs_dir_item *di; 5661 BTRFS_PATH_AUTO_FREE(path); 5662 struct btrfs_root *root = dir->root; 5663 int ret = 0; 5664 struct fscrypt_name fname; 5665 5666 path = btrfs_alloc_path(); 5667 if (!path) 5668 return -ENOMEM; 5669 5670 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5671 if (ret < 0) 5672 return ret; 5673 /* 5674 * fscrypt_setup_filename() should never return a positive value, but 5675 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5676 */ 5677 ASSERT(ret == 0); 5678 5679 /* This needs to handle no-key deletions later on */ 5680 5681 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5682 &fname.disk_name, 0); 5683 if (IS_ERR_OR_NULL(di)) { 5684 ret = di ? 
PTR_ERR(di) : -ENOENT; 5685 goto out; 5686 } 5687 5688 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5689 if (unlikely(location->type != BTRFS_INODE_ITEM_KEY && 5690 location->type != BTRFS_ROOT_ITEM_KEY)) { 5691 ret = -EUCLEAN; 5692 btrfs_warn(root->fs_info, 5693 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")", 5694 __func__, fname.disk_name.name, btrfs_ino(dir), 5695 BTRFS_KEY_FMT_VALUE(location)); 5696 } 5697 if (!ret) 5698 *type = btrfs_dir_ftype(path->nodes[0], di); 5699 out: 5700 fscrypt_free_filename(&fname); 5701 return ret; 5702 } 5703 5704 /* 5705 * when we hit a tree root in a directory, the btrfs part of the inode 5706 * needs to be changed to reflect the root directory of the tree root. This 5707 * is kind of like crossing a mount point. 5708 */ 5709 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5710 struct btrfs_inode *dir, 5711 struct dentry *dentry, 5712 struct btrfs_key *location, 5713 struct btrfs_root **sub_root) 5714 { 5715 BTRFS_PATH_AUTO_FREE(path); 5716 struct btrfs_root *new_root; 5717 struct btrfs_root_ref *ref; 5718 struct extent_buffer *leaf; 5719 struct btrfs_key key; 5720 int ret; 5721 int err = 0; 5722 struct fscrypt_name fname; 5723 5724 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5725 if (ret) 5726 return ret; 5727 5728 path = btrfs_alloc_path(); 5729 if (!path) { 5730 err = -ENOMEM; 5731 goto out; 5732 } 5733 5734 err = -ENOENT; 5735 key.objectid = btrfs_root_id(dir->root); 5736 key.type = BTRFS_ROOT_REF_KEY; 5737 key.offset = location->objectid; 5738 5739 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5740 if (ret) { 5741 if (ret < 0) 5742 err = ret; 5743 goto out; 5744 } 5745 5746 leaf = path->nodes[0]; 5747 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5748 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5749 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5750 goto out; 5751 5752 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5753 (unsigned long)(ref + 1), fname.disk_name.len); 5754 if (ret) 5755 goto out; 5756 5757 btrfs_release_path(path); 5758 5759 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5760 if (IS_ERR(new_root)) { 5761 err = PTR_ERR(new_root); 5762 goto out; 5763 } 5764 5765 *sub_root = new_root; 5766 location->objectid = btrfs_root_dirid(&new_root->root_item); 5767 location->type = BTRFS_INODE_ITEM_KEY; 5768 location->offset = 0; 5769 err = 0; 5770 out: 5771 fscrypt_free_filename(&fname); 5772 return err; 5773 } 5774 5775 5776 5777 static void btrfs_del_inode_from_root(struct btrfs_inode *inode) 5778 { 5779 struct btrfs_root *root = inode->root; 5780 struct btrfs_inode *entry; 5781 bool empty = false; 5782 5783 xa_lock(&root->inodes); 5784 /* 5785 * This btrfs_inode is being freed and has already been unhashed at this 5786 * point. It's possible that another btrfs_inode has already been 5787 * allocated for the same inode and inserted itself into the root, so 5788 * don't delete it in that case. 5789 * 5790 * Note that this shouldn't need to allocate memory, so the gfp flags 5791 * don't really matter. 
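*
* Note (describing the call below): __xa_cmpxchg() with a NULL
* replacement only removes the entry if it still points at this exact
* btrfs_inode; if a newer inode for the same inode number has already
* replaced it, the compare fails and the newer entry is left in place.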
5792 */ 5793 entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL, 5794 GFP_ATOMIC); 5795 if (entry == inode) 5796 empty = xa_empty(&root->inodes); 5797 xa_unlock(&root->inodes); 5798 5799 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5800 xa_lock(&root->inodes); 5801 empty = xa_empty(&root->inodes); 5802 xa_unlock(&root->inodes); 5803 if (empty) 5804 btrfs_add_dead_root(root); 5805 } 5806 } 5807 5808 5809 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5810 { 5811 struct btrfs_iget_args *args = p; 5812 5813 btrfs_set_inode_number(BTRFS_I(inode), args->ino); 5814 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5815 5816 if (args->root && args->root == args->root->fs_info->tree_root && 5817 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5818 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5819 &BTRFS_I(inode)->runtime_flags); 5820 return 0; 5821 } 5822 5823 static int btrfs_find_actor(struct inode *inode, void *opaque) 5824 { 5825 struct btrfs_iget_args *args = opaque; 5826 5827 return args->ino == btrfs_ino(BTRFS_I(inode)) && 5828 args->root == BTRFS_I(inode)->root; 5829 } 5830 5831 static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root) 5832 { 5833 struct inode *inode; 5834 struct btrfs_iget_args args; 5835 unsigned long hashval = btrfs_inode_hash(ino, root); 5836 5837 args.ino = ino; 5838 args.root = root; 5839 5840 inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor, 5841 btrfs_init_locked_inode, 5842 (void *)&args); 5843 if (!inode) 5844 return NULL; 5845 return BTRFS_I(inode); 5846 } 5847 5848 /* 5849 * Get an inode object given its inode number and corresponding root. Path is 5850 * preallocated to prevent recursing back to iget through allocator. 5851 */ 5852 struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root, 5853 struct btrfs_path *path) 5854 { 5855 struct btrfs_inode *inode; 5856 int ret; 5857 5858 inode = btrfs_iget_locked(ino, root); 5859 if (!inode) 5860 return ERR_PTR(-ENOMEM); 5861 5862 if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW)) 5863 return inode; 5864 5865 ret = btrfs_read_locked_inode(inode, path); 5866 if (ret) 5867 return ERR_PTR(ret); 5868 5869 unlock_new_inode(&inode->vfs_inode); 5870 return inode; 5871 } 5872 5873 /* 5874 * Get an inode object given its inode number and corresponding root. 
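*
* Unlike btrfs_iget_path() above, this allocates a temporary path of
* its own, and only when the inode was not already cached (i.e. I_NEW
* is set and btrfs_read_locked_inode() has to read the inode item).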
5875 */ 5876 struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root) 5877 { 5878 struct btrfs_inode *inode; 5879 struct btrfs_path *path; 5880 int ret; 5881 5882 inode = btrfs_iget_locked(ino, root); 5883 if (!inode) 5884 return ERR_PTR(-ENOMEM); 5885 5886 if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW)) 5887 return inode; 5888 5889 path = btrfs_alloc_path(); 5890 if (!path) { 5891 iget_failed(&inode->vfs_inode); 5892 return ERR_PTR(-ENOMEM); 5893 } 5894 5895 ret = btrfs_read_locked_inode(inode, path); 5896 btrfs_free_path(path); 5897 if (ret) 5898 return ERR_PTR(ret); 5899 5900 if (S_ISDIR(inode->vfs_inode.i_mode)) 5901 inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC; 5902 unlock_new_inode(&inode->vfs_inode); 5903 return inode; 5904 } 5905 5906 static struct btrfs_inode *new_simple_dir(struct inode *dir, 5907 struct btrfs_key *key, 5908 struct btrfs_root *root) 5909 { 5910 struct timespec64 ts; 5911 struct inode *vfs_inode; 5912 struct btrfs_inode *inode; 5913 5914 vfs_inode = new_inode(dir->i_sb); 5915 if (!vfs_inode) 5916 return ERR_PTR(-ENOMEM); 5917 5918 inode = BTRFS_I(vfs_inode); 5919 inode->root = btrfs_grab_root(root); 5920 inode->ref_root_id = key->objectid; 5921 set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags); 5922 set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags); 5923 5924 btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID); 5925 /* 5926 * We only need lookup, the rest is read-only and there's no inode 5927 * associated with the dentry 5928 */ 5929 vfs_inode->i_op = &simple_dir_inode_operations; 5930 vfs_inode->i_opflags &= ~IOP_XATTR; 5931 vfs_inode->i_fop = &simple_dir_operations; 5932 vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5933 5934 ts = inode_set_ctime_current(vfs_inode); 5935 inode_set_mtime_to_ts(vfs_inode, ts); 5936 inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir)); 5937 inode->i_otime_sec = ts.tv_sec; 5938 inode->i_otime_nsec = ts.tv_nsec; 5939 5940 vfs_inode->i_uid = dir->i_uid; 5941 vfs_inode->i_gid = dir->i_gid; 5942 5943 return inode; 5944 } 5945 5946 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5947 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5948 static_assert(BTRFS_FT_DIR == FT_DIR); 5949 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5950 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5951 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5952 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5953 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5954 5955 static inline u8 btrfs_inode_type(const struct btrfs_inode *inode) 5956 { 5957 return fs_umode_to_ftype(inode->vfs_inode.i_mode); 5958 } 5959 5960 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5961 { 5962 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 5963 struct btrfs_inode *inode; 5964 struct btrfs_root *root = BTRFS_I(dir)->root; 5965 struct btrfs_root *sub_root = root; 5966 struct btrfs_key location = { 0 }; 5967 u8 di_type = 0; 5968 int ret = 0; 5969 5970 if (dentry->d_name.len > BTRFS_NAME_LEN) 5971 return ERR_PTR(-ENAMETOOLONG); 5972 5973 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5974 if (ret < 0) 5975 return ERR_PTR(ret); 5976 5977 if (location.type == BTRFS_INODE_ITEM_KEY) { 5978 inode = btrfs_iget(location.objectid, root); 5979 if (IS_ERR(inode)) 5980 return ERR_CAST(inode); 5981 5982 /* Do extra check against inode mode with di_type */ 5983 if (unlikely(btrfs_inode_type(inode) != di_type)) { 5984 btrfs_crit(fs_info, 5985 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir 
type=%u", 5986 inode->vfs_inode.i_mode, btrfs_inode_type(inode), 5987 di_type); 5988 iput(&inode->vfs_inode); 5989 return ERR_PTR(-EUCLEAN); 5990 } 5991 return &inode->vfs_inode; 5992 } 5993 5994 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5995 &location, &sub_root); 5996 if (ret < 0) { 5997 if (ret != -ENOENT) 5998 inode = ERR_PTR(ret); 5999 else 6000 inode = new_simple_dir(dir, &location, root); 6001 } else { 6002 inode = btrfs_iget(location.objectid, sub_root); 6003 btrfs_put_root(sub_root); 6004 6005 if (IS_ERR(inode)) 6006 return ERR_CAST(inode); 6007 6008 down_read(&fs_info->cleanup_work_sem); 6009 if (!sb_rdonly(inode->vfs_inode.i_sb)) 6010 ret = btrfs_orphan_cleanup(sub_root); 6011 up_read(&fs_info->cleanup_work_sem); 6012 if (ret) { 6013 iput(&inode->vfs_inode); 6014 inode = ERR_PTR(ret); 6015 } 6016 } 6017 6018 if (IS_ERR(inode)) 6019 return ERR_CAST(inode); 6020 6021 return &inode->vfs_inode; 6022 } 6023 6024 static int btrfs_dentry_delete(const struct dentry *dentry) 6025 { 6026 struct btrfs_root *root; 6027 struct inode *inode = d_inode(dentry); 6028 6029 if (!inode && !IS_ROOT(dentry)) 6030 inode = d_inode(dentry->d_parent); 6031 6032 if (inode) { 6033 root = BTRFS_I(inode)->root; 6034 if (btrfs_root_refs(&root->root_item) == 0) 6035 return 1; 6036 6037 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 6038 return 1; 6039 } 6040 return 0; 6041 } 6042 6043 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 6044 unsigned int flags) 6045 { 6046 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 6047 6048 if (inode == ERR_PTR(-ENOENT)) 6049 inode = NULL; 6050 return d_splice_alias(inode, dentry); 6051 } 6052 6053 /* 6054 * Find the highest existing sequence number in a directory and then set the 6055 * in-memory index_cnt variable to the first free sequence number. 6056 */ 6057 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6058 { 6059 struct btrfs_root *root = inode->root; 6060 struct btrfs_key key, found_key; 6061 BTRFS_PATH_AUTO_FREE(path); 6062 struct extent_buffer *leaf; 6063 int ret; 6064 6065 key.objectid = btrfs_ino(inode); 6066 key.type = BTRFS_DIR_INDEX_KEY; 6067 key.offset = (u64)-1; 6068 6069 path = btrfs_alloc_path(); 6070 if (!path) 6071 return -ENOMEM; 6072 6073 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6074 if (ret < 0) 6075 return ret; 6076 /* FIXME: we should be able to handle this */ 6077 if (ret == 0) 6078 return ret; 6079 6080 if (path->slots[0] == 0) { 6081 inode->index_cnt = BTRFS_DIR_START_INDEX; 6082 return 0; 6083 } 6084 6085 path->slots[0]--; 6086 6087 leaf = path->nodes[0]; 6088 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6089 6090 if (found_key.objectid != btrfs_ino(inode) || 6091 found_key.type != BTRFS_DIR_INDEX_KEY) { 6092 inode->index_cnt = BTRFS_DIR_START_INDEX; 6093 return 0; 6094 } 6095 6096 inode->index_cnt = found_key.offset + 1; 6097 6098 return 0; 6099 } 6100 6101 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 6102 { 6103 int ret = 0; 6104 6105 btrfs_inode_lock(dir, 0); 6106 if (dir->index_cnt == (u64)-1) { 6107 ret = btrfs_inode_delayed_dir_index_count(dir); 6108 if (ret) { 6109 ret = btrfs_set_inode_index_count(dir); 6110 if (ret) 6111 goto out; 6112 } 6113 } 6114 6115 /* index_cnt is the index number of next new entry, so decrement it. 
*/ 6116 *index = dir->index_cnt - 1; 6117 out: 6118 btrfs_inode_unlock(dir, 0); 6119 6120 return ret; 6121 } 6122 6123 /* 6124 * All this infrastructure exists because dir_emit can fault, and we are holding 6125 * the tree lock when doing readdir. For now just allocate a buffer and copy 6126 * our information into that, and then dir_emit from the buffer. This is 6127 * similar to what NFS does, only we don't keep the buffer around in pagecache 6128 * because I'm afraid I'll mess that up. Long term we need to make filldir do 6129 * copy_to_user_inatomic so we don't have to worry about page faulting under the 6130 * tree lock. 6131 */ 6132 static int btrfs_opendir(struct inode *inode, struct file *file) 6133 { 6134 struct btrfs_file_private *private; 6135 u64 last_index; 6136 int ret; 6137 6138 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 6139 if (ret) 6140 return ret; 6141 6142 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 6143 if (!private) 6144 return -ENOMEM; 6145 private->last_index = last_index; 6146 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 6147 if (!private->filldir_buf) { 6148 kfree(private); 6149 return -ENOMEM; 6150 } 6151 file->private_data = private; 6152 return 0; 6153 } 6154 6155 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence) 6156 { 6157 struct btrfs_file_private *private = file->private_data; 6158 int ret; 6159 6160 ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)), 6161 &private->last_index); 6162 if (ret) 6163 return ret; 6164 6165 return generic_file_llseek(file, offset, whence); 6166 } 6167 6168 struct dir_entry { 6169 u64 ino; 6170 u64 offset; 6171 unsigned type; 6172 int name_len; 6173 }; 6174 6175 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 6176 { 6177 while (entries--) { 6178 struct dir_entry *entry = addr; 6179 char *name = (char *)(entry + 1); 6180 6181 ctx->pos = get_unaligned(&entry->offset); 6182 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 6183 get_unaligned(&entry->ino), 6184 get_unaligned(&entry->type))) 6185 return 1; 6186 addr += sizeof(struct dir_entry) + 6187 get_unaligned(&entry->name_len); 6188 ctx->pos++; 6189 } 6190 return 0; 6191 } 6192 6193 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 6194 { 6195 struct inode *inode = file_inode(file); 6196 struct btrfs_root *root = BTRFS_I(inode)->root; 6197 struct btrfs_file_private *private = file->private_data; 6198 struct btrfs_dir_item *di; 6199 struct btrfs_key key; 6200 struct btrfs_key found_key; 6201 BTRFS_PATH_AUTO_FREE(path); 6202 void *addr; 6203 LIST_HEAD(ins_list); 6204 LIST_HEAD(del_list); 6205 int ret; 6206 char *name_ptr; 6207 int name_len; 6208 int entries = 0; 6209 int total_len = 0; 6210 bool put = false; 6211 struct btrfs_key location; 6212 6213 if (!dir_emit_dots(file, ctx)) 6214 return 0; 6215 6216 path = btrfs_alloc_path(); 6217 if (!path) 6218 return -ENOMEM; 6219 6220 addr = private->filldir_buf; 6221 path->reada = READA_FORWARD; 6222 6223 put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index, 6224 &ins_list, &del_list); 6225 6226 again: 6227 key.type = BTRFS_DIR_INDEX_KEY; 6228 key.offset = ctx->pos; 6229 key.objectid = btrfs_ino(BTRFS_I(inode)); 6230 6231 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 6232 struct dir_entry *entry; 6233 struct extent_buffer *leaf = path->nodes[0]; 6234 u8 ftype; 6235 6236 if (found_key.objectid != key.objectid) 6237 break; 6238 if (found_key.type != BTRFS_DIR_INDEX_KEY) 
6239 break; 6240 if (found_key.offset < ctx->pos) 6241 continue; 6242 if (found_key.offset > private->last_index) 6243 break; 6244 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6245 continue; 6246 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 6247 name_len = btrfs_dir_name_len(leaf, di); 6248 if ((total_len + sizeof(struct dir_entry) + name_len) >= 6249 PAGE_SIZE) { 6250 btrfs_release_path(path); 6251 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6252 if (ret) 6253 goto nopos; 6254 addr = private->filldir_buf; 6255 entries = 0; 6256 total_len = 0; 6257 goto again; 6258 } 6259 6260 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 6261 entry = addr; 6262 name_ptr = (char *)(entry + 1); 6263 read_extent_buffer(leaf, name_ptr, 6264 (unsigned long)(di + 1), name_len); 6265 put_unaligned(name_len, &entry->name_len); 6266 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 6267 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6268 put_unaligned(location.objectid, &entry->ino); 6269 put_unaligned(found_key.offset, &entry->offset); 6270 entries++; 6271 addr += sizeof(struct dir_entry) + name_len; 6272 total_len += sizeof(struct dir_entry) + name_len; 6273 } 6274 /* Catch error encountered during iteration */ 6275 if (ret < 0) 6276 goto err; 6277 6278 btrfs_release_path(path); 6279 6280 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6281 if (ret) 6282 goto nopos; 6283 6284 if (btrfs_readdir_delayed_dir_index(ctx, &ins_list)) 6285 goto nopos; 6286 6287 /* 6288 * Stop new entries from being returned after we return the last 6289 * entry. 6290 * 6291 * New directory entries are assigned a strictly increasing 6292 * offset. This means that new entries created during readdir 6293 * are *guaranteed* to be seen in the future by that readdir. 6294 * This has broken buggy programs which operate on names as 6295 * they're returned by readdir. Until we reuse freed offsets 6296 * we have this hack to stop new entries from being returned 6297 * under the assumption that they'll never reach this huge 6298 * offset. 6299 * 6300 * This is being careful not to overflow 32bit loff_t unless the 6301 * last entry requires it because doing so has broken 32bit apps 6302 * in the past. 6303 */ 6304 if (ctx->pos >= INT_MAX) 6305 ctx->pos = LLONG_MAX; 6306 else 6307 ctx->pos = INT_MAX; 6308 nopos: 6309 ret = 0; 6310 err: 6311 if (put) 6312 btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list); 6313 return ret; 6314 } 6315 6316 /* 6317 * This is somewhat expensive, updating the tree every time the 6318 * inode changes. But, it is most likely to find the inode in cache. 6319 * FIXME, needs more benchmarking...there are no reasons other than performance 6320 * to keep or drop this code. 
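*
* Note (restating the fallback below): the update is first attempted
* with btrfs_join_transaction(), which reserves no space; only on
* -ENOSPC or -EDQUOT do we pay for a full btrfs_start_transaction()
* and retry the inode update.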
6321 */ 6322 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6323 { 6324 struct btrfs_root *root = inode->root; 6325 struct btrfs_fs_info *fs_info = root->fs_info; 6326 struct btrfs_trans_handle *trans; 6327 int ret; 6328 6329 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6330 return 0; 6331 6332 trans = btrfs_join_transaction(root); 6333 if (IS_ERR(trans)) 6334 return PTR_ERR(trans); 6335 6336 ret = btrfs_update_inode(trans, inode); 6337 if (ret == -ENOSPC || ret == -EDQUOT) { 6338 /* whoops, let's try again with the full transaction */ 6339 btrfs_end_transaction(trans); 6340 trans = btrfs_start_transaction(root, 1); 6341 if (IS_ERR(trans)) 6342 return PTR_ERR(trans); 6343 6344 ret = btrfs_update_inode(trans, inode); 6345 } 6346 btrfs_end_transaction(trans); 6347 if (inode->delayed_node) 6348 btrfs_balance_delayed_items(fs_info); 6349 6350 return ret; 6351 } 6352 6353 /* 6354 * We need our own ->update_time so that we can return an error on ENOSPC for 6355 * updating the inode in the case of file write and mmap writes. 6356 */ 6357 static int btrfs_update_time(struct inode *inode, int flags) 6358 { 6359 struct btrfs_root *root = BTRFS_I(inode)->root; 6360 bool dirty; 6361 6362 if (btrfs_root_readonly(root)) 6363 return -EROFS; 6364 6365 dirty = inode_update_timestamps(inode, flags); 6366 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6367 } 6368 6369 /* 6370 * Helper to find a free sequence number in a given directory. The current 6371 * code is very simple; later versions will do smarter things in the btree. 6372 */ 6373 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6374 { 6375 int ret = 0; 6376 6377 if (dir->index_cnt == (u64)-1) { 6378 ret = btrfs_inode_delayed_dir_index_count(dir); 6379 if (ret) { 6380 ret = btrfs_set_inode_index_count(dir); 6381 if (ret) 6382 return ret; 6383 } 6384 } 6385 6386 *index = dir->index_cnt; 6387 dir->index_cnt++; 6388 6389 return ret; 6390 } 6391 6392 static int btrfs_insert_inode_locked(struct inode *inode) 6393 { 6394 struct btrfs_iget_args args; 6395 6396 args.ino = btrfs_ino(BTRFS_I(inode)); 6397 args.root = BTRFS_I(inode)->root; 6398 6399 return insert_inode_locked4(inode, 6400 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6401 btrfs_find_actor, &args); 6402 } 6403 6404 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6405 unsigned int *trans_num_items) 6406 { 6407 struct inode *dir = args->dir; 6408 struct inode *inode = args->inode; 6409 int ret; 6410 6411 if (!args->orphan) { 6412 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6413 &args->fname); 6414 if (ret) 6415 return ret; 6416 } 6417 6418 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6419 if (ret) { 6420 fscrypt_free_filename(&args->fname); 6421 return ret; 6422 } 6423 6424 /* 1 to add inode item */ 6425 *trans_num_items = 1; 6426 /* 1 to add compression property */ 6427 if (BTRFS_I(dir)->prop_compress) 6428 (*trans_num_items)++; 6429 /* 1 to add default ACL xattr */ 6430 if (args->default_acl) 6431 (*trans_num_items)++; 6432 /* 1 to add access ACL xattr */ 6433 if (args->acl) 6434 (*trans_num_items)++; 6435 #ifdef CONFIG_SECURITY 6436 /* 1 to add LSM xattr */ 6437 if (dir->i_security) 6438 (*trans_num_items)++; 6439 #endif 6440 if (args->orphan) { 6441 /* 1 to add orphan item */ 6442 (*trans_num_items)++; 6443 } else { 6444 /* 6445 * 1 to add dir item 6446 * 1 to add dir index 6447 * 1 to update parent inode item 6448 * 6449 * No need for 1 unit for the inode ref item because it is 6450 *
inserted in a batch together with the inode item at 6451 * btrfs_create_new_inode(). 6452 */ 6453 *trans_num_items += 3; 6454 } 6455 return 0; 6456 } 6457 6458 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6459 { 6460 posix_acl_release(args->acl); 6461 posix_acl_release(args->default_acl); 6462 fscrypt_free_filename(&args->fname); 6463 } 6464 6465 /* 6466 * Inherit flags from the parent inode. 6467 * 6468 * Currently only the compression flags and the cow flags are inherited. 6469 */ 6470 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6471 { 6472 unsigned int flags; 6473 6474 flags = dir->flags; 6475 6476 if (flags & BTRFS_INODE_NOCOMPRESS) { 6477 inode->flags &= ~BTRFS_INODE_COMPRESS; 6478 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6479 } else if (flags & BTRFS_INODE_COMPRESS) { 6480 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6481 inode->flags |= BTRFS_INODE_COMPRESS; 6482 } 6483 6484 if (flags & BTRFS_INODE_NODATACOW) { 6485 inode->flags |= BTRFS_INODE_NODATACOW; 6486 if (S_ISREG(inode->vfs_inode.i_mode)) 6487 inode->flags |= BTRFS_INODE_NODATASUM; 6488 } 6489 6490 btrfs_sync_inode_flags_to_i_flags(inode); 6491 } 6492 6493 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6494 struct btrfs_new_inode_args *args) 6495 { 6496 struct timespec64 ts; 6497 struct inode *dir = args->dir; 6498 struct inode *inode = args->inode; 6499 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6500 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6501 struct btrfs_root *root; 6502 struct btrfs_inode_item *inode_item; 6503 struct btrfs_path *path; 6504 u64 objectid; 6505 struct btrfs_inode_ref *ref; 6506 struct btrfs_key key[2]; 6507 u32 sizes[2]; 6508 struct btrfs_item_batch batch; 6509 unsigned long ptr; 6510 int ret; 6511 bool xa_reserved = false; 6512 6513 path = btrfs_alloc_path(); 6514 if (!path) 6515 return -ENOMEM; 6516 6517 if (!args->subvol) 6518 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6519 root = BTRFS_I(inode)->root; 6520 6521 ret = btrfs_init_file_extent_tree(BTRFS_I(inode)); 6522 if (ret) 6523 goto out; 6524 6525 ret = btrfs_get_free_objectid(root, &objectid); 6526 if (ret) 6527 goto out; 6528 btrfs_set_inode_number(BTRFS_I(inode), objectid); 6529 6530 ret = xa_reserve(&root->inodes, objectid, GFP_NOFS); 6531 if (ret) 6532 goto out; 6533 xa_reserved = true; 6534 6535 if (args->orphan) { 6536 /* 6537 * O_TMPFILE, set link count to 0, so that after this point, we 6538 * fill in an inode item with the correct link count. 6539 */ 6540 set_nlink(inode, 0); 6541 } else { 6542 trace_btrfs_inode_request(dir); 6543 6544 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6545 if (ret) 6546 goto out; 6547 } 6548 6549 if (S_ISDIR(inode->i_mode)) 6550 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6551 6552 BTRFS_I(inode)->generation = trans->transid; 6553 inode->i_generation = BTRFS_I(inode)->generation; 6554 6555 /* 6556 * We don't have any capability xattrs set here yet, shortcut any 6557 * queries for the xattrs here. If we add them later via the inode 6558 * security init path or any other path this flag will be cleared. 6559 */ 6560 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags); 6561 6562 /* 6563 * Subvolumes don't inherit flags from their parent directory. 6564 * Originally this was probably by accident, but we probably can't 6565 * change it now without compatibility issues. 
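*
* For example (illustrative): a regular file created in a directory
* with BTRFS_INODE_NODATACOW set would get NODATACOW and NODATASUM
* through the btrfs_inherit_iflags() call below, while a new subvolume
* created in that same directory starts with neither flag.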
6566 */ 6567 if (!args->subvol) 6568 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6569 6570 btrfs_set_inode_mapping_order(BTRFS_I(inode)); 6571 if (S_ISREG(inode->i_mode)) { 6572 if (btrfs_test_opt(fs_info, NODATASUM)) 6573 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6574 if (btrfs_test_opt(fs_info, NODATACOW)) 6575 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6576 BTRFS_INODE_NODATASUM; 6577 btrfs_update_inode_mapping_flags(BTRFS_I(inode)); 6578 } 6579 6580 ret = btrfs_insert_inode_locked(inode); 6581 if (ret < 0) { 6582 if (!args->orphan) 6583 BTRFS_I(dir)->index_cnt--; 6584 goto out; 6585 } 6586 6587 /* 6588 * We could have gotten an inode number from somebody who was fsynced 6589 * and then removed in this same transaction, so let's just set full 6590 * sync since it will be a full sync anyway and this will blow away the 6591 * old info in the log. 6592 */ 6593 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6594 6595 key[0].objectid = objectid; 6596 key[0].type = BTRFS_INODE_ITEM_KEY; 6597 key[0].offset = 0; 6598 6599 sizes[0] = sizeof(struct btrfs_inode_item); 6600 6601 if (!args->orphan) { 6602 /* 6603 * Start new inodes with an inode_ref. This is slightly more 6604 * efficient for small numbers of hard links since they will 6605 * be packed into one item. Extended refs will kick in if we 6606 * add more hard links than can fit in the ref item. 6607 */ 6608 key[1].objectid = objectid; 6609 key[1].type = BTRFS_INODE_REF_KEY; 6610 if (args->subvol) { 6611 key[1].offset = objectid; 6612 sizes[1] = 2 + sizeof(*ref); 6613 } else { 6614 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6615 sizes[1] = name->len + sizeof(*ref); 6616 } 6617 } 6618 6619 batch.keys = &key[0]; 6620 batch.data_sizes = &sizes[0]; 6621 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6622 batch.nr = args->orphan ? 1 : 2; 6623 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6624 if (unlikely(ret != 0)) { 6625 btrfs_abort_transaction(trans, ret); 6626 goto discard; 6627 } 6628 6629 ts = simple_inode_init_ts(inode); 6630 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 6631 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 6632 6633 /* 6634 * We're going to fill the inode item now, so at this point the inode 6635 * must be fully initialized. 6636 */ 6637 6638 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6639 struct btrfs_inode_item); 6640 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6641 sizeof(*inode_item)); 6642 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6643 6644 if (!args->orphan) { 6645 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6646 struct btrfs_inode_ref); 6647 ptr = (unsigned long)(ref + 1); 6648 if (args->subvol) { 6649 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6650 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6651 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6652 } else { 6653 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6654 name->len); 6655 btrfs_set_inode_ref_index(path->nodes[0], ref, 6656 BTRFS_I(inode)->dir_index); 6657 write_extent_buffer(path->nodes[0], name->name, ptr, 6658 name->len); 6659 } 6660 } 6661 6662 /* 6663 * We don't need the path anymore, plus inheriting properties, adding 6664 * ACLs, security xattrs, orphan item or adding the link, will result in 6665 * allocating yet another path. So just free our path. 
6666 */ 6667 btrfs_free_path(path); 6668 path = NULL; 6669 6670 if (args->subvol) { 6671 struct btrfs_inode *parent; 6672 6673 /* 6674 * Subvolumes inherit properties from their parent subvolume, 6675 * not the directory they were created in. 6676 */ 6677 parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root); 6678 if (IS_ERR(parent)) { 6679 ret = PTR_ERR(parent); 6680 } else { 6681 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode), 6682 parent); 6683 iput(&parent->vfs_inode); 6684 } 6685 } else { 6686 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode), 6687 BTRFS_I(dir)); 6688 } 6689 if (ret) { 6690 btrfs_err(fs_info, 6691 "error inheriting props for ino %llu (root %llu): %d", 6692 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret); 6693 } 6694 6695 /* 6696 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6697 * probably a bug. 6698 */ 6699 if (!args->subvol) { 6700 ret = btrfs_init_inode_security(trans, args); 6701 if (unlikely(ret)) { 6702 btrfs_abort_transaction(trans, ret); 6703 goto discard; 6704 } 6705 } 6706 6707 ret = btrfs_add_inode_to_root(BTRFS_I(inode), false); 6708 if (WARN_ON(ret)) { 6709 /* Shouldn't happen, we used xa_reserve() before. */ 6710 btrfs_abort_transaction(trans, ret); 6711 goto discard; 6712 } 6713 6714 trace_btrfs_inode_new(inode); 6715 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6716 6717 btrfs_update_root_times(trans, root); 6718 6719 if (args->orphan) { 6720 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6721 if (unlikely(ret)) { 6722 btrfs_abort_transaction(trans, ret); 6723 goto discard; 6724 } 6725 } else { 6726 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6727 0, BTRFS_I(inode)->dir_index); 6728 if (unlikely(ret)) { 6729 btrfs_abort_transaction(trans, ret); 6730 goto discard; 6731 } 6732 } 6733 6734 return 0; 6735 6736 discard: 6737 /* 6738 * discard_new_inode() calls iput(), but the caller owns the reference 6739 * to the inode. 6740 */ 6741 ihold(inode); 6742 discard_new_inode(inode); 6743 out: 6744 if (xa_reserved) 6745 xa_release(&root->inodes, objectid); 6746 6747 btrfs_free_path(path); 6748 return ret; 6749 } 6750 6751 /* 6752 * Utility function to add 'inode' into 'parent_inode' with 6753 * a given name and a given sequence number. 6754 * If 'add_backref' is true, also insert a backref from the 6755 * inode to the parent directory.
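*
* When 'inode' is the root of a subvolume (its ino is
* BTRFS_FIRST_FREE_OBJECTID), the dir item is keyed by the subvolume's
* root key and a root ref is inserted instead of an inode ref; see the
* two unlikely() branches below.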
6756 */ 6757 int btrfs_add_link(struct btrfs_trans_handle *trans, 6758 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6759 const struct fscrypt_str *name, bool add_backref, u64 index) 6760 { 6761 int ret = 0; 6762 struct btrfs_key key; 6763 struct btrfs_root *root = parent_inode->root; 6764 u64 ino = btrfs_ino(inode); 6765 u64 parent_ino = btrfs_ino(parent_inode); 6766 6767 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6768 memcpy(&key, &inode->root->root_key, sizeof(key)); 6769 } else { 6770 key.objectid = ino; 6771 key.type = BTRFS_INODE_ITEM_KEY; 6772 key.offset = 0; 6773 } 6774 6775 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6776 ret = btrfs_add_root_ref(trans, key.objectid, 6777 btrfs_root_id(root), parent_ino, 6778 index, name); 6779 } else if (add_backref) { 6780 ret = btrfs_insert_inode_ref(trans, root, name, 6781 ino, parent_ino, index); 6782 } 6783 6784 /* Nothing to clean up yet */ 6785 if (ret) 6786 return ret; 6787 6788 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6789 btrfs_inode_type(inode), index); 6790 if (ret == -EEXIST || ret == -EOVERFLOW) 6791 goto fail_dir_item; 6792 else if (unlikely(ret)) { 6793 btrfs_abort_transaction(trans, ret); 6794 return ret; 6795 } 6796 6797 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6798 name->len * 2); 6799 inode_inc_iversion(&parent_inode->vfs_inode); 6800 update_time_after_link_or_unlink(parent_inode); 6801 6802 ret = btrfs_update_inode(trans, parent_inode); 6803 if (ret) 6804 btrfs_abort_transaction(trans, ret); 6805 return ret; 6806 6807 fail_dir_item: 6808 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6809 u64 local_index; 6810 int ret2; 6811 6812 ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root), 6813 parent_ino, &local_index, name); 6814 if (ret2) 6815 btrfs_abort_transaction(trans, ret2); 6816 } else if (add_backref) { 6817 int ret2; 6818 6819 ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL); 6820 if (ret2) 6821 btrfs_abort_transaction(trans, ret2); 6822 } 6823 6824 /* Return the original error code */ 6825 return ret; 6826 } 6827 6828 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6829 struct inode *inode) 6830 { 6831 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6832 struct btrfs_root *root = BTRFS_I(dir)->root; 6833 struct btrfs_new_inode_args new_inode_args = { 6834 .dir = dir, 6835 .dentry = dentry, 6836 .inode = inode, 6837 }; 6838 unsigned int trans_num_items; 6839 struct btrfs_trans_handle *trans; 6840 int ret; 6841 6842 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6843 if (ret) 6844 goto out_inode; 6845 6846 trans = btrfs_start_transaction(root, trans_num_items); 6847 if (IS_ERR(trans)) { 6848 ret = PTR_ERR(trans); 6849 goto out_new_inode_args; 6850 } 6851 6852 ret = btrfs_create_new_inode(trans, &new_inode_args); 6853 if (!ret) { 6854 if (S_ISDIR(inode->i_mode)) 6855 inode->i_opflags |= IOP_FASTPERM_MAY_EXEC; 6856 d_instantiate_new(dentry, inode); 6857 } 6858 6859 btrfs_end_transaction(trans); 6860 btrfs_btree_balance_dirty(fs_info); 6861 out_new_inode_args: 6862 btrfs_new_inode_args_destroy(&new_inode_args); 6863 out_inode: 6864 if (ret) 6865 iput(inode); 6866 return ret; 6867 } 6868 6869 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6870 struct dentry *dentry, umode_t mode, dev_t rdev) 6871 { 6872 struct inode *inode; 6873 6874 inode = new_inode(dir->i_sb); 6875 if (!inode) 6876 return -ENOMEM; 6877 inode_init_owner(idmap, inode, dir, mode); 
6878 inode->i_op = &btrfs_special_inode_operations; 6879 init_special_inode(inode, inode->i_mode, rdev); 6880 return btrfs_create_common(dir, dentry, inode); 6881 } 6882 6883 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6884 struct dentry *dentry, umode_t mode, bool excl) 6885 { 6886 struct inode *inode; 6887 6888 inode = new_inode(dir->i_sb); 6889 if (!inode) 6890 return -ENOMEM; 6891 inode_init_owner(idmap, inode, dir, mode); 6892 inode->i_fop = &btrfs_file_operations; 6893 inode->i_op = &btrfs_file_inode_operations; 6894 inode->i_mapping->a_ops = &btrfs_aops; 6895 return btrfs_create_common(dir, dentry, inode); 6896 } 6897 6898 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6899 struct dentry *dentry) 6900 { 6901 struct btrfs_trans_handle *trans = NULL; 6902 struct btrfs_root *root = BTRFS_I(dir)->root; 6903 struct inode *inode = d_inode(old_dentry); 6904 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 6905 struct fscrypt_name fname; 6906 u64 index; 6907 int ret; 6908 6909 /* do not allow sys_link's with other subvols of the same device */ 6910 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root)) 6911 return -EXDEV; 6912 6913 if (inode->i_nlink >= BTRFS_LINK_MAX) 6914 return -EMLINK; 6915 6916 ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6917 if (ret) 6918 goto fail; 6919 6920 ret = btrfs_set_inode_index(BTRFS_I(dir), &index); 6921 if (ret) 6922 goto fail; 6923 6924 /* 6925 * 2 items for inode and inode ref 6926 * 2 items for dir items 6927 * 1 item for parent inode 6928 * 1 item for orphan item deletion if O_TMPFILE 6929 */ 6930 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6931 if (IS_ERR(trans)) { 6932 ret = PTR_ERR(trans); 6933 trans = NULL; 6934 goto fail; 6935 } 6936 6937 /* There are several dir indexes for this inode, clear the cache. */ 6938 BTRFS_I(inode)->dir_index = 0ULL; 6939 inode_inc_iversion(inode); 6940 inode_set_ctime_current(inode); 6941 6942 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6943 &fname.disk_name, 1, index); 6944 if (ret) 6945 goto fail; 6946 6947 /* Link added now we update the inode item with the new link count. */ 6948 inc_nlink(inode); 6949 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 6950 if (unlikely(ret)) { 6951 btrfs_abort_transaction(trans, ret); 6952 goto fail; 6953 } 6954 6955 if (inode->i_nlink == 1) { 6956 /* 6957 * If the new hard link count is 1, it's a file created with the 6958 * open(2) O_TMPFILE flag. 6959 */ 6960 ret = btrfs_orphan_del(trans, BTRFS_I(inode)); 6961 if (unlikely(ret)) { 6962 btrfs_abort_transaction(trans, ret); 6963 goto fail; 6964 } 6965 } 6966 6967 /* Grab reference for the new dentry passed to d_instantiate(). 
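* d_instantiate() consumes one reference to the inode for the new
* dentry, so take an extra reference here with ihold() first.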
*/ 6968 ihold(inode); 6969 d_instantiate(dentry, inode); 6970 btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent); 6971 6972 fail: 6973 fscrypt_free_filename(&fname); 6974 if (trans) 6975 btrfs_end_transaction(trans); 6976 btrfs_btree_balance_dirty(fs_info); 6977 return ret; 6978 } 6979 6980 static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6981 struct dentry *dentry, umode_t mode) 6982 { 6983 struct inode *inode; 6984 6985 inode = new_inode(dir->i_sb); 6986 if (!inode) 6987 return ERR_PTR(-ENOMEM); 6988 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6989 inode->i_op = &btrfs_dir_inode_operations; 6990 inode->i_fop = &btrfs_dir_file_operations; 6991 return ERR_PTR(btrfs_create_common(dir, dentry, inode)); 6992 } 6993 6994 static noinline int uncompress_inline(struct btrfs_path *path, 6995 struct folio *folio, 6996 struct btrfs_file_extent_item *item) 6997 { 6998 int ret; 6999 struct extent_buffer *leaf = path->nodes[0]; 7000 const u32 blocksize = leaf->fs_info->sectorsize; 7001 char *tmp; 7002 size_t max_size; 7003 unsigned long inline_size; 7004 unsigned long ptr; 7005 int compress_type; 7006 7007 compress_type = btrfs_file_extent_compression(leaf, item); 7008 max_size = btrfs_file_extent_ram_bytes(leaf, item); 7009 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 7010 tmp = kmalloc(inline_size, GFP_NOFS); 7011 if (!tmp) 7012 return -ENOMEM; 7013 ptr = btrfs_file_extent_inline_start(item); 7014 7015 read_extent_buffer(leaf, tmp, ptr, inline_size); 7016 7017 max_size = min_t(unsigned long, blocksize, max_size); 7018 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size, 7019 max_size); 7020 7021 /* 7022 * decompression code contains a memset to fill in any space between the end 7023 * of the uncompressed data and the end of max_size in case the decompressed 7024 * data ends up shorter than ram_bytes. That doesn't cover the hole between 7025 * the end of an inline extent and the beginning of the next block, so we 7026 * cover that region here. 7027 */ 7028 7029 if (max_size < blocksize) 7030 folio_zero_range(folio, max_size, blocksize - max_size); 7031 kfree(tmp); 7032 return ret; 7033 } 7034 7035 static int read_inline_extent(struct btrfs_path *path, struct folio *folio) 7036 { 7037 const u32 blocksize = path->nodes[0]->fs_info->sectorsize; 7038 struct btrfs_file_extent_item *fi; 7039 void *kaddr; 7040 size_t copy_size; 7041 7042 if (!folio || folio_test_uptodate(folio)) 7043 return 0; 7044 7045 ASSERT(folio_pos(folio) == 0); 7046 7047 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 7048 struct btrfs_file_extent_item); 7049 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 7050 return uncompress_inline(path, folio, fi); 7051 7052 copy_size = min_t(u64, blocksize, 7053 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 7054 kaddr = kmap_local_folio(folio, 0); 7055 read_extent_buffer(path->nodes[0], kaddr, 7056 btrfs_file_extent_inline_start(fi), copy_size); 7057 kunmap_local(kaddr); 7058 if (copy_size < blocksize) 7059 folio_zero_range(folio, copy_size, blocksize - copy_size); 7060 return 0; 7061 } 7062 7063 /* 7064 * Lookup the first extent overlapping a range in a file. 
7065 *
7066 * @inode: file to search in
7067 * @folio: folio to read extent data into if the extent is inline
7068 * @start: file offset
7069 * @len: length of range starting at @start
7070 *
7071 * Return the first &struct extent_map which overlaps the given range, reading
7072 * it from the B-tree and caching it if necessary. Note that there may be more
7073 * extents which overlap the given range after the returned extent_map.
7074 *
7075 * If @folio is not NULL and the extent is inline, this also reads the extent
7076 * data directly into the folio and marks the extent up to date in the io_tree.
7077 *
7078 * Return: ERR_PTR on error, non-NULL extent_map on success.
7079 */
7080 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7081 struct folio *folio, u64 start, u64 len)
7082 {
7083 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7084 int ret = 0;
7085 u64 extent_start = 0;
7086 u64 extent_end = 0;
7087 u64 objectid = btrfs_ino(inode);
7088 int extent_type = -1;
7089 struct btrfs_path *path = NULL;
7090 struct btrfs_root *root = inode->root;
7091 struct btrfs_file_extent_item *item;
7092 struct extent_buffer *leaf;
7093 struct btrfs_key found_key;
7094 struct extent_map *em = NULL;
7095 struct extent_map_tree *em_tree = &inode->extent_tree;
7096
7097 read_lock(&em_tree->lock);
7098 em = btrfs_lookup_extent_mapping(em_tree, start, len);
7099 read_unlock(&em_tree->lock);
7100
7101 if (em) {
7102 if (em->start > start || em->start + em->len <= start)
7103 btrfs_free_extent_map(em);
7104 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
7105 btrfs_free_extent_map(em);
7106 else
7107 goto out;
7108 }
7109 em = btrfs_alloc_extent_map();
7110 if (!em) {
7111 ret = -ENOMEM;
7112 goto out;
7113 }
7114 em->start = EXTENT_MAP_HOLE;
7115 em->disk_bytenr = EXTENT_MAP_HOLE;
7116 em->len = (u64)-1;
7117
7118 path = btrfs_alloc_path();
7119 if (!path) {
7120 ret = -ENOMEM;
7121 goto out;
7122 }
7123
7124 /* Chances are we'll be called again, so go ahead and do readahead */
7125 path->reada = READA_FORWARD;
7126
7127 /*
7128 * The same explanation in load_free_space_cache applies here as well:
7129 * we only read when we're loading the free space cache, and at that
7130 * point the commit_root has everything we need.
7131 */
7132 if (btrfs_is_free_space_inode(inode)) {
7133 path->search_commit_root = true;
7134 path->skip_locking = true;
7135 }
7136
7137 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7138 if (ret < 0) {
7139 goto out;
7140 } else if (ret > 0) {
7141 if (path->slots[0] == 0)
7142 goto not_found;
7143 path->slots[0]--;
7144 ret = 0;
7145 }
7146
7147 leaf = path->nodes[0];
7148 item = btrfs_item_ptr(leaf, path->slots[0],
7149 struct btrfs_file_extent_item);
7150 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7151 if (found_key.objectid != objectid ||
7152 found_key.type != BTRFS_EXTENT_DATA_KEY) {
7153 /*
7154 * If we back up past the first extent we want to move forward
7155 * and see if there is an extent in front of us, otherwise we'll
7156 * say there is a hole for our whole search range which can
7157 * cause problems.
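 * (Concretely: we set extent_end to @start and fall through to the
 * "next" label below to examine the following item instead.)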
7158 */ 7159 extent_end = start; 7160 goto next; 7161 } 7162 7163 extent_type = btrfs_file_extent_type(leaf, item); 7164 extent_start = found_key.offset; 7165 extent_end = btrfs_file_extent_end(path); 7166 if (extent_type == BTRFS_FILE_EXTENT_REG || 7167 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7168 /* Only regular file could have regular/prealloc extent */ 7169 if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) { 7170 ret = -EUCLEAN; 7171 btrfs_crit(fs_info, 7172 "regular/prealloc extent found for non-regular inode %llu", 7173 btrfs_ino(inode)); 7174 goto out; 7175 } 7176 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 7177 extent_start); 7178 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7179 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 7180 path->slots[0], 7181 extent_start); 7182 } 7183 next: 7184 if (start >= extent_end) { 7185 path->slots[0]++; 7186 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 7187 ret = btrfs_next_leaf(root, path); 7188 if (ret < 0) 7189 goto out; 7190 else if (ret > 0) 7191 goto not_found; 7192 7193 leaf = path->nodes[0]; 7194 } 7195 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7196 if (found_key.objectid != objectid || 7197 found_key.type != BTRFS_EXTENT_DATA_KEY) 7198 goto not_found; 7199 if (start + len <= found_key.offset) 7200 goto not_found; 7201 if (start > found_key.offset) 7202 goto next; 7203 7204 /* New extent overlaps with existing one */ 7205 em->start = start; 7206 em->len = found_key.offset - start; 7207 em->disk_bytenr = EXTENT_MAP_HOLE; 7208 goto insert; 7209 } 7210 7211 btrfs_extent_item_to_extent_map(inode, path, item, em); 7212 7213 if (extent_type == BTRFS_FILE_EXTENT_REG || 7214 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7215 goto insert; 7216 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7217 /* 7218 * Inline extent can only exist at file offset 0. This is 7219 * ensured by tree-checker and inline extent creation path. 7220 * Thus all members representing file offsets should be zero. 7221 */ 7222 ASSERT(extent_start == 0); 7223 ASSERT(em->start == 0); 7224 7225 /* 7226 * btrfs_extent_item_to_extent_map() should have properly 7227 * initialized em members already. 7228 * 7229 * Other members are not utilized for inline extents. 7230 */ 7231 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE); 7232 ASSERT(em->len == fs_info->sectorsize); 7233 7234 ret = read_inline_extent(path, folio); 7235 if (ret < 0) 7236 goto out; 7237 goto insert; 7238 } 7239 not_found: 7240 em->start = start; 7241 em->len = len; 7242 em->disk_bytenr = EXTENT_MAP_HOLE; 7243 insert: 7244 ret = 0; 7245 btrfs_release_path(path); 7246 if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) { 7247 btrfs_err(fs_info, 7248 "bad extent! 
em: [%llu %llu] passed [%llu %llu]",
7249 em->start, em->len, start, len);
7250 ret = -EIO;
7251 goto out;
7252 }
7253
7254 write_lock(&em_tree->lock);
7255 ret = btrfs_add_extent_mapping(inode, &em, start, len);
7256 write_unlock(&em_tree->lock);
7257 out:
7258 btrfs_free_path(path);
7259
7260 trace_btrfs_get_extent(root, inode, em);
7261
7262 if (ret) {
7263 btrfs_free_extent_map(em);
7264 return ERR_PTR(ret);
7265 }
7266 return em;
7267 }
7268
7269 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7270 {
7271 struct btrfs_block_group *block_group;
7272 bool readonly = false;
7273
7274 block_group = btrfs_lookup_block_group(fs_info, bytenr);
7275 if (!block_group || block_group->ro)
7276 readonly = true;
7277 if (block_group)
7278 btrfs_put_block_group(block_group);
7279 return readonly;
7280 }
7281
7282 /*
7283 * Check if we can do nocow write into the range [@offset, @offset + @len)
7284 *
7285 * @offset: File offset
7286 * @len: The length to write, will be updated to the nocow writeable
7287 * range
7288 * @file_extent: (optional) Return the details of the file extent (disk
7289 * bytenr, offset, num_bytes, ...) that allows the NOCOW write
7290 * @nowait: Whether we are called from a context that must not sleep
7291 *
7292 * Return:
7293 * >0 and update @len if we can do nocow write
7294 * 0 if we can't do nocow write
7295 * <0 if error happened
7296 *
7297 * NOTE: This only checks the file extents, the caller is responsible for
7298 * waiting for any ordered extents.
7299 */
7300 noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
7301 struct btrfs_file_extent *file_extent,
7302 bool nowait)
7303 {
7304 struct btrfs_root *root = inode->root;
7305 struct btrfs_fs_info *fs_info = root->fs_info;
7306 struct can_nocow_file_extent_args nocow_args = { 0 };
7307 BTRFS_PATH_AUTO_FREE(path);
7308 int ret;
7309 struct extent_buffer *leaf;
7310 struct extent_io_tree *io_tree = &inode->io_tree;
7311 struct btrfs_file_extent_item *fi;
7312 struct btrfs_key key;
7313 int found_type;
7314
7315 path = btrfs_alloc_path();
7316 if (!path)
7317 return -ENOMEM;
7318 path->nowait = nowait;
7319
7320 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7321 offset, 0);
7322 if (ret < 0)
7323 return ret;
7324
7325 if (ret == 1) {
7326 if (path->slots[0] == 0) {
7327 /* Can't find the item, must COW. */
7328 return 0;
7329 }
7330 path->slots[0]--;
7331 }
7332 ret = 0;
7333 leaf = path->nodes[0];
7334 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7335 if (key.objectid != btrfs_ino(inode) ||
7336 key.type != BTRFS_EXTENT_DATA_KEY) {
7337 /* Not our file or wrong item type, must COW. */
7338 return 0;
7339 }
7340
7341 if (key.offset > offset) {
7342 /* Wrong offset, must COW. */
7343 return 0;
7344 }
7345
7346 if (btrfs_file_extent_end(path) <= offset)
7347 return 0;
7348
7349 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7350 found_type = btrfs_file_extent_type(leaf, fi);
7351
7352 nocow_args.start = offset;
7353 nocow_args.end = offset + *len - 1;
7354 nocow_args.free_path = true;
7355
7356 ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
7357 /* can_nocow_file_extent() has freed the path. */
7358 path = NULL;
7359
7360 if (ret != 1) {
7361 /* Treat errors as not being able to NOCOW.
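 * can_nocow_file_extent() returns 1 only if every NOCOW precondition
 * holds; both 0 and negative returns mean the write must fall back to
 * COW, so collapse them all into "can't NOCOW" here.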
*/
7362 return 0;
7363 }
7364
7365 if (btrfs_extent_readonly(fs_info,
7366 nocow_args.file_extent.disk_bytenr +
7367 nocow_args.file_extent.offset))
7368 return 0;
7369
7370 if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
7371 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7372 u64 range_end;
7373
7374 range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7375 root->fs_info->sectorsize) - 1;
7376 ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
7377 EXTENT_DELALLOC);
7378 if (ret)
7379 return -EAGAIN;
7380 }
7381
7382 if (file_extent)
7383 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7384
7385 *len = nocow_args.file_extent.num_bytes;
7386
7387 return 1;
7388 }
7389
7390 /* The callers of this must take lock_extent() */
7391 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7392 const struct btrfs_file_extent *file_extent,
7393 int type)
7394 {
7395 struct extent_map *em;
7396 int ret;
7397
7398 /*
7399 * Note the missing NOCOW type.
7400 *
7401 * For pure NOCOW writes, we should not create an io extent map, but
7402 * just reuse the existing one.
7403 * Only PREALLOC writes (NOCOW write into preallocated range) can
7404 * create an io extent map.
7405 */
7406 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7407 type == BTRFS_ORDERED_COMPRESSED ||
7408 type == BTRFS_ORDERED_REGULAR);
7409
7410 switch (type) {
7411 case BTRFS_ORDERED_PREALLOC:
7412 /* We're only referring to part of a larger preallocated extent. */
7413 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7414 break;
7415 case BTRFS_ORDERED_REGULAR:
7416 /* COW results in a new extent matching our file extent size. */
7417 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7418 ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7419
7420 /* Since it's a new extent, we should not have any offset. */
7421 ASSERT(file_extent->offset == 0);
7422 break;
7423 case BTRFS_ORDERED_COMPRESSED:
7424 /* Must be compressed. */
7425 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7426
7427 /*
7428 * An encoded write can make us refer to part of the
7429 * uncompressed extent.
7430 */
7431 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7432 break;
7433 }
7434
7435 em = btrfs_alloc_extent_map();
7436 if (!em)
7437 return ERR_PTR(-ENOMEM);
7438
7439 em->start = start;
7440 em->len = file_extent->num_bytes;
7441 em->disk_bytenr = file_extent->disk_bytenr;
7442 em->disk_num_bytes = file_extent->disk_num_bytes;
7443 em->ram_bytes = file_extent->ram_bytes;
7444 em->generation = -1;
7445 em->offset = file_extent->offset;
7446 em->flags |= EXTENT_FLAG_PINNED;
7447 if (type == BTRFS_ORDERED_COMPRESSED)
7448 btrfs_extent_map_set_compression(em, file_extent->compression);
7449
7450 ret = btrfs_replace_extent_map_range(inode, em, true);
7451 if (ret) {
7452 btrfs_free_extent_map(em);
7453 return ERR_PTR(ret);
7454 }
7455
7456 /* The em now holds 2 refs, callers need to call btrfs_free_extent_map() once. */
7457 return em;
7458 }
7459
7460 /*
7461 * For release_folio() and invalidate_folio() we have a race window where
7462 * folio_end_writeback() is called but the subpage spinlock is not yet released.
7463 * If we continue to release/invalidate the folio, we could cause a
7464 * use-after-free on the subpage spinlock. So this function spins and waits
7465 * for the subpage spinlock.
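 * The wait is implemented by simply acquiring and releasing bfs->lock
 * below: once we get the lock, no endio handler can still be inside its
 * critical section for this folio.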
7466 */ 7467 static void wait_subpage_spinlock(struct folio *folio) 7468 { 7469 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); 7470 struct btrfs_folio_state *bfs; 7471 7472 if (!btrfs_is_subpage(fs_info, folio)) 7473 return; 7474 7475 ASSERT(folio_test_private(folio) && folio_get_private(folio)); 7476 bfs = folio_get_private(folio); 7477 7478 /* 7479 * This may look insane as we just acquire the spinlock and release it, 7480 * without doing anything. But we just want to make sure no one is 7481 * still holding the subpage spinlock. 7482 * And since the page is not dirty nor writeback, and we have page 7483 * locked, the only possible way to hold a spinlock is from the endio 7484 * function to clear page writeback. 7485 * 7486 * Here we just acquire the spinlock so that all existing callers 7487 * should exit and we're safe to release/invalidate the page. 7488 */ 7489 spin_lock_irq(&bfs->lock); 7490 spin_unlock_irq(&bfs->lock); 7491 } 7492 7493 static int btrfs_launder_folio(struct folio *folio) 7494 { 7495 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio), 7496 folio_size(folio), NULL); 7497 } 7498 7499 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7500 { 7501 if (try_release_extent_mapping(folio, gfp_flags)) { 7502 wait_subpage_spinlock(folio); 7503 clear_folio_extent_mapped(folio); 7504 return true; 7505 } 7506 return false; 7507 } 7508 7509 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7510 { 7511 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 7512 return false; 7513 return __btrfs_release_folio(folio, gfp_flags); 7514 } 7515 7516 #ifdef CONFIG_MIGRATION 7517 static int btrfs_migrate_folio(struct address_space *mapping, 7518 struct folio *dst, struct folio *src, 7519 enum migrate_mode mode) 7520 { 7521 int ret = filemap_migrate_folio(mapping, dst, src, mode); 7522 7523 if (ret) 7524 return ret; 7525 7526 if (folio_test_ordered(src)) { 7527 folio_clear_ordered(src); 7528 folio_set_ordered(dst); 7529 } 7530 7531 return 0; 7532 } 7533 #else 7534 #define btrfs_migrate_folio NULL 7535 #endif 7536 7537 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 7538 size_t length) 7539 { 7540 struct btrfs_inode *inode = folio_to_inode(folio); 7541 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7542 struct extent_io_tree *tree = &inode->io_tree; 7543 struct extent_state *cached_state = NULL; 7544 u64 page_start = folio_pos(folio); 7545 u64 page_end = page_start + folio_size(folio) - 1; 7546 u64 cur; 7547 int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING; 7548 7549 /* 7550 * We have folio locked so no new ordered extent can be created on this 7551 * page, nor bio can be submitted for this folio. 7552 * 7553 * But already submitted bio can still be finished on this folio. 7554 * Furthermore, endio function won't skip folio which has Ordered 7555 * already cleared, so it's possible for endio and 7556 * invalidate_folio to do the same ordered extent accounting twice 7557 * on one folio. 7558 * 7559 * So here we wait for any submitted bios to finish, so that we won't 7560 * do double ordered extent accounting on the same folio. 7561 */ 7562 folio_wait_writeback(folio); 7563 wait_subpage_spinlock(folio); 7564 7565 /* 7566 * For subpage case, we have call sites like 7567 * btrfs_punch_hole_lock_range() which passes range not aligned to 7568 * sectorsize. 
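 * (For example, punching a 4K hole in the middle of a 16K folio on a
 * 4K sector size filesystem invalidates only part of the folio.)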
7569 * If the range doesn't cover the full folio, we don't need to and 7570 * shouldn't clear page extent mapped, as folio->private can still 7571 * record subpage dirty bits for other part of the range. 7572 * 7573 * For cases that invalidate the full folio even the range doesn't 7574 * cover the full folio, like invalidating the last folio, we're 7575 * still safe to wait for ordered extent to finish. 7576 */ 7577 if (!(offset == 0 && length == folio_size(folio))) { 7578 btrfs_release_folio(folio, GFP_NOFS); 7579 return; 7580 } 7581 7582 if (!inode_evicting) 7583 btrfs_lock_extent(tree, page_start, page_end, &cached_state); 7584 7585 cur = page_start; 7586 while (cur < page_end) { 7587 struct btrfs_ordered_extent *ordered; 7588 u64 range_end; 7589 u32 range_len; 7590 u32 extra_flags = 0; 7591 7592 ordered = btrfs_lookup_first_ordered_range(inode, cur, 7593 page_end + 1 - cur); 7594 if (!ordered) { 7595 range_end = page_end; 7596 /* 7597 * No ordered extent covering this range, we are safe 7598 * to delete all extent states in the range. 7599 */ 7600 extra_flags = EXTENT_CLEAR_ALL_BITS; 7601 goto next; 7602 } 7603 if (ordered->file_offset > cur) { 7604 /* 7605 * There is a range between [cur, oe->file_offset) not 7606 * covered by any ordered extent. 7607 * We are safe to delete all extent states, and handle 7608 * the ordered extent in the next iteration. 7609 */ 7610 range_end = ordered->file_offset - 1; 7611 extra_flags = EXTENT_CLEAR_ALL_BITS; 7612 goto next; 7613 } 7614 7615 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 7616 page_end); 7617 ASSERT(range_end + 1 - cur < U32_MAX); 7618 range_len = range_end + 1 - cur; 7619 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) { 7620 /* 7621 * If Ordered is cleared, it means endio has 7622 * already been executed for the range. 7623 * We can't delete the extent states as 7624 * btrfs_finish_ordered_io() may still use some of them. 7625 */ 7626 goto next; 7627 } 7628 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len); 7629 7630 /* 7631 * IO on this page will never be started, so we need to account 7632 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 7633 * here, must leave that up for the ordered extent completion. 7634 * 7635 * This will also unlock the range for incoming 7636 * btrfs_finish_ordered_io(). 7637 */ 7638 if (!inode_evicting) 7639 btrfs_clear_extent_bit(tree, cur, range_end, 7640 EXTENT_DELALLOC | 7641 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 7642 EXTENT_DEFRAG, &cached_state); 7643 7644 spin_lock(&inode->ordered_tree_lock); 7645 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 7646 ordered->truncated_len = min(ordered->truncated_len, 7647 cur - ordered->file_offset); 7648 spin_unlock(&inode->ordered_tree_lock); 7649 7650 /* 7651 * If the ordered extent has finished, we're safe to delete all 7652 * the extent states of the range, otherwise 7653 * btrfs_finish_ordered_io() will get executed by endio for 7654 * other pages, so we can't delete extent states. 7655 */ 7656 if (btrfs_dec_test_ordered_pending(inode, &ordered, 7657 cur, range_end + 1 - cur)) { 7658 btrfs_finish_ordered_io(ordered); 7659 /* 7660 * The ordered extent has finished, now we're again 7661 * safe to delete all extent states of the range. 
7662 */ 7663 extra_flags = EXTENT_CLEAR_ALL_BITS; 7664 } 7665 next: 7666 if (ordered) 7667 btrfs_put_ordered_extent(ordered); 7668 /* 7669 * Qgroup reserved space handler 7670 * Sector(s) here will be either: 7671 * 7672 * 1) Already written to disk or bio already finished 7673 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 7674 * Qgroup will be handled by its qgroup_record then. 7675 * btrfs_qgroup_free_data() call will do nothing here. 7676 * 7677 * 2) Not written to disk yet 7678 * Then btrfs_qgroup_free_data() call will clear the 7679 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 7680 * reserved data space. 7681 * Since the IO will never happen for this page. 7682 */ 7683 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); 7684 if (!inode_evicting) 7685 btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 7686 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 7687 EXTENT_DEFRAG | extra_flags, 7688 &cached_state); 7689 cur = range_end + 1; 7690 } 7691 /* 7692 * We have iterated through all ordered extents of the page, the page 7693 * should not have Ordered anymore, or the above iteration 7694 * did something wrong. 7695 */ 7696 ASSERT(!folio_test_ordered(folio)); 7697 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio)); 7698 if (!inode_evicting) 7699 __btrfs_release_folio(folio, GFP_NOFS); 7700 clear_folio_extent_mapped(folio); 7701 } 7702 7703 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) 7704 { 7705 struct btrfs_truncate_control control = { 7706 .inode = inode, 7707 .ino = btrfs_ino(inode), 7708 .min_type = BTRFS_EXTENT_DATA_KEY, 7709 .clear_extent_range = true, 7710 .new_size = inode->vfs_inode.i_size, 7711 }; 7712 struct btrfs_root *root = inode->root; 7713 struct btrfs_fs_info *fs_info = root->fs_info; 7714 struct btrfs_block_rsv rsv; 7715 int ret; 7716 struct btrfs_trans_handle *trans; 7717 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 7718 const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize); 7719 const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize); 7720 7721 /* Our inode is locked and the i_size can't be changed concurrently. */ 7722 btrfs_assert_inode_locked(inode); 7723 7724 if (!skip_writeback) { 7725 ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1); 7726 if (ret) 7727 return ret; 7728 } 7729 7730 /* 7731 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 7732 * things going on here: 7733 * 7734 * 1) We need to reserve space to update our inode. 7735 * 7736 * 2) We need to have something to cache all the space that is going to 7737 * be free'd up by the truncate operation, but also have some slack 7738 * space reserved in case it uses space during the truncate (thank you 7739 * very much snapshotting). 7740 * 7741 * And we need these to be separate. The fact is we can use a lot of 7742 * space doing the truncate, and we have no earthly idea how much space 7743 * we will use, so we need the truncate reservation to be separate so it 7744 * doesn't end up using space reserved for updating the inode. We also 7745 * need to be able to stop the transaction and start a new one, which 7746 * means we need to be able to update the inode several times, and we 7747 * have no idea of knowing how many times that will be, so we can't just 7748 * reserve 1 item for the entirety of the operation, so that has to be 7749 * done separately as well. 
7750 *
7751 * So that leaves us with
7752 *
7753 * 1) rsv - for the truncate reservation, which we will steal from the
7754 * transaction reservation.
7755 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
7756 * updating the inode.
7757 */
7758 btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
7759 rsv.size = min_size;
7760 rsv.failfast = true;
7761
7762 /*
7763 * 1 for the truncate slack space
7764 * 1 for updating the inode.
7765 */
7766 trans = btrfs_start_transaction(root, 2);
7767 if (IS_ERR(trans)) {
7768 ret = PTR_ERR(trans);
7769 goto out;
7770 }
7771
7772 /* Migrate the slack space for the truncate to our reserve */
7773 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
7774 min_size, false);
7775 /*
7776 * We have reserved 2 metadata units when we started the transaction and
7777 * min_size matches 1 unit, so this should never fail, but if it does,
7778 * it's not critical and we just fail the truncation.
7779 */
7780 if (WARN_ON(ret)) {
7781 btrfs_end_transaction(trans);
7782 goto out;
7783 }
7784
7785 trans->block_rsv = &rsv;
7786
7787 while (1) {
7788 struct extent_state *cached_state = NULL;
7789
7790 btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7791 /*
7792 * We want to drop from the next block forward in case this new
7793 * size is not block aligned since we will be keeping the last
7794 * block of the extent just the way it is.
7795 */
7796 btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
7797
7798 ret = btrfs_truncate_inode_items(trans, root, &control);
7799
7800 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7801 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7802
7803 btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7804
7805 trans->block_rsv = &fs_info->trans_block_rsv;
7806 if (ret != -ENOSPC && ret != -EAGAIN)
7807 break;
7808
7809 ret = btrfs_update_inode(trans, inode);
7810 if (ret)
7811 break;
7812
7813 btrfs_end_transaction(trans);
7814 btrfs_btree_balance_dirty(fs_info);
7815
7816 trans = btrfs_start_transaction(root, 2);
7817 if (IS_ERR(trans)) {
7818 ret = PTR_ERR(trans);
7819 trans = NULL;
7820 break;
7821 }
7822
7823 btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
7824 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7825 &rsv, min_size, false);
7826 /*
7827 * We have reserved 2 metadata units when we started the
7828 * transaction and min_size matches 1 unit, so this should never
7829 * fail, but if it does, it's not critical and we just fail truncation.
7830 */
7831 if (WARN_ON(ret))
7832 break;
7833
7834 trans->block_rsv = &rsv;
7835 }
7836
7837 /*
7838 * We can't call btrfs_truncate_block() inside a trans handle as we could
7839 * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then we
7840 * know we've truncated everything except the last little bit, and can
7841 * do btrfs_truncate_block() and then update the disk_i_size.
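 * (That is: when the new i_size is not block aligned, the tree items are
 * already gone but the tail of the last block still needs zeroing, which
 * btrfs_truncate_block() does outside of any transaction.)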
7842 */ 7843 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 7844 btrfs_end_transaction(trans); 7845 btrfs_btree_balance_dirty(fs_info); 7846 7847 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 7848 inode->vfs_inode.i_size, (u64)-1); 7849 if (ret) 7850 goto out; 7851 trans = btrfs_start_transaction(root, 1); 7852 if (IS_ERR(trans)) { 7853 ret = PTR_ERR(trans); 7854 goto out; 7855 } 7856 btrfs_inode_safe_disk_i_size_write(inode, 0); 7857 } 7858 7859 if (trans) { 7860 int ret2; 7861 7862 trans->block_rsv = &fs_info->trans_block_rsv; 7863 ret2 = btrfs_update_inode(trans, inode); 7864 if (ret2 && !ret) 7865 ret = ret2; 7866 7867 ret2 = btrfs_end_transaction(trans); 7868 if (ret2 && !ret) 7869 ret = ret2; 7870 btrfs_btree_balance_dirty(fs_info); 7871 } 7872 out: 7873 btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL); 7874 /* 7875 * So if we truncate and then write and fsync we normally would just 7876 * write the extents that changed, which is a problem if we need to 7877 * first truncate that entire inode. So set this flag so we write out 7878 * all of the extents in the inode to the sync log so we're completely 7879 * safe. 7880 * 7881 * If no extents were dropped or trimmed we don't need to force the next 7882 * fsync to truncate all the inode's items from the log and re-log them 7883 * all. This means the truncate operation did not change the file size, 7884 * or changed it to a smaller size but there was only an implicit hole 7885 * between the old i_size and the new i_size, and there were no prealloc 7886 * extents beyond i_size to drop. 7887 */ 7888 if (control.extents_found > 0) 7889 btrfs_set_inode_full_sync(inode); 7890 7891 return ret; 7892 } 7893 7894 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 7895 struct inode *dir) 7896 { 7897 struct inode *inode; 7898 7899 inode = new_inode(dir->i_sb); 7900 if (inode) { 7901 /* 7902 * Subvolumes don't inherit the sgid bit or the parent's gid if 7903 * the parent's sgid bit is set. This is probably a bug. 7904 */ 7905 inode_init_owner(idmap, inode, NULL, 7906 S_IFDIR | (~current_umask() & S_IRWXUGO)); 7907 inode->i_op = &btrfs_dir_inode_operations; 7908 inode->i_fop = &btrfs_dir_file_operations; 7909 } 7910 return inode; 7911 } 7912 7913 struct inode *btrfs_alloc_inode(struct super_block *sb) 7914 { 7915 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 7916 struct btrfs_inode *ei; 7917 struct inode *inode; 7918 7919 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 7920 if (!ei) 7921 return NULL; 7922 7923 ei->root = NULL; 7924 ei->generation = 0; 7925 ei->last_trans = 0; 7926 ei->last_sub_trans = 0; 7927 ei->logged_trans = 0; 7928 ei->delalloc_bytes = 0; 7929 /* new_delalloc_bytes and last_dir_index_offset are in a union. */ 7930 ei->new_delalloc_bytes = 0; 7931 ei->defrag_bytes = 0; 7932 ei->disk_i_size = 0; 7933 ei->flags = 0; 7934 ei->ro_flags = 0; 7935 /* 7936 * ->index_cnt will be properly initialized later when creating a new 7937 * inode (btrfs_create_new_inode()) or when reading an existing inode 7938 * from disk (btrfs_read_locked_inode()). 
7939 */ 7940 ei->csum_bytes = 0; 7941 ei->dir_index = 0; 7942 ei->last_unlink_trans = 0; 7943 ei->last_reflink_trans = 0; 7944 ei->last_log_commit = 0; 7945 7946 spin_lock_init(&ei->lock); 7947 ei->outstanding_extents = 0; 7948 if (sb->s_magic != BTRFS_TEST_MAGIC) 7949 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 7950 BTRFS_BLOCK_RSV_DELALLOC); 7951 ei->runtime_flags = 0; 7952 ei->prop_compress = BTRFS_COMPRESS_NONE; 7953 ei->defrag_compress = BTRFS_COMPRESS_NONE; 7954 7955 ei->delayed_node = NULL; 7956 7957 ei->i_otime_sec = 0; 7958 ei->i_otime_nsec = 0; 7959 7960 inode = &ei->vfs_inode; 7961 btrfs_extent_map_tree_init(&ei->extent_tree); 7962 7963 /* This io tree sets the valid inode. */ 7964 btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 7965 ei->io_tree.inode = ei; 7966 7967 ei->file_extent_tree = NULL; 7968 7969 mutex_init(&ei->log_mutex); 7970 spin_lock_init(&ei->ordered_tree_lock); 7971 ei->ordered_tree = RB_ROOT; 7972 ei->ordered_tree_last = NULL; 7973 INIT_LIST_HEAD(&ei->delalloc_inodes); 7974 INIT_LIST_HEAD(&ei->delayed_iput); 7975 init_rwsem(&ei->i_mmap_lock); 7976 7977 return inode; 7978 } 7979 7980 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 7981 void btrfs_test_destroy_inode(struct inode *inode) 7982 { 7983 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 7984 kfree(BTRFS_I(inode)->file_extent_tree); 7985 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7986 } 7987 #endif 7988 7989 void btrfs_free_inode(struct inode *inode) 7990 { 7991 kfree(BTRFS_I(inode)->file_extent_tree); 7992 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7993 } 7994 7995 void btrfs_destroy_inode(struct inode *vfs_inode) 7996 { 7997 struct btrfs_ordered_extent *ordered; 7998 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 7999 struct btrfs_root *root = inode->root; 8000 bool freespace_inode; 8001 8002 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8003 WARN_ON(vfs_inode->i_data.nrpages); 8004 WARN_ON(inode->block_rsv.reserved); 8005 WARN_ON(inode->block_rsv.size); 8006 WARN_ON(inode->outstanding_extents); 8007 if (!S_ISDIR(vfs_inode->i_mode)) { 8008 WARN_ON(inode->delalloc_bytes); 8009 WARN_ON(inode->new_delalloc_bytes); 8010 WARN_ON(inode->csum_bytes); 8011 } 8012 if (!root || !btrfs_is_data_reloc_root(root)) 8013 WARN_ON(inode->defrag_bytes); 8014 8015 /* 8016 * This can happen where we create an inode, but somebody else also 8017 * created the same inode and we need to destroy the one we already 8018 * created. 8019 */ 8020 if (!root) 8021 return; 8022 8023 /* 8024 * If this is a free space inode do not take the ordered extents lockdep 8025 * map. 
8026 */ 8027 freespace_inode = btrfs_is_free_space_inode(inode); 8028 8029 while (1) { 8030 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8031 if (!ordered) 8032 break; 8033 else { 8034 btrfs_err(root->fs_info, 8035 "found ordered extent %llu %llu on inode cleanup", 8036 ordered->file_offset, ordered->num_bytes); 8037 8038 if (!freespace_inode) 8039 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); 8040 8041 btrfs_remove_ordered_extent(inode, ordered); 8042 btrfs_put_ordered_extent(ordered); 8043 btrfs_put_ordered_extent(ordered); 8044 } 8045 } 8046 btrfs_qgroup_check_reserved_leak(inode); 8047 btrfs_del_inode_from_root(inode); 8048 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 8049 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 8050 btrfs_put_root(inode->root); 8051 } 8052 8053 int btrfs_drop_inode(struct inode *inode) 8054 { 8055 struct btrfs_root *root = BTRFS_I(inode)->root; 8056 8057 if (root == NULL) 8058 return 1; 8059 8060 /* the snap/subvol tree is on deleting */ 8061 if (btrfs_root_refs(&root->root_item) == 0) 8062 return 1; 8063 else 8064 return inode_generic_drop(inode); 8065 } 8066 8067 static void init_once(void *foo) 8068 { 8069 struct btrfs_inode *ei = foo; 8070 8071 inode_init_once(&ei->vfs_inode); 8072 #ifdef CONFIG_FS_VERITY 8073 ei->i_verity_info = NULL; 8074 #endif 8075 } 8076 8077 void __cold btrfs_destroy_cachep(void) 8078 { 8079 /* 8080 * Make sure all delayed rcu free inodes are flushed before we 8081 * destroy cache. 8082 */ 8083 rcu_barrier(); 8084 kmem_cache_destroy(btrfs_inode_cachep); 8085 } 8086 8087 int __init btrfs_init_cachep(void) 8088 { 8089 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 8090 sizeof(struct btrfs_inode), 0, 8091 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, 8092 init_once); 8093 if (!btrfs_inode_cachep) 8094 return -ENOMEM; 8095 8096 return 0; 8097 } 8098 8099 static int btrfs_getattr(struct mnt_idmap *idmap, 8100 const struct path *path, struct kstat *stat, 8101 u32 request_mask, unsigned int flags) 8102 { 8103 u64 delalloc_bytes; 8104 u64 inode_bytes; 8105 struct inode *inode = d_inode(path->dentry); 8106 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; 8107 u32 bi_flags = BTRFS_I(inode)->flags; 8108 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 8109 8110 stat->result_mask |= STATX_BTIME; 8111 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec; 8112 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec; 8113 if (bi_flags & BTRFS_INODE_APPEND) 8114 stat->attributes |= STATX_ATTR_APPEND; 8115 if (bi_flags & BTRFS_INODE_COMPRESS) 8116 stat->attributes |= STATX_ATTR_COMPRESSED; 8117 if (bi_flags & BTRFS_INODE_IMMUTABLE) 8118 stat->attributes |= STATX_ATTR_IMMUTABLE; 8119 if (bi_flags & BTRFS_INODE_NODUMP) 8120 stat->attributes |= STATX_ATTR_NODUMP; 8121 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 8122 stat->attributes |= STATX_ATTR_VERITY; 8123 8124 stat->attributes_mask |= (STATX_ATTR_APPEND | 8125 STATX_ATTR_COMPRESSED | 8126 STATX_ATTR_IMMUTABLE | 8127 STATX_ATTR_NODUMP); 8128 8129 generic_fillattr(idmap, request_mask, inode, stat); 8130 stat->dev = BTRFS_I(inode)->root->anon_dev; 8131 8132 stat->subvol = btrfs_root_id(BTRFS_I(inode)->root); 8133 stat->result_mask |= STATX_SUBVOL; 8134 8135 spin_lock(&BTRFS_I(inode)->lock); 8136 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 8137 inode_bytes = inode_get_bytes(inode); 8138 spin_unlock(&BTRFS_I(inode)->lock); 8139 stat->blocks = (ALIGN(inode_bytes, blocksize) + 8140 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT; 8141 return 0; 8142 } 8143 8144 
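/*
 * Note on the reservation math in btrfs_rename_exchange() below (an
 * illustrative example derived from the trans_num_items accounting in the
 * function): exchanging two regular files across two different directories
 * needs 10 items for the dir entries and parents plus 3 items per inode,
 * i.e. a 16 item transaction; the same-directory case needs 15.
 */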
static int btrfs_rename_exchange(struct inode *old_dir, 8145 struct dentry *old_dentry, 8146 struct inode *new_dir, 8147 struct dentry *new_dentry) 8148 { 8149 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 8150 struct btrfs_trans_handle *trans; 8151 unsigned int trans_num_items; 8152 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8153 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8154 struct inode *new_inode = new_dentry->d_inode; 8155 struct inode *old_inode = old_dentry->d_inode; 8156 struct btrfs_rename_ctx old_rename_ctx; 8157 struct btrfs_rename_ctx new_rename_ctx; 8158 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8159 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 8160 u64 old_idx = 0; 8161 u64 new_idx = 0; 8162 int ret; 8163 int ret2; 8164 bool need_abort = false; 8165 bool logs_pinned = false; 8166 struct fscrypt_name old_fname, new_fname; 8167 struct fscrypt_str *old_name, *new_name; 8168 8169 /* 8170 * For non-subvolumes allow exchange only within one subvolume, in the 8171 * same inode namespace. Two subvolumes (represented as directory) can 8172 * be exchanged as they're a logical link and have a fixed inode number. 8173 */ 8174 if (root != dest && 8175 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 8176 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 8177 return -EXDEV; 8178 8179 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8180 if (ret) 8181 return ret; 8182 8183 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8184 if (ret) { 8185 fscrypt_free_filename(&old_fname); 8186 return ret; 8187 } 8188 8189 old_name = &old_fname.disk_name; 8190 new_name = &new_fname.disk_name; 8191 8192 /* close the race window with snapshot create/destroy ioctl */ 8193 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 8194 new_ino == BTRFS_FIRST_FREE_OBJECTID) 8195 down_read(&fs_info->subvol_sem); 8196 8197 /* 8198 * For each inode: 8199 * 1 to remove old dir item 8200 * 1 to remove old dir index 8201 * 1 to add new dir item 8202 * 1 to add new dir index 8203 * 1 to update parent inode 8204 * 8205 * If the parents are the same, we only need to account for one 8206 */ 8207 trans_num_items = (old_dir == new_dir ? 9 : 10); 8208 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8209 /* 8210 * 1 to remove old root ref 8211 * 1 to remove old root backref 8212 * 1 to add new root ref 8213 * 1 to add new root backref 8214 */ 8215 trans_num_items += 4; 8216 } else { 8217 /* 8218 * 1 to update inode item 8219 * 1 to remove old inode ref 8220 * 1 to add new inode ref 8221 */ 8222 trans_num_items += 3; 8223 } 8224 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 8225 trans_num_items += 4; 8226 else 8227 trans_num_items += 3; 8228 trans = btrfs_start_transaction(root, trans_num_items); 8229 if (IS_ERR(trans)) { 8230 ret = PTR_ERR(trans); 8231 goto out_notrans; 8232 } 8233 8234 if (dest != root) { 8235 ret = btrfs_record_root_in_trans(trans, dest); 8236 if (ret) 8237 goto out_fail; 8238 } 8239 8240 /* 8241 * We need to find a free sequence number both in the source and 8242 * in the destination directory for the exchange. 8243 */ 8244 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8245 if (ret) 8246 goto out_fail; 8247 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8248 if (ret) 8249 goto out_fail; 8250 8251 BTRFS_I(old_inode)->dir_index = 0ULL; 8252 BTRFS_I(new_inode)->dir_index = 0ULL; 8253 8254 /* Reference for the source. */ 8255 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8256 /* force full log commit if subvolume involved. 
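 * Subvolume renames touch the root tree, which the tree-log cannot
 * record, so any fsync in this transaction has to fall back to a full
 * transaction commit.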
*/
8257 btrfs_set_log_full_commit(trans);
8258 } else {
8259 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8260 btrfs_ino(BTRFS_I(new_dir)),
8261 old_idx);
8262 if (ret)
8263 goto out_fail;
8264 need_abort = true;
8265 }
8266
8267 /* And now for the dest. */
8268 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8269 /* force full log commit if subvolume involved. */
8270 btrfs_set_log_full_commit(trans);
8271 } else {
8272 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8273 btrfs_ino(BTRFS_I(old_dir)),
8274 new_idx);
8275 if (ret) {
8276 if (unlikely(need_abort))
8277 btrfs_abort_transaction(trans, ret);
8278 goto out_fail;
8279 }
8280 }
8281
8282 /* Update inode version and ctime/mtime. */
8283 inode_inc_iversion(old_dir);
8284 inode_inc_iversion(new_dir);
8285 inode_inc_iversion(old_inode);
8286 inode_inc_iversion(new_inode);
8287 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8288
8289 if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
8290 new_ino != BTRFS_FIRST_FREE_OBJECTID) {
8291 /*
8292 * If we are renaming in the same directory (and it's not for
8293 * root entries) pin the log early to prevent any concurrent
8294 * task from logging the directory after we removed the old
8295 * entries and before we add the new entries, otherwise that
8296 * task can sync a log without any entry for the inodes we are
8297 * renaming and therefore replaying that log, if a power failure
8298 * happens after syncing the log, would result in deleting the
8299 * inodes.
8300 *
8301 * If the rename affects two different directories, we want to
8302 * make sure that there's no log commit that contains
8303 * updates for only one of the directories but not for the
8304 * other.
8305 *
8306 * If we are renaming an entry for a root, we don't care about
8307 * log updates since we called btrfs_set_log_full_commit().
8308 */ 8309 btrfs_pin_log_trans(root); 8310 btrfs_pin_log_trans(dest); 8311 logs_pinned = true; 8312 } 8313 8314 if (old_dentry->d_parent != new_dentry->d_parent) { 8315 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8316 BTRFS_I(old_inode), true); 8317 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8318 BTRFS_I(new_inode), true); 8319 } 8320 8321 /* src is a subvolume */ 8322 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8323 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8324 if (unlikely(ret)) { 8325 btrfs_abort_transaction(trans, ret); 8326 goto out_fail; 8327 } 8328 } else { /* src is an inode */ 8329 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8330 BTRFS_I(old_dentry->d_inode), 8331 old_name, &old_rename_ctx); 8332 if (unlikely(ret)) { 8333 btrfs_abort_transaction(trans, ret); 8334 goto out_fail; 8335 } 8336 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8337 if (unlikely(ret)) { 8338 btrfs_abort_transaction(trans, ret); 8339 goto out_fail; 8340 } 8341 } 8342 8343 /* dest is a subvolume */ 8344 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8345 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8346 if (unlikely(ret)) { 8347 btrfs_abort_transaction(trans, ret); 8348 goto out_fail; 8349 } 8350 } else { /* dest is an inode */ 8351 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8352 BTRFS_I(new_dentry->d_inode), 8353 new_name, &new_rename_ctx); 8354 if (unlikely(ret)) { 8355 btrfs_abort_transaction(trans, ret); 8356 goto out_fail; 8357 } 8358 ret = btrfs_update_inode(trans, BTRFS_I(new_inode)); 8359 if (unlikely(ret)) { 8360 btrfs_abort_transaction(trans, ret); 8361 goto out_fail; 8362 } 8363 } 8364 8365 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8366 new_name, 0, old_idx); 8367 if (unlikely(ret)) { 8368 btrfs_abort_transaction(trans, ret); 8369 goto out_fail; 8370 } 8371 8372 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 8373 old_name, 0, new_idx); 8374 if (unlikely(ret)) { 8375 btrfs_abort_transaction(trans, ret); 8376 goto out_fail; 8377 } 8378 8379 if (old_inode->i_nlink == 1) 8380 BTRFS_I(old_inode)->dir_index = old_idx; 8381 if (new_inode->i_nlink == 1) 8382 BTRFS_I(new_inode)->dir_index = new_idx; 8383 8384 /* 8385 * Do the log updates for all inodes. 8386 * 8387 * If either entry is for a root we don't need to update the logs since 8388 * we've called btrfs_set_log_full_commit() before. 8389 */ 8390 if (logs_pinned) { 8391 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8392 old_rename_ctx.index, new_dentry->d_parent); 8393 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 8394 new_rename_ctx.index, old_dentry->d_parent); 8395 } 8396 8397 out_fail: 8398 if (logs_pinned) { 8399 btrfs_end_log_trans(root); 8400 btrfs_end_log_trans(dest); 8401 } 8402 ret2 = btrfs_end_transaction(trans); 8403 ret = ret ? 
ret : ret2; 8404 out_notrans: 8405 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 8406 old_ino == BTRFS_FIRST_FREE_OBJECTID) 8407 up_read(&fs_info->subvol_sem); 8408 8409 fscrypt_free_filename(&new_fname); 8410 fscrypt_free_filename(&old_fname); 8411 return ret; 8412 } 8413 8414 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap, 8415 struct inode *dir) 8416 { 8417 struct inode *inode; 8418 8419 inode = new_inode(dir->i_sb); 8420 if (inode) { 8421 inode_init_owner(idmap, inode, dir, 8422 S_IFCHR | WHITEOUT_MODE); 8423 inode->i_op = &btrfs_special_inode_operations; 8424 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 8425 } 8426 return inode; 8427 } 8428 8429 static int btrfs_rename(struct mnt_idmap *idmap, 8430 struct inode *old_dir, struct dentry *old_dentry, 8431 struct inode *new_dir, struct dentry *new_dentry, 8432 unsigned int flags) 8433 { 8434 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 8435 struct btrfs_new_inode_args whiteout_args = { 8436 .dir = old_dir, 8437 .dentry = old_dentry, 8438 }; 8439 struct btrfs_trans_handle *trans; 8440 unsigned int trans_num_items; 8441 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8442 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8443 struct inode *new_inode = d_inode(new_dentry); 8444 struct inode *old_inode = d_inode(old_dentry); 8445 struct btrfs_rename_ctx rename_ctx; 8446 u64 index = 0; 8447 int ret; 8448 int ret2; 8449 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8450 struct fscrypt_name old_fname, new_fname; 8451 bool logs_pinned = false; 8452 8453 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 8454 return -EPERM; 8455 8456 /* we only allow rename subvolume link between subvolumes */ 8457 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8458 return -EXDEV; 8459 8460 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 8461 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 8462 return -ENOTEMPTY; 8463 8464 if (S_ISDIR(old_inode->i_mode) && new_inode && 8465 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 8466 return -ENOTEMPTY; 8467 8468 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8469 if (ret) 8470 return ret; 8471 8472 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8473 if (ret) { 8474 fscrypt_free_filename(&old_fname); 8475 return ret; 8476 } 8477 8478 /* check for collisions, even if the name isn't there */ 8479 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 8480 if (ret) { 8481 if (ret == -EEXIST) { 8482 /* we shouldn't get 8483 * eexist without a new_inode */ 8484 if (WARN_ON(!new_inode)) { 8485 goto out_fscrypt_names; 8486 } 8487 } else { 8488 /* maybe -EOVERFLOW */ 8489 goto out_fscrypt_names; 8490 } 8491 } 8492 ret = 0; 8493 8494 /* 8495 * we're using rename to replace one file with another. Start IO on it 8496 * now so we don't add too much work to the end of the transaction 8497 */ 8498 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 8499 filemap_flush(old_inode->i_mapping); 8500 8501 if (flags & RENAME_WHITEOUT) { 8502 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 8503 if (!whiteout_args.inode) { 8504 ret = -ENOMEM; 8505 goto out_fscrypt_names; 8506 } 8507 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 8508 if (ret) 8509 goto out_whiteout_inode; 8510 } else { 8511 /* 1 to update the old parent inode. 
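 * Worked example (derived from the accounting below) for the common
 * case of a non-subvolume rename with the same parent, no whiteout and
 * no existing target: 1 here + 3 inode items + 4 dir items = 8 items
 * reserved in total.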
*/
8512 trans_num_items = 1;
8513 }
8514
8515 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8516 /* Close the race window with snapshot create/destroy ioctl */
8517 down_read(&fs_info->subvol_sem);
8518 /*
8519 * 1 to remove old root ref
8520 * 1 to remove old root backref
8521 * 1 to add new root ref
8522 * 1 to add new root backref
8523 */
8524 trans_num_items += 4;
8525 } else {
8526 /*
8527 * 1 to update inode
8528 * 1 to remove old inode ref
8529 * 1 to add new inode ref
8530 */
8531 trans_num_items += 3;
8532 }
8533 /*
8534 * 1 to remove old dir item
8535 * 1 to remove old dir index
8536 * 1 to add new dir item
8537 * 1 to add new dir index
8538 */
8539 trans_num_items += 4;
8540 /* 1 to update new parent inode if it's not the same as the old parent */
8541 if (new_dir != old_dir)
8542 trans_num_items++;
8543 if (new_inode) {
8544 /*
8545 * 1 to update inode
8546 * 1 to remove inode ref
8547 * 1 to remove dir item
8548 * 1 to remove dir index
8549 * 1 to possibly add orphan item
8550 */
8551 trans_num_items += 5;
8552 }
8553 trans = btrfs_start_transaction(root, trans_num_items);
8554 if (IS_ERR(trans)) {
8555 ret = PTR_ERR(trans);
8556 goto out_notrans;
8557 }
8558
8559 if (dest != root) {
8560 ret = btrfs_record_root_in_trans(trans, dest);
8561 if (ret)
8562 goto out_fail;
8563 }
8564
8565 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8566 if (ret)
8567 goto out_fail;
8568
8569 BTRFS_I(old_inode)->dir_index = 0ULL;
8570 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8571 /* force full log commit if subvolume involved. */
8572 btrfs_set_log_full_commit(trans);
8573 } else {
8574 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8575 old_ino, btrfs_ino(BTRFS_I(new_dir)),
8576 index);
8577 if (ret)
8578 goto out_fail;
8579 }
8580
8581 inode_inc_iversion(old_dir);
8582 inode_inc_iversion(new_dir);
8583 inode_inc_iversion(old_inode);
8584 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8585
8586 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8587 /*
8588 * If we are renaming in the same directory (and it's not a
8589 * root entry) pin the log to prevent any concurrent task from
8590 * logging the directory after we removed the old entry and
8591 * before we add the new entry, otherwise that task can sync
8592 * a log without any entry for the inode we are renaming and
8593 * therefore replaying that log, if a power failure happens
8594 * after syncing the log, would result in deleting the inode.
8595 *
8596 * If the rename affects two different directories, we want to
8597 * make sure that there's no log commit that contains
8598 * updates for only one of the directories but not for the
8599 * other.
8600 *
8601 * If we are renaming an entry for a root, we don't care about
8602 * log updates since we called btrfs_set_log_full_commit().
8603 */ 8604 btrfs_pin_log_trans(root); 8605 btrfs_pin_log_trans(dest); 8606 logs_pinned = true; 8607 } 8608 8609 if (old_dentry->d_parent != new_dentry->d_parent) 8610 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8611 BTRFS_I(old_inode), true); 8612 8613 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 8614 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8615 if (unlikely(ret)) { 8616 btrfs_abort_transaction(trans, ret); 8617 goto out_fail; 8618 } 8619 } else { 8620 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8621 BTRFS_I(d_inode(old_dentry)), 8622 &old_fname.disk_name, &rename_ctx); 8623 if (unlikely(ret)) { 8624 btrfs_abort_transaction(trans, ret); 8625 goto out_fail; 8626 } 8627 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8628 if (unlikely(ret)) { 8629 btrfs_abort_transaction(trans, ret); 8630 goto out_fail; 8631 } 8632 } 8633 8634 if (new_inode) { 8635 inode_inc_iversion(new_inode); 8636 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 8637 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 8638 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8639 if (unlikely(ret)) { 8640 btrfs_abort_transaction(trans, ret); 8641 goto out_fail; 8642 } 8643 BUG_ON(new_inode->i_nlink == 0); 8644 } else { 8645 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8646 BTRFS_I(d_inode(new_dentry)), 8647 &new_fname.disk_name); 8648 if (unlikely(ret)) { 8649 btrfs_abort_transaction(trans, ret); 8650 goto out_fail; 8651 } 8652 } 8653 if (new_inode->i_nlink == 0) { 8654 ret = btrfs_orphan_add(trans, 8655 BTRFS_I(d_inode(new_dentry))); 8656 if (unlikely(ret)) { 8657 btrfs_abort_transaction(trans, ret); 8658 goto out_fail; 8659 } 8660 } 8661 } 8662 8663 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8664 &new_fname.disk_name, 0, index); 8665 if (unlikely(ret)) { 8666 btrfs_abort_transaction(trans, ret); 8667 goto out_fail; 8668 } 8669 8670 if (old_inode->i_nlink == 1) 8671 BTRFS_I(old_inode)->dir_index = index; 8672 8673 if (logs_pinned) 8674 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8675 rename_ctx.index, new_dentry->d_parent); 8676 8677 if (flags & RENAME_WHITEOUT) { 8678 ret = btrfs_create_new_inode(trans, &whiteout_args); 8679 if (unlikely(ret)) { 8680 btrfs_abort_transaction(trans, ret); 8681 goto out_fail; 8682 } else { 8683 unlock_new_inode(whiteout_args.inode); 8684 iput(whiteout_args.inode); 8685 whiteout_args.inode = NULL; 8686 } 8687 } 8688 out_fail: 8689 if (logs_pinned) { 8690 btrfs_end_log_trans(root); 8691 btrfs_end_log_trans(dest); 8692 } 8693 ret2 = btrfs_end_transaction(trans); 8694 ret = ret ? 
ret : ret2; 8695 out_notrans: 8696 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 8697 up_read(&fs_info->subvol_sem); 8698 if (flags & RENAME_WHITEOUT) 8699 btrfs_new_inode_args_destroy(&whiteout_args); 8700 out_whiteout_inode: 8701 if (flags & RENAME_WHITEOUT) 8702 iput(whiteout_args.inode); 8703 out_fscrypt_names: 8704 fscrypt_free_filename(&old_fname); 8705 fscrypt_free_filename(&new_fname); 8706 return ret; 8707 } 8708 8709 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 8710 struct dentry *old_dentry, struct inode *new_dir, 8711 struct dentry *new_dentry, unsigned int flags) 8712 { 8713 int ret; 8714 8715 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 8716 return -EINVAL; 8717 8718 if (flags & RENAME_EXCHANGE) 8719 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 8720 new_dentry); 8721 else 8722 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 8723 new_dentry, flags); 8724 8725 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 8726 8727 return ret; 8728 } 8729 8730 struct btrfs_delalloc_work { 8731 struct inode *inode; 8732 struct completion completion; 8733 struct list_head list; 8734 struct btrfs_work work; 8735 }; 8736 8737 static void btrfs_run_delalloc_work(struct btrfs_work *work) 8738 { 8739 struct btrfs_delalloc_work *delalloc_work; 8740 struct inode *inode; 8741 8742 delalloc_work = container_of(work, struct btrfs_delalloc_work, 8743 work); 8744 inode = delalloc_work->inode; 8745 filemap_flush(inode->i_mapping); 8746 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8747 &BTRFS_I(inode)->runtime_flags)) 8748 filemap_flush(inode->i_mapping); 8749 8750 iput(inode); 8751 complete(&delalloc_work->completion); 8752 } 8753 8754 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 8755 { 8756 struct btrfs_delalloc_work *work; 8757 8758 work = kmalloc(sizeof(*work), GFP_NOFS); 8759 if (!work) 8760 return NULL; 8761 8762 init_completion(&work->completion); 8763 INIT_LIST_HEAD(&work->list); 8764 work->inode = inode; 8765 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL); 8766 8767 return work; 8768 } 8769 8770 /* 8771 * some fairly slow code that needs optimization. This walks the list 8772 * of all the inodes with pending delalloc and forces them to disk. 
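 * Two modes: with nr_to_write == NULL each inode is handed to the
 * flush_workers workqueue and we wait for all queued works to finish;
 * otherwise inodes are flushed synchronously via filemap_flush_nr()
 * until the write budget runs out.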
8773 */ 8774 static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write, 8775 bool snapshot, bool in_reclaim_context) 8776 { 8777 struct btrfs_delalloc_work *work, *next; 8778 LIST_HEAD(works); 8779 LIST_HEAD(splice); 8780 int ret = 0; 8781 8782 mutex_lock(&root->delalloc_mutex); 8783 spin_lock(&root->delalloc_lock); 8784 list_splice_init(&root->delalloc_inodes, &splice); 8785 while (!list_empty(&splice)) { 8786 struct btrfs_inode *inode; 8787 struct inode *tmp_inode; 8788 8789 inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes); 8790 8791 list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 8792 8793 if (in_reclaim_context && 8794 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags)) 8795 continue; 8796 8797 tmp_inode = igrab(&inode->vfs_inode); 8798 if (!tmp_inode) { 8799 cond_resched_lock(&root->delalloc_lock); 8800 continue; 8801 } 8802 spin_unlock(&root->delalloc_lock); 8803 8804 if (snapshot) 8805 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags); 8806 if (nr_to_write == NULL) { 8807 work = btrfs_alloc_delalloc_work(tmp_inode); 8808 if (!work) { 8809 iput(tmp_inode); 8810 ret = -ENOMEM; 8811 goto out; 8812 } 8813 list_add_tail(&work->list, &works); 8814 btrfs_queue_work(root->fs_info->flush_workers, 8815 &work->work); 8816 } else { 8817 ret = filemap_flush_nr(tmp_inode->i_mapping, 8818 nr_to_write); 8819 btrfs_add_delayed_iput(inode); 8820 8821 if (ret || *nr_to_write <= 0) 8822 goto out; 8823 } 8824 cond_resched(); 8825 spin_lock(&root->delalloc_lock); 8826 } 8827 spin_unlock(&root->delalloc_lock); 8828 8829 out: 8830 list_for_each_entry_safe(work, next, &works, list) { 8831 list_del_init(&work->list); 8832 wait_for_completion(&work->completion); 8833 kfree(work); 8834 } 8835 8836 if (!list_empty(&splice)) { 8837 spin_lock(&root->delalloc_lock); 8838 list_splice_tail(&splice, &root->delalloc_inodes); 8839 spin_unlock(&root->delalloc_lock); 8840 } 8841 mutex_unlock(&root->delalloc_mutex); 8842 return ret; 8843 } 8844 8845 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 8846 { 8847 struct btrfs_fs_info *fs_info = root->fs_info; 8848 8849 if (BTRFS_FS_ERROR(fs_info)) 8850 return -EROFS; 8851 return start_delalloc_inodes(root, NULL, true, in_reclaim_context); 8852 } 8853 8854 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 8855 bool in_reclaim_context) 8856 { 8857 long *nr_to_write = nr == LONG_MAX ? 
NULL : &nr; 8858 struct btrfs_root *root; 8859 LIST_HEAD(splice); 8860 int ret; 8861 8862 if (BTRFS_FS_ERROR(fs_info)) 8863 return -EROFS; 8864 8865 mutex_lock(&fs_info->delalloc_root_mutex); 8866 spin_lock(&fs_info->delalloc_root_lock); 8867 list_splice_init(&fs_info->delalloc_roots, &splice); 8868 while (!list_empty(&splice)) { 8869 root = list_first_entry(&splice, struct btrfs_root, 8870 delalloc_root); 8871 root = btrfs_grab_root(root); 8872 BUG_ON(!root); 8873 list_move_tail(&root->delalloc_root, 8874 &fs_info->delalloc_roots); 8875 spin_unlock(&fs_info->delalloc_root_lock); 8876 8877 ret = start_delalloc_inodes(root, nr_to_write, false, 8878 in_reclaim_context); 8879 btrfs_put_root(root); 8880 if (ret < 0 || nr <= 0) 8881 goto out; 8882 spin_lock(&fs_info->delalloc_root_lock); 8883 } 8884 spin_unlock(&fs_info->delalloc_root_lock); 8885 8886 ret = 0; 8887 out: 8888 if (!list_empty(&splice)) { 8889 spin_lock(&fs_info->delalloc_root_lock); 8890 list_splice_tail(&splice, &fs_info->delalloc_roots); 8891 spin_unlock(&fs_info->delalloc_root_lock); 8892 } 8893 mutex_unlock(&fs_info->delalloc_root_mutex); 8894 return ret; 8895 } 8896 8897 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 8898 struct dentry *dentry, const char *symname) 8899 { 8900 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 8901 struct btrfs_trans_handle *trans; 8902 struct btrfs_root *root = BTRFS_I(dir)->root; 8903 struct btrfs_path *path; 8904 struct btrfs_key key; 8905 struct inode *inode; 8906 struct btrfs_new_inode_args new_inode_args = { 8907 .dir = dir, 8908 .dentry = dentry, 8909 }; 8910 unsigned int trans_num_items; 8911 int ret; 8912 int name_len; 8913 int datasize; 8914 unsigned long ptr; 8915 struct btrfs_file_extent_item *ei; 8916 struct extent_buffer *leaf; 8917 8918 name_len = strlen(symname); 8919 /* 8920 * Symlinks utilize uncompressed inline extent data, which should not 8921 * reach block size. 
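*
* For example, on a filesystem with 4K sectors the symlink target must
* be shorter than 4096 bytes and must also fit within
* BTRFS_MAX_INLINE_DATA_SIZE(); anything longer fails with
* -ENAMETOOLONG below.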
8922 */ 8923 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) || 8924 name_len >= fs_info->sectorsize) 8925 return -ENAMETOOLONG; 8926 8927 inode = new_inode(dir->i_sb); 8928 if (!inode) 8929 return -ENOMEM; 8930 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 8931 inode->i_op = &btrfs_symlink_inode_operations; 8932 inode_nohighmem(inode); 8933 inode->i_mapping->a_ops = &btrfs_aops; 8934 btrfs_i_size_write(BTRFS_I(inode), name_len); 8935 inode_set_bytes(inode, name_len); 8936 8937 new_inode_args.inode = inode; 8938 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 8939 if (ret) 8940 goto out_inode; 8941 /* 1 additional item for the inline extent */ 8942 trans_num_items++; 8943 8944 trans = btrfs_start_transaction(root, trans_num_items); 8945 if (IS_ERR(trans)) { 8946 ret = PTR_ERR(trans); 8947 goto out_new_inode_args; 8948 } 8949 8950 ret = btrfs_create_new_inode(trans, &new_inode_args); 8951 if (ret) 8952 goto out; 8953 8954 path = btrfs_alloc_path(); 8955 if (unlikely(!path)) { 8956 ret = -ENOMEM; 8957 btrfs_abort_transaction(trans, ret); 8958 discard_new_inode(inode); 8959 inode = NULL; 8960 goto out; 8961 } 8962 key.objectid = btrfs_ino(BTRFS_I(inode)); 8963 key.type = BTRFS_EXTENT_DATA_KEY; 8964 key.offset = 0; 8965 datasize = btrfs_file_extent_calc_inline_size(name_len); 8966 ret = btrfs_insert_empty_item(trans, root, path, &key, datasize); 8967 if (unlikely(ret)) { 8968 btrfs_abort_transaction(trans, ret); 8969 btrfs_free_path(path); 8970 discard_new_inode(inode); 8971 inode = NULL; 8972 goto out; 8973 } 8974 leaf = path->nodes[0]; 8975 ei = btrfs_item_ptr(leaf, path->slots[0], 8976 struct btrfs_file_extent_item); 8977 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 8978 btrfs_set_file_extent_type(leaf, ei, 8979 BTRFS_FILE_EXTENT_INLINE); 8980 btrfs_set_file_extent_encryption(leaf, ei, 0); 8981 btrfs_set_file_extent_compression(leaf, ei, 0); 8982 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 8983 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 8984 8985 ptr = btrfs_file_extent_inline_start(ei); 8986 write_extent_buffer(leaf, symname, ptr, name_len); 8987 btrfs_free_path(path); 8988 8989 d_instantiate_new(dentry, inode); 8990 ret = 0; 8991 out: 8992 btrfs_end_transaction(trans); 8993 btrfs_btree_balance_dirty(fs_info); 8994 out_new_inode_args: 8995 btrfs_new_inode_args_destroy(&new_inode_args); 8996 out_inode: 8997 if (ret) 8998 iput(inode); 8999 return ret; 9000 } 9001 9002 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9003 struct btrfs_trans_handle *trans_in, 9004 struct btrfs_inode *inode, 9005 struct btrfs_key *ins, 9006 u64 file_offset) 9007 { 9008 struct btrfs_file_extent_item stack_fi; 9009 struct btrfs_replace_extent_info extent_info; 9010 struct btrfs_trans_handle *trans = trans_in; 9011 struct btrfs_path *path; 9012 u64 start = ins->objectid; 9013 u64 len = ins->offset; 9014 u64 qgroup_released = 0; 9015 int ret; 9016 9017 memset(&stack_fi, 0, sizeof(stack_fi)); 9018 9019 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9020 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9021 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9022 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9023 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9024 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9025 /* Encryption and other encoding is reserved and all 0 */ 9026 9027 ret = btrfs_qgroup_release_data(inode, file_offset, len, 
&qgroup_released);
9028 if (ret < 0)
9029 return ERR_PTR(ret);
9030
9031 if (trans) {
9032 ret = insert_reserved_file_extent(trans, inode,
9033 file_offset, &stack_fi,
9034 true, qgroup_released);
9035 if (ret)
9036 goto free_qgroup;
9037 return trans;
9038 }
9039
9040 extent_info.disk_offset = start;
9041 extent_info.disk_len = len;
9042 extent_info.data_offset = 0;
9043 extent_info.data_len = len;
9044 extent_info.file_offset = file_offset;
9045 extent_info.extent_buf = (char *)&stack_fi;
9046 extent_info.is_new_extent = true;
9047 extent_info.update_times = true;
9048 extent_info.qgroup_reserved = qgroup_released;
9049 extent_info.insertions = 0;
9050
9051 path = btrfs_alloc_path();
9052 if (!path) {
9053 ret = -ENOMEM;
9054 goto free_qgroup;
9055 }
9056
9057 ret = btrfs_replace_file_extents(inode, path, file_offset,
9058 file_offset + len - 1, &extent_info,
9059 &trans);
9060 btrfs_free_path(path);
9061 if (ret)
9062 goto free_qgroup;
9063 return trans;
9064
9065 free_qgroup:
9066 /*
9067 * We released the qgroup data range at the beginning of the function,
9068 * and normally the qgroup_released bytes will be freed when committing
9069 * the transaction.
9070 * But if we error out early, we have to free what we have released
9071 * or we leak the qgroup data reservation.
9072 */
9073 btrfs_qgroup_free_refroot(inode->root->fs_info,
9074 btrfs_root_id(inode->root), qgroup_released,
9075 BTRFS_QGROUP_RSV_DATA);
9076 return ERR_PTR(ret);
9077 }
9078
9079 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9080 u64 start, u64 num_bytes, u64 min_size,
9081 loff_t actual_len, u64 *alloc_hint,
9082 struct btrfs_trans_handle *trans)
9083 {
9084 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9085 struct extent_map *em;
9086 struct btrfs_root *root = BTRFS_I(inode)->root;
9087 struct btrfs_key ins;
9088 u64 cur_offset = start;
9089 u64 clear_offset = start;
9090 u64 i_size;
9091 u64 cur_bytes;
9092 u64 last_alloc = (u64)-1;
9093 int ret = 0;
9094 bool own_trans = true;
9095 u64 end = start + num_bytes - 1;
9096
9097 if (trans)
9098 own_trans = false;
9099 while (num_bytes > 0) {
9100 cur_bytes = min_t(u64, num_bytes, SZ_256M);
9101 cur_bytes = max(cur_bytes, min_size);
9102 /*
9103 * If we are severely fragmented we could end up with really
9104 * small allocations, so if the allocator is returning small
9105 * chunks let's make its job easier by only searching for those
9106 * sized chunks.
9107 */
9108 cur_bytes = min(cur_bytes, last_alloc);
9109 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9110 min_size, 0, *alloc_hint, &ins, true, false);
9111 if (ret)
9112 break;
9113
9114 /*
9115 * We've reserved this space, and thus converted it from
9116 * ->bytes_may_use to ->bytes_reserved. For any error that happens
9117 * from here on out, we will only need to clear our reservation
9118 * for the remaining unreserved area, so advance our
9119 * clear_offset by our extent size.
9120 */
9121 clear_offset += ins.offset;
9122
9123 last_alloc = ins.offset;
9124 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9125 &ins, cur_offset);
9126 /*
9127 * Now that we inserted the prealloc extent we can finally
9128 * decrement the number of reservations in the block group.
9129 * If we did it before, we could race with relocation and have
9130 * relocation miss the reserved extent, making it fail later.
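*
* (Relocation waits for a block group's outstanding reservations to
* drain before it scans the group, so the extent item has to be visible
* in the tree by the time we drop ours.)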
9131 */ 9132 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9133 if (IS_ERR(trans)) { 9134 ret = PTR_ERR(trans); 9135 btrfs_free_reserved_extent(fs_info, ins.objectid, 9136 ins.offset, false); 9137 break; 9138 } 9139 9140 em = btrfs_alloc_extent_map(); 9141 if (!em) { 9142 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9143 cur_offset + ins.offset - 1, false); 9144 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9145 goto next; 9146 } 9147 9148 em->start = cur_offset; 9149 em->len = ins.offset; 9150 em->disk_bytenr = ins.objectid; 9151 em->offset = 0; 9152 em->disk_num_bytes = ins.offset; 9153 em->ram_bytes = ins.offset; 9154 em->flags |= EXTENT_FLAG_PREALLOC; 9155 em->generation = trans->transid; 9156 9157 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9158 btrfs_free_extent_map(em); 9159 next: 9160 num_bytes -= ins.offset; 9161 cur_offset += ins.offset; 9162 *alloc_hint = ins.objectid + ins.offset; 9163 9164 inode_inc_iversion(inode); 9165 inode_set_ctime_current(inode); 9166 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9167 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9168 (actual_len > inode->i_size) && 9169 (cur_offset > inode->i_size)) { 9170 if (cur_offset > actual_len) 9171 i_size = actual_len; 9172 else 9173 i_size = cur_offset; 9174 i_size_write(inode, i_size); 9175 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9176 } 9177 9178 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 9179 9180 if (unlikely(ret)) { 9181 btrfs_abort_transaction(trans, ret); 9182 if (own_trans) 9183 btrfs_end_transaction(trans); 9184 break; 9185 } 9186 9187 if (own_trans) { 9188 btrfs_end_transaction(trans); 9189 trans = NULL; 9190 } 9191 } 9192 if (clear_offset < end) 9193 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9194 end - clear_offset + 1); 9195 return ret; 9196 } 9197 9198 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9199 u64 start, u64 num_bytes, u64 min_size, 9200 loff_t actual_len, u64 *alloc_hint) 9201 { 9202 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9203 min_size, actual_len, alloc_hint, 9204 NULL); 9205 } 9206 9207 int btrfs_prealloc_file_range_trans(struct inode *inode, 9208 struct btrfs_trans_handle *trans, int mode, 9209 u64 start, u64 num_bytes, u64 min_size, 9210 loff_t actual_len, u64 *alloc_hint) 9211 { 9212 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9213 min_size, actual_len, alloc_hint, trans); 9214 } 9215 9216 /* 9217 * NOTE: in case you are adding MAY_EXEC check for directories: 9218 * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to 9219 * elide calls here. 
9220 */ 9221 static int btrfs_permission(struct mnt_idmap *idmap, 9222 struct inode *inode, int mask) 9223 { 9224 struct btrfs_root *root = BTRFS_I(inode)->root; 9225 umode_t mode = inode->i_mode; 9226 9227 if (mask & MAY_WRITE && 9228 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9229 if (btrfs_root_readonly(root)) 9230 return -EROFS; 9231 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9232 return -EACCES; 9233 } 9234 return generic_permission(idmap, inode, mask); 9235 } 9236 9237 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9238 struct file *file, umode_t mode) 9239 { 9240 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 9241 struct btrfs_trans_handle *trans; 9242 struct btrfs_root *root = BTRFS_I(dir)->root; 9243 struct inode *inode; 9244 struct btrfs_new_inode_args new_inode_args = { 9245 .dir = dir, 9246 .dentry = file->f_path.dentry, 9247 .orphan = true, 9248 }; 9249 unsigned int trans_num_items; 9250 int ret; 9251 9252 inode = new_inode(dir->i_sb); 9253 if (!inode) 9254 return -ENOMEM; 9255 inode_init_owner(idmap, inode, dir, mode); 9256 inode->i_fop = &btrfs_file_operations; 9257 inode->i_op = &btrfs_file_inode_operations; 9258 inode->i_mapping->a_ops = &btrfs_aops; 9259 9260 new_inode_args.inode = inode; 9261 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9262 if (ret) 9263 goto out_inode; 9264 9265 trans = btrfs_start_transaction(root, trans_num_items); 9266 if (IS_ERR(trans)) { 9267 ret = PTR_ERR(trans); 9268 goto out_new_inode_args; 9269 } 9270 9271 ret = btrfs_create_new_inode(trans, &new_inode_args); 9272 9273 /* 9274 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9275 * set it to 1 because d_tmpfile() will issue a warning if the count is 9276 * 0, through: 9277 * 9278 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9279 */ 9280 set_nlink(inode, 1); 9281 9282 if (!ret) { 9283 d_tmpfile(file, inode); 9284 unlock_new_inode(inode); 9285 mark_inode_dirty(inode); 9286 } 9287 9288 btrfs_end_transaction(trans); 9289 btrfs_btree_balance_dirty(fs_info); 9290 out_new_inode_args: 9291 btrfs_new_inode_args_destroy(&new_inode_args); 9292 out_inode: 9293 if (ret) 9294 iput(inode); 9295 return finish_open_simple(file, ret); 9296 } 9297 9298 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9299 int compress_type) 9300 { 9301 switch (compress_type) { 9302 case BTRFS_COMPRESS_NONE: 9303 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9304 case BTRFS_COMPRESS_ZLIB: 9305 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9306 case BTRFS_COMPRESS_LZO: 9307 /* 9308 * The LZO format depends on the sector size. 64K is the maximum 9309 * sector size that we support. 
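*
* A worked example of the mapping computed below, relying on the
* BTRFS_ENCODED_IO_COMPRESSION_LZO_* values being consecutive:
*
* sectorsize 4K -> sectorsize_bits 12 -> LZO_4K + (12 - 12) = LZO_4K
* sectorsize 16K -> sectorsize_bits 14 -> LZO_4K + (14 - 12) = LZO_16K
* sectorsize 64K -> sectorsize_bits 16 -> LZO_4K + (16 - 12) = LZO_64K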
9310 */ 9311 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9312 return -EINVAL; 9313 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9314 (fs_info->sectorsize_bits - 12); 9315 case BTRFS_COMPRESS_ZSTD: 9316 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9317 default: 9318 return -EUCLEAN; 9319 } 9320 } 9321 9322 static ssize_t btrfs_encoded_read_inline( 9323 struct kiocb *iocb, 9324 struct iov_iter *iter, u64 start, 9325 u64 lockend, 9326 struct extent_state **cached_state, 9327 u64 extent_start, size_t count, 9328 struct btrfs_ioctl_encoded_io_args *encoded, 9329 bool *unlocked) 9330 { 9331 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9332 struct btrfs_root *root = inode->root; 9333 struct btrfs_fs_info *fs_info = root->fs_info; 9334 struct extent_io_tree *io_tree = &inode->io_tree; 9335 BTRFS_PATH_AUTO_FREE(path); 9336 struct extent_buffer *leaf; 9337 struct btrfs_file_extent_item *item; 9338 u64 ram_bytes; 9339 unsigned long ptr; 9340 void *tmp; 9341 ssize_t ret; 9342 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9343 9344 path = btrfs_alloc_path(); 9345 if (!path) 9346 return -ENOMEM; 9347 9348 path->nowait = nowait; 9349 9350 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9351 extent_start, 0); 9352 if (ret) { 9353 if (unlikely(ret > 0)) { 9354 /* The extent item disappeared? */ 9355 return -EIO; 9356 } 9357 return ret; 9358 } 9359 leaf = path->nodes[0]; 9360 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9361 9362 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9363 ptr = btrfs_file_extent_inline_start(item); 9364 9365 encoded->len = min_t(u64, extent_start + ram_bytes, 9366 inode->vfs_inode.i_size) - iocb->ki_pos; 9367 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9368 btrfs_file_extent_compression(leaf, item)); 9369 if (ret < 0) 9370 return ret; 9371 encoded->compression = ret; 9372 if (encoded->compression) { 9373 size_t inline_size; 9374 9375 inline_size = btrfs_file_extent_inline_item_len(leaf, 9376 path->slots[0]); 9377 if (inline_size > count) 9378 return -ENOBUFS; 9379 9380 count = inline_size; 9381 encoded->unencoded_len = ram_bytes; 9382 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9383 } else { 9384 count = min_t(u64, count, encoded->len); 9385 encoded->len = count; 9386 encoded->unencoded_len = count; 9387 ptr += iocb->ki_pos - extent_start; 9388 } 9389 9390 tmp = kmalloc(count, GFP_NOFS); 9391 if (!tmp) 9392 return -ENOMEM; 9393 9394 read_extent_buffer(leaf, tmp, ptr, count); 9395 btrfs_release_path(path); 9396 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9397 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9398 *unlocked = true; 9399 9400 ret = copy_to_iter(tmp, count, iter); 9401 if (ret != count) 9402 ret = -EFAULT; 9403 kfree(tmp); 9404 9405 return ret; 9406 } 9407 9408 struct btrfs_encoded_read_private { 9409 struct completion *sync_reads; 9410 void *uring_ctx; 9411 refcount_t pending_refs; 9412 blk_status_t status; 9413 }; 9414 9415 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9416 { 9417 struct btrfs_encoded_read_private *priv = bbio->private; 9418 9419 if (bbio->bio.bi_status) { 9420 /* 9421 * The memory barrier implied by the refcount_dec_and_test() here 9422 * pairs with the memory barrier implied by the refcount_dec_and_test() 9423 * in btrfs_encoded_read_regular_fill_pages() to ensure that 9424 * this write is observed before the load of status in 9425 * btrfs_encoded_read_regular_fill_pages(). 
9426 */
9427 WRITE_ONCE(priv->status, bbio->bio.bi_status);
9428 }
9429 if (refcount_dec_and_test(&priv->pending_refs)) {
9430 int err = blk_status_to_errno(READ_ONCE(priv->status));
9431
9432 if (priv->uring_ctx) {
9433 btrfs_uring_read_extent_endio(priv->uring_ctx, err);
9434 kfree(priv);
9435 } else {
9436 complete(priv->sync_reads);
9437 }
9438 }
9439 bio_put(&bbio->bio);
9440 }
9441
9442 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9443 u64 disk_bytenr, u64 disk_io_size,
9444 struct page **pages, void *uring_ctx)
9445 {
9446 struct btrfs_encoded_read_private *priv, sync_priv;
9447 struct completion sync_reads;
9448 unsigned long i = 0;
9449 struct btrfs_bio *bbio;
9450 int ret;
9451
9452 /*
9453 * Synchronous reads complete before this call returns and can keep their
9454 * private data on the stack; io_uring reads outlive it and need the heap.
9455 */
9456 if (uring_ctx) {
9457 priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
9458 if (!priv)
9459 return -ENOMEM;
9460 } else {
9461 priv = &sync_priv;
9462 init_completion(&sync_reads);
9463 priv->sync_reads = &sync_reads;
9464 }
9465
9466 refcount_set(&priv->pending_refs, 1);
9467 priv->status = 0;
9468 priv->uring_ctx = uring_ctx;
9469
9470 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9471 btrfs_encoded_read_endio, priv);
9472 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9473
9474 do {
9475 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9476
9477 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9478 refcount_inc(&priv->pending_refs);
9479 btrfs_submit_bbio(bbio, 0);
9480
9481 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9482 btrfs_encoded_read_endio, priv);
9483 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9484 continue;
9485 }
9486
9487 i++;
9488 disk_bytenr += bytes;
9489 disk_io_size -= bytes;
9490 } while (disk_io_size);
9491
9492 refcount_inc(&priv->pending_refs);
9493 btrfs_submit_bbio(bbio, 0);
9494
9495 if (uring_ctx) {
9496 if (refcount_dec_and_test(&priv->pending_refs)) {
9497 ret = blk_status_to_errno(READ_ONCE(priv->status));
9498 btrfs_uring_read_extent_endio(uring_ctx, ret);
9499 kfree(priv);
9500 return ret;
9501 }
9502
9503 return -EIOCBQUEUED;
9504 } else {
9505 if (!refcount_dec_and_test(&priv->pending_refs))
9506 wait_for_completion_io(&sync_reads);
9507 /* See btrfs_encoded_read_endio() for ordering.
*/ 9508 return blk_status_to_errno(READ_ONCE(priv->status)); 9509 } 9510 } 9511 9512 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter, 9513 u64 start, u64 lockend, 9514 struct extent_state **cached_state, 9515 u64 disk_bytenr, u64 disk_io_size, 9516 size_t count, bool compressed, bool *unlocked) 9517 { 9518 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9519 struct extent_io_tree *io_tree = &inode->io_tree; 9520 struct page **pages; 9521 unsigned long nr_pages, i; 9522 u64 cur; 9523 size_t page_offset; 9524 ssize_t ret; 9525 9526 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9527 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9528 if (!pages) 9529 return -ENOMEM; 9530 ret = btrfs_alloc_page_array(nr_pages, pages, false); 9531 if (ret) { 9532 ret = -ENOMEM; 9533 goto out; 9534 } 9535 9536 ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr, 9537 disk_io_size, pages, NULL); 9538 if (ret) 9539 goto out; 9540 9541 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9542 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9543 *unlocked = true; 9544 9545 if (compressed) { 9546 i = 0; 9547 page_offset = 0; 9548 } else { 9549 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 9550 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 9551 } 9552 cur = 0; 9553 while (cur < count) { 9554 size_t bytes = min_t(size_t, count - cur, 9555 PAGE_SIZE - page_offset); 9556 9557 if (copy_page_to_iter(pages[i], page_offset, bytes, 9558 iter) != bytes) { 9559 ret = -EFAULT; 9560 goto out; 9561 } 9562 i++; 9563 cur += bytes; 9564 page_offset = 0; 9565 } 9566 ret = count; 9567 out: 9568 for (i = 0; i < nr_pages; i++) { 9569 if (pages[i]) 9570 __free_page(pages[i]); 9571 } 9572 kfree(pages); 9573 return ret; 9574 } 9575 9576 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 9577 struct btrfs_ioctl_encoded_io_args *encoded, 9578 struct extent_state **cached_state, 9579 u64 *disk_bytenr, u64 *disk_io_size) 9580 { 9581 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9582 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9583 struct extent_io_tree *io_tree = &inode->io_tree; 9584 ssize_t ret; 9585 size_t count = iov_iter_count(iter); 9586 u64 start, lockend; 9587 struct extent_map *em; 9588 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9589 bool unlocked = false; 9590 9591 file_accessed(iocb->ki_filp); 9592 9593 ret = btrfs_inode_lock(inode, 9594 BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0)); 9595 if (ret) 9596 return ret; 9597 9598 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 9599 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9600 return 0; 9601 } 9602 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 9603 /* 9604 * We don't know how long the extent containing iocb->ki_pos is, but if 9605 * it's compressed we know that it won't be longer than this. 
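*
* (BTRFS_MAX_UNCOMPRESSED is 128K at the time of writing, so at most
* 128K starting at the sector containing ki_pos is locked here.)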
9606 */ 9607 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 9608 9609 if (nowait) { 9610 struct btrfs_ordered_extent *ordered; 9611 9612 if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping, 9613 start, lockend)) { 9614 ret = -EAGAIN; 9615 goto out_unlock_inode; 9616 } 9617 9618 if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) { 9619 ret = -EAGAIN; 9620 goto out_unlock_inode; 9621 } 9622 9623 ordered = btrfs_lookup_ordered_range(inode, start, 9624 lockend - start + 1); 9625 if (ordered) { 9626 btrfs_put_ordered_extent(ordered); 9627 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9628 ret = -EAGAIN; 9629 goto out_unlock_inode; 9630 } 9631 } else { 9632 for (;;) { 9633 struct btrfs_ordered_extent *ordered; 9634 9635 ret = btrfs_wait_ordered_range(inode, start, 9636 lockend - start + 1); 9637 if (ret) 9638 goto out_unlock_inode; 9639 9640 btrfs_lock_extent(io_tree, start, lockend, cached_state); 9641 ordered = btrfs_lookup_ordered_range(inode, start, 9642 lockend - start + 1); 9643 if (!ordered) 9644 break; 9645 btrfs_put_ordered_extent(ordered); 9646 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9647 cond_resched(); 9648 } 9649 } 9650 9651 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1); 9652 if (IS_ERR(em)) { 9653 ret = PTR_ERR(em); 9654 goto out_unlock_extent; 9655 } 9656 9657 if (em->disk_bytenr == EXTENT_MAP_INLINE) { 9658 u64 extent_start = em->start; 9659 9660 /* 9661 * For inline extents we get everything we need out of the 9662 * extent item. 9663 */ 9664 btrfs_free_extent_map(em); 9665 em = NULL; 9666 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 9667 cached_state, extent_start, 9668 count, encoded, &unlocked); 9669 goto out_unlock_extent; 9670 } 9671 9672 /* 9673 * We only want to return up to EOF even if the extent extends beyond 9674 * that. 9675 */ 9676 encoded->len = min_t(u64, btrfs_extent_map_end(em), 9677 inode->vfs_inode.i_size) - iocb->ki_pos; 9678 if (em->disk_bytenr == EXTENT_MAP_HOLE || 9679 (em->flags & EXTENT_FLAG_PREALLOC)) { 9680 *disk_bytenr = EXTENT_MAP_HOLE; 9681 count = min_t(u64, count, encoded->len); 9682 encoded->len = count; 9683 encoded->unencoded_len = count; 9684 } else if (btrfs_extent_map_is_compressed(em)) { 9685 *disk_bytenr = em->disk_bytenr; 9686 /* 9687 * Bail if the buffer isn't large enough to return the whole 9688 * compressed extent. 9689 */ 9690 if (em->disk_num_bytes > count) { 9691 ret = -ENOBUFS; 9692 goto out_em; 9693 } 9694 *disk_io_size = em->disk_num_bytes; 9695 count = em->disk_num_bytes; 9696 encoded->unencoded_len = em->ram_bytes; 9697 encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset); 9698 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9699 btrfs_extent_map_compression(em)); 9700 if (ret < 0) 9701 goto out_em; 9702 encoded->compression = ret; 9703 } else { 9704 *disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start); 9705 if (encoded->len > count) 9706 encoded->len = count; 9707 /* 9708 * Don't read beyond what we locked. This also limits the page 9709 * allocations that we'll do. 
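*
* (disk_io_size is rounded up to the sector size right below, which also
* bounds the number of pages btrfs_encoded_read_regular() allocates.)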
9710 */ 9711 *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 9712 count = start + *disk_io_size - iocb->ki_pos; 9713 encoded->len = count; 9714 encoded->unencoded_len = count; 9715 *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize); 9716 } 9717 btrfs_free_extent_map(em); 9718 em = NULL; 9719 9720 if (*disk_bytenr == EXTENT_MAP_HOLE) { 9721 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9722 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9723 unlocked = true; 9724 ret = iov_iter_zero(count, iter); 9725 if (ret != count) 9726 ret = -EFAULT; 9727 } else { 9728 ret = -EIOCBQUEUED; 9729 goto out_unlock_extent; 9730 } 9731 9732 out_em: 9733 btrfs_free_extent_map(em); 9734 out_unlock_extent: 9735 /* Leave inode and extent locked if we need to do a read. */ 9736 if (!unlocked && ret != -EIOCBQUEUED) 9737 btrfs_unlock_extent(io_tree, start, lockend, cached_state); 9738 out_unlock_inode: 9739 if (!unlocked && ret != -EIOCBQUEUED) 9740 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9741 return ret; 9742 } 9743 9744 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 9745 const struct btrfs_ioctl_encoded_io_args *encoded) 9746 { 9747 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9748 struct btrfs_root *root = inode->root; 9749 struct btrfs_fs_info *fs_info = root->fs_info; 9750 struct extent_io_tree *io_tree = &inode->io_tree; 9751 struct extent_changeset *data_reserved = NULL; 9752 struct extent_state *cached_state = NULL; 9753 struct btrfs_ordered_extent *ordered; 9754 struct btrfs_file_extent file_extent; 9755 int compression; 9756 size_t orig_count; 9757 u64 start, end; 9758 u64 num_bytes, ram_bytes, disk_num_bytes; 9759 unsigned long nr_folios, i; 9760 struct folio **folios; 9761 struct btrfs_key ins; 9762 bool extent_reserved = false; 9763 struct extent_map *em; 9764 ssize_t ret; 9765 9766 switch (encoded->compression) { 9767 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 9768 compression = BTRFS_COMPRESS_ZLIB; 9769 break; 9770 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 9771 compression = BTRFS_COMPRESS_ZSTD; 9772 break; 9773 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 9774 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 9775 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 9776 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 9777 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 9778 /* The sector size must match for LZO. */ 9779 if (encoded->compression - 9780 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 9781 fs_info->sectorsize_bits) 9782 return -EINVAL; 9783 compression = BTRFS_COMPRESS_LZO; 9784 break; 9785 default: 9786 return -EINVAL; 9787 } 9788 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 9789 return -EINVAL; 9790 9791 /* 9792 * Compressed extents should always have checksums, so error out if we 9793 * have a NOCOW file or inode was created while mounted with NODATASUM. 9794 */ 9795 if (inode->flags & BTRFS_INODE_NODATASUM) 9796 return -EINVAL; 9797 9798 orig_count = iov_iter_count(from); 9799 9800 /* The extent size must be sane. */ 9801 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 9802 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 9803 return -EINVAL; 9804 9805 /* 9806 * The compressed data must be smaller than the decompressed data. 9807 * 9808 * It's of course possible for data to compress to larger or the same 9809 * size, but the buffered I/O path falls back to no compression for such 9810 * data, and we don't want to break any assumptions by creating these 9811 * extents. 
9812 * 9813 * Note that this is less strict than the current check we have that the 9814 * compressed data must be at least one sector smaller than the 9815 * decompressed data. We only want to enforce the weaker requirement 9816 * from old kernels that it is at least one byte smaller. 9817 */ 9818 if (orig_count >= encoded->unencoded_len) 9819 return -EINVAL; 9820 9821 /* The extent must start on a sector boundary. */ 9822 start = iocb->ki_pos; 9823 if (!IS_ALIGNED(start, fs_info->sectorsize)) 9824 return -EINVAL; 9825 9826 /* 9827 * The extent must end on a sector boundary. However, we allow a write 9828 * which ends at or extends i_size to have an unaligned length; we round 9829 * up the extent size and set i_size to the unaligned end. 9830 */ 9831 if (start + encoded->len < inode->vfs_inode.i_size && 9832 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 9833 return -EINVAL; 9834 9835 /* Finally, the offset in the unencoded data must be sector-aligned. */ 9836 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 9837 return -EINVAL; 9838 9839 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 9840 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 9841 end = start + num_bytes - 1; 9842 9843 /* 9844 * If the extent cannot be inline, the compressed data on disk must be 9845 * sector-aligned. For convenience, we extend it with zeroes if it 9846 * isn't. 9847 */ 9848 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 9849 nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 9850 folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT); 9851 if (!folios) 9852 return -ENOMEM; 9853 for (i = 0; i < nr_folios; i++) { 9854 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 9855 char *kaddr; 9856 9857 folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0); 9858 if (!folios[i]) { 9859 ret = -ENOMEM; 9860 goto out_folios; 9861 } 9862 kaddr = kmap_local_folio(folios[i], 0); 9863 if (copy_from_iter(kaddr, bytes, from) != bytes) { 9864 kunmap_local(kaddr); 9865 ret = -EFAULT; 9866 goto out_folios; 9867 } 9868 if (bytes < PAGE_SIZE) 9869 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 9870 kunmap_local(kaddr); 9871 } 9872 9873 for (;;) { 9874 ret = btrfs_wait_ordered_range(inode, start, num_bytes); 9875 if (ret) 9876 goto out_folios; 9877 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 9878 start >> PAGE_SHIFT, 9879 end >> PAGE_SHIFT); 9880 if (ret) 9881 goto out_folios; 9882 btrfs_lock_extent(io_tree, start, end, &cached_state); 9883 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 9884 if (!ordered && 9885 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 9886 break; 9887 if (ordered) 9888 btrfs_put_ordered_extent(ordered); 9889 btrfs_unlock_extent(io_tree, start, end, &cached_state); 9890 cond_resched(); 9891 } 9892 9893 /* 9894 * We don't use the higher-level delalloc space functions because our 9895 * num_bytes and disk_num_bytes are different. 9896 */ 9897 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 9898 if (ret) 9899 goto out_unlock; 9900 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 9901 if (ret) 9902 goto out_free_data_space; 9903 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 9904 false); 9905 if (ret) 9906 goto out_qgroup_free_data; 9907 9908 /* Try an inline extent first. 
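*
* (Only possible when the write covers the entire unencoded data, i.e.
* unencoded_len == len and unencoded_offset == 0, as the condition below
* checks together with can_cow_file_range_inline().)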
*/ 9909 if (encoded->unencoded_len == encoded->len && 9910 encoded->unencoded_offset == 0 && 9911 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) { 9912 ret = __cow_file_range_inline(inode, encoded->len, 9913 orig_count, compression, folios[0], 9914 true); 9915 if (ret <= 0) { 9916 if (ret == 0) 9917 ret = orig_count; 9918 goto out_delalloc_release; 9919 } 9920 } 9921 9922 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 9923 disk_num_bytes, 0, 0, &ins, true, true); 9924 if (ret) 9925 goto out_delalloc_release; 9926 extent_reserved = true; 9927 9928 file_extent.disk_bytenr = ins.objectid; 9929 file_extent.disk_num_bytes = ins.offset; 9930 file_extent.num_bytes = num_bytes; 9931 file_extent.ram_bytes = ram_bytes; 9932 file_extent.offset = encoded->unencoded_offset; 9933 file_extent.compression = compression; 9934 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED); 9935 if (IS_ERR(em)) { 9936 ret = PTR_ERR(em); 9937 goto out_free_reserved; 9938 } 9939 btrfs_free_extent_map(em); 9940 9941 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 9942 (1U << BTRFS_ORDERED_ENCODED) | 9943 (1U << BTRFS_ORDERED_COMPRESSED)); 9944 if (IS_ERR(ordered)) { 9945 btrfs_drop_extent_map_range(inode, start, end, false); 9946 ret = PTR_ERR(ordered); 9947 goto out_free_reserved; 9948 } 9949 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9950 9951 if (start + encoded->len > inode->vfs_inode.i_size) 9952 i_size_write(&inode->vfs_inode, start + encoded->len); 9953 9954 btrfs_unlock_extent(io_tree, start, end, &cached_state); 9955 9956 btrfs_delalloc_release_extents(inode, num_bytes); 9957 9958 btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false); 9959 ret = orig_count; 9960 goto out; 9961 9962 out_free_reserved: 9963 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9964 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); 9965 out_delalloc_release: 9966 btrfs_delalloc_release_extents(inode, num_bytes); 9967 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 9968 out_qgroup_free_data: 9969 if (ret < 0) 9970 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL); 9971 out_free_data_space: 9972 /* 9973 * If btrfs_reserve_extent() succeeded, then we already decremented 9974 * bytes_may_use. 9975 */ 9976 if (!extent_reserved) 9977 btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes); 9978 out_unlock: 9979 btrfs_unlock_extent(io_tree, start, end, &cached_state); 9980 out_folios: 9981 for (i = 0; i < nr_folios; i++) { 9982 if (folios[i]) 9983 folio_put(folios[i]); 9984 } 9985 kvfree(folios); 9986 out: 9987 if (ret >= 0) 9988 iocb->ki_pos += encoded->len; 9989 return ret; 9990 } 9991 9992 #ifdef CONFIG_SWAP 9993 /* 9994 * Add an entry indicating a block group or device which is pinned by a 9995 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 9996 * negative errno on failure. 
9997 */ 9998 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 9999 bool is_block_group) 10000 { 10001 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10002 struct btrfs_swapfile_pin *sp, *entry; 10003 struct rb_node **p; 10004 struct rb_node *parent = NULL; 10005 10006 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10007 if (!sp) 10008 return -ENOMEM; 10009 sp->ptr = ptr; 10010 sp->inode = inode; 10011 sp->is_block_group = is_block_group; 10012 sp->bg_extent_count = 1; 10013 10014 spin_lock(&fs_info->swapfile_pins_lock); 10015 p = &fs_info->swapfile_pins.rb_node; 10016 while (*p) { 10017 parent = *p; 10018 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10019 if (sp->ptr < entry->ptr || 10020 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10021 p = &(*p)->rb_left; 10022 } else if (sp->ptr > entry->ptr || 10023 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10024 p = &(*p)->rb_right; 10025 } else { 10026 if (is_block_group) 10027 entry->bg_extent_count++; 10028 spin_unlock(&fs_info->swapfile_pins_lock); 10029 kfree(sp); 10030 return 1; 10031 } 10032 } 10033 rb_link_node(&sp->node, parent, p); 10034 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10035 spin_unlock(&fs_info->swapfile_pins_lock); 10036 return 0; 10037 } 10038 10039 /* Free all of the entries pinned by this swapfile. */ 10040 static void btrfs_free_swapfile_pins(struct inode *inode) 10041 { 10042 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10043 struct btrfs_swapfile_pin *sp; 10044 struct rb_node *node, *next; 10045 10046 spin_lock(&fs_info->swapfile_pins_lock); 10047 node = rb_first(&fs_info->swapfile_pins); 10048 while (node) { 10049 next = rb_next(node); 10050 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10051 if (sp->inode == inode) { 10052 rb_erase(&sp->node, &fs_info->swapfile_pins); 10053 if (sp->is_block_group) { 10054 btrfs_dec_block_group_swap_extents(sp->ptr, 10055 sp->bg_extent_count); 10056 btrfs_put_block_group(sp->ptr); 10057 } 10058 kfree(sp); 10059 } 10060 node = next; 10061 } 10062 spin_unlock(&fs_info->swapfile_pins_lock); 10063 } 10064 10065 struct btrfs_swap_info { 10066 u64 start; 10067 u64 block_start; 10068 u64 block_len; 10069 u64 lowest_ppage; 10070 u64 highest_ppage; 10071 unsigned long nr_pages; 10072 int nr_extents; 10073 }; 10074 10075 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10076 struct btrfs_swap_info *bsi) 10077 { 10078 unsigned long nr_pages; 10079 unsigned long max_pages; 10080 u64 first_ppage, first_ppage_reported, next_ppage; 10081 int ret; 10082 10083 /* 10084 * Our swapfile may have had its size extended after the swap header was 10085 * written. In that case activating the swapfile should not go beyond 10086 * the max size set in the swap header. 
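*
* (sis->max was initialized from the swap header by the generic swapon
* code before ->swap_activate was called, so extents beyond it are
* simply not added.)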
10087 */ 10088 if (bsi->nr_pages >= sis->max) 10089 return 0; 10090 10091 max_pages = sis->max - bsi->nr_pages; 10092 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10093 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10094 10095 if (first_ppage >= next_ppage) 10096 return 0; 10097 nr_pages = next_ppage - first_ppage; 10098 nr_pages = min(nr_pages, max_pages); 10099 10100 first_ppage_reported = first_ppage; 10101 if (bsi->start == 0) 10102 first_ppage_reported++; 10103 if (bsi->lowest_ppage > first_ppage_reported) 10104 bsi->lowest_ppage = first_ppage_reported; 10105 if (bsi->highest_ppage < (next_ppage - 1)) 10106 bsi->highest_ppage = next_ppage - 1; 10107 10108 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10109 if (ret < 0) 10110 return ret; 10111 bsi->nr_extents += ret; 10112 bsi->nr_pages += nr_pages; 10113 return 0; 10114 } 10115 10116 static void btrfs_swap_deactivate(struct file *file) 10117 { 10118 struct inode *inode = file_inode(file); 10119 10120 btrfs_free_swapfile_pins(inode); 10121 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10122 } 10123 10124 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10125 sector_t *span) 10126 { 10127 struct inode *inode = file_inode(file); 10128 struct btrfs_root *root = BTRFS_I(inode)->root; 10129 struct btrfs_fs_info *fs_info = root->fs_info; 10130 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10131 struct extent_state *cached_state = NULL; 10132 struct btrfs_chunk_map *map = NULL; 10133 struct btrfs_device *device = NULL; 10134 struct btrfs_swap_info bsi = { 10135 .lowest_ppage = (sector_t)-1ULL, 10136 }; 10137 struct btrfs_backref_share_check_ctx *backref_ctx = NULL; 10138 struct btrfs_path *path = NULL; 10139 int ret = 0; 10140 u64 isize; 10141 u64 prev_extent_end = 0; 10142 10143 /* 10144 * Acquire the inode's mmap lock to prevent races with memory mapped 10145 * writes, as they could happen after we flush delalloc below and before 10146 * we lock the extent range further below. The inode was already locked 10147 * up in the call chain. 10148 */ 10149 btrfs_assert_inode_locked(BTRFS_I(inode)); 10150 down_write(&BTRFS_I(inode)->i_mmap_lock); 10151 10152 /* 10153 * If the swap file was just created, make sure delalloc is done. If the 10154 * file changes again after this, the user is doing something stupid and 10155 * we don't really care. 10156 */ 10157 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 10158 if (ret) 10159 goto out_unlock_mmap; 10160 10161 /* 10162 * The inode is locked, so these flags won't change after we check them. 10163 */ 10164 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10165 btrfs_warn(fs_info, "swapfile must not be compressed"); 10166 ret = -EINVAL; 10167 goto out_unlock_mmap; 10168 } 10169 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10170 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10171 ret = -EINVAL; 10172 goto out_unlock_mmap; 10173 } 10174 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10175 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10176 ret = -EINVAL; 10177 goto out_unlock_mmap; 10178 } 10179 10180 path = btrfs_alloc_path(); 10181 backref_ctx = btrfs_alloc_backref_share_check_ctx(); 10182 if (!path || !backref_ctx) { 10183 ret = -ENOMEM; 10184 goto out_unlock_mmap; 10185 } 10186 10187 /* 10188 * Balance or device remove/replace/resize can move stuff around from 10189 * under us. 
The exclop protection makes sure they aren't running/won't
10190 * run concurrently while we are mapping the swap extents, and
10191 * fs_info->swapfile_pins prevents them from running while the swap
10192 * file is active and moving the extents. Note that this also prevents
10193 * a concurrent device add which isn't actually necessary, but it's not
10194 * really worth the trouble to allow it.
10195 */
10196 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10197 btrfs_warn(fs_info,
10198 "cannot activate swapfile while exclusive operation is running");
10199 ret = -EBUSY;
10200 goto out_unlock_mmap;
10201 }
10202
10203 /*
10204 * Prevent snapshot creation while we are activating the swap file.
10205 * We do not want to race with snapshot creation. If snapshot creation
10206 * already started before we bumped nr_swapfiles from 0 to 1 and
10207 * completes before the first write into the swap file after it is
10208 * activated, then that write would fall back to COW.
10209 */
10210 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10211 btrfs_exclop_finish(fs_info);
10212 btrfs_warn(fs_info,
10213 "cannot activate swapfile because snapshot creation is in progress");
10214 ret = -EINVAL;
10215 goto out_unlock_mmap;
10216 }
10217 /*
10218 * Snapshots can create extents which require COW even if NODATACOW is
10219 * set. We use this counter to prevent snapshots. We must increment it
10220 * before walking the extents because we don't want a concurrent
10221 * snapshot to run after we've already checked the extents.
10222 *
10223 * It is possible that the subvolume is marked for deletion but not yet
10224 * removed. To prevent this race, we check the root status before
10225 * activating the swapfile.
10226 */
10227 spin_lock(&root->root_item_lock);
10228 if (btrfs_root_dead(root)) {
10229 spin_unlock(&root->root_item_lock);
10230
10231 btrfs_drew_write_unlock(&root->snapshot_lock);
10232 btrfs_exclop_finish(fs_info);
10233 btrfs_warn(fs_info,
10234 "cannot activate swapfile because subvolume %llu is being deleted",
10235 btrfs_root_id(root));
10236 ret = -EPERM;
10237 goto out_unlock_mmap;
10238 }
10239 atomic_inc(&root->nr_swapfiles);
10240 spin_unlock(&root->root_item_lock);
10241
10242 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10243
10244 btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
10245 while (prev_extent_end < isize) {
10246 struct btrfs_key key;
10247 struct extent_buffer *leaf;
10248 struct btrfs_file_extent_item *ei;
10249 struct btrfs_block_group *bg;
10250 u64 logical_block_start;
10251 u64 physical_block_start;
10252 u64 extent_gen;
10253 u64 disk_bytenr;
10254 u64 len;
10255
10256 key.objectid = btrfs_ino(BTRFS_I(inode));
10257 key.type = BTRFS_EXTENT_DATA_KEY;
10258 key.offset = prev_extent_end;
10259
10260 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
10261 if (ret < 0)
10262 goto out;
10263
10264 /*
10265 * If the key is not found it means we have an implicit hole
10266 * (NO_HOLES is enabled).
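*
* Swap IO is done directly against the mapped extents and bypasses the
* filesystem, so there is nothing sensible to map a hole to; reject the
* file instead.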
10267 */ 10268 if (ret > 0) { 10269 btrfs_warn(fs_info, "swapfile must not have holes"); 10270 ret = -EINVAL; 10271 goto out; 10272 } 10273 10274 leaf = path->nodes[0]; 10275 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 10276 10277 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) { 10278 /* 10279 * It's unlikely we'll ever actually find ourselves 10280 * here, as a file small enough to fit inline won't be 10281 * big enough to store more than the swap header, but in 10282 * case something changes in the future, let's catch it 10283 * here rather than later. 10284 */ 10285 btrfs_warn(fs_info, "swapfile must not be inline"); 10286 ret = -EINVAL; 10287 goto out; 10288 } 10289 10290 if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) { 10291 btrfs_warn(fs_info, "swapfile must not be compressed"); 10292 ret = -EINVAL; 10293 goto out; 10294 } 10295 10296 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); 10297 if (disk_bytenr == 0) { 10298 btrfs_warn(fs_info, "swapfile must not have holes"); 10299 ret = -EINVAL; 10300 goto out; 10301 } 10302 10303 logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei); 10304 extent_gen = btrfs_file_extent_generation(leaf, ei); 10305 prev_extent_end = btrfs_file_extent_end(path); 10306 10307 if (prev_extent_end > isize) 10308 len = isize - key.offset; 10309 else 10310 len = btrfs_file_extent_num_bytes(leaf, ei); 10311 10312 backref_ctx->curr_leaf_bytenr = leaf->start; 10313 10314 /* 10315 * Don't need the path anymore, release to avoid deadlocks when 10316 * calling btrfs_is_data_extent_shared() because when joining a 10317 * transaction it can block waiting for the current one's commit 10318 * which in turn may be trying to lock the same leaf to flush 10319 * delayed items for example. 10320 */ 10321 btrfs_release_path(path); 10322 10323 ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr, 10324 extent_gen, backref_ctx); 10325 if (ret < 0) { 10326 goto out; 10327 } else if (ret > 0) { 10328 btrfs_warn(fs_info, 10329 "swapfile must not be copy-on-write"); 10330 ret = -EINVAL; 10331 goto out; 10332 } 10333 10334 map = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10335 if (IS_ERR(map)) { 10336 ret = PTR_ERR(map); 10337 goto out; 10338 } 10339 10340 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10341 btrfs_warn(fs_info, 10342 "swapfile must have single data profile"); 10343 ret = -EINVAL; 10344 goto out; 10345 } 10346 10347 if (device == NULL) { 10348 device = map->stripes[0].dev; 10349 ret = btrfs_add_swapfile_pin(inode, device, false); 10350 if (ret == 1) 10351 ret = 0; 10352 else if (ret) 10353 goto out; 10354 } else if (device != map->stripes[0].dev) { 10355 btrfs_warn(fs_info, "swapfile must be on one device"); 10356 ret = -EINVAL; 10357 goto out; 10358 } 10359 10360 physical_block_start = (map->stripes[0].physical + 10361 (logical_block_start - map->start)); 10362 btrfs_free_chunk_map(map); 10363 map = NULL; 10364 10365 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10366 if (!bg) { 10367 btrfs_warn(fs_info, 10368 "could not find block group containing swapfile"); 10369 ret = -EINVAL; 10370 goto out; 10371 } 10372 10373 if (!btrfs_inc_block_group_swap_extents(bg)) { 10374 btrfs_warn(fs_info, 10375 "block group for swapfile at %llu is read-only%s", 10376 bg->start, 10377 atomic_read(&fs_info->scrubs_running) ? 
10378 " (scrub running)" : ""); 10379 btrfs_put_block_group(bg); 10380 ret = -EINVAL; 10381 goto out; 10382 } 10383 10384 ret = btrfs_add_swapfile_pin(inode, bg, true); 10385 if (ret) { 10386 btrfs_put_block_group(bg); 10387 if (ret == 1) 10388 ret = 0; 10389 else 10390 goto out; 10391 } 10392 10393 if (bsi.block_len && 10394 bsi.block_start + bsi.block_len == physical_block_start) { 10395 bsi.block_len += len; 10396 } else { 10397 if (bsi.block_len) { 10398 ret = btrfs_add_swap_extent(sis, &bsi); 10399 if (ret) 10400 goto out; 10401 } 10402 bsi.start = key.offset; 10403 bsi.block_start = physical_block_start; 10404 bsi.block_len = len; 10405 } 10406 10407 if (fatal_signal_pending(current)) { 10408 ret = -EINTR; 10409 goto out; 10410 } 10411 10412 cond_resched(); 10413 } 10414 10415 if (bsi.block_len) 10416 ret = btrfs_add_swap_extent(sis, &bsi); 10417 10418 out: 10419 if (!IS_ERR_OR_NULL(map)) 10420 btrfs_free_chunk_map(map); 10421 10422 btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state); 10423 10424 if (ret) 10425 btrfs_swap_deactivate(file); 10426 10427 btrfs_drew_write_unlock(&root->snapshot_lock); 10428 10429 btrfs_exclop_finish(fs_info); 10430 10431 out_unlock_mmap: 10432 up_write(&BTRFS_I(inode)->i_mmap_lock); 10433 btrfs_free_backref_share_ctx(backref_ctx); 10434 btrfs_free_path(path); 10435 if (ret) 10436 return ret; 10437 10438 if (device) 10439 sis->bdev = device->bdev; 10440 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10441 sis->max = bsi.nr_pages; 10442 sis->pages = bsi.nr_pages - 1; 10443 return bsi.nr_extents; 10444 } 10445 #else 10446 static void btrfs_swap_deactivate(struct file *file) 10447 { 10448 } 10449 10450 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10451 sector_t *span) 10452 { 10453 return -EOPNOTSUPP; 10454 } 10455 #endif 10456 10457 /* 10458 * Update the number of bytes used in the VFS' inode. When we replace extents in 10459 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10460 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10461 * always get a correct value. 10462 */ 10463 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10464 const u64 add_bytes, 10465 const u64 del_bytes) 10466 { 10467 if (add_bytes == del_bytes) 10468 return; 10469 10470 spin_lock(&inode->lock); 10471 if (del_bytes > 0) 10472 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10473 if (add_bytes > 0) 10474 inode_add_bytes(&inode->vfs_inode, add_bytes); 10475 spin_unlock(&inode->lock); 10476 } 10477 10478 /* 10479 * Verify that there are no ordered extents for a given file range. 10480 * 10481 * @inode: The target inode. 10482 * @start: Start offset of the file range, should be sector size aligned. 10483 * @end: End offset (inclusive) of the file range, its value +1 should be 10484 * sector size aligned. 10485 * 10486 * This should typically be used for cases where we locked an inode's VFS lock in 10487 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10488 * we have flushed all delalloc in the range, we have waited for all ordered 10489 * extents in the range to complete and finally we have locked the file range in 10490 * the inode's io_tree. 
10491 */ 10492 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) 10493 { 10494 struct btrfs_root *root = inode->root; 10495 struct btrfs_ordered_extent *ordered; 10496 10497 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 10498 return; 10499 10500 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); 10501 if (ordered) { 10502 btrfs_err(root->fs_info, 10503 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", 10504 start, end, btrfs_ino(inode), btrfs_root_id(root), 10505 ordered->file_offset, 10506 ordered->file_offset + ordered->num_bytes - 1); 10507 btrfs_put_ordered_extent(ordered); 10508 } 10509 10510 ASSERT(ordered == NULL); 10511 } 10512 10513 /* 10514 * Find the first inode with a minimum number. 10515 * 10516 * @root: The root to search for. 10517 * @min_ino: The minimum inode number. 10518 * 10519 * Find the first inode in the @root with a number >= @min_ino and return it. 10520 * Returns NULL if no such inode found. 10521 */ 10522 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino) 10523 { 10524 struct btrfs_inode *inode; 10525 unsigned long from = min_ino; 10526 10527 xa_lock(&root->inodes); 10528 while (true) { 10529 inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); 10530 if (!inode) 10531 break; 10532 if (igrab(&inode->vfs_inode)) 10533 break; 10534 10535 from = btrfs_ino(inode) + 1; 10536 cond_resched_lock(&root->inodes.xa_lock); 10537 } 10538 xa_unlock(&root->inodes); 10539 10540 return inode; 10541 } 10542 10543 static const struct inode_operations btrfs_dir_inode_operations = { 10544 .getattr = btrfs_getattr, 10545 .lookup = btrfs_lookup, 10546 .create = btrfs_create, 10547 .unlink = btrfs_unlink, 10548 .link = btrfs_link, 10549 .mkdir = btrfs_mkdir, 10550 .rmdir = btrfs_rmdir, 10551 .rename = btrfs_rename2, 10552 .symlink = btrfs_symlink, 10553 .setattr = btrfs_setattr, 10554 .mknod = btrfs_mknod, 10555 .listxattr = btrfs_listxattr, 10556 .permission = btrfs_permission, 10557 .get_inode_acl = btrfs_get_acl, 10558 .set_acl = btrfs_set_acl, 10559 .update_time = btrfs_update_time, 10560 .tmpfile = btrfs_tmpfile, 10561 .fileattr_get = btrfs_fileattr_get, 10562 .fileattr_set = btrfs_fileattr_set, 10563 }; 10564 10565 static const struct file_operations btrfs_dir_file_operations = { 10566 .llseek = btrfs_dir_llseek, 10567 .read = generic_read_dir, 10568 .iterate_shared = btrfs_real_readdir, 10569 .open = btrfs_opendir, 10570 .unlocked_ioctl = btrfs_ioctl, 10571 #ifdef CONFIG_COMPAT 10572 .compat_ioctl = btrfs_compat_ioctl, 10573 #endif 10574 .release = btrfs_release_file, 10575 .fsync = btrfs_sync_file, 10576 }; 10577 10578 /* 10579 * btrfs doesn't support the bmap operation because swapfiles 10580 * use bmap to make a mapping of extents in the file. They assume 10581 * these extents won't change over the life of the file and they 10582 * use the bmap result to do IO directly to the drive. 10583 * 10584 * the btrfs bmap call would return logical addresses that aren't 10585 * suitable for IO and they also will change frequently as COW 10586 * operations happen. So, swapfile + btrfs == corruption. 10587 * 10588 * For now we're avoiding this by dropping bmap. 
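*
* Swapfile support is instead provided by the swap_activate and
* swap_deactivate address space operations wired up below, which
* validate and pin the extents up front.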
10589 */ 10590 static const struct address_space_operations btrfs_aops = { 10591 .read_folio = btrfs_read_folio, 10592 .writepages = btrfs_writepages, 10593 .readahead = btrfs_readahead, 10594 .invalidate_folio = btrfs_invalidate_folio, 10595 .launder_folio = btrfs_launder_folio, 10596 .release_folio = btrfs_release_folio, 10597 .migrate_folio = btrfs_migrate_folio, 10598 .dirty_folio = filemap_dirty_folio, 10599 .error_remove_folio = generic_error_remove_folio, 10600 .swap_activate = btrfs_swap_activate, 10601 .swap_deactivate = btrfs_swap_deactivate, 10602 }; 10603 10604 static const struct inode_operations btrfs_file_inode_operations = { 10605 .getattr = btrfs_getattr, 10606 .setattr = btrfs_setattr, 10607 .listxattr = btrfs_listxattr, 10608 .permission = btrfs_permission, 10609 .fiemap = btrfs_fiemap, 10610 .get_inode_acl = btrfs_get_acl, 10611 .set_acl = btrfs_set_acl, 10612 .update_time = btrfs_update_time, 10613 .fileattr_get = btrfs_fileattr_get, 10614 .fileattr_set = btrfs_fileattr_set, 10615 }; 10616 static const struct inode_operations btrfs_special_inode_operations = { 10617 .getattr = btrfs_getattr, 10618 .setattr = btrfs_setattr, 10619 .permission = btrfs_permission, 10620 .listxattr = btrfs_listxattr, 10621 .get_inode_acl = btrfs_get_acl, 10622 .set_acl = btrfs_set_acl, 10623 .update_time = btrfs_update_time, 10624 }; 10625 static const struct inode_operations btrfs_symlink_inode_operations = { 10626 .get_link = page_get_link, 10627 .getattr = btrfs_getattr, 10628 .setattr = btrfs_setattr, 10629 .permission = btrfs_permission, 10630 .listxattr = btrfs_listxattr, 10631 .update_time = btrfs_update_time, 10632 }; 10633 10634 const struct dentry_operations btrfs_dentry_operations = { 10635 .d_delete = btrfs_dentry_delete, 10636 }; 10637