1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <crypto/hash.h> 7 #include <linux/kernel.h> 8 #include <linux/bio.h> 9 #include <linux/blk-cgroup.h> 10 #include <linux/file.h> 11 #include <linux/fs.h> 12 #include <linux/pagemap.h> 13 #include <linux/highmem.h> 14 #include <linux/time.h> 15 #include <linux/init.h> 16 #include <linux/string.h> 17 #include <linux/backing-dev.h> 18 #include <linux/writeback.h> 19 #include <linux/compat.h> 20 #include <linux/xattr.h> 21 #include <linux/posix_acl.h> 22 #include <linux/falloc.h> 23 #include <linux/slab.h> 24 #include <linux/ratelimit.h> 25 #include <linux/btrfs.h> 26 #include <linux/blkdev.h> 27 #include <linux/posix_acl_xattr.h> 28 #include <linux/uio.h> 29 #include <linux/magic.h> 30 #include <linux/iversion.h> 31 #include <linux/swap.h> 32 #include <linux/migrate.h> 33 #include <linux/sched/mm.h> 34 #include <linux/iomap.h> 35 #include <linux/unaligned.h> 36 #include <linux/fsverity.h> 37 #include "misc.h" 38 #include "ctree.h" 39 #include "disk-io.h" 40 #include "transaction.h" 41 #include "btrfs_inode.h" 42 #include "ordered-data.h" 43 #include "xattr.h" 44 #include "tree-log.h" 45 #include "bio.h" 46 #include "compression.h" 47 #include "locking.h" 48 #include "props.h" 49 #include "qgroup.h" 50 #include "delalloc-space.h" 51 #include "block-group.h" 52 #include "space-info.h" 53 #include "zoned.h" 54 #include "subpage.h" 55 #include "inode-item.h" 56 #include "fs.h" 57 #include "accessors.h" 58 #include "extent-tree.h" 59 #include "root-tree.h" 60 #include "defrag.h" 61 #include "dir-item.h" 62 #include "file-item.h" 63 #include "uuid-tree.h" 64 #include "ioctl.h" 65 #include "file.h" 66 #include "acl.h" 67 #include "relocation.h" 68 #include "verity.h" 69 #include "super.h" 70 #include "orphan.h" 71 #include "backref.h" 72 #include "raid-stripe-tree.h" 73 #include "fiemap.h" 74 75 struct btrfs_iget_args { 76 u64 ino; 77 struct btrfs_root *root; 78 }; 79 80 struct btrfs_rename_ctx { 81 /* Output field. Stores the index number of the old directory entry. */ 82 u64 index; 83 }; 84 85 /* 86 * Used by data_reloc_print_warning_inode() to pass needed info for filename 87 * resolution and output of error message. 88 */ 89 struct data_reloc_warn { 90 struct btrfs_path path; 91 struct btrfs_fs_info *fs_info; 92 u64 extent_item_size; 93 u64 logical; 94 int mirror_num; 95 }; 96 97 /* 98 * For the file_extent_tree, we want to hold the inode lock when we lookup and 99 * update the disk_i_size, but lockdep will complain because our io_tree we hold 100 * the tree lock and get the inode lock when setting delalloc. These two things 101 * are unrelated, so make a class for the file_extent_tree so we don't get the 102 * two locking patterns mixed up. 103 */ 104 static struct lock_class_key file_extent_tree_class; 105 106 static const struct inode_operations btrfs_dir_inode_operations; 107 static const struct inode_operations btrfs_symlink_inode_operations; 108 static const struct inode_operations btrfs_special_inode_operations; 109 static const struct inode_operations btrfs_file_inode_operations; 110 static const struct address_space_operations btrfs_aops; 111 static const struct file_operations btrfs_dir_file_operations; 112 113 static struct kmem_cache *btrfs_inode_cachep; 114 115 static int btrfs_setsize(struct inode *inode, struct iattr *attr); 116 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback); 117 118 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 119 struct folio *locked_folio, u64 start, 120 u64 end, struct writeback_control *wbc, 121 bool pages_dirty); 122 123 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, 124 u64 root, void *warn_ctx) 125 { 126 struct data_reloc_warn *warn = warn_ctx; 127 struct btrfs_fs_info *fs_info = warn->fs_info; 128 struct extent_buffer *eb; 129 struct btrfs_inode_item *inode_item; 130 struct inode_fs_paths *ipath = NULL; 131 struct btrfs_root *local_root; 132 struct btrfs_key key; 133 unsigned int nofs_flag; 134 u32 nlink; 135 int ret; 136 137 local_root = btrfs_get_fs_root(fs_info, root, true); 138 if (IS_ERR(local_root)) { 139 ret = PTR_ERR(local_root); 140 goto err; 141 } 142 143 /* This makes the path point to (inum INODE_ITEM ioff). */ 144 key.objectid = inum; 145 key.type = BTRFS_INODE_ITEM_KEY; 146 key.offset = 0; 147 148 ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0); 149 if (ret) { 150 btrfs_put_root(local_root); 151 btrfs_release_path(&warn->path); 152 goto err; 153 } 154 155 eb = warn->path.nodes[0]; 156 inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item); 157 nlink = btrfs_inode_nlink(eb, inode_item); 158 btrfs_release_path(&warn->path); 159 160 nofs_flag = memalloc_nofs_save(); 161 ipath = init_ipath(4096, local_root, &warn->path); 162 memalloc_nofs_restore(nofs_flag); 163 if (IS_ERR(ipath)) { 164 btrfs_put_root(local_root); 165 ret = PTR_ERR(ipath); 166 ipath = NULL; 167 /* 168 * -ENOMEM, not a critical error, just output an generic error 169 * without filename. 170 */ 171 btrfs_warn(fs_info, 172 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu", 173 warn->logical, warn->mirror_num, root, inum, offset); 174 return ret; 175 } 176 ret = paths_from_inode(inum, ipath); 177 if (ret < 0) 178 goto err; 179 180 /* 181 * We deliberately ignore the bit ipath might have been too small to 182 * hold all of the paths here 183 */ 184 for (int i = 0; i < ipath->fspath->elem_cnt; i++) { 185 btrfs_warn(fs_info, 186 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)", 187 warn->logical, warn->mirror_num, root, inum, offset, 188 fs_info->sectorsize, nlink, 189 (char *)(unsigned long)ipath->fspath->val[i]); 190 } 191 192 btrfs_put_root(local_root); 193 free_ipath(ipath); 194 return 0; 195 196 err: 197 btrfs_warn(fs_info, 198 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d", 199 warn->logical, warn->mirror_num, root, inum, offset, ret); 200 201 free_ipath(ipath); 202 return ret; 203 } 204 205 /* 206 * Do extra user-friendly error output (e.g. lookup all the affected files). 207 * 208 * Return true if we succeeded doing the backref lookup. 209 * Return false if such lookup failed, and has to fallback to the old error message. 210 */ 211 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off, 212 const u8 *csum, const u8 *csum_expected, 213 int mirror_num) 214 { 215 struct btrfs_fs_info *fs_info = inode->root->fs_info; 216 struct btrfs_path path = { 0 }; 217 struct btrfs_key found_key = { 0 }; 218 struct extent_buffer *eb; 219 struct btrfs_extent_item *ei; 220 const u32 csum_size = fs_info->csum_size; 221 u64 logical; 222 u64 flags; 223 u32 item_size; 224 int ret; 225 226 mutex_lock(&fs_info->reloc_mutex); 227 logical = btrfs_get_reloc_bg_bytenr(fs_info); 228 mutex_unlock(&fs_info->reloc_mutex); 229 230 if (logical == U64_MAX) { 231 btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation"); 232 btrfs_warn_rl(fs_info, 233 "csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 234 btrfs_root_id(inode->root), btrfs_ino(inode), file_off, 235 CSUM_FMT_VALUE(csum_size, csum), 236 CSUM_FMT_VALUE(csum_size, csum_expected), 237 mirror_num); 238 return; 239 } 240 241 logical += file_off; 242 btrfs_warn_rl(fs_info, 243 "csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 244 btrfs_root_id(inode->root), 245 btrfs_ino(inode), file_off, logical, 246 CSUM_FMT_VALUE(csum_size, csum), 247 CSUM_FMT_VALUE(csum_size, csum_expected), 248 mirror_num); 249 250 ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags); 251 if (ret < 0) { 252 btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d", 253 logical, ret); 254 return; 255 } 256 eb = path.nodes[0]; 257 ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item); 258 item_size = btrfs_item_size(eb, path.slots[0]); 259 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 260 unsigned long ptr = 0; 261 u64 ref_root; 262 u8 ref_level; 263 264 while (true) { 265 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, 266 item_size, &ref_root, 267 &ref_level); 268 if (ret < 0) { 269 btrfs_warn_rl(fs_info, 270 "failed to resolve tree backref for logical %llu: %d", 271 logical, ret); 272 break; 273 } 274 if (ret > 0) 275 break; 276 277 btrfs_warn_rl(fs_info, 278 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu", 279 logical, mirror_num, 280 (ref_level ? "node" : "leaf"), 281 ref_level, ref_root); 282 } 283 btrfs_release_path(&path); 284 } else { 285 struct btrfs_backref_walk_ctx ctx = { 0 }; 286 struct data_reloc_warn reloc_warn = { 0 }; 287 288 btrfs_release_path(&path); 289 290 ctx.bytenr = found_key.objectid; 291 ctx.extent_item_pos = logical - found_key.objectid; 292 ctx.fs_info = fs_info; 293 294 reloc_warn.logical = logical; 295 reloc_warn.extent_item_size = found_key.offset; 296 reloc_warn.mirror_num = mirror_num; 297 reloc_warn.fs_info = fs_info; 298 299 iterate_extent_inodes(&ctx, true, 300 data_reloc_print_warning_inode, &reloc_warn); 301 } 302 } 303 304 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, 305 u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num) 306 { 307 struct btrfs_root *root = inode->root; 308 const u32 csum_size = root->fs_info->csum_size; 309 310 /* For data reloc tree, it's better to do a backref lookup instead. */ 311 if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID) 312 return print_data_reloc_error(inode, logical_start, csum, 313 csum_expected, mirror_num); 314 315 /* Output without objectid, which is more meaningful */ 316 if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) { 317 btrfs_warn_rl(root->fs_info, 318 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 319 btrfs_root_id(root), btrfs_ino(inode), 320 logical_start, 321 CSUM_FMT_VALUE(csum_size, csum), 322 CSUM_FMT_VALUE(csum_size, csum_expected), 323 mirror_num); 324 } else { 325 btrfs_warn_rl(root->fs_info, 326 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 327 btrfs_root_id(root), btrfs_ino(inode), 328 logical_start, 329 CSUM_FMT_VALUE(csum_size, csum), 330 CSUM_FMT_VALUE(csum_size, csum_expected), 331 mirror_num); 332 } 333 } 334 335 /* 336 * Lock inode i_rwsem based on arguments passed. 337 * 338 * ilock_flags can have the following bit set: 339 * 340 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode 341 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt 342 * return -EAGAIN 343 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock 344 */ 345 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags) 346 { 347 if (ilock_flags & BTRFS_ILOCK_SHARED) { 348 if (ilock_flags & BTRFS_ILOCK_TRY) { 349 if (!inode_trylock_shared(&inode->vfs_inode)) 350 return -EAGAIN; 351 else 352 return 0; 353 } 354 inode_lock_shared(&inode->vfs_inode); 355 } else { 356 if (ilock_flags & BTRFS_ILOCK_TRY) { 357 if (!inode_trylock(&inode->vfs_inode)) 358 return -EAGAIN; 359 else 360 return 0; 361 } 362 inode_lock(&inode->vfs_inode); 363 } 364 if (ilock_flags & BTRFS_ILOCK_MMAP) 365 down_write(&inode->i_mmap_lock); 366 return 0; 367 } 368 369 /* 370 * Unock inode i_rwsem. 371 * 372 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock() 373 * to decide whether the lock acquired is shared or exclusive. 374 */ 375 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) 376 { 377 if (ilock_flags & BTRFS_ILOCK_MMAP) 378 up_write(&inode->i_mmap_lock); 379 if (ilock_flags & BTRFS_ILOCK_SHARED) 380 inode_unlock_shared(&inode->vfs_inode); 381 else 382 inode_unlock(&inode->vfs_inode); 383 } 384 385 /* 386 * Cleanup all submitted ordered extents in specified range to handle errors 387 * from the btrfs_run_delalloc_range() callback. 388 * 389 * NOTE: caller must ensure that when an error happens, it can not call 390 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING 391 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata 392 * to be released, which we want to happen only when finishing the ordered 393 * extent (btrfs_finish_ordered_io()). 394 */ 395 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, 396 struct folio *locked_folio, 397 u64 offset, u64 bytes) 398 { 399 unsigned long index = offset >> PAGE_SHIFT; 400 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; 401 u64 page_start = 0, page_end = 0; 402 struct folio *folio; 403 404 if (locked_folio) { 405 page_start = folio_pos(locked_folio); 406 page_end = page_start + folio_size(locked_folio) - 1; 407 } 408 409 while (index <= end_index) { 410 /* 411 * For locked page, we will call btrfs_mark_ordered_io_finished 412 * through btrfs_mark_ordered_io_finished() on it 413 * in run_delalloc_range() for the error handling, which will 414 * clear page Ordered and run the ordered extent accounting. 415 * 416 * Here we can't just clear the Ordered bit, or 417 * btrfs_mark_ordered_io_finished() would skip the accounting 418 * for the page range, and the ordered extent will never finish. 419 */ 420 if (locked_folio && index == (page_start >> PAGE_SHIFT)) { 421 index++; 422 continue; 423 } 424 folio = filemap_get_folio(inode->vfs_inode.i_mapping, index); 425 index++; 426 if (IS_ERR(folio)) 427 continue; 428 429 /* 430 * Here we just clear all Ordered bits for every page in the 431 * range, then btrfs_mark_ordered_io_finished() will handle 432 * the ordered extent accounting for the range. 433 */ 434 btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio, 435 offset, bytes); 436 folio_put(folio); 437 } 438 439 if (locked_folio) { 440 /* The locked page covers the full range, nothing needs to be done */ 441 if (bytes + offset <= page_start + folio_size(locked_folio)) 442 return; 443 /* 444 * In case this page belongs to the delalloc range being 445 * instantiated then skip it, since the first page of a range is 446 * going to be properly cleaned up by the caller of 447 * run_delalloc_range 448 */ 449 if (page_start >= offset && page_end <= (offset + bytes - 1)) { 450 bytes = offset + bytes - folio_pos(locked_folio) - 451 folio_size(locked_folio); 452 offset = folio_pos(locked_folio) + folio_size(locked_folio); 453 } 454 } 455 456 return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false); 457 } 458 459 static int btrfs_dirty_inode(struct btrfs_inode *inode); 460 461 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 462 struct btrfs_new_inode_args *args) 463 { 464 int err; 465 466 if (args->default_acl) { 467 err = __btrfs_set_acl(trans, args->inode, args->default_acl, 468 ACL_TYPE_DEFAULT); 469 if (err) 470 return err; 471 } 472 if (args->acl) { 473 err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); 474 if (err) 475 return err; 476 } 477 if (!args->default_acl && !args->acl) 478 cache_no_acl(args->inode); 479 return btrfs_xattr_security_init(trans, args->inode, args->dir, 480 &args->dentry->d_name); 481 } 482 483 /* 484 * this does all the hard work for inserting an inline extent into 485 * the btree. The caller should have done a btrfs_drop_extents so that 486 * no overlapping inline items exist in the btree 487 */ 488 static int insert_inline_extent(struct btrfs_trans_handle *trans, 489 struct btrfs_path *path, 490 struct btrfs_inode *inode, bool extent_inserted, 491 size_t size, size_t compressed_size, 492 int compress_type, 493 struct folio *compressed_folio, 494 bool update_i_size) 495 { 496 struct btrfs_root *root = inode->root; 497 struct extent_buffer *leaf; 498 const u32 sectorsize = trans->fs_info->sectorsize; 499 char *kaddr; 500 unsigned long ptr; 501 struct btrfs_file_extent_item *ei; 502 int ret; 503 size_t cur_size = size; 504 u64 i_size; 505 506 /* 507 * The decompressed size must still be no larger than a sector. Under 508 * heavy race, we can have size == 0 passed in, but that shouldn't be a 509 * big deal and we can continue the insertion. 510 */ 511 ASSERT(size <= sectorsize); 512 513 /* 514 * The compressed size also needs to be no larger than a sector. 515 * That's also why we only need one page as the parameter. 516 */ 517 if (compressed_folio) 518 ASSERT(compressed_size <= sectorsize); 519 else 520 ASSERT(compressed_size == 0); 521 522 if (compressed_size && compressed_folio) 523 cur_size = compressed_size; 524 525 if (!extent_inserted) { 526 struct btrfs_key key; 527 size_t datasize; 528 529 key.objectid = btrfs_ino(inode); 530 key.offset = 0; 531 key.type = BTRFS_EXTENT_DATA_KEY; 532 533 datasize = btrfs_file_extent_calc_inline_size(cur_size); 534 ret = btrfs_insert_empty_item(trans, root, path, &key, 535 datasize); 536 if (ret) 537 goto fail; 538 } 539 leaf = path->nodes[0]; 540 ei = btrfs_item_ptr(leaf, path->slots[0], 541 struct btrfs_file_extent_item); 542 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 543 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 544 btrfs_set_file_extent_encryption(leaf, ei, 0); 545 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 546 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 547 ptr = btrfs_file_extent_inline_start(ei); 548 549 if (compress_type != BTRFS_COMPRESS_NONE) { 550 kaddr = kmap_local_folio(compressed_folio, 0); 551 write_extent_buffer(leaf, kaddr, ptr, compressed_size); 552 kunmap_local(kaddr); 553 554 btrfs_set_file_extent_compression(leaf, ei, 555 compress_type); 556 } else { 557 struct folio *folio; 558 559 folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0); 560 ASSERT(!IS_ERR(folio)); 561 btrfs_set_file_extent_compression(leaf, ei, 0); 562 kaddr = kmap_local_folio(folio, 0); 563 write_extent_buffer(leaf, kaddr, ptr, size); 564 kunmap_local(kaddr); 565 folio_put(folio); 566 } 567 btrfs_mark_buffer_dirty(trans, leaf); 568 btrfs_release_path(path); 569 570 /* 571 * We align size to sectorsize for inline extents just for simplicity 572 * sake. 573 */ 574 ret = btrfs_inode_set_file_extent_range(inode, 0, 575 ALIGN(size, root->fs_info->sectorsize)); 576 if (ret) 577 goto fail; 578 579 /* 580 * We're an inline extent, so nobody can extend the file past i_size 581 * without locking a page we already have locked. 582 * 583 * We must do any i_size and inode updates before we unlock the pages. 584 * Otherwise we could end up racing with unlink. 585 */ 586 i_size = i_size_read(&inode->vfs_inode); 587 if (update_i_size && size > i_size) { 588 i_size_write(&inode->vfs_inode, size); 589 i_size = size; 590 } 591 inode->disk_i_size = i_size; 592 593 fail: 594 return ret; 595 } 596 597 static bool can_cow_file_range_inline(struct btrfs_inode *inode, 598 u64 offset, u64 size, 599 size_t compressed_size) 600 { 601 struct btrfs_fs_info *fs_info = inode->root->fs_info; 602 u64 data_len = (compressed_size ?: size); 603 604 /* Inline extents must start at offset 0. */ 605 if (offset != 0) 606 return false; 607 608 /* 609 * Due to the page size limit, for subpage we can only trigger the 610 * writeback for the dirty sectors of page, that means data writeback 611 * is doing more writeback than what we want. 612 * 613 * This is especially unexpected for some call sites like fallocate, 614 * where we only increase i_size after everything is done. 615 * This means we can trigger inline extent even if we didn't want to. 616 * So here we skip inline extent creation completely. 617 */ 618 if (fs_info->sectorsize != PAGE_SIZE) 619 return false; 620 621 /* Inline extents are limited to sectorsize. */ 622 if (size > fs_info->sectorsize) 623 return false; 624 625 /* We cannot exceed the maximum inline data size. */ 626 if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 627 return false; 628 629 /* We cannot exceed the user specified max_inline size. */ 630 if (data_len > fs_info->max_inline) 631 return false; 632 633 /* Inline extents must be the entirety of the file. */ 634 if (size < i_size_read(&inode->vfs_inode)) 635 return false; 636 637 return true; 638 } 639 640 /* 641 * conditionally insert an inline extent into the file. This 642 * does the checks required to make sure the data is small enough 643 * to fit as an inline extent. 644 * 645 * If being used directly, you must have already checked we're allowed to cow 646 * the range by getting true from can_cow_file_range_inline(). 647 */ 648 static noinline int __cow_file_range_inline(struct btrfs_inode *inode, 649 u64 size, size_t compressed_size, 650 int compress_type, 651 struct folio *compressed_folio, 652 bool update_i_size) 653 { 654 struct btrfs_drop_extents_args drop_args = { 0 }; 655 struct btrfs_root *root = inode->root; 656 struct btrfs_fs_info *fs_info = root->fs_info; 657 struct btrfs_trans_handle *trans; 658 u64 data_len = (compressed_size ?: size); 659 int ret; 660 struct btrfs_path *path; 661 662 path = btrfs_alloc_path(); 663 if (!path) 664 return -ENOMEM; 665 666 trans = btrfs_join_transaction(root); 667 if (IS_ERR(trans)) { 668 btrfs_free_path(path); 669 return PTR_ERR(trans); 670 } 671 trans->block_rsv = &inode->block_rsv; 672 673 drop_args.path = path; 674 drop_args.start = 0; 675 drop_args.end = fs_info->sectorsize; 676 drop_args.drop_cache = true; 677 drop_args.replace_extent = true; 678 drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len); 679 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 680 if (ret) { 681 btrfs_abort_transaction(trans, ret); 682 goto out; 683 } 684 685 ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted, 686 size, compressed_size, compress_type, 687 compressed_folio, update_i_size); 688 if (ret && ret != -ENOSPC) { 689 btrfs_abort_transaction(trans, ret); 690 goto out; 691 } else if (ret == -ENOSPC) { 692 ret = 1; 693 goto out; 694 } 695 696 btrfs_update_inode_bytes(inode, size, drop_args.bytes_found); 697 ret = btrfs_update_inode(trans, inode); 698 if (ret && ret != -ENOSPC) { 699 btrfs_abort_transaction(trans, ret); 700 goto out; 701 } else if (ret == -ENOSPC) { 702 ret = 1; 703 goto out; 704 } 705 706 btrfs_set_inode_full_sync(inode); 707 out: 708 /* 709 * Don't forget to free the reserved space, as for inlined extent 710 * it won't count as data extent, free them directly here. 711 * And at reserve time, it's always aligned to page size, so 712 * just free one page here. 713 */ 714 btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL); 715 btrfs_free_path(path); 716 btrfs_end_transaction(trans); 717 return ret; 718 } 719 720 static noinline int cow_file_range_inline(struct btrfs_inode *inode, 721 struct folio *locked_folio, 722 u64 offset, u64 end, 723 size_t compressed_size, 724 int compress_type, 725 struct folio *compressed_folio, 726 bool update_i_size) 727 { 728 struct extent_state *cached = NULL; 729 unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 730 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED; 731 u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1); 732 int ret; 733 734 if (!can_cow_file_range_inline(inode, offset, size, compressed_size)) 735 return 1; 736 737 lock_extent(&inode->io_tree, offset, end, &cached); 738 ret = __cow_file_range_inline(inode, size, compressed_size, 739 compress_type, compressed_folio, 740 update_i_size); 741 if (ret > 0) { 742 unlock_extent(&inode->io_tree, offset, end, &cached); 743 return ret; 744 } 745 746 /* 747 * In the successful case (ret == 0 here), cow_file_range will return 1. 748 * 749 * Quite a bit further up the callstack in extent_writepage(), ret == 1 750 * is treated as a short circuited success and does not unlock the folio, 751 * so we must do it here. 752 * 753 * In the failure case, the locked_folio does get unlocked by 754 * btrfs_folio_end_all_writers, which asserts that it is still locked 755 * at that point, so we must *not* unlock it here. 756 * 757 * The other two callsites in compress_file_range do not have a 758 * locked_folio, so they are not relevant to this logic. 759 */ 760 if (ret == 0) 761 locked_folio = NULL; 762 763 extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached, 764 clear_flags, PAGE_UNLOCK | 765 PAGE_START_WRITEBACK | PAGE_END_WRITEBACK); 766 return ret; 767 } 768 769 struct async_extent { 770 u64 start; 771 u64 ram_size; 772 u64 compressed_size; 773 struct folio **folios; 774 unsigned long nr_folios; 775 int compress_type; 776 struct list_head list; 777 }; 778 779 struct async_chunk { 780 struct btrfs_inode *inode; 781 struct folio *locked_folio; 782 u64 start; 783 u64 end; 784 blk_opf_t write_flags; 785 struct list_head extents; 786 struct cgroup_subsys_state *blkcg_css; 787 struct btrfs_work work; 788 struct async_cow *async_cow; 789 }; 790 791 struct async_cow { 792 atomic_t num_chunks; 793 struct async_chunk chunks[]; 794 }; 795 796 static noinline int add_async_extent(struct async_chunk *cow, 797 u64 start, u64 ram_size, 798 u64 compressed_size, 799 struct folio **folios, 800 unsigned long nr_folios, 801 int compress_type) 802 { 803 struct async_extent *async_extent; 804 805 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 806 if (!async_extent) 807 return -ENOMEM; 808 async_extent->start = start; 809 async_extent->ram_size = ram_size; 810 async_extent->compressed_size = compressed_size; 811 async_extent->folios = folios; 812 async_extent->nr_folios = nr_folios; 813 async_extent->compress_type = compress_type; 814 list_add_tail(&async_extent->list, &cow->extents); 815 return 0; 816 } 817 818 /* 819 * Check if the inode needs to be submitted to compression, based on mount 820 * options, defragmentation, properties or heuristics. 821 */ 822 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, 823 u64 end) 824 { 825 struct btrfs_fs_info *fs_info = inode->root->fs_info; 826 827 if (!btrfs_inode_can_compress(inode)) { 828 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), 829 KERN_ERR "BTRFS: unexpected compression for ino %llu\n", 830 btrfs_ino(inode)); 831 return 0; 832 } 833 /* 834 * Only enable sector perfect compression for experimental builds. 835 * 836 * This is a big feature change for subpage cases, and can hit 837 * different corner cases, so only limit this feature for 838 * experimental build for now. 839 * 840 * ETA for moving this out of experimental builds is 6.15. 841 */ 842 if (fs_info->sectorsize < PAGE_SIZE && 843 !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) { 844 if (!PAGE_ALIGNED(start) || 845 !PAGE_ALIGNED(end + 1)) 846 return 0; 847 } 848 849 /* force compress */ 850 if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) 851 return 1; 852 /* defrag ioctl */ 853 if (inode->defrag_compress) 854 return 1; 855 /* bad compression ratios */ 856 if (inode->flags & BTRFS_INODE_NOCOMPRESS) 857 return 0; 858 if (btrfs_test_opt(fs_info, COMPRESS) || 859 inode->flags & BTRFS_INODE_COMPRESS || 860 inode->prop_compress) 861 return btrfs_compress_heuristic(inode, start, end); 862 return 0; 863 } 864 865 static inline void inode_should_defrag(struct btrfs_inode *inode, 866 u64 start, u64 end, u64 num_bytes, u32 small_write) 867 { 868 /* If this is a small write inside eof, kick off a defrag */ 869 if (num_bytes < small_write && 870 (start > 0 || end + 1 < inode->disk_i_size)) 871 btrfs_add_inode_defrag(inode, small_write); 872 } 873 874 static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) 875 { 876 unsigned long end_index = end >> PAGE_SHIFT; 877 struct folio *folio; 878 int ret = 0; 879 880 for (unsigned long index = start >> PAGE_SHIFT; 881 index <= end_index; index++) { 882 folio = filemap_get_folio(inode->i_mapping, index); 883 if (IS_ERR(folio)) { 884 if (!ret) 885 ret = PTR_ERR(folio); 886 continue; 887 } 888 btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start, 889 end + 1 - start); 890 folio_put(folio); 891 } 892 return ret; 893 } 894 895 /* 896 * Work queue call back to started compression on a file and pages. 897 * 898 * This is done inside an ordered work queue, and the compression is spread 899 * across many cpus. The actual IO submission is step two, and the ordered work 900 * queue takes care of making sure that happens in the same order things were 901 * put onto the queue by writepages and friends. 902 * 903 * If this code finds it can't get good compression, it puts an entry onto the 904 * work queue to write the uncompressed bytes. This makes sure that both 905 * compressed inodes and uncompressed inodes are written in the same order that 906 * the flusher thread sent them down. 907 */ 908 static void compress_file_range(struct btrfs_work *work) 909 { 910 struct async_chunk *async_chunk = 911 container_of(work, struct async_chunk, work); 912 struct btrfs_inode *inode = async_chunk->inode; 913 struct btrfs_fs_info *fs_info = inode->root->fs_info; 914 struct address_space *mapping = inode->vfs_inode.i_mapping; 915 u64 blocksize = fs_info->sectorsize; 916 u64 start = async_chunk->start; 917 u64 end = async_chunk->end; 918 u64 actual_end; 919 u64 i_size; 920 int ret = 0; 921 struct folio **folios; 922 unsigned long nr_folios; 923 unsigned long total_compressed = 0; 924 unsigned long total_in = 0; 925 unsigned int poff; 926 int i; 927 int compress_type = fs_info->compress_type; 928 929 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); 930 931 /* 932 * We need to call clear_page_dirty_for_io on each page in the range. 933 * Otherwise applications with the file mmap'd can wander in and change 934 * the page contents while we are compressing them. 935 */ 936 ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); 937 938 /* 939 * All the folios should have been locked thus no failure. 940 * 941 * And even if some folios are missing, btrfs_compress_folios() 942 * would handle them correctly, so here just do an ASSERT() check for 943 * early logic errors. 944 */ 945 ASSERT(ret == 0); 946 947 /* 948 * We need to save i_size before now because it could change in between 949 * us evaluating the size and assigning it. This is because we lock and 950 * unlock the page in truncate and fallocate, and then modify the i_size 951 * later on. 952 * 953 * The barriers are to emulate READ_ONCE, remove that once i_size_read 954 * does that for us. 955 */ 956 barrier(); 957 i_size = i_size_read(&inode->vfs_inode); 958 barrier(); 959 actual_end = min_t(u64, i_size, end + 1); 960 again: 961 folios = NULL; 962 nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; 963 nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES); 964 965 /* 966 * we don't want to send crud past the end of i_size through 967 * compression, that's just a waste of CPU time. So, if the 968 * end of the file is before the start of our current 969 * requested range of bytes, we bail out to the uncompressed 970 * cleanup code that can deal with all of this. 971 * 972 * It isn't really the fastest way to fix things, but this is a 973 * very uncommon corner. 974 */ 975 if (actual_end <= start) 976 goto cleanup_and_bail_uncompressed; 977 978 total_compressed = actual_end - start; 979 980 /* 981 * Skip compression for a small file range(<=blocksize) that 982 * isn't an inline extent, since it doesn't save disk space at all. 983 */ 984 if (total_compressed <= blocksize && 985 (start > 0 || end + 1 < inode->disk_i_size)) 986 goto cleanup_and_bail_uncompressed; 987 988 total_compressed = min_t(unsigned long, total_compressed, 989 BTRFS_MAX_UNCOMPRESSED); 990 total_in = 0; 991 ret = 0; 992 993 /* 994 * We do compression for mount -o compress and when the inode has not 995 * been flagged as NOCOMPRESS. This flag can change at any time if we 996 * discover bad compression ratios. 997 */ 998 if (!inode_need_compress(inode, start, end)) 999 goto cleanup_and_bail_uncompressed; 1000 1001 folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS); 1002 if (!folios) { 1003 /* 1004 * Memory allocation failure is not a fatal error, we can fall 1005 * back to uncompressed code. 1006 */ 1007 goto cleanup_and_bail_uncompressed; 1008 } 1009 1010 if (inode->defrag_compress) 1011 compress_type = inode->defrag_compress; 1012 else if (inode->prop_compress) 1013 compress_type = inode->prop_compress; 1014 1015 /* Compression level is applied here. */ 1016 ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4), 1017 mapping, start, folios, &nr_folios, &total_in, 1018 &total_compressed); 1019 if (ret) 1020 goto mark_incompressible; 1021 1022 /* 1023 * Zero the tail end of the last page, as we might be sending it down 1024 * to disk. 1025 */ 1026 poff = offset_in_page(total_compressed); 1027 if (poff) 1028 folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff); 1029 1030 /* 1031 * Try to create an inline extent. 1032 * 1033 * If we didn't compress the entire range, try to create an uncompressed 1034 * inline extent, else a compressed one. 1035 * 1036 * Check cow_file_range() for why we don't even try to create inline 1037 * extent for the subpage case. 1038 */ 1039 if (total_in < actual_end) 1040 ret = cow_file_range_inline(inode, NULL, start, end, 0, 1041 BTRFS_COMPRESS_NONE, NULL, false); 1042 else 1043 ret = cow_file_range_inline(inode, NULL, start, end, total_compressed, 1044 compress_type, folios[0], false); 1045 if (ret <= 0) { 1046 if (ret < 0) 1047 mapping_set_error(mapping, -EIO); 1048 goto free_pages; 1049 } 1050 1051 /* 1052 * We aren't doing an inline extent. Round the compressed size up to a 1053 * block size boundary so the allocator does sane things. 1054 */ 1055 total_compressed = ALIGN(total_compressed, blocksize); 1056 1057 /* 1058 * One last check to make sure the compression is really a win, compare 1059 * the page count read with the blocks on disk, compression must free at 1060 * least one sector. 1061 */ 1062 total_in = round_up(total_in, fs_info->sectorsize); 1063 if (total_compressed + blocksize > total_in) 1064 goto mark_incompressible; 1065 1066 /* 1067 * The async work queues will take care of doing actual allocation on 1068 * disk for these compressed pages, and will submit the bios. 1069 */ 1070 ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios, 1071 nr_folios, compress_type); 1072 BUG_ON(ret); 1073 if (start + total_in < end) { 1074 start += total_in; 1075 cond_resched(); 1076 goto again; 1077 } 1078 return; 1079 1080 mark_incompressible: 1081 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) 1082 inode->flags |= BTRFS_INODE_NOCOMPRESS; 1083 cleanup_and_bail_uncompressed: 1084 ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, 1085 BTRFS_COMPRESS_NONE); 1086 BUG_ON(ret); 1087 free_pages: 1088 if (folios) { 1089 for (i = 0; i < nr_folios; i++) { 1090 WARN_ON(folios[i]->mapping); 1091 btrfs_free_compr_folio(folios[i]); 1092 } 1093 kfree(folios); 1094 } 1095 } 1096 1097 static void free_async_extent_pages(struct async_extent *async_extent) 1098 { 1099 int i; 1100 1101 if (!async_extent->folios) 1102 return; 1103 1104 for (i = 0; i < async_extent->nr_folios; i++) { 1105 WARN_ON(async_extent->folios[i]->mapping); 1106 btrfs_free_compr_folio(async_extent->folios[i]); 1107 } 1108 kfree(async_extent->folios); 1109 async_extent->nr_folios = 0; 1110 async_extent->folios = NULL; 1111 } 1112 1113 static void submit_uncompressed_range(struct btrfs_inode *inode, 1114 struct async_extent *async_extent, 1115 struct folio *locked_folio) 1116 { 1117 u64 start = async_extent->start; 1118 u64 end = async_extent->start + async_extent->ram_size - 1; 1119 int ret; 1120 struct writeback_control wbc = { 1121 .sync_mode = WB_SYNC_ALL, 1122 .range_start = start, 1123 .range_end = end, 1124 .no_cgroup_owner = 1, 1125 }; 1126 1127 wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); 1128 ret = run_delalloc_cow(inode, locked_folio, start, end, 1129 &wbc, false); 1130 wbc_detach_inode(&wbc); 1131 if (ret < 0) { 1132 btrfs_cleanup_ordered_extents(inode, locked_folio, 1133 start, end - start + 1); 1134 if (locked_folio) { 1135 const u64 page_start = folio_pos(locked_folio); 1136 1137 folio_start_writeback(locked_folio); 1138 folio_end_writeback(locked_folio); 1139 btrfs_mark_ordered_io_finished(inode, locked_folio, 1140 page_start, PAGE_SIZE, 1141 !ret); 1142 mapping_set_error(locked_folio->mapping, ret); 1143 folio_unlock(locked_folio); 1144 } 1145 } 1146 } 1147 1148 static void submit_one_async_extent(struct async_chunk *async_chunk, 1149 struct async_extent *async_extent, 1150 u64 *alloc_hint) 1151 { 1152 struct btrfs_inode *inode = async_chunk->inode; 1153 struct extent_io_tree *io_tree = &inode->io_tree; 1154 struct btrfs_root *root = inode->root; 1155 struct btrfs_fs_info *fs_info = root->fs_info; 1156 struct btrfs_ordered_extent *ordered; 1157 struct btrfs_file_extent file_extent; 1158 struct btrfs_key ins; 1159 struct folio *locked_folio = NULL; 1160 struct extent_state *cached = NULL; 1161 struct extent_map *em; 1162 int ret = 0; 1163 u64 start = async_extent->start; 1164 u64 end = async_extent->start + async_extent->ram_size - 1; 1165 1166 if (async_chunk->blkcg_css) 1167 kthread_associate_blkcg(async_chunk->blkcg_css); 1168 1169 /* 1170 * If async_chunk->locked_folio is in the async_extent range, we need to 1171 * handle it. 1172 */ 1173 if (async_chunk->locked_folio) { 1174 u64 locked_folio_start = folio_pos(async_chunk->locked_folio); 1175 u64 locked_folio_end = locked_folio_start + 1176 folio_size(async_chunk->locked_folio) - 1; 1177 1178 if (!(start >= locked_folio_end || end <= locked_folio_start)) 1179 locked_folio = async_chunk->locked_folio; 1180 } 1181 1182 if (async_extent->compress_type == BTRFS_COMPRESS_NONE) { 1183 submit_uncompressed_range(inode, async_extent, locked_folio); 1184 goto done; 1185 } 1186 1187 ret = btrfs_reserve_extent(root, async_extent->ram_size, 1188 async_extent->compressed_size, 1189 async_extent->compressed_size, 1190 0, *alloc_hint, &ins, 1, 1); 1191 if (ret) { 1192 /* 1193 * We can't reserve contiguous space for the compressed size. 1194 * Unlikely, but it's possible that we could have enough 1195 * non-contiguous space for the uncompressed size instead. So 1196 * fall back to uncompressed. 1197 */ 1198 submit_uncompressed_range(inode, async_extent, locked_folio); 1199 goto done; 1200 } 1201 1202 lock_extent(io_tree, start, end, &cached); 1203 1204 /* Here we're doing allocation and writeback of the compressed pages */ 1205 file_extent.disk_bytenr = ins.objectid; 1206 file_extent.disk_num_bytes = ins.offset; 1207 file_extent.ram_bytes = async_extent->ram_size; 1208 file_extent.num_bytes = async_extent->ram_size; 1209 file_extent.offset = 0; 1210 file_extent.compression = async_extent->compress_type; 1211 1212 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED); 1213 if (IS_ERR(em)) { 1214 ret = PTR_ERR(em); 1215 goto out_free_reserve; 1216 } 1217 free_extent_map(em); 1218 1219 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 1220 1 << BTRFS_ORDERED_COMPRESSED); 1221 if (IS_ERR(ordered)) { 1222 btrfs_drop_extent_map_range(inode, start, end, false); 1223 ret = PTR_ERR(ordered); 1224 goto out_free_reserve; 1225 } 1226 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1227 1228 /* Clear dirty, set writeback and unlock the pages. */ 1229 extent_clear_unlock_delalloc(inode, start, end, 1230 NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC, 1231 PAGE_UNLOCK | PAGE_START_WRITEBACK); 1232 btrfs_submit_compressed_write(ordered, 1233 async_extent->folios, /* compressed_folios */ 1234 async_extent->nr_folios, 1235 async_chunk->write_flags, true); 1236 *alloc_hint = ins.objectid + ins.offset; 1237 done: 1238 if (async_chunk->blkcg_css) 1239 kthread_associate_blkcg(NULL); 1240 kfree(async_extent); 1241 return; 1242 1243 out_free_reserve: 1244 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1245 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1246 mapping_set_error(inode->vfs_inode.i_mapping, -EIO); 1247 extent_clear_unlock_delalloc(inode, start, end, 1248 NULL, &cached, 1249 EXTENT_LOCKED | EXTENT_DELALLOC | 1250 EXTENT_DELALLOC_NEW | 1251 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 1252 PAGE_UNLOCK | PAGE_START_WRITEBACK | 1253 PAGE_END_WRITEBACK); 1254 free_async_extent_pages(async_extent); 1255 if (async_chunk->blkcg_css) 1256 kthread_associate_blkcg(NULL); 1257 btrfs_debug(fs_info, 1258 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d", 1259 btrfs_root_id(root), btrfs_ino(inode), start, 1260 async_extent->ram_size, ret); 1261 kfree(async_extent); 1262 } 1263 1264 u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, 1265 u64 num_bytes) 1266 { 1267 struct extent_map_tree *em_tree = &inode->extent_tree; 1268 struct extent_map *em; 1269 u64 alloc_hint = 0; 1270 1271 read_lock(&em_tree->lock); 1272 em = search_extent_mapping(em_tree, start, num_bytes); 1273 if (em) { 1274 /* 1275 * if block start isn't an actual block number then find the 1276 * first block in this inode and use that as a hint. If that 1277 * block is also bogus then just don't worry about it. 1278 */ 1279 if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) { 1280 free_extent_map(em); 1281 em = search_extent_mapping(em_tree, 0, 0); 1282 if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE) 1283 alloc_hint = extent_map_block_start(em); 1284 if (em) 1285 free_extent_map(em); 1286 } else { 1287 alloc_hint = extent_map_block_start(em); 1288 free_extent_map(em); 1289 } 1290 } 1291 read_unlock(&em_tree->lock); 1292 1293 return alloc_hint; 1294 } 1295 1296 /* 1297 * when extent_io.c finds a delayed allocation range in the file, 1298 * the call backs end up in this code. The basic idea is to 1299 * allocate extents on disk for the range, and create ordered data structs 1300 * in ram to track those extents. 1301 * 1302 * locked_folio is the folio that writepage had locked already. We use 1303 * it to make sure we don't do extra locks or unlocks. 1304 * 1305 * When this function fails, it unlocks all pages except @locked_folio. 1306 * 1307 * When this function successfully creates an inline extent, it returns 1 and 1308 * unlocks all pages including locked_folio and starts I/O on them. 1309 * (In reality inline extents are limited to a single page, so locked_folio is 1310 * the only page handled anyway). 1311 * 1312 * When this function succeed and creates a normal extent, the page locking 1313 * status depends on the passed in flags: 1314 * 1315 * - If @keep_locked is set, all pages are kept locked. 1316 * - Else all pages except for @locked_folio are unlocked. 1317 * 1318 * When a failure happens in the second or later iteration of the 1319 * while-loop, the ordered extents created in previous iterations are kept 1320 * intact. So, the caller must clean them up by calling 1321 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for 1322 * example. 1323 */ 1324 static noinline int cow_file_range(struct btrfs_inode *inode, 1325 struct folio *locked_folio, u64 start, 1326 u64 end, u64 *done_offset, 1327 bool keep_locked, bool no_inline) 1328 { 1329 struct btrfs_root *root = inode->root; 1330 struct btrfs_fs_info *fs_info = root->fs_info; 1331 struct extent_state *cached = NULL; 1332 u64 alloc_hint = 0; 1333 u64 orig_start = start; 1334 u64 num_bytes; 1335 u64 cur_alloc_size = 0; 1336 u64 min_alloc_size; 1337 u64 blocksize = fs_info->sectorsize; 1338 struct btrfs_key ins; 1339 struct extent_map *em; 1340 unsigned clear_bits; 1341 unsigned long page_ops; 1342 int ret = 0; 1343 1344 if (btrfs_is_free_space_inode(inode)) { 1345 ret = -EINVAL; 1346 goto out_unlock; 1347 } 1348 1349 num_bytes = ALIGN(end - start + 1, blocksize); 1350 num_bytes = max(blocksize, num_bytes); 1351 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); 1352 1353 inode_should_defrag(inode, start, end, num_bytes, SZ_64K); 1354 1355 if (!no_inline) { 1356 /* lets try to make an inline extent */ 1357 ret = cow_file_range_inline(inode, locked_folio, start, end, 0, 1358 BTRFS_COMPRESS_NONE, NULL, false); 1359 if (ret <= 0) { 1360 /* 1361 * We succeeded, return 1 so the caller knows we're done 1362 * with this page and already handled the IO. 1363 * 1364 * If there was an error then cow_file_range_inline() has 1365 * already done the cleanup. 1366 */ 1367 if (ret == 0) 1368 ret = 1; 1369 goto done; 1370 } 1371 } 1372 1373 alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes); 1374 1375 /* 1376 * Relocation relies on the relocated extents to have exactly the same 1377 * size as the original extents. Normally writeback for relocation data 1378 * extents follows a NOCOW path because relocation preallocates the 1379 * extents. However, due to an operation such as scrub turning a block 1380 * group to RO mode, it may fallback to COW mode, so we must make sure 1381 * an extent allocated during COW has exactly the requested size and can 1382 * not be split into smaller extents, otherwise relocation breaks and 1383 * fails during the stage where it updates the bytenr of file extent 1384 * items. 1385 */ 1386 if (btrfs_is_data_reloc_root(root)) 1387 min_alloc_size = num_bytes; 1388 else 1389 min_alloc_size = fs_info->sectorsize; 1390 1391 while (num_bytes > 0) { 1392 struct btrfs_ordered_extent *ordered; 1393 struct btrfs_file_extent file_extent; 1394 1395 ret = btrfs_reserve_extent(root, num_bytes, num_bytes, 1396 min_alloc_size, 0, alloc_hint, 1397 &ins, 1, 1); 1398 if (ret == -EAGAIN) { 1399 /* 1400 * btrfs_reserve_extent only returns -EAGAIN for zoned 1401 * file systems, which is an indication that there are 1402 * no active zones to allocate from at the moment. 1403 * 1404 * If this is the first loop iteration, wait for at 1405 * least one zone to finish before retrying the 1406 * allocation. Otherwise ask the caller to write out 1407 * the already allocated blocks before coming back to 1408 * us, or return -ENOSPC if it can't handle retries. 1409 */ 1410 ASSERT(btrfs_is_zoned(fs_info)); 1411 if (start == orig_start) { 1412 wait_on_bit_io(&inode->root->fs_info->flags, 1413 BTRFS_FS_NEED_ZONE_FINISH, 1414 TASK_UNINTERRUPTIBLE); 1415 continue; 1416 } 1417 if (done_offset) { 1418 *done_offset = start - 1; 1419 return 0; 1420 } 1421 ret = -ENOSPC; 1422 } 1423 if (ret < 0) 1424 goto out_unlock; 1425 cur_alloc_size = ins.offset; 1426 1427 file_extent.disk_bytenr = ins.objectid; 1428 file_extent.disk_num_bytes = ins.offset; 1429 file_extent.num_bytes = ins.offset; 1430 file_extent.ram_bytes = ins.offset; 1431 file_extent.offset = 0; 1432 file_extent.compression = BTRFS_COMPRESS_NONE; 1433 1434 lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1, 1435 &cached); 1436 1437 em = btrfs_create_io_em(inode, start, &file_extent, 1438 BTRFS_ORDERED_REGULAR); 1439 if (IS_ERR(em)) { 1440 unlock_extent(&inode->io_tree, start, 1441 start + cur_alloc_size - 1, &cached); 1442 ret = PTR_ERR(em); 1443 goto out_reserve; 1444 } 1445 free_extent_map(em); 1446 1447 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 1448 1 << BTRFS_ORDERED_REGULAR); 1449 if (IS_ERR(ordered)) { 1450 unlock_extent(&inode->io_tree, start, 1451 start + cur_alloc_size - 1, &cached); 1452 ret = PTR_ERR(ordered); 1453 goto out_drop_extent_cache; 1454 } 1455 1456 if (btrfs_is_data_reloc_root(root)) { 1457 ret = btrfs_reloc_clone_csums(ordered); 1458 1459 /* 1460 * Only drop cache here, and process as normal. 1461 * 1462 * We must not allow extent_clear_unlock_delalloc() 1463 * at out_unlock label to free meta of this ordered 1464 * extent, as its meta should be freed by 1465 * btrfs_finish_ordered_io(). 1466 * 1467 * So we must continue until @start is increased to 1468 * skip current ordered extent. 1469 */ 1470 if (ret) 1471 btrfs_drop_extent_map_range(inode, start, 1472 start + cur_alloc_size - 1, 1473 false); 1474 } 1475 btrfs_put_ordered_extent(ordered); 1476 1477 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1478 1479 /* 1480 * We're not doing compressed IO, don't unlock the first page 1481 * (which the caller expects to stay locked), don't clear any 1482 * dirty bits and don't set any writeback bits 1483 * 1484 * Do set the Ordered flag so we know this page was 1485 * properly setup for writepage. 1486 */ 1487 page_ops = (keep_locked ? 0 : PAGE_UNLOCK); 1488 page_ops |= PAGE_SET_ORDERED; 1489 1490 extent_clear_unlock_delalloc(inode, start, start + cur_alloc_size - 1, 1491 locked_folio, &cached, 1492 EXTENT_LOCKED | EXTENT_DELALLOC, 1493 page_ops); 1494 if (num_bytes < cur_alloc_size) 1495 num_bytes = 0; 1496 else 1497 num_bytes -= cur_alloc_size; 1498 alloc_hint = ins.objectid + ins.offset; 1499 start += cur_alloc_size; 1500 cur_alloc_size = 0; 1501 1502 /* 1503 * btrfs_reloc_clone_csums() error, since start is increased 1504 * extent_clear_unlock_delalloc() at out_unlock label won't 1505 * free metadata of current ordered extent, we're OK to exit. 1506 */ 1507 if (ret) 1508 goto out_unlock; 1509 } 1510 done: 1511 if (done_offset) 1512 *done_offset = end; 1513 return ret; 1514 1515 out_drop_extent_cache: 1516 btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false); 1517 out_reserve: 1518 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1519 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1520 out_unlock: 1521 /* 1522 * Now, we have three regions to clean up: 1523 * 1524 * |-------(1)----|---(2)---|-------------(3)----------| 1525 * `- orig_start `- start `- start + cur_alloc_size `- end 1526 * 1527 * We process each region below. 1528 */ 1529 1530 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 1531 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; 1532 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; 1533 1534 /* 1535 * For the range (1). We have already instantiated the ordered extents 1536 * for this region. They are cleaned up by 1537 * btrfs_cleanup_ordered_extents() in e.g, 1538 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are 1539 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW | 1540 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup 1541 * function. 1542 * 1543 * However, in case of @keep_locked, we still need to unlock the pages 1544 * (except @locked_folio) to ensure all the pages are unlocked. 1545 */ 1546 if (keep_locked && orig_start < start) { 1547 if (!locked_folio) 1548 mapping_set_error(inode->vfs_inode.i_mapping, ret); 1549 extent_clear_unlock_delalloc(inode, orig_start, start - 1, 1550 locked_folio, NULL, 0, page_ops); 1551 } 1552 1553 /* 1554 * At this point we're unlocked, we want to make sure we're only 1555 * clearing these flags under the extent lock, so lock the rest of the 1556 * range and clear everything up. 1557 */ 1558 lock_extent(&inode->io_tree, start, end, NULL); 1559 1560 /* 1561 * For the range (2). If we reserved an extent for our delalloc range 1562 * (or a subrange) and failed to create the respective ordered extent, 1563 * then it means that when we reserved the extent we decremented the 1564 * extent's size from the data space_info's bytes_may_use counter and 1565 * incremented the space_info's bytes_reserved counter by the same 1566 * amount. We must make sure extent_clear_unlock_delalloc() does not try 1567 * to decrement again the data space_info's bytes_may_use counter, 1568 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV. 1569 */ 1570 if (cur_alloc_size) { 1571 extent_clear_unlock_delalloc(inode, start, 1572 start + cur_alloc_size - 1, 1573 locked_folio, &cached, clear_bits, 1574 page_ops); 1575 btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); 1576 } 1577 1578 /* 1579 * For the range (3). We never touched the region. In addition to the 1580 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data 1581 * space_info's bytes_may_use counter, reserved in 1582 * btrfs_check_data_free_space(). 1583 */ 1584 if (start + cur_alloc_size < end) { 1585 clear_bits |= EXTENT_CLEAR_DATA_RESV; 1586 extent_clear_unlock_delalloc(inode, start + cur_alloc_size, 1587 end, locked_folio, 1588 &cached, clear_bits, page_ops); 1589 btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size, 1590 end - start - cur_alloc_size + 1, NULL); 1591 } 1592 return ret; 1593 } 1594 1595 /* 1596 * Phase two of compressed writeback. This is the ordered portion of the code, 1597 * which only gets called in the order the work was queued. We walk all the 1598 * async extents created by compress_file_range and send them down to the disk. 1599 * 1600 * If called with @do_free == true then it'll try to finish the work and free 1601 * the work struct eventually. 1602 */ 1603 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free) 1604 { 1605 struct async_chunk *async_chunk = container_of(work, struct async_chunk, 1606 work); 1607 struct btrfs_fs_info *fs_info = btrfs_work_owner(work); 1608 struct async_extent *async_extent; 1609 unsigned long nr_pages; 1610 u64 alloc_hint = 0; 1611 1612 if (do_free) { 1613 struct async_cow *async_cow; 1614 1615 btrfs_add_delayed_iput(async_chunk->inode); 1616 if (async_chunk->blkcg_css) 1617 css_put(async_chunk->blkcg_css); 1618 1619 async_cow = async_chunk->async_cow; 1620 if (atomic_dec_and_test(&async_cow->num_chunks)) 1621 kvfree(async_cow); 1622 return; 1623 } 1624 1625 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> 1626 PAGE_SHIFT; 1627 1628 while (!list_empty(&async_chunk->extents)) { 1629 async_extent = list_entry(async_chunk->extents.next, 1630 struct async_extent, list); 1631 list_del(&async_extent->list); 1632 submit_one_async_extent(async_chunk, async_extent, &alloc_hint); 1633 } 1634 1635 /* atomic_sub_return implies a barrier */ 1636 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < 1637 5 * SZ_1M) 1638 cond_wake_up_nomb(&fs_info->async_submit_wait); 1639 } 1640 1641 static bool run_delalloc_compressed(struct btrfs_inode *inode, 1642 struct folio *locked_folio, u64 start, 1643 u64 end, struct writeback_control *wbc) 1644 { 1645 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1646 struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); 1647 struct async_cow *ctx; 1648 struct async_chunk *async_chunk; 1649 unsigned long nr_pages; 1650 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); 1651 int i; 1652 unsigned nofs_flag; 1653 const blk_opf_t write_flags = wbc_to_write_flags(wbc); 1654 1655 nofs_flag = memalloc_nofs_save(); 1656 ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL); 1657 memalloc_nofs_restore(nofs_flag); 1658 if (!ctx) 1659 return false; 1660 1661 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 1662 1663 async_chunk = ctx->chunks; 1664 atomic_set(&ctx->num_chunks, num_chunks); 1665 1666 for (i = 0; i < num_chunks; i++) { 1667 u64 cur_end = min(end, start + SZ_512K - 1); 1668 1669 /* 1670 * igrab is called higher up in the call chain, take only the 1671 * lightweight reference for the callback lifetime 1672 */ 1673 ihold(&inode->vfs_inode); 1674 async_chunk[i].async_cow = ctx; 1675 async_chunk[i].inode = inode; 1676 async_chunk[i].start = start; 1677 async_chunk[i].end = cur_end; 1678 async_chunk[i].write_flags = write_flags; 1679 INIT_LIST_HEAD(&async_chunk[i].extents); 1680 1681 /* 1682 * The locked_folio comes all the way from writepage and its 1683 * the original folio we were actually given. As we spread 1684 * this large delalloc region across multiple async_chunk 1685 * structs, only the first struct needs a pointer to 1686 * locked_folio. 1687 * 1688 * This way we don't need racey decisions about who is supposed 1689 * to unlock it. 1690 */ 1691 if (locked_folio) { 1692 /* 1693 * Depending on the compressibility, the pages might or 1694 * might not go through async. We want all of them to 1695 * be accounted against wbc once. Let's do it here 1696 * before the paths diverge. wbc accounting is used 1697 * only for foreign writeback detection and doesn't 1698 * need full accuracy. Just account the whole thing 1699 * against the first page. 1700 */ 1701 wbc_account_cgroup_owner(wbc, locked_folio, 1702 cur_end - start); 1703 async_chunk[i].locked_folio = locked_folio; 1704 locked_folio = NULL; 1705 } else { 1706 async_chunk[i].locked_folio = NULL; 1707 } 1708 1709 if (blkcg_css != blkcg_root_css) { 1710 css_get(blkcg_css); 1711 async_chunk[i].blkcg_css = blkcg_css; 1712 async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT; 1713 } else { 1714 async_chunk[i].blkcg_css = NULL; 1715 } 1716 1717 btrfs_init_work(&async_chunk[i].work, compress_file_range, 1718 submit_compressed_extents); 1719 1720 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); 1721 atomic_add(nr_pages, &fs_info->async_delalloc_pages); 1722 1723 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); 1724 1725 start = cur_end + 1; 1726 } 1727 return true; 1728 } 1729 1730 /* 1731 * Run the delalloc range from start to end, and write back any dirty pages 1732 * covered by the range. 1733 */ 1734 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 1735 struct folio *locked_folio, u64 start, 1736 u64 end, struct writeback_control *wbc, 1737 bool pages_dirty) 1738 { 1739 u64 done_offset = end; 1740 int ret; 1741 1742 while (start <= end) { 1743 ret = cow_file_range(inode, locked_folio, start, end, 1744 &done_offset, true, false); 1745 if (ret) 1746 return ret; 1747 extent_write_locked_range(&inode->vfs_inode, locked_folio, 1748 start, done_offset, wbc, pages_dirty); 1749 start = done_offset + 1; 1750 } 1751 1752 return 1; 1753 } 1754 1755 static int fallback_to_cow(struct btrfs_inode *inode, 1756 struct folio *locked_folio, const u64 start, 1757 const u64 end) 1758 { 1759 const bool is_space_ino = btrfs_is_free_space_inode(inode); 1760 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root); 1761 const u64 range_bytes = end + 1 - start; 1762 struct extent_io_tree *io_tree = &inode->io_tree; 1763 struct extent_state *cached_state = NULL; 1764 u64 range_start = start; 1765 u64 count; 1766 int ret; 1767 1768 /* 1769 * If EXTENT_NORESERVE is set it means that when the buffered write was 1770 * made we had not enough available data space and therefore we did not 1771 * reserve data space for it, since we though we could do NOCOW for the 1772 * respective file range (either there is prealloc extent or the inode 1773 * has the NOCOW bit set). 1774 * 1775 * However when we need to fallback to COW mode (because for example the 1776 * block group for the corresponding extent was turned to RO mode by a 1777 * scrub or relocation) we need to do the following: 1778 * 1779 * 1) We increment the bytes_may_use counter of the data space info. 1780 * If COW succeeds, it allocates a new data extent and after doing 1781 * that it decrements the space info's bytes_may_use counter and 1782 * increments its bytes_reserved counter by the same amount (we do 1783 * this at btrfs_add_reserved_bytes()). So we need to increment the 1784 * bytes_may_use counter to compensate (when space is reserved at 1785 * buffered write time, the bytes_may_use counter is incremented); 1786 * 1787 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1788 * that if the COW path fails for any reason, it decrements (through 1789 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1790 * data space info, which we incremented in the step above. 1791 * 1792 * If we need to fallback to cow and the inode corresponds to a free 1793 * space cache inode or an inode of the data relocation tree, we must 1794 * also increment bytes_may_use of the data space_info for the same 1795 * reason. Space caches and relocated data extents always get a prealloc 1796 * extent for them, however scrub or balance may have set the block 1797 * group that contains that extent to RO mode and therefore force COW 1798 * when starting writeback. 1799 */ 1800 lock_extent(io_tree, start, end, &cached_state); 1801 count = count_range_bits(io_tree, &range_start, end, range_bytes, 1802 EXTENT_NORESERVE, 0, NULL); 1803 if (count > 0 || is_space_ino || is_reloc_ino) { 1804 u64 bytes = count; 1805 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1806 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1807 1808 if (is_space_ino || is_reloc_ino) 1809 bytes = range_bytes; 1810 1811 spin_lock(&sinfo->lock); 1812 btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); 1813 spin_unlock(&sinfo->lock); 1814 1815 if (count > 0) 1816 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1817 NULL); 1818 } 1819 unlock_extent(io_tree, start, end, &cached_state); 1820 1821 /* 1822 * Don't try to create inline extents, as a mix of inline extent that 1823 * is written out and unlocked directly and a normal NOCOW extent 1824 * doesn't work. 1825 */ 1826 ret = cow_file_range(inode, locked_folio, start, end, NULL, false, 1827 true); 1828 ASSERT(ret != 1); 1829 return ret; 1830 } 1831 1832 struct can_nocow_file_extent_args { 1833 /* Input fields. */ 1834 1835 /* Start file offset of the range we want to NOCOW. */ 1836 u64 start; 1837 /* End file offset (inclusive) of the range we want to NOCOW. */ 1838 u64 end; 1839 bool writeback_path; 1840 bool strict; 1841 /* 1842 * Free the path passed to can_nocow_file_extent() once it's not needed 1843 * anymore. 1844 */ 1845 bool free_path; 1846 1847 /* 1848 * Output fields. Only set when can_nocow_file_extent() returns 1. 1849 * The expected file extent for the NOCOW write. 1850 */ 1851 struct btrfs_file_extent file_extent; 1852 }; 1853 1854 /* 1855 * Check if we can NOCOW the file extent that the path points to. 1856 * This function may return with the path released, so the caller should check 1857 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1858 * 1859 * Returns: < 0 on error 1860 * 0 if we can not NOCOW 1861 * 1 if we can NOCOW 1862 */ 1863 static int can_nocow_file_extent(struct btrfs_path *path, 1864 struct btrfs_key *key, 1865 struct btrfs_inode *inode, 1866 struct can_nocow_file_extent_args *args) 1867 { 1868 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1869 struct extent_buffer *leaf = path->nodes[0]; 1870 struct btrfs_root *root = inode->root; 1871 struct btrfs_file_extent_item *fi; 1872 struct btrfs_root *csum_root; 1873 u64 io_start; 1874 u64 extent_end; 1875 u8 extent_type; 1876 int can_nocow = 0; 1877 int ret = 0; 1878 bool nowait = path->nowait; 1879 1880 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1881 extent_type = btrfs_file_extent_type(leaf, fi); 1882 1883 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1884 goto out; 1885 1886 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1887 extent_type == BTRFS_FILE_EXTENT_REG) 1888 goto out; 1889 1890 /* 1891 * If the extent was created before the generation where the last snapshot 1892 * for its subvolume was created, then this implies the extent is shared, 1893 * hence we must COW. 1894 */ 1895 if (!args->strict && 1896 btrfs_file_extent_generation(leaf, fi) <= 1897 btrfs_root_last_snapshot(&root->root_item)) 1898 goto out; 1899 1900 /* An explicit hole, must COW. */ 1901 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 1902 goto out; 1903 1904 /* Compressed/encrypted/encoded extents must be COWed. */ 1905 if (btrfs_file_extent_compression(leaf, fi) || 1906 btrfs_file_extent_encryption(leaf, fi) || 1907 btrfs_file_extent_other_encoding(leaf, fi)) 1908 goto out; 1909 1910 extent_end = btrfs_file_extent_end(path); 1911 1912 args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1913 args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1914 args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1915 args->file_extent.offset = btrfs_file_extent_offset(leaf, fi); 1916 args->file_extent.compression = btrfs_file_extent_compression(leaf, fi); 1917 1918 /* 1919 * The following checks can be expensive, as they need to take other 1920 * locks and do btree or rbtree searches, so release the path to avoid 1921 * blocking other tasks for too long. 1922 */ 1923 btrfs_release_path(path); 1924 1925 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1926 key->offset - args->file_extent.offset, 1927 args->file_extent.disk_bytenr, args->strict, path); 1928 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1929 if (ret != 0) 1930 goto out; 1931 1932 if (args->free_path) { 1933 /* 1934 * We don't need the path anymore, plus through the 1935 * btrfs_lookup_csums_list() call below we will end up allocating 1936 * another path. So free the path to avoid unnecessary extra 1937 * memory usage. 1938 */ 1939 btrfs_free_path(path); 1940 path = NULL; 1941 } 1942 1943 /* If there are pending snapshots for this root, we must COW. */ 1944 if (args->writeback_path && !is_freespace_inode && 1945 atomic_read(&root->snapshot_force_cow)) 1946 goto out; 1947 1948 args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start; 1949 args->file_extent.offset += args->start - key->offset; 1950 io_start = args->file_extent.disk_bytenr + args->file_extent.offset; 1951 1952 /* 1953 * Force COW if csums exist in the range. This ensures that csums for a 1954 * given extent are either valid or do not exist. 1955 */ 1956 1957 csum_root = btrfs_csum_root(root->fs_info, io_start); 1958 ret = btrfs_lookup_csums_list(csum_root, io_start, 1959 io_start + args->file_extent.num_bytes - 1, 1960 NULL, nowait); 1961 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1962 if (ret != 0) 1963 goto out; 1964 1965 can_nocow = 1; 1966 out: 1967 if (args->free_path && path) 1968 btrfs_free_path(path); 1969 1970 return ret < 0 ? ret : can_nocow; 1971 } 1972 1973 /* 1974 * when nowcow writeback call back. This checks for snapshots or COW copies 1975 * of the extents that exist in the file, and COWs the file as required. 1976 * 1977 * If no cow copies or snapshots exist, we write directly to the existing 1978 * blocks on disk 1979 */ 1980 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1981 struct folio *locked_folio, 1982 const u64 start, const u64 end) 1983 { 1984 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1985 struct btrfs_root *root = inode->root; 1986 struct btrfs_path *path; 1987 u64 cow_start = (u64)-1; 1988 u64 cur_offset = start; 1989 int ret; 1990 bool check_prev = true; 1991 u64 ino = btrfs_ino(inode); 1992 struct can_nocow_file_extent_args nocow_args = { 0 }; 1993 1994 /* 1995 * Normally on a zoned device we're only doing COW writes, but in case 1996 * of relocation on a zoned filesystem serializes I/O so that we're only 1997 * writing sequentially and can end up here as well. 1998 */ 1999 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root)); 2000 2001 path = btrfs_alloc_path(); 2002 if (!path) { 2003 ret = -ENOMEM; 2004 goto error; 2005 } 2006 2007 nocow_args.end = end; 2008 nocow_args.writeback_path = true; 2009 2010 while (cur_offset <= end) { 2011 struct btrfs_block_group *nocow_bg = NULL; 2012 struct btrfs_ordered_extent *ordered; 2013 struct btrfs_key found_key; 2014 struct btrfs_file_extent_item *fi; 2015 struct extent_buffer *leaf; 2016 struct extent_state *cached_state = NULL; 2017 u64 extent_end; 2018 u64 nocow_end; 2019 int extent_type; 2020 bool is_prealloc; 2021 2022 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 2023 cur_offset, 0); 2024 if (ret < 0) 2025 goto error; 2026 2027 /* 2028 * If there is no extent for our range when doing the initial 2029 * search, then go back to the previous slot as it will be the 2030 * one containing the search offset 2031 */ 2032 if (ret > 0 && path->slots[0] > 0 && check_prev) { 2033 leaf = path->nodes[0]; 2034 btrfs_item_key_to_cpu(leaf, &found_key, 2035 path->slots[0] - 1); 2036 if (found_key.objectid == ino && 2037 found_key.type == BTRFS_EXTENT_DATA_KEY) 2038 path->slots[0]--; 2039 } 2040 check_prev = false; 2041 next_slot: 2042 /* Go to next leaf if we have exhausted the current one */ 2043 leaf = path->nodes[0]; 2044 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2045 ret = btrfs_next_leaf(root, path); 2046 if (ret < 0) 2047 goto error; 2048 if (ret > 0) 2049 break; 2050 leaf = path->nodes[0]; 2051 } 2052 2053 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2054 2055 /* Didn't find anything for our INO */ 2056 if (found_key.objectid > ino) 2057 break; 2058 /* 2059 * Keep searching until we find an EXTENT_ITEM or there are no 2060 * more extents for this inode 2061 */ 2062 if (WARN_ON_ONCE(found_key.objectid < ino) || 2063 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2064 path->slots[0]++; 2065 goto next_slot; 2066 } 2067 2068 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2069 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2070 found_key.offset > end) 2071 break; 2072 2073 /* 2074 * If the found extent starts after requested offset, then 2075 * adjust extent_end to be right before this extent begins 2076 */ 2077 if (found_key.offset > cur_offset) { 2078 extent_end = found_key.offset; 2079 extent_type = 0; 2080 goto must_cow; 2081 } 2082 2083 /* 2084 * Found extent which begins before our range and potentially 2085 * intersect it 2086 */ 2087 fi = btrfs_item_ptr(leaf, path->slots[0], 2088 struct btrfs_file_extent_item); 2089 extent_type = btrfs_file_extent_type(leaf, fi); 2090 /* If this is triggered then we have a memory corruption. */ 2091 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2092 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2093 ret = -EUCLEAN; 2094 goto error; 2095 } 2096 extent_end = btrfs_file_extent_end(path); 2097 2098 /* 2099 * If the extent we got ends before our current offset, skip to 2100 * the next extent. 2101 */ 2102 if (extent_end <= cur_offset) { 2103 path->slots[0]++; 2104 goto next_slot; 2105 } 2106 2107 nocow_args.start = cur_offset; 2108 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2109 if (ret < 0) 2110 goto error; 2111 if (ret == 0) 2112 goto must_cow; 2113 2114 ret = 0; 2115 nocow_bg = btrfs_inc_nocow_writers(fs_info, 2116 nocow_args.file_extent.disk_bytenr + 2117 nocow_args.file_extent.offset); 2118 if (!nocow_bg) { 2119 must_cow: 2120 /* 2121 * If we can't perform NOCOW writeback for the range, 2122 * then record the beginning of the range that needs to 2123 * be COWed. It will be written out before the next 2124 * NOCOW range if we find one, or when exiting this 2125 * loop. 2126 */ 2127 if (cow_start == (u64)-1) 2128 cow_start = cur_offset; 2129 cur_offset = extent_end; 2130 if (cur_offset > end) 2131 break; 2132 if (!path->nodes[0]) 2133 continue; 2134 path->slots[0]++; 2135 goto next_slot; 2136 } 2137 2138 /* 2139 * COW range from cow_start to found_key.offset - 1. As the key 2140 * will contain the beginning of the first extent that can be 2141 * NOCOW, following one which needs to be COW'ed 2142 */ 2143 if (cow_start != (u64)-1) { 2144 ret = fallback_to_cow(inode, locked_folio, cow_start, 2145 found_key.offset - 1); 2146 cow_start = (u64)-1; 2147 if (ret) { 2148 btrfs_dec_nocow_writers(nocow_bg); 2149 goto error; 2150 } 2151 } 2152 2153 nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1; 2154 lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state); 2155 2156 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; 2157 if (is_prealloc) { 2158 struct extent_map *em; 2159 2160 em = btrfs_create_io_em(inode, cur_offset, 2161 &nocow_args.file_extent, 2162 BTRFS_ORDERED_PREALLOC); 2163 if (IS_ERR(em)) { 2164 unlock_extent(&inode->io_tree, cur_offset, 2165 nocow_end, &cached_state); 2166 btrfs_dec_nocow_writers(nocow_bg); 2167 ret = PTR_ERR(em); 2168 goto error; 2169 } 2170 free_extent_map(em); 2171 } 2172 2173 ordered = btrfs_alloc_ordered_extent(inode, cur_offset, 2174 &nocow_args.file_extent, 2175 is_prealloc 2176 ? (1 << BTRFS_ORDERED_PREALLOC) 2177 : (1 << BTRFS_ORDERED_NOCOW)); 2178 btrfs_dec_nocow_writers(nocow_bg); 2179 if (IS_ERR(ordered)) { 2180 if (is_prealloc) { 2181 btrfs_drop_extent_map_range(inode, cur_offset, 2182 nocow_end, false); 2183 } 2184 unlock_extent(&inode->io_tree, cur_offset, 2185 nocow_end, &cached_state); 2186 ret = PTR_ERR(ordered); 2187 goto error; 2188 } 2189 2190 if (btrfs_is_data_reloc_root(root)) 2191 /* 2192 * Error handled later, as we must prevent 2193 * extent_clear_unlock_delalloc() in error handler 2194 * from freeing metadata of created ordered extent. 2195 */ 2196 ret = btrfs_reloc_clone_csums(ordered); 2197 btrfs_put_ordered_extent(ordered); 2198 2199 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2200 locked_folio, &cached_state, 2201 EXTENT_LOCKED | EXTENT_DELALLOC | 2202 EXTENT_CLEAR_DATA_RESV, 2203 PAGE_UNLOCK | PAGE_SET_ORDERED); 2204 2205 cur_offset = extent_end; 2206 2207 /* 2208 * btrfs_reloc_clone_csums() error, now we're OK to call error 2209 * handler, as metadata for created ordered extent will only 2210 * be freed by btrfs_finish_ordered_io(). 2211 */ 2212 if (ret) 2213 goto error; 2214 } 2215 btrfs_release_path(path); 2216 2217 if (cur_offset <= end && cow_start == (u64)-1) 2218 cow_start = cur_offset; 2219 2220 if (cow_start != (u64)-1) { 2221 cur_offset = end; 2222 ret = fallback_to_cow(inode, locked_folio, cow_start, end); 2223 cow_start = (u64)-1; 2224 if (ret) 2225 goto error; 2226 } 2227 2228 btrfs_free_path(path); 2229 return 0; 2230 2231 error: 2232 /* 2233 * If an error happened while a COW region is outstanding, cur_offset 2234 * needs to be reset to cow_start to ensure the COW region is unlocked 2235 * as well. 2236 */ 2237 if (cow_start != (u64)-1) 2238 cur_offset = cow_start; 2239 2240 /* 2241 * We need to lock the extent here because we're clearing DELALLOC and 2242 * we're not locked at this point. 2243 */ 2244 if (cur_offset < end) { 2245 struct extent_state *cached = NULL; 2246 2247 lock_extent(&inode->io_tree, cur_offset, end, &cached); 2248 extent_clear_unlock_delalloc(inode, cur_offset, end, 2249 locked_folio, &cached, 2250 EXTENT_LOCKED | EXTENT_DELALLOC | 2251 EXTENT_DEFRAG | 2252 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2253 PAGE_START_WRITEBACK | 2254 PAGE_END_WRITEBACK); 2255 btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL); 2256 } 2257 btrfs_free_path(path); 2258 return ret; 2259 } 2260 2261 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2262 { 2263 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2264 if (inode->defrag_bytes && 2265 test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) 2266 return false; 2267 return true; 2268 } 2269 return false; 2270 } 2271 2272 /* 2273 * Function to process delayed allocation (create CoW) for ranges which are 2274 * being touched for the first time. 2275 */ 2276 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio, 2277 u64 start, u64 end, struct writeback_control *wbc) 2278 { 2279 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2280 int ret; 2281 2282 /* 2283 * The range must cover part of the @locked_folio, or a return of 1 2284 * can confuse the caller. 2285 */ 2286 ASSERT(!(end <= folio_pos(locked_folio) || 2287 start >= folio_pos(locked_folio) + folio_size(locked_folio))); 2288 2289 if (should_nocow(inode, start, end)) { 2290 ret = run_delalloc_nocow(inode, locked_folio, start, end); 2291 goto out; 2292 } 2293 2294 if (btrfs_inode_can_compress(inode) && 2295 inode_need_compress(inode, start, end) && 2296 run_delalloc_compressed(inode, locked_folio, start, end, wbc)) 2297 return 1; 2298 2299 if (zoned) 2300 ret = run_delalloc_cow(inode, locked_folio, start, end, wbc, 2301 true); 2302 else 2303 ret = cow_file_range(inode, locked_folio, start, end, NULL, 2304 false, false); 2305 2306 out: 2307 if (ret < 0) 2308 btrfs_cleanup_ordered_extents(inode, locked_folio, start, 2309 end - start + 1); 2310 return ret; 2311 } 2312 2313 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2314 struct extent_state *orig, u64 split) 2315 { 2316 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2317 u64 size; 2318 2319 lockdep_assert_held(&inode->io_tree.lock); 2320 2321 /* not delalloc, ignore it */ 2322 if (!(orig->state & EXTENT_DELALLOC)) 2323 return; 2324 2325 size = orig->end - orig->start + 1; 2326 if (size > fs_info->max_extent_size) { 2327 u32 num_extents; 2328 u64 new_size; 2329 2330 /* 2331 * See the explanation in btrfs_merge_delalloc_extent, the same 2332 * applies here, just in reverse. 2333 */ 2334 new_size = orig->end - split + 1; 2335 num_extents = count_max_extents(fs_info, new_size); 2336 new_size = split - orig->start; 2337 num_extents += count_max_extents(fs_info, new_size); 2338 if (count_max_extents(fs_info, size) >= num_extents) 2339 return; 2340 } 2341 2342 spin_lock(&inode->lock); 2343 btrfs_mod_outstanding_extents(inode, 1); 2344 spin_unlock(&inode->lock); 2345 } 2346 2347 /* 2348 * Handle merged delayed allocation extents so we can keep track of new extents 2349 * that are just merged onto old extents, such as when we are doing sequential 2350 * writes, so we can properly account for the metadata space we'll need. 2351 */ 2352 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2353 struct extent_state *other) 2354 { 2355 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2356 u64 new_size, old_size; 2357 u32 num_extents; 2358 2359 lockdep_assert_held(&inode->io_tree.lock); 2360 2361 /* not delalloc, ignore it */ 2362 if (!(other->state & EXTENT_DELALLOC)) 2363 return; 2364 2365 if (new->start > other->start) 2366 new_size = new->end - other->start + 1; 2367 else 2368 new_size = other->end - new->start + 1; 2369 2370 /* we're not bigger than the max, unreserve the space and go */ 2371 if (new_size <= fs_info->max_extent_size) { 2372 spin_lock(&inode->lock); 2373 btrfs_mod_outstanding_extents(inode, -1); 2374 spin_unlock(&inode->lock); 2375 return; 2376 } 2377 2378 /* 2379 * We have to add up either side to figure out how many extents were 2380 * accounted for before we merged into one big extent. If the number of 2381 * extents we accounted for is <= the amount we need for the new range 2382 * then we can return, otherwise drop. Think of it like this 2383 * 2384 * [ 4k][MAX_SIZE] 2385 * 2386 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2387 * need 2 outstanding extents, on one side we have 1 and the other side 2388 * we have 1 so they are == and we can return. But in this case 2389 * 2390 * [MAX_SIZE+4k][MAX_SIZE+4k] 2391 * 2392 * Each range on their own accounts for 2 extents, but merged together 2393 * they are only 3 extents worth of accounting, so we need to drop in 2394 * this case. 2395 */ 2396 old_size = other->end - other->start + 1; 2397 num_extents = count_max_extents(fs_info, old_size); 2398 old_size = new->end - new->start + 1; 2399 num_extents += count_max_extents(fs_info, old_size); 2400 if (count_max_extents(fs_info, new_size) >= num_extents) 2401 return; 2402 2403 spin_lock(&inode->lock); 2404 btrfs_mod_outstanding_extents(inode, -1); 2405 spin_unlock(&inode->lock); 2406 } 2407 2408 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode) 2409 { 2410 struct btrfs_root *root = inode->root; 2411 struct btrfs_fs_info *fs_info = root->fs_info; 2412 2413 spin_lock(&root->delalloc_lock); 2414 ASSERT(list_empty(&inode->delalloc_inodes)); 2415 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2416 root->nr_delalloc_inodes++; 2417 if (root->nr_delalloc_inodes == 1) { 2418 spin_lock(&fs_info->delalloc_root_lock); 2419 ASSERT(list_empty(&root->delalloc_root)); 2420 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots); 2421 spin_unlock(&fs_info->delalloc_root_lock); 2422 } 2423 spin_unlock(&root->delalloc_lock); 2424 } 2425 2426 void btrfs_del_delalloc_inode(struct btrfs_inode *inode) 2427 { 2428 struct btrfs_root *root = inode->root; 2429 struct btrfs_fs_info *fs_info = root->fs_info; 2430 2431 lockdep_assert_held(&root->delalloc_lock); 2432 2433 /* 2434 * We may be called after the inode was already deleted from the list, 2435 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(), 2436 * and then later through btrfs_clear_delalloc_extent() while the inode 2437 * still has ->delalloc_bytes > 0. 2438 */ 2439 if (!list_empty(&inode->delalloc_inodes)) { 2440 list_del_init(&inode->delalloc_inodes); 2441 root->nr_delalloc_inodes--; 2442 if (!root->nr_delalloc_inodes) { 2443 ASSERT(list_empty(&root->delalloc_inodes)); 2444 spin_lock(&fs_info->delalloc_root_lock); 2445 ASSERT(!list_empty(&root->delalloc_root)); 2446 list_del_init(&root->delalloc_root); 2447 spin_unlock(&fs_info->delalloc_root_lock); 2448 } 2449 } 2450 } 2451 2452 /* 2453 * Properly track delayed allocation bytes in the inode and to maintain the 2454 * list of inodes that have pending delalloc work to be done. 2455 */ 2456 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2457 u32 bits) 2458 { 2459 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2460 2461 lockdep_assert_held(&inode->io_tree.lock); 2462 2463 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2464 WARN_ON(1); 2465 /* 2466 * set_bit and clear bit hooks normally require _irqsave/restore 2467 * but in this case, we are only testing for the DELALLOC 2468 * bit, which is only set or cleared with irqs on 2469 */ 2470 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2471 u64 len = state->end + 1 - state->start; 2472 u64 prev_delalloc_bytes; 2473 u32 num_extents = count_max_extents(fs_info, len); 2474 2475 spin_lock(&inode->lock); 2476 btrfs_mod_outstanding_extents(inode, num_extents); 2477 spin_unlock(&inode->lock); 2478 2479 /* For sanity tests */ 2480 if (btrfs_is_testing(fs_info)) 2481 return; 2482 2483 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2484 fs_info->delalloc_batch); 2485 spin_lock(&inode->lock); 2486 prev_delalloc_bytes = inode->delalloc_bytes; 2487 inode->delalloc_bytes += len; 2488 if (bits & EXTENT_DEFRAG) 2489 inode->defrag_bytes += len; 2490 spin_unlock(&inode->lock); 2491 2492 /* 2493 * We don't need to be under the protection of the inode's lock, 2494 * because we are called while holding the inode's io_tree lock 2495 * and are therefore protected against concurrent calls of this 2496 * function and btrfs_clear_delalloc_extent(). 2497 */ 2498 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0) 2499 btrfs_add_delalloc_inode(inode); 2500 } 2501 2502 if (!(state->state & EXTENT_DELALLOC_NEW) && 2503 (bits & EXTENT_DELALLOC_NEW)) { 2504 spin_lock(&inode->lock); 2505 inode->new_delalloc_bytes += state->end + 1 - state->start; 2506 spin_unlock(&inode->lock); 2507 } 2508 } 2509 2510 /* 2511 * Once a range is no longer delalloc this function ensures that proper 2512 * accounting happens. 2513 */ 2514 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2515 struct extent_state *state, u32 bits) 2516 { 2517 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2518 u64 len = state->end + 1 - state->start; 2519 u32 num_extents = count_max_extents(fs_info, len); 2520 2521 lockdep_assert_held(&inode->io_tree.lock); 2522 2523 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2524 spin_lock(&inode->lock); 2525 inode->defrag_bytes -= len; 2526 spin_unlock(&inode->lock); 2527 } 2528 2529 /* 2530 * set_bit and clear bit hooks normally require _irqsave/restore 2531 * but in this case, we are only testing for the DELALLOC 2532 * bit, which is only set or cleared with irqs on 2533 */ 2534 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2535 struct btrfs_root *root = inode->root; 2536 u64 new_delalloc_bytes; 2537 2538 spin_lock(&inode->lock); 2539 btrfs_mod_outstanding_extents(inode, -num_extents); 2540 spin_unlock(&inode->lock); 2541 2542 /* 2543 * We don't reserve metadata space for space cache inodes so we 2544 * don't need to call delalloc_release_metadata if there is an 2545 * error. 2546 */ 2547 if (bits & EXTENT_CLEAR_META_RESV && 2548 root != fs_info->tree_root) 2549 btrfs_delalloc_release_metadata(inode, len, true); 2550 2551 /* For sanity tests. */ 2552 if (btrfs_is_testing(fs_info)) 2553 return; 2554 2555 if (!btrfs_is_data_reloc_root(root) && 2556 !btrfs_is_free_space_inode(inode) && 2557 !(state->state & EXTENT_NORESERVE) && 2558 (bits & EXTENT_CLEAR_DATA_RESV)) 2559 btrfs_free_reserved_data_space_noquota(fs_info, len); 2560 2561 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2562 fs_info->delalloc_batch); 2563 spin_lock(&inode->lock); 2564 inode->delalloc_bytes -= len; 2565 new_delalloc_bytes = inode->delalloc_bytes; 2566 spin_unlock(&inode->lock); 2567 2568 /* 2569 * We don't need to be under the protection of the inode's lock, 2570 * because we are called while holding the inode's io_tree lock 2571 * and are therefore protected against concurrent calls of this 2572 * function and btrfs_set_delalloc_extent(). 2573 */ 2574 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) { 2575 spin_lock(&root->delalloc_lock); 2576 btrfs_del_delalloc_inode(inode); 2577 spin_unlock(&root->delalloc_lock); 2578 } 2579 } 2580 2581 if ((state->state & EXTENT_DELALLOC_NEW) && 2582 (bits & EXTENT_DELALLOC_NEW)) { 2583 spin_lock(&inode->lock); 2584 ASSERT(inode->new_delalloc_bytes >= len); 2585 inode->new_delalloc_bytes -= len; 2586 if (bits & EXTENT_ADD_INODE_BYTES) 2587 inode_add_bytes(&inode->vfs_inode, len); 2588 spin_unlock(&inode->lock); 2589 } 2590 } 2591 2592 /* 2593 * given a list of ordered sums record them in the inode. This happens 2594 * at IO completion time based on sums calculated at bio submission time. 2595 */ 2596 static int add_pending_csums(struct btrfs_trans_handle *trans, 2597 struct list_head *list) 2598 { 2599 struct btrfs_ordered_sum *sum; 2600 struct btrfs_root *csum_root = NULL; 2601 int ret; 2602 2603 list_for_each_entry(sum, list, list) { 2604 trans->adding_csums = true; 2605 if (!csum_root) 2606 csum_root = btrfs_csum_root(trans->fs_info, 2607 sum->logical); 2608 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2609 trans->adding_csums = false; 2610 if (ret) 2611 return ret; 2612 } 2613 return 0; 2614 } 2615 2616 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2617 const u64 start, 2618 const u64 len, 2619 struct extent_state **cached_state) 2620 { 2621 u64 search_start = start; 2622 const u64 end = start + len - 1; 2623 2624 while (search_start < end) { 2625 const u64 search_len = end - search_start + 1; 2626 struct extent_map *em; 2627 u64 em_len; 2628 int ret = 0; 2629 2630 em = btrfs_get_extent(inode, NULL, search_start, search_len); 2631 if (IS_ERR(em)) 2632 return PTR_ERR(em); 2633 2634 if (em->disk_bytenr != EXTENT_MAP_HOLE) 2635 goto next; 2636 2637 em_len = em->len; 2638 if (em->start < search_start) 2639 em_len -= search_start - em->start; 2640 if (em_len > search_len) 2641 em_len = search_len; 2642 2643 ret = set_extent_bit(&inode->io_tree, search_start, 2644 search_start + em_len - 1, 2645 EXTENT_DELALLOC_NEW, cached_state); 2646 next: 2647 search_start = extent_map_end(em); 2648 free_extent_map(em); 2649 if (ret) 2650 return ret; 2651 } 2652 return 0; 2653 } 2654 2655 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2656 unsigned int extra_bits, 2657 struct extent_state **cached_state) 2658 { 2659 WARN_ON(PAGE_ALIGNED(end)); 2660 2661 if (start >= i_size_read(&inode->vfs_inode) && 2662 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2663 /* 2664 * There can't be any extents following eof in this case so just 2665 * set the delalloc new bit for the range directly. 2666 */ 2667 extra_bits |= EXTENT_DELALLOC_NEW; 2668 } else { 2669 int ret; 2670 2671 ret = btrfs_find_new_delalloc_bytes(inode, start, 2672 end + 1 - start, 2673 cached_state); 2674 if (ret) 2675 return ret; 2676 } 2677 2678 return set_extent_bit(&inode->io_tree, start, end, 2679 EXTENT_DELALLOC | extra_bits, cached_state); 2680 } 2681 2682 /* see btrfs_writepage_start_hook for details on why this is required */ 2683 struct btrfs_writepage_fixup { 2684 struct folio *folio; 2685 struct btrfs_inode *inode; 2686 struct btrfs_work work; 2687 }; 2688 2689 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2690 { 2691 struct btrfs_writepage_fixup *fixup = 2692 container_of(work, struct btrfs_writepage_fixup, work); 2693 struct btrfs_ordered_extent *ordered; 2694 struct extent_state *cached_state = NULL; 2695 struct extent_changeset *data_reserved = NULL; 2696 struct folio *folio = fixup->folio; 2697 struct btrfs_inode *inode = fixup->inode; 2698 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2699 u64 page_start = folio_pos(folio); 2700 u64 page_end = folio_pos(folio) + folio_size(folio) - 1; 2701 int ret = 0; 2702 bool free_delalloc_space = true; 2703 2704 /* 2705 * This is similar to page_mkwrite, we need to reserve the space before 2706 * we take the folio lock. 2707 */ 2708 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2709 folio_size(folio)); 2710 again: 2711 folio_lock(folio); 2712 2713 /* 2714 * Before we queued this fixup, we took a reference on the folio. 2715 * folio->mapping may go NULL, but it shouldn't be moved to a different 2716 * address space. 2717 */ 2718 if (!folio->mapping || !folio_test_dirty(folio) || 2719 !folio_test_checked(folio)) { 2720 /* 2721 * Unfortunately this is a little tricky, either 2722 * 2723 * 1) We got here and our folio had already been dealt with and 2724 * we reserved our space, thus ret == 0, so we need to just 2725 * drop our space reservation and bail. This can happen the 2726 * first time we come into the fixup worker, or could happen 2727 * while waiting for the ordered extent. 2728 * 2) Our folio was already dealt with, but we happened to get an 2729 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2730 * this case we obviously don't have anything to release, but 2731 * because the folio was already dealt with we don't want to 2732 * mark the folio with an error, so make sure we're resetting 2733 * ret to 0. This is why we have this check _before_ the ret 2734 * check, because we do not want to have a surprise ENOSPC 2735 * when the folio was already properly dealt with. 2736 */ 2737 if (!ret) { 2738 btrfs_delalloc_release_extents(inode, folio_size(folio)); 2739 btrfs_delalloc_release_space(inode, data_reserved, 2740 page_start, folio_size(folio), 2741 true); 2742 } 2743 ret = 0; 2744 goto out_page; 2745 } 2746 2747 /* 2748 * We can't mess with the folio state unless it is locked, so now that 2749 * it is locked bail if we failed to make our space reservation. 2750 */ 2751 if (ret) 2752 goto out_page; 2753 2754 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2755 2756 /* already ordered? We're done */ 2757 if (folio_test_ordered(folio)) 2758 goto out_reserved; 2759 2760 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2761 if (ordered) { 2762 unlock_extent(&inode->io_tree, page_start, page_end, 2763 &cached_state); 2764 folio_unlock(folio); 2765 btrfs_start_ordered_extent(ordered); 2766 btrfs_put_ordered_extent(ordered); 2767 goto again; 2768 } 2769 2770 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2771 &cached_state); 2772 if (ret) 2773 goto out_reserved; 2774 2775 /* 2776 * Everything went as planned, we're now the owner of a dirty page with 2777 * delayed allocation bits set and space reserved for our COW 2778 * destination. 2779 * 2780 * The page was dirty when we started, nothing should have cleaned it. 2781 */ 2782 BUG_ON(!folio_test_dirty(folio)); 2783 free_delalloc_space = false; 2784 out_reserved: 2785 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2786 if (free_delalloc_space) 2787 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2788 PAGE_SIZE, true); 2789 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2790 out_page: 2791 if (ret) { 2792 /* 2793 * We hit ENOSPC or other errors. Update the mapping and page 2794 * to reflect the errors and clean the page. 2795 */ 2796 mapping_set_error(folio->mapping, ret); 2797 btrfs_mark_ordered_io_finished(inode, folio, page_start, 2798 folio_size(folio), !ret); 2799 folio_clear_dirty_for_io(folio); 2800 } 2801 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE); 2802 folio_unlock(folio); 2803 folio_put(folio); 2804 kfree(fixup); 2805 extent_changeset_free(data_reserved); 2806 /* 2807 * As a precaution, do a delayed iput in case it would be the last iput 2808 * that could need flushing space. Recursing back to fixup worker would 2809 * deadlock. 2810 */ 2811 btrfs_add_delayed_iput(inode); 2812 } 2813 2814 /* 2815 * There are a few paths in the higher layers of the kernel that directly 2816 * set the folio dirty bit without asking the filesystem if it is a 2817 * good idea. This causes problems because we want to make sure COW 2818 * properly happens and the data=ordered rules are followed. 2819 * 2820 * In our case any range that doesn't have the ORDERED bit set 2821 * hasn't been properly setup for IO. We kick off an async process 2822 * to fix it up. The async helper will wait for ordered extents, set 2823 * the delalloc bit and make it safe to write the folio. 2824 */ 2825 int btrfs_writepage_cow_fixup(struct folio *folio) 2826 { 2827 struct inode *inode = folio->mapping->host; 2828 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 2829 struct btrfs_writepage_fixup *fixup; 2830 2831 /* This folio has ordered extent covering it already */ 2832 if (folio_test_ordered(folio)) 2833 return 0; 2834 2835 /* 2836 * folio_checked is set below when we create a fixup worker for this 2837 * folio, don't try to create another one if we're already 2838 * folio_test_checked. 2839 * 2840 * The extent_io writepage code will redirty the foio if we send back 2841 * EAGAIN. 2842 */ 2843 if (folio_test_checked(folio)) 2844 return -EAGAIN; 2845 2846 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2847 if (!fixup) 2848 return -EAGAIN; 2849 2850 /* 2851 * We are already holding a reference to this inode from 2852 * write_cache_pages. We need to hold it because the space reservation 2853 * takes place outside of the folio lock, and we can't trust 2854 * page->mapping outside of the folio lock. 2855 */ 2856 ihold(inode); 2857 btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio)); 2858 folio_get(folio); 2859 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL); 2860 fixup->folio = folio; 2861 fixup->inode = BTRFS_I(inode); 2862 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2863 2864 return -EAGAIN; 2865 } 2866 2867 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2868 struct btrfs_inode *inode, u64 file_pos, 2869 struct btrfs_file_extent_item *stack_fi, 2870 const bool update_inode_bytes, 2871 u64 qgroup_reserved) 2872 { 2873 struct btrfs_root *root = inode->root; 2874 const u64 sectorsize = root->fs_info->sectorsize; 2875 struct btrfs_path *path; 2876 struct extent_buffer *leaf; 2877 struct btrfs_key ins; 2878 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2879 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2880 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2881 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2882 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2883 struct btrfs_drop_extents_args drop_args = { 0 }; 2884 int ret; 2885 2886 path = btrfs_alloc_path(); 2887 if (!path) 2888 return -ENOMEM; 2889 2890 /* 2891 * we may be replacing one extent in the tree with another. 2892 * The new extent is pinned in the extent map, and we don't want 2893 * to drop it from the cache until it is completely in the btree. 2894 * 2895 * So, tell btrfs_drop_extents to leave this extent in the cache. 2896 * the caller is expected to unpin it and allow it to be merged 2897 * with the others. 2898 */ 2899 drop_args.path = path; 2900 drop_args.start = file_pos; 2901 drop_args.end = file_pos + num_bytes; 2902 drop_args.replace_extent = true; 2903 drop_args.extent_item_size = sizeof(*stack_fi); 2904 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2905 if (ret) 2906 goto out; 2907 2908 if (!drop_args.extent_inserted) { 2909 ins.objectid = btrfs_ino(inode); 2910 ins.offset = file_pos; 2911 ins.type = BTRFS_EXTENT_DATA_KEY; 2912 2913 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2914 sizeof(*stack_fi)); 2915 if (ret) 2916 goto out; 2917 } 2918 leaf = path->nodes[0]; 2919 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2920 write_extent_buffer(leaf, stack_fi, 2921 btrfs_item_ptr_offset(leaf, path->slots[0]), 2922 sizeof(struct btrfs_file_extent_item)); 2923 2924 btrfs_mark_buffer_dirty(trans, leaf); 2925 btrfs_release_path(path); 2926 2927 /* 2928 * If we dropped an inline extent here, we know the range where it is 2929 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the 2930 * number of bytes only for that range containing the inline extent. 2931 * The remaining of the range will be processed when clearning the 2932 * EXTENT_DELALLOC_BIT bit through the ordered extent completion. 2933 */ 2934 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 2935 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 2936 2937 inline_size = drop_args.bytes_found - inline_size; 2938 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 2939 drop_args.bytes_found -= inline_size; 2940 num_bytes -= sectorsize; 2941 } 2942 2943 if (update_inode_bytes) 2944 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 2945 2946 ins.objectid = disk_bytenr; 2947 ins.offset = disk_num_bytes; 2948 ins.type = BTRFS_EXTENT_ITEM_KEY; 2949 2950 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2951 if (ret) 2952 goto out; 2953 2954 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 2955 file_pos - offset, 2956 qgroup_reserved, &ins); 2957 out: 2958 btrfs_free_path(path); 2959 2960 return ret; 2961 } 2962 2963 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 2964 u64 start, u64 len) 2965 { 2966 struct btrfs_block_group *cache; 2967 2968 cache = btrfs_lookup_block_group(fs_info, start); 2969 ASSERT(cache); 2970 2971 spin_lock(&cache->lock); 2972 cache->delalloc_bytes -= len; 2973 spin_unlock(&cache->lock); 2974 2975 btrfs_put_block_group(cache); 2976 } 2977 2978 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 2979 struct btrfs_ordered_extent *oe) 2980 { 2981 struct btrfs_file_extent_item stack_fi; 2982 bool update_inode_bytes; 2983 u64 num_bytes = oe->num_bytes; 2984 u64 ram_bytes = oe->ram_bytes; 2985 2986 memset(&stack_fi, 0, sizeof(stack_fi)); 2987 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 2988 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 2989 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 2990 oe->disk_num_bytes); 2991 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 2992 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) 2993 num_bytes = oe->truncated_len; 2994 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 2995 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 2996 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 2997 /* Encryption and other encoding is reserved and all 0 */ 2998 2999 /* 3000 * For delalloc, when completing an ordered extent we update the inode's 3001 * bytes when clearing the range in the inode's io tree, so pass false 3002 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3003 * except if the ordered extent was truncated. 3004 */ 3005 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3006 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3007 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3008 3009 return insert_reserved_file_extent(trans, oe->inode, 3010 oe->file_offset, &stack_fi, 3011 update_inode_bytes, oe->qgroup_rsv); 3012 } 3013 3014 /* 3015 * As ordered data IO finishes, this gets called so we can finish 3016 * an ordered extent if the range of bytes in the file it covers are 3017 * fully written. 3018 */ 3019 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3020 { 3021 struct btrfs_inode *inode = ordered_extent->inode; 3022 struct btrfs_root *root = inode->root; 3023 struct btrfs_fs_info *fs_info = root->fs_info; 3024 struct btrfs_trans_handle *trans = NULL; 3025 struct extent_io_tree *io_tree = &inode->io_tree; 3026 struct extent_state *cached_state = NULL; 3027 u64 start, end; 3028 int compress_type = 0; 3029 int ret = 0; 3030 u64 logical_len = ordered_extent->num_bytes; 3031 bool freespace_inode; 3032 bool truncated = false; 3033 bool clear_reserved_extent = true; 3034 unsigned int clear_bits = EXTENT_DEFRAG; 3035 3036 start = ordered_extent->file_offset; 3037 end = start + ordered_extent->num_bytes - 1; 3038 3039 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3040 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3041 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3042 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3043 clear_bits |= EXTENT_DELALLOC_NEW; 3044 3045 freespace_inode = btrfs_is_free_space_inode(inode); 3046 if (!freespace_inode) 3047 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3048 3049 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3050 ret = -EIO; 3051 goto out; 3052 } 3053 3054 if (btrfs_is_zoned(fs_info)) 3055 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3056 ordered_extent->disk_num_bytes); 3057 3058 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3059 truncated = true; 3060 logical_len = ordered_extent->truncated_len; 3061 /* Truncated the entire extent, don't bother adding */ 3062 if (!logical_len) 3063 goto out; 3064 } 3065 3066 if (freespace_inode) 3067 trans = btrfs_join_transaction_spacecache(root); 3068 else 3069 trans = btrfs_join_transaction(root); 3070 if (IS_ERR(trans)) { 3071 ret = PTR_ERR(trans); 3072 trans = NULL; 3073 goto out; 3074 } 3075 3076 trans->block_rsv = &inode->block_rsv; 3077 3078 ret = btrfs_insert_raid_extent(trans, ordered_extent); 3079 if (ret) { 3080 btrfs_abort_transaction(trans, ret); 3081 goto out; 3082 } 3083 3084 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3085 /* Logic error */ 3086 ASSERT(list_empty(&ordered_extent->list)); 3087 if (!list_empty(&ordered_extent->list)) { 3088 ret = -EINVAL; 3089 btrfs_abort_transaction(trans, ret); 3090 goto out; 3091 } 3092 3093 btrfs_inode_safe_disk_i_size_write(inode, 0); 3094 ret = btrfs_update_inode_fallback(trans, inode); 3095 if (ret) { 3096 /* -ENOMEM or corruption */ 3097 btrfs_abort_transaction(trans, ret); 3098 } 3099 goto out; 3100 } 3101 3102 clear_bits |= EXTENT_LOCKED; 3103 lock_extent(io_tree, start, end, &cached_state); 3104 3105 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3106 compress_type = ordered_extent->compress_type; 3107 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3108 BUG_ON(compress_type); 3109 ret = btrfs_mark_extent_written(trans, inode, 3110 ordered_extent->file_offset, 3111 ordered_extent->file_offset + 3112 logical_len); 3113 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3114 ordered_extent->disk_num_bytes); 3115 } else { 3116 BUG_ON(root == fs_info->tree_root); 3117 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3118 if (!ret) { 3119 clear_reserved_extent = false; 3120 btrfs_release_delalloc_bytes(fs_info, 3121 ordered_extent->disk_bytenr, 3122 ordered_extent->disk_num_bytes); 3123 } 3124 } 3125 if (ret < 0) { 3126 btrfs_abort_transaction(trans, ret); 3127 goto out; 3128 } 3129 3130 ret = unpin_extent_cache(inode, ordered_extent->file_offset, 3131 ordered_extent->num_bytes, trans->transid); 3132 if (ret < 0) { 3133 btrfs_abort_transaction(trans, ret); 3134 goto out; 3135 } 3136 3137 ret = add_pending_csums(trans, &ordered_extent->list); 3138 if (ret) { 3139 btrfs_abort_transaction(trans, ret); 3140 goto out; 3141 } 3142 3143 /* 3144 * If this is a new delalloc range, clear its new delalloc flag to 3145 * update the inode's number of bytes. This needs to be done first 3146 * before updating the inode item. 3147 */ 3148 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3149 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3150 clear_extent_bit(&inode->io_tree, start, end, 3151 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3152 &cached_state); 3153 3154 btrfs_inode_safe_disk_i_size_write(inode, 0); 3155 ret = btrfs_update_inode_fallback(trans, inode); 3156 if (ret) { /* -ENOMEM or corruption */ 3157 btrfs_abort_transaction(trans, ret); 3158 goto out; 3159 } 3160 out: 3161 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3162 &cached_state); 3163 3164 if (trans) 3165 btrfs_end_transaction(trans); 3166 3167 if (ret || truncated) { 3168 u64 unwritten_start = start; 3169 3170 /* 3171 * If we failed to finish this ordered extent for any reason we 3172 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3173 * extent, and mark the inode with the error if it wasn't 3174 * already set. Any error during writeback would have already 3175 * set the mapping error, so we need to set it if we're the ones 3176 * marking this ordered extent as failed. 3177 */ 3178 if (ret) 3179 btrfs_mark_ordered_extent_error(ordered_extent); 3180 3181 if (truncated) 3182 unwritten_start += logical_len; 3183 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3184 3185 /* 3186 * Drop extent maps for the part of the extent we didn't write. 3187 * 3188 * We have an exception here for the free_space_inode, this is 3189 * because when we do btrfs_get_extent() on the free space inode 3190 * we will search the commit root. If this is a new block group 3191 * we won't find anything, and we will trip over the assert in 3192 * writepage where we do ASSERT(em->block_start != 3193 * EXTENT_MAP_HOLE). 3194 * 3195 * Theoretically we could also skip this for any NOCOW extent as 3196 * we don't mess with the extent map tree in the NOCOW case, but 3197 * for now simply skip this if we are the free space inode. 3198 */ 3199 if (!btrfs_is_free_space_inode(inode)) 3200 btrfs_drop_extent_map_range(inode, unwritten_start, 3201 end, false); 3202 3203 /* 3204 * If the ordered extent had an IOERR or something else went 3205 * wrong we need to return the space for this ordered extent 3206 * back to the allocator. We only free the extent in the 3207 * truncated case if we didn't write out the extent at all. 3208 * 3209 * If we made it past insert_reserved_file_extent before we 3210 * errored out then we don't need to do this as the accounting 3211 * has already been done. 3212 */ 3213 if ((ret || !logical_len) && 3214 clear_reserved_extent && 3215 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3216 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3217 /* 3218 * Discard the range before returning it back to the 3219 * free space pool 3220 */ 3221 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3222 btrfs_discard_extent(fs_info, 3223 ordered_extent->disk_bytenr, 3224 ordered_extent->disk_num_bytes, 3225 NULL); 3226 btrfs_free_reserved_extent(fs_info, 3227 ordered_extent->disk_bytenr, 3228 ordered_extent->disk_num_bytes, 1); 3229 /* 3230 * Actually free the qgroup rsv which was released when 3231 * the ordered extent was created. 3232 */ 3233 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root), 3234 ordered_extent->qgroup_rsv, 3235 BTRFS_QGROUP_RSV_DATA); 3236 } 3237 } 3238 3239 /* 3240 * This needs to be done to make sure anybody waiting knows we are done 3241 * updating everything for this ordered extent. 3242 */ 3243 btrfs_remove_ordered_extent(inode, ordered_extent); 3244 3245 /* once for us */ 3246 btrfs_put_ordered_extent(ordered_extent); 3247 /* once for the tree */ 3248 btrfs_put_ordered_extent(ordered_extent); 3249 3250 return ret; 3251 } 3252 3253 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) 3254 { 3255 if (btrfs_is_zoned(ordered->inode->root->fs_info) && 3256 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) && 3257 list_empty(&ordered->bioc_list)) 3258 btrfs_finish_ordered_zoned(ordered); 3259 return btrfs_finish_one_ordered(ordered); 3260 } 3261 3262 /* 3263 * Verify the checksum for a single sector without any extra action that depend 3264 * on the type of I/O. 3265 */ 3266 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3267 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3268 { 3269 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3270 char *kaddr; 3271 3272 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3273 3274 shash->tfm = fs_info->csum_shash; 3275 3276 kaddr = kmap_local_page(page) + pgoff; 3277 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3278 kunmap_local(kaddr); 3279 3280 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3281 return -EIO; 3282 return 0; 3283 } 3284 3285 /* 3286 * Verify the checksum of a single data sector. 3287 * 3288 * @bbio: btrfs_io_bio which contains the csum 3289 * @dev: device the sector is on 3290 * @bio_offset: offset to the beginning of the bio (in bytes) 3291 * @bv: bio_vec to check 3292 * 3293 * Check if the checksum on a data block is valid. When a checksum mismatch is 3294 * detected, report the error and fill the corrupted range with zero. 3295 * 3296 * Return %true if the sector is ok or had no checksum to start with, else %false. 3297 */ 3298 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3299 u32 bio_offset, struct bio_vec *bv) 3300 { 3301 struct btrfs_inode *inode = bbio->inode; 3302 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3303 u64 file_offset = bbio->file_offset + bio_offset; 3304 u64 end = file_offset + bv->bv_len - 1; 3305 u8 *csum_expected; 3306 u8 csum[BTRFS_CSUM_SIZE]; 3307 3308 ASSERT(bv->bv_len == fs_info->sectorsize); 3309 3310 if (!bbio->csum) 3311 return true; 3312 3313 if (btrfs_is_data_reloc_root(inode->root) && 3314 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3315 NULL)) { 3316 /* Skip the range without csum for data reloc inode */ 3317 clear_extent_bits(&inode->io_tree, file_offset, end, 3318 EXTENT_NODATASUM); 3319 return true; 3320 } 3321 3322 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3323 fs_info->csum_size; 3324 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3325 csum_expected)) 3326 goto zeroit; 3327 return true; 3328 3329 zeroit: 3330 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3331 bbio->mirror_num); 3332 if (dev) 3333 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3334 memzero_bvec(bv); 3335 return false; 3336 } 3337 3338 /* 3339 * Perform a delayed iput on @inode. 3340 * 3341 * @inode: The inode we want to perform iput on 3342 * 3343 * This function uses the generic vfs_inode::i_count to track whether we should 3344 * just decrement it (in case it's > 1) or if this is the last iput then link 3345 * the inode to the delayed iput machinery. Delayed iputs are processed at 3346 * transaction commit time/superblock commit/cleaner kthread. 3347 */ 3348 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3349 { 3350 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3351 unsigned long flags; 3352 3353 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3354 return; 3355 3356 atomic_inc(&fs_info->nr_delayed_iputs); 3357 /* 3358 * Need to be irq safe here because we can be called from either an irq 3359 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3360 * context. 3361 */ 3362 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3363 ASSERT(list_empty(&inode->delayed_iput)); 3364 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3365 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3366 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3367 wake_up_process(fs_info->cleaner_kthread); 3368 } 3369 3370 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3371 struct btrfs_inode *inode) 3372 { 3373 list_del_init(&inode->delayed_iput); 3374 spin_unlock_irq(&fs_info->delayed_iput_lock); 3375 iput(&inode->vfs_inode); 3376 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3377 wake_up(&fs_info->delayed_iputs_wait); 3378 spin_lock_irq(&fs_info->delayed_iput_lock); 3379 } 3380 3381 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3382 struct btrfs_inode *inode) 3383 { 3384 if (!list_empty(&inode->delayed_iput)) { 3385 spin_lock_irq(&fs_info->delayed_iput_lock); 3386 if (!list_empty(&inode->delayed_iput)) 3387 run_delayed_iput_locked(fs_info, inode); 3388 spin_unlock_irq(&fs_info->delayed_iput_lock); 3389 } 3390 } 3391 3392 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3393 { 3394 /* 3395 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3396 * calls btrfs_add_delayed_iput() and that needs to lock 3397 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3398 * prevent a deadlock. 3399 */ 3400 spin_lock_irq(&fs_info->delayed_iput_lock); 3401 while (!list_empty(&fs_info->delayed_iputs)) { 3402 struct btrfs_inode *inode; 3403 3404 inode = list_first_entry(&fs_info->delayed_iputs, 3405 struct btrfs_inode, delayed_iput); 3406 run_delayed_iput_locked(fs_info, inode); 3407 if (need_resched()) { 3408 spin_unlock_irq(&fs_info->delayed_iput_lock); 3409 cond_resched(); 3410 spin_lock_irq(&fs_info->delayed_iput_lock); 3411 } 3412 } 3413 spin_unlock_irq(&fs_info->delayed_iput_lock); 3414 } 3415 3416 /* 3417 * Wait for flushing all delayed iputs 3418 * 3419 * @fs_info: the filesystem 3420 * 3421 * This will wait on any delayed iputs that are currently running with KILLABLE 3422 * set. Once they are all done running we will return, unless we are killed in 3423 * which case we return EINTR. This helps in user operations like fallocate etc 3424 * that might get blocked on the iputs. 3425 * 3426 * Return EINTR if we were killed, 0 if nothing's pending 3427 */ 3428 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3429 { 3430 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3431 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3432 if (ret) 3433 return -EINTR; 3434 return 0; 3435 } 3436 3437 /* 3438 * This creates an orphan entry for the given inode in case something goes wrong 3439 * in the middle of an unlink. 3440 */ 3441 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3442 struct btrfs_inode *inode) 3443 { 3444 int ret; 3445 3446 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3447 if (ret && ret != -EEXIST) { 3448 btrfs_abort_transaction(trans, ret); 3449 return ret; 3450 } 3451 3452 return 0; 3453 } 3454 3455 /* 3456 * We have done the delete so we can go ahead and remove the orphan item for 3457 * this particular inode. 3458 */ 3459 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3460 struct btrfs_inode *inode) 3461 { 3462 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3463 } 3464 3465 /* 3466 * this cleans up any orphans that may be left on the list from the last use 3467 * of this root. 3468 */ 3469 int btrfs_orphan_cleanup(struct btrfs_root *root) 3470 { 3471 struct btrfs_fs_info *fs_info = root->fs_info; 3472 struct btrfs_path *path; 3473 struct extent_buffer *leaf; 3474 struct btrfs_key key, found_key; 3475 struct btrfs_trans_handle *trans; 3476 struct inode *inode; 3477 u64 last_objectid = 0; 3478 int ret = 0, nr_unlink = 0; 3479 3480 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3481 return 0; 3482 3483 path = btrfs_alloc_path(); 3484 if (!path) { 3485 ret = -ENOMEM; 3486 goto out; 3487 } 3488 path->reada = READA_BACK; 3489 3490 key.objectid = BTRFS_ORPHAN_OBJECTID; 3491 key.type = BTRFS_ORPHAN_ITEM_KEY; 3492 key.offset = (u64)-1; 3493 3494 while (1) { 3495 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3496 if (ret < 0) 3497 goto out; 3498 3499 /* 3500 * if ret == 0 means we found what we were searching for, which 3501 * is weird, but possible, so only screw with path if we didn't 3502 * find the key and see if we have stuff that matches 3503 */ 3504 if (ret > 0) { 3505 ret = 0; 3506 if (path->slots[0] == 0) 3507 break; 3508 path->slots[0]--; 3509 } 3510 3511 /* pull out the item */ 3512 leaf = path->nodes[0]; 3513 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3514 3515 /* make sure the item matches what we want */ 3516 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3517 break; 3518 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3519 break; 3520 3521 /* release the path since we're done with it */ 3522 btrfs_release_path(path); 3523 3524 /* 3525 * this is where we are basically btrfs_lookup, without the 3526 * crossing root thing. we store the inode number in the 3527 * offset of the orphan item. 3528 */ 3529 3530 if (found_key.offset == last_objectid) { 3531 /* 3532 * We found the same inode as before. This means we were 3533 * not able to remove its items via eviction triggered 3534 * by an iput(). A transaction abort may have happened, 3535 * due to -ENOSPC for example, so try to grab the error 3536 * that lead to a transaction abort, if any. 3537 */ 3538 btrfs_err(fs_info, 3539 "Error removing orphan entry, stopping orphan cleanup"); 3540 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3541 goto out; 3542 } 3543 3544 last_objectid = found_key.offset; 3545 3546 found_key.objectid = found_key.offset; 3547 found_key.type = BTRFS_INODE_ITEM_KEY; 3548 found_key.offset = 0; 3549 inode = btrfs_iget(last_objectid, root); 3550 if (IS_ERR(inode)) { 3551 ret = PTR_ERR(inode); 3552 inode = NULL; 3553 if (ret != -ENOENT) 3554 goto out; 3555 } 3556 3557 if (!inode && root == fs_info->tree_root) { 3558 struct btrfs_root *dead_root; 3559 int is_dead_root = 0; 3560 3561 /* 3562 * This is an orphan in the tree root. Currently these 3563 * could come from 2 sources: 3564 * a) a root (snapshot/subvolume) deletion in progress 3565 * b) a free space cache inode 3566 * We need to distinguish those two, as the orphan item 3567 * for a root must not get deleted before the deletion 3568 * of the snapshot/subvolume's tree completes. 3569 * 3570 * btrfs_find_orphan_roots() ran before us, which has 3571 * found all deleted roots and loaded them into 3572 * fs_info->fs_roots_radix. So here we can find if an 3573 * orphan item corresponds to a deleted root by looking 3574 * up the root from that radix tree. 3575 */ 3576 3577 spin_lock(&fs_info->fs_roots_radix_lock); 3578 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3579 (unsigned long)found_key.objectid); 3580 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3581 is_dead_root = 1; 3582 spin_unlock(&fs_info->fs_roots_radix_lock); 3583 3584 if (is_dead_root) { 3585 /* prevent this orphan from being found again */ 3586 key.offset = found_key.objectid - 1; 3587 continue; 3588 } 3589 3590 } 3591 3592 /* 3593 * If we have an inode with links, there are a couple of 3594 * possibilities: 3595 * 3596 * 1. We were halfway through creating fsverity metadata for the 3597 * file. In that case, the orphan item represents incomplete 3598 * fsverity metadata which must be cleaned up with 3599 * btrfs_drop_verity_items and deleting the orphan item. 3600 3601 * 2. Old kernels (before v3.12) used to create an 3602 * orphan item for truncate indicating that there were possibly 3603 * extent items past i_size that needed to be deleted. In v3.12, 3604 * truncate was changed to update i_size in sync with the extent 3605 * items, but the (useless) orphan item was still created. Since 3606 * v4.18, we don't create the orphan item for truncate at all. 3607 * 3608 * So, this item could mean that we need to do a truncate, but 3609 * only if this filesystem was last used on a pre-v3.12 kernel 3610 * and was not cleanly unmounted. The odds of that are quite 3611 * slim, and it's a pain to do the truncate now, so just delete 3612 * the orphan item. 3613 * 3614 * It's also possible that this orphan item was supposed to be 3615 * deleted but wasn't. The inode number may have been reused, 3616 * but either way, we can delete the orphan item. 3617 */ 3618 if (!inode || inode->i_nlink) { 3619 if (inode) { 3620 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3621 iput(inode); 3622 inode = NULL; 3623 if (ret) 3624 goto out; 3625 } 3626 trans = btrfs_start_transaction(root, 1); 3627 if (IS_ERR(trans)) { 3628 ret = PTR_ERR(trans); 3629 goto out; 3630 } 3631 btrfs_debug(fs_info, "auto deleting %Lu", 3632 found_key.objectid); 3633 ret = btrfs_del_orphan_item(trans, root, 3634 found_key.objectid); 3635 btrfs_end_transaction(trans); 3636 if (ret) 3637 goto out; 3638 continue; 3639 } 3640 3641 nr_unlink++; 3642 3643 /* this will do delete_inode and everything for us */ 3644 iput(inode); 3645 } 3646 /* release the path since we're done with it */ 3647 btrfs_release_path(path); 3648 3649 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3650 trans = btrfs_join_transaction(root); 3651 if (!IS_ERR(trans)) 3652 btrfs_end_transaction(trans); 3653 } 3654 3655 if (nr_unlink) 3656 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3657 3658 out: 3659 if (ret) 3660 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3661 btrfs_free_path(path); 3662 return ret; 3663 } 3664 3665 /* 3666 * very simple check to peek ahead in the leaf looking for xattrs. If we 3667 * don't find any xattrs, we know there can't be any acls. 3668 * 3669 * slot is the slot the inode is in, objectid is the objectid of the inode 3670 */ 3671 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3672 int slot, u64 objectid, 3673 int *first_xattr_slot) 3674 { 3675 u32 nritems = btrfs_header_nritems(leaf); 3676 struct btrfs_key found_key; 3677 static u64 xattr_access = 0; 3678 static u64 xattr_default = 0; 3679 int scanned = 0; 3680 3681 if (!xattr_access) { 3682 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3683 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3684 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3685 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3686 } 3687 3688 slot++; 3689 *first_xattr_slot = -1; 3690 while (slot < nritems) { 3691 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3692 3693 /* we found a different objectid, there must not be acls */ 3694 if (found_key.objectid != objectid) 3695 return 0; 3696 3697 /* we found an xattr, assume we've got an acl */ 3698 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3699 if (*first_xattr_slot == -1) 3700 *first_xattr_slot = slot; 3701 if (found_key.offset == xattr_access || 3702 found_key.offset == xattr_default) 3703 return 1; 3704 } 3705 3706 /* 3707 * we found a key greater than an xattr key, there can't 3708 * be any acls later on 3709 */ 3710 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3711 return 0; 3712 3713 slot++; 3714 scanned++; 3715 3716 /* 3717 * it goes inode, inode backrefs, xattrs, extents, 3718 * so if there are a ton of hard links to an inode there can 3719 * be a lot of backrefs. Don't waste time searching too hard, 3720 * this is just an optimization 3721 */ 3722 if (scanned >= 8) 3723 break; 3724 } 3725 /* we hit the end of the leaf before we found an xattr or 3726 * something larger than an xattr. We have to assume the inode 3727 * has acls 3728 */ 3729 if (*first_xattr_slot == -1) 3730 *first_xattr_slot = slot; 3731 return 1; 3732 } 3733 3734 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode) 3735 { 3736 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3737 3738 if (WARN_ON_ONCE(inode->file_extent_tree)) 3739 return 0; 3740 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 3741 return 0; 3742 if (!S_ISREG(inode->vfs_inode.i_mode)) 3743 return 0; 3744 if (btrfs_is_free_space_inode(inode)) 3745 return 0; 3746 3747 inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL); 3748 if (!inode->file_extent_tree) 3749 return -ENOMEM; 3750 3751 extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT); 3752 /* Lockdep class is set only for the file extent tree. */ 3753 lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class); 3754 3755 return 0; 3756 } 3757 3758 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc) 3759 { 3760 struct btrfs_root *root = inode->root; 3761 struct btrfs_inode *existing; 3762 const u64 ino = btrfs_ino(inode); 3763 int ret; 3764 3765 if (inode_unhashed(&inode->vfs_inode)) 3766 return 0; 3767 3768 if (prealloc) { 3769 ret = xa_reserve(&root->inodes, ino, GFP_NOFS); 3770 if (ret) 3771 return ret; 3772 } 3773 3774 existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC); 3775 3776 if (xa_is_err(existing)) { 3777 ret = xa_err(existing); 3778 ASSERT(ret != -EINVAL); 3779 ASSERT(ret != -ENOMEM); 3780 return ret; 3781 } else if (existing) { 3782 WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING))); 3783 } 3784 3785 return 0; 3786 } 3787 3788 /* 3789 * Read a locked inode from the btree into the in-memory inode and add it to 3790 * its root list/tree. 3791 * 3792 * On failure clean up the inode. 3793 */ 3794 static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path) 3795 { 3796 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 3797 struct extent_buffer *leaf; 3798 struct btrfs_inode_item *inode_item; 3799 struct btrfs_root *root = BTRFS_I(inode)->root; 3800 struct btrfs_key location; 3801 unsigned long ptr; 3802 int maybe_acls; 3803 u32 rdev; 3804 int ret; 3805 bool filled = false; 3806 int first_xattr_slot; 3807 3808 ret = btrfs_init_file_extent_tree(BTRFS_I(inode)); 3809 if (ret) 3810 goto out; 3811 3812 ret = btrfs_fill_inode(inode, &rdev); 3813 if (!ret) 3814 filled = true; 3815 3816 ASSERT(path); 3817 3818 btrfs_get_inode_key(BTRFS_I(inode), &location); 3819 3820 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3821 if (ret) { 3822 /* 3823 * ret > 0 can come from btrfs_search_slot called by 3824 * btrfs_lookup_inode(), this means the inode was not found. 3825 */ 3826 if (ret > 0) 3827 ret = -ENOENT; 3828 goto out; 3829 } 3830 3831 leaf = path->nodes[0]; 3832 3833 if (filled) 3834 goto cache_index; 3835 3836 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3837 struct btrfs_inode_item); 3838 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3839 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3840 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3841 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3842 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3843 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3844 round_up(i_size_read(inode), fs_info->sectorsize)); 3845 3846 inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime), 3847 btrfs_timespec_nsec(leaf, &inode_item->atime)); 3848 3849 inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime), 3850 btrfs_timespec_nsec(leaf, &inode_item->mtime)); 3851 3852 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), 3853 btrfs_timespec_nsec(leaf, &inode_item->ctime)); 3854 3855 BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime); 3856 BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime); 3857 3858 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3859 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3860 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3861 3862 inode_set_iversion_queried(inode, 3863 btrfs_inode_sequence(leaf, inode_item)); 3864 inode->i_generation = BTRFS_I(inode)->generation; 3865 inode->i_rdev = 0; 3866 rdev = btrfs_inode_rdev(leaf, inode_item); 3867 3868 if (S_ISDIR(inode->i_mode)) 3869 BTRFS_I(inode)->index_cnt = (u64)-1; 3870 3871 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3872 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3873 3874 cache_index: 3875 /* 3876 * If we were modified in the current generation and evicted from memory 3877 * and then re-read we need to do a full sync since we don't have any 3878 * idea about which extents were modified before we were evicted from 3879 * cache. 3880 * 3881 * This is required for both inode re-read from disk and delayed inode 3882 * in the delayed_nodes xarray. 3883 */ 3884 if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info)) 3885 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3886 &BTRFS_I(inode)->runtime_flags); 3887 3888 /* 3889 * We don't persist the id of the transaction where an unlink operation 3890 * against the inode was last made. So here we assume the inode might 3891 * have been evicted, and therefore the exact value of last_unlink_trans 3892 * lost, and set it to last_trans to avoid metadata inconsistencies 3893 * between the inode and its parent if the inode is fsync'ed and the log 3894 * replayed. For example, in the scenario: 3895 * 3896 * touch mydir/foo 3897 * ln mydir/foo mydir/bar 3898 * sync 3899 * unlink mydir/bar 3900 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3901 * xfs_io -c fsync mydir/foo 3902 * <power failure> 3903 * mount fs, triggers fsync log replay 3904 * 3905 * We must make sure that when we fsync our inode foo we also log its 3906 * parent inode, otherwise after log replay the parent still has the 3907 * dentry with the "bar" name but our inode foo has a link count of 1 3908 * and doesn't have an inode ref with the name "bar" anymore. 3909 * 3910 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3911 * but it guarantees correctness at the expense of occasional full 3912 * transaction commits on fsync if our inode is a directory, or if our 3913 * inode is not a directory, logging its parent unnecessarily. 3914 */ 3915 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3916 3917 /* 3918 * Same logic as for last_unlink_trans. We don't persist the generation 3919 * of the last transaction where this inode was used for a reflink 3920 * operation, so after eviction and reloading the inode we must be 3921 * pessimistic and assume the last transaction that modified the inode. 3922 */ 3923 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3924 3925 path->slots[0]++; 3926 if (inode->i_nlink != 1 || 3927 path->slots[0] >= btrfs_header_nritems(leaf)) 3928 goto cache_acl; 3929 3930 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3931 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3932 goto cache_acl; 3933 3934 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3935 if (location.type == BTRFS_INODE_REF_KEY) { 3936 struct btrfs_inode_ref *ref; 3937 3938 ref = (struct btrfs_inode_ref *)ptr; 3939 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3940 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3941 struct btrfs_inode_extref *extref; 3942 3943 extref = (struct btrfs_inode_extref *)ptr; 3944 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3945 extref); 3946 } 3947 cache_acl: 3948 /* 3949 * try to precache a NULL acl entry for files that don't have 3950 * any xattrs or acls 3951 */ 3952 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3953 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3954 if (first_xattr_slot != -1) { 3955 path->slots[0] = first_xattr_slot; 3956 ret = btrfs_load_inode_props(inode, path); 3957 if (ret) 3958 btrfs_err(fs_info, 3959 "error loading props for ino %llu (root %llu): %d", 3960 btrfs_ino(BTRFS_I(inode)), 3961 btrfs_root_id(root), ret); 3962 } 3963 3964 if (!maybe_acls) 3965 cache_no_acl(inode); 3966 3967 switch (inode->i_mode & S_IFMT) { 3968 case S_IFREG: 3969 inode->i_mapping->a_ops = &btrfs_aops; 3970 inode->i_fop = &btrfs_file_operations; 3971 inode->i_op = &btrfs_file_inode_operations; 3972 break; 3973 case S_IFDIR: 3974 inode->i_fop = &btrfs_dir_file_operations; 3975 inode->i_op = &btrfs_dir_inode_operations; 3976 break; 3977 case S_IFLNK: 3978 inode->i_op = &btrfs_symlink_inode_operations; 3979 inode_nohighmem(inode); 3980 inode->i_mapping->a_ops = &btrfs_aops; 3981 break; 3982 default: 3983 inode->i_op = &btrfs_special_inode_operations; 3984 init_special_inode(inode, inode->i_mode, rdev); 3985 break; 3986 } 3987 3988 btrfs_sync_inode_flags_to_i_flags(inode); 3989 3990 ret = btrfs_add_inode_to_root(BTRFS_I(inode), true); 3991 if (ret) 3992 goto out; 3993 3994 return 0; 3995 out: 3996 iget_failed(inode); 3997 return ret; 3998 } 3999 4000 /* 4001 * given a leaf and an inode, copy the inode fields into the leaf 4002 */ 4003 static void fill_inode_item(struct btrfs_trans_handle *trans, 4004 struct extent_buffer *leaf, 4005 struct btrfs_inode_item *item, 4006 struct inode *inode) 4007 { 4008 struct btrfs_map_token token; 4009 u64 flags; 4010 4011 btrfs_init_map_token(&token, leaf); 4012 4013 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 4014 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 4015 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 4016 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 4017 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 4018 4019 btrfs_set_token_timespec_sec(&token, &item->atime, 4020 inode_get_atime_sec(inode)); 4021 btrfs_set_token_timespec_nsec(&token, &item->atime, 4022 inode_get_atime_nsec(inode)); 4023 4024 btrfs_set_token_timespec_sec(&token, &item->mtime, 4025 inode_get_mtime_sec(inode)); 4026 btrfs_set_token_timespec_nsec(&token, &item->mtime, 4027 inode_get_mtime_nsec(inode)); 4028 4029 btrfs_set_token_timespec_sec(&token, &item->ctime, 4030 inode_get_ctime_sec(inode)); 4031 btrfs_set_token_timespec_nsec(&token, &item->ctime, 4032 inode_get_ctime_nsec(inode)); 4033 4034 btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec); 4035 btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec); 4036 4037 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 4038 btrfs_set_token_inode_generation(&token, item, 4039 BTRFS_I(inode)->generation); 4040 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4041 btrfs_set_token_inode_transid(&token, item, trans->transid); 4042 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4043 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4044 BTRFS_I(inode)->ro_flags); 4045 btrfs_set_token_inode_flags(&token, item, flags); 4046 btrfs_set_token_inode_block_group(&token, item, 0); 4047 } 4048 4049 /* 4050 * copy everything in the in-memory inode into the btree. 4051 */ 4052 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 4053 struct btrfs_inode *inode) 4054 { 4055 struct btrfs_inode_item *inode_item; 4056 struct btrfs_path *path; 4057 struct extent_buffer *leaf; 4058 struct btrfs_key key; 4059 int ret; 4060 4061 path = btrfs_alloc_path(); 4062 if (!path) 4063 return -ENOMEM; 4064 4065 btrfs_get_inode_key(inode, &key); 4066 ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1); 4067 if (ret) { 4068 if (ret > 0) 4069 ret = -ENOENT; 4070 goto failed; 4071 } 4072 4073 leaf = path->nodes[0]; 4074 inode_item = btrfs_item_ptr(leaf, path->slots[0], 4075 struct btrfs_inode_item); 4076 4077 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 4078 btrfs_mark_buffer_dirty(trans, leaf); 4079 btrfs_set_inode_last_trans(trans, inode); 4080 ret = 0; 4081 failed: 4082 btrfs_free_path(path); 4083 return ret; 4084 } 4085 4086 /* 4087 * copy everything in the in-memory inode into the btree. 4088 */ 4089 int btrfs_update_inode(struct btrfs_trans_handle *trans, 4090 struct btrfs_inode *inode) 4091 { 4092 struct btrfs_root *root = inode->root; 4093 struct btrfs_fs_info *fs_info = root->fs_info; 4094 int ret; 4095 4096 /* 4097 * If the inode is a free space inode, we can deadlock during commit 4098 * if we put it into the delayed code. 4099 * 4100 * The data relocation inode should also be directly updated 4101 * without delay 4102 */ 4103 if (!btrfs_is_free_space_inode(inode) 4104 && !btrfs_is_data_reloc_root(root) 4105 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4106 btrfs_update_root_times(trans, root); 4107 4108 ret = btrfs_delayed_update_inode(trans, inode); 4109 if (!ret) 4110 btrfs_set_inode_last_trans(trans, inode); 4111 return ret; 4112 } 4113 4114 return btrfs_update_inode_item(trans, inode); 4115 } 4116 4117 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4118 struct btrfs_inode *inode) 4119 { 4120 int ret; 4121 4122 ret = btrfs_update_inode(trans, inode); 4123 if (ret == -ENOSPC) 4124 return btrfs_update_inode_item(trans, inode); 4125 return ret; 4126 } 4127 4128 /* 4129 * unlink helper that gets used here in inode.c and in the tree logging 4130 * recovery code. It remove a link in a directory with a given name, and 4131 * also drops the back refs in the inode to the directory 4132 */ 4133 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4134 struct btrfs_inode *dir, 4135 struct btrfs_inode *inode, 4136 const struct fscrypt_str *name, 4137 struct btrfs_rename_ctx *rename_ctx) 4138 { 4139 struct btrfs_root *root = dir->root; 4140 struct btrfs_fs_info *fs_info = root->fs_info; 4141 struct btrfs_path *path; 4142 int ret = 0; 4143 struct btrfs_dir_item *di; 4144 u64 index; 4145 u64 ino = btrfs_ino(inode); 4146 u64 dir_ino = btrfs_ino(dir); 4147 4148 path = btrfs_alloc_path(); 4149 if (!path) { 4150 ret = -ENOMEM; 4151 goto out; 4152 } 4153 4154 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4155 if (IS_ERR_OR_NULL(di)) { 4156 ret = di ? PTR_ERR(di) : -ENOENT; 4157 goto err; 4158 } 4159 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4160 if (ret) 4161 goto err; 4162 btrfs_release_path(path); 4163 4164 /* 4165 * If we don't have dir index, we have to get it by looking up 4166 * the inode ref, since we get the inode ref, remove it directly, 4167 * it is unnecessary to do delayed deletion. 4168 * 4169 * But if we have dir index, needn't search inode ref to get it. 4170 * Since the inode ref is close to the inode item, it is better 4171 * that we delay to delete it, and just do this deletion when 4172 * we update the inode item. 4173 */ 4174 if (inode->dir_index) { 4175 ret = btrfs_delayed_delete_inode_ref(inode); 4176 if (!ret) { 4177 index = inode->dir_index; 4178 goto skip_backref; 4179 } 4180 } 4181 4182 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4183 if (ret) { 4184 btrfs_info(fs_info, 4185 "failed to delete reference to %.*s, inode %llu parent %llu", 4186 name->len, name->name, ino, dir_ino); 4187 btrfs_abort_transaction(trans, ret); 4188 goto err; 4189 } 4190 skip_backref: 4191 if (rename_ctx) 4192 rename_ctx->index = index; 4193 4194 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4195 if (ret) { 4196 btrfs_abort_transaction(trans, ret); 4197 goto err; 4198 } 4199 4200 /* 4201 * If we are in a rename context, we don't need to update anything in the 4202 * log. That will be done later during the rename by btrfs_log_new_name(). 4203 * Besides that, doing it here would only cause extra unnecessary btree 4204 * operations on the log tree, increasing latency for applications. 4205 */ 4206 if (!rename_ctx) { 4207 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4208 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4209 } 4210 4211 /* 4212 * If we have a pending delayed iput we could end up with the final iput 4213 * being run in btrfs-cleaner context. If we have enough of these built 4214 * up we can end up burning a lot of time in btrfs-cleaner without any 4215 * way to throttle the unlinks. Since we're currently holding a ref on 4216 * the inode we can run the delayed iput here without any issues as the 4217 * final iput won't be done until after we drop the ref we're currently 4218 * holding. 4219 */ 4220 btrfs_run_delayed_iput(fs_info, inode); 4221 err: 4222 btrfs_free_path(path); 4223 if (ret) 4224 goto out; 4225 4226 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4227 inode_inc_iversion(&inode->vfs_inode); 4228 inode_set_ctime_current(&inode->vfs_inode); 4229 inode_inc_iversion(&dir->vfs_inode); 4230 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4231 ret = btrfs_update_inode(trans, dir); 4232 out: 4233 return ret; 4234 } 4235 4236 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4237 struct btrfs_inode *dir, struct btrfs_inode *inode, 4238 const struct fscrypt_str *name) 4239 { 4240 int ret; 4241 4242 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4243 if (!ret) { 4244 drop_nlink(&inode->vfs_inode); 4245 ret = btrfs_update_inode(trans, inode); 4246 } 4247 return ret; 4248 } 4249 4250 /* 4251 * helper to start transaction for unlink and rmdir. 4252 * 4253 * unlink and rmdir are special in btrfs, they do not always free space, so 4254 * if we cannot make our reservations the normal way try and see if there is 4255 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4256 * allow the unlink to occur. 4257 */ 4258 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4259 { 4260 struct btrfs_root *root = dir->root; 4261 4262 return btrfs_start_transaction_fallback_global_rsv(root, 4263 BTRFS_UNLINK_METADATA_UNITS); 4264 } 4265 4266 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4267 { 4268 struct btrfs_trans_handle *trans; 4269 struct inode *inode = d_inode(dentry); 4270 int ret; 4271 struct fscrypt_name fname; 4272 4273 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4274 if (ret) 4275 return ret; 4276 4277 /* This needs to handle no-key deletions later on */ 4278 4279 trans = __unlink_start_trans(BTRFS_I(dir)); 4280 if (IS_ERR(trans)) { 4281 ret = PTR_ERR(trans); 4282 goto fscrypt_free; 4283 } 4284 4285 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4286 false); 4287 4288 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4289 &fname.disk_name); 4290 if (ret) 4291 goto end_trans; 4292 4293 if (inode->i_nlink == 0) { 4294 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4295 if (ret) 4296 goto end_trans; 4297 } 4298 4299 end_trans: 4300 btrfs_end_transaction(trans); 4301 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4302 fscrypt_free: 4303 fscrypt_free_filename(&fname); 4304 return ret; 4305 } 4306 4307 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4308 struct btrfs_inode *dir, struct dentry *dentry) 4309 { 4310 struct btrfs_root *root = dir->root; 4311 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4312 struct btrfs_path *path; 4313 struct extent_buffer *leaf; 4314 struct btrfs_dir_item *di; 4315 struct btrfs_key key; 4316 u64 index; 4317 int ret; 4318 u64 objectid; 4319 u64 dir_ino = btrfs_ino(dir); 4320 struct fscrypt_name fname; 4321 4322 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4323 if (ret) 4324 return ret; 4325 4326 /* This needs to handle no-key deletions later on */ 4327 4328 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4329 objectid = btrfs_root_id(inode->root); 4330 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4331 objectid = inode->ref_root_id; 4332 } else { 4333 WARN_ON(1); 4334 fscrypt_free_filename(&fname); 4335 return -EINVAL; 4336 } 4337 4338 path = btrfs_alloc_path(); 4339 if (!path) { 4340 ret = -ENOMEM; 4341 goto out; 4342 } 4343 4344 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4345 &fname.disk_name, -1); 4346 if (IS_ERR_OR_NULL(di)) { 4347 ret = di ? PTR_ERR(di) : -ENOENT; 4348 goto out; 4349 } 4350 4351 leaf = path->nodes[0]; 4352 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4353 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4354 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4355 if (ret) { 4356 btrfs_abort_transaction(trans, ret); 4357 goto out; 4358 } 4359 btrfs_release_path(path); 4360 4361 /* 4362 * This is a placeholder inode for a subvolume we didn't have a 4363 * reference to at the time of the snapshot creation. In the meantime 4364 * we could have renamed the real subvol link into our snapshot, so 4365 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4366 * Instead simply lookup the dir_index_item for this entry so we can 4367 * remove it. Otherwise we know we have a ref to the root and we can 4368 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4369 */ 4370 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4371 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4372 if (IS_ERR(di)) { 4373 ret = PTR_ERR(di); 4374 btrfs_abort_transaction(trans, ret); 4375 goto out; 4376 } 4377 4378 leaf = path->nodes[0]; 4379 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4380 index = key.offset; 4381 btrfs_release_path(path); 4382 } else { 4383 ret = btrfs_del_root_ref(trans, objectid, 4384 btrfs_root_id(root), dir_ino, 4385 &index, &fname.disk_name); 4386 if (ret) { 4387 btrfs_abort_transaction(trans, ret); 4388 goto out; 4389 } 4390 } 4391 4392 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4393 if (ret) { 4394 btrfs_abort_transaction(trans, ret); 4395 goto out; 4396 } 4397 4398 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4399 inode_inc_iversion(&dir->vfs_inode); 4400 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode)); 4401 ret = btrfs_update_inode_fallback(trans, dir); 4402 if (ret) 4403 btrfs_abort_transaction(trans, ret); 4404 out: 4405 btrfs_free_path(path); 4406 fscrypt_free_filename(&fname); 4407 return ret; 4408 } 4409 4410 /* 4411 * Helper to check if the subvolume references other subvolumes or if it's 4412 * default. 4413 */ 4414 static noinline int may_destroy_subvol(struct btrfs_root *root) 4415 { 4416 struct btrfs_fs_info *fs_info = root->fs_info; 4417 struct btrfs_path *path; 4418 struct btrfs_dir_item *di; 4419 struct btrfs_key key; 4420 struct fscrypt_str name = FSTR_INIT("default", 7); 4421 u64 dir_id; 4422 int ret; 4423 4424 path = btrfs_alloc_path(); 4425 if (!path) 4426 return -ENOMEM; 4427 4428 /* Make sure this root isn't set as the default subvol */ 4429 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4430 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4431 dir_id, &name, 0); 4432 if (di && !IS_ERR(di)) { 4433 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4434 if (key.objectid == btrfs_root_id(root)) { 4435 ret = -EPERM; 4436 btrfs_err(fs_info, 4437 "deleting default subvolume %llu is not allowed", 4438 key.objectid); 4439 goto out; 4440 } 4441 btrfs_release_path(path); 4442 } 4443 4444 key.objectid = btrfs_root_id(root); 4445 key.type = BTRFS_ROOT_REF_KEY; 4446 key.offset = (u64)-1; 4447 4448 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4449 if (ret < 0) 4450 goto out; 4451 if (ret == 0) { 4452 /* 4453 * Key with offset -1 found, there would have to exist a root 4454 * with such id, but this is out of valid range. 4455 */ 4456 ret = -EUCLEAN; 4457 goto out; 4458 } 4459 4460 ret = 0; 4461 if (path->slots[0] > 0) { 4462 path->slots[0]--; 4463 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4464 if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY) 4465 ret = -ENOTEMPTY; 4466 } 4467 out: 4468 btrfs_free_path(path); 4469 return ret; 4470 } 4471 4472 /* Delete all dentries for inodes belonging to the root */ 4473 static void btrfs_prune_dentries(struct btrfs_root *root) 4474 { 4475 struct btrfs_fs_info *fs_info = root->fs_info; 4476 struct btrfs_inode *inode; 4477 u64 min_ino = 0; 4478 4479 if (!BTRFS_FS_ERROR(fs_info)) 4480 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4481 4482 inode = btrfs_find_first_inode(root, min_ino); 4483 while (inode) { 4484 if (atomic_read(&inode->vfs_inode.i_count) > 1) 4485 d_prune_aliases(&inode->vfs_inode); 4486 4487 min_ino = btrfs_ino(inode) + 1; 4488 /* 4489 * btrfs_drop_inode() will have it removed from the inode 4490 * cache when its usage count hits zero. 4491 */ 4492 iput(&inode->vfs_inode); 4493 cond_resched(); 4494 inode = btrfs_find_first_inode(root, min_ino); 4495 } 4496 } 4497 4498 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4499 { 4500 struct btrfs_root *root = dir->root; 4501 struct btrfs_fs_info *fs_info = root->fs_info; 4502 struct inode *inode = d_inode(dentry); 4503 struct btrfs_root *dest = BTRFS_I(inode)->root; 4504 struct btrfs_trans_handle *trans; 4505 struct btrfs_block_rsv block_rsv; 4506 u64 root_flags; 4507 u64 qgroup_reserved = 0; 4508 int ret; 4509 4510 down_write(&fs_info->subvol_sem); 4511 4512 /* 4513 * Don't allow to delete a subvolume with send in progress. This is 4514 * inside the inode lock so the error handling that has to drop the bit 4515 * again is not run concurrently. 4516 */ 4517 spin_lock(&dest->root_item_lock); 4518 if (dest->send_in_progress) { 4519 spin_unlock(&dest->root_item_lock); 4520 btrfs_warn(fs_info, 4521 "attempt to delete subvolume %llu during send", 4522 btrfs_root_id(dest)); 4523 ret = -EPERM; 4524 goto out_up_write; 4525 } 4526 if (atomic_read(&dest->nr_swapfiles)) { 4527 spin_unlock(&dest->root_item_lock); 4528 btrfs_warn(fs_info, 4529 "attempt to delete subvolume %llu with active swapfile", 4530 btrfs_root_id(root)); 4531 ret = -EPERM; 4532 goto out_up_write; 4533 } 4534 root_flags = btrfs_root_flags(&dest->root_item); 4535 btrfs_set_root_flags(&dest->root_item, 4536 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4537 spin_unlock(&dest->root_item_lock); 4538 4539 ret = may_destroy_subvol(dest); 4540 if (ret) 4541 goto out_undead; 4542 4543 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4544 /* 4545 * One for dir inode, 4546 * two for dir entries, 4547 * two for root ref/backref. 4548 */ 4549 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4550 if (ret) 4551 goto out_undead; 4552 qgroup_reserved = block_rsv.qgroup_rsv_reserved; 4553 4554 trans = btrfs_start_transaction(root, 0); 4555 if (IS_ERR(trans)) { 4556 ret = PTR_ERR(trans); 4557 goto out_release; 4558 } 4559 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); 4560 qgroup_reserved = 0; 4561 trans->block_rsv = &block_rsv; 4562 trans->bytes_reserved = block_rsv.size; 4563 4564 btrfs_record_snapshot_destroy(trans, dir); 4565 4566 ret = btrfs_unlink_subvol(trans, dir, dentry); 4567 if (ret) { 4568 btrfs_abort_transaction(trans, ret); 4569 goto out_end_trans; 4570 } 4571 4572 ret = btrfs_record_root_in_trans(trans, dest); 4573 if (ret) { 4574 btrfs_abort_transaction(trans, ret); 4575 goto out_end_trans; 4576 } 4577 4578 memset(&dest->root_item.drop_progress, 0, 4579 sizeof(dest->root_item.drop_progress)); 4580 btrfs_set_root_drop_level(&dest->root_item, 0); 4581 btrfs_set_root_refs(&dest->root_item, 0); 4582 4583 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4584 ret = btrfs_insert_orphan_item(trans, 4585 fs_info->tree_root, 4586 btrfs_root_id(dest)); 4587 if (ret) { 4588 btrfs_abort_transaction(trans, ret); 4589 goto out_end_trans; 4590 } 4591 } 4592 4593 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4594 BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest)); 4595 if (ret && ret != -ENOENT) { 4596 btrfs_abort_transaction(trans, ret); 4597 goto out_end_trans; 4598 } 4599 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4600 ret = btrfs_uuid_tree_remove(trans, 4601 dest->root_item.received_uuid, 4602 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4603 btrfs_root_id(dest)); 4604 if (ret && ret != -ENOENT) { 4605 btrfs_abort_transaction(trans, ret); 4606 goto out_end_trans; 4607 } 4608 } 4609 4610 free_anon_bdev(dest->anon_dev); 4611 dest->anon_dev = 0; 4612 out_end_trans: 4613 trans->block_rsv = NULL; 4614 trans->bytes_reserved = 0; 4615 ret = btrfs_end_transaction(trans); 4616 inode->i_flags |= S_DEAD; 4617 out_release: 4618 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); 4619 if (qgroup_reserved) 4620 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); 4621 out_undead: 4622 if (ret) { 4623 spin_lock(&dest->root_item_lock); 4624 root_flags = btrfs_root_flags(&dest->root_item); 4625 btrfs_set_root_flags(&dest->root_item, 4626 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4627 spin_unlock(&dest->root_item_lock); 4628 } 4629 out_up_write: 4630 up_write(&fs_info->subvol_sem); 4631 if (!ret) { 4632 d_invalidate(dentry); 4633 btrfs_prune_dentries(dest); 4634 ASSERT(dest->send_in_progress == 0); 4635 } 4636 4637 return ret; 4638 } 4639 4640 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4641 { 4642 struct inode *inode = d_inode(dentry); 4643 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4644 int ret = 0; 4645 struct btrfs_trans_handle *trans; 4646 u64 last_unlink_trans; 4647 struct fscrypt_name fname; 4648 4649 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4650 return -ENOTEMPTY; 4651 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4652 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4653 btrfs_err(fs_info, 4654 "extent tree v2 doesn't support snapshot deletion yet"); 4655 return -EOPNOTSUPP; 4656 } 4657 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4658 } 4659 4660 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4661 if (ret) 4662 return ret; 4663 4664 /* This needs to handle no-key deletions later on */ 4665 4666 trans = __unlink_start_trans(BTRFS_I(dir)); 4667 if (IS_ERR(trans)) { 4668 ret = PTR_ERR(trans); 4669 goto out_notrans; 4670 } 4671 4672 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4673 ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4674 goto out; 4675 } 4676 4677 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4678 if (ret) 4679 goto out; 4680 4681 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4682 4683 /* now the directory is empty */ 4684 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4685 &fname.disk_name); 4686 if (!ret) { 4687 btrfs_i_size_write(BTRFS_I(inode), 0); 4688 /* 4689 * Propagate the last_unlink_trans value of the deleted dir to 4690 * its parent directory. This is to prevent an unrecoverable 4691 * log tree in the case we do something like this: 4692 * 1) create dir foo 4693 * 2) create snapshot under dir foo 4694 * 3) delete the snapshot 4695 * 4) rmdir foo 4696 * 5) mkdir foo 4697 * 6) fsync foo or some file inside foo 4698 */ 4699 if (last_unlink_trans >= trans->transid) 4700 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4701 } 4702 out: 4703 btrfs_end_transaction(trans); 4704 out_notrans: 4705 btrfs_btree_balance_dirty(fs_info); 4706 fscrypt_free_filename(&fname); 4707 4708 return ret; 4709 } 4710 4711 /* 4712 * Read, zero a chunk and write a block. 4713 * 4714 * @inode - inode that we're zeroing 4715 * @from - the offset to start zeroing 4716 * @len - the length to zero, 0 to zero the entire range respective to the 4717 * offset 4718 * @front - zero up to the offset instead of from the offset on 4719 * 4720 * This will find the block for the "from" offset and cow the block and zero the 4721 * part we want to zero. This is used with truncate and hole punching. 4722 */ 4723 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4724 int front) 4725 { 4726 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4727 struct address_space *mapping = inode->vfs_inode.i_mapping; 4728 struct extent_io_tree *io_tree = &inode->io_tree; 4729 struct btrfs_ordered_extent *ordered; 4730 struct extent_state *cached_state = NULL; 4731 struct extent_changeset *data_reserved = NULL; 4732 bool only_release_metadata = false; 4733 u32 blocksize = fs_info->sectorsize; 4734 pgoff_t index = from >> PAGE_SHIFT; 4735 unsigned offset = from & (blocksize - 1); 4736 struct folio *folio; 4737 gfp_t mask = btrfs_alloc_write_mask(mapping); 4738 size_t write_bytes = blocksize; 4739 int ret = 0; 4740 u64 block_start; 4741 u64 block_end; 4742 4743 if (IS_ALIGNED(offset, blocksize) && 4744 (!len || IS_ALIGNED(len, blocksize))) 4745 goto out; 4746 4747 block_start = round_down(from, blocksize); 4748 block_end = block_start + blocksize - 1; 4749 4750 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4751 blocksize, false); 4752 if (ret < 0) { 4753 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4754 /* For nocow case, no need to reserve data space */ 4755 only_release_metadata = true; 4756 } else { 4757 goto out; 4758 } 4759 } 4760 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4761 if (ret < 0) { 4762 if (!only_release_metadata) 4763 btrfs_free_reserved_data_space(inode, data_reserved, 4764 block_start, blocksize); 4765 goto out; 4766 } 4767 again: 4768 folio = __filemap_get_folio(mapping, index, 4769 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); 4770 if (IS_ERR(folio)) { 4771 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4772 blocksize, true); 4773 btrfs_delalloc_release_extents(inode, blocksize); 4774 ret = -ENOMEM; 4775 goto out; 4776 } 4777 4778 if (!folio_test_uptodate(folio)) { 4779 ret = btrfs_read_folio(NULL, folio); 4780 folio_lock(folio); 4781 if (folio->mapping != mapping) { 4782 folio_unlock(folio); 4783 folio_put(folio); 4784 goto again; 4785 } 4786 if (!folio_test_uptodate(folio)) { 4787 ret = -EIO; 4788 goto out_unlock; 4789 } 4790 } 4791 4792 /* 4793 * We unlock the page after the io is completed and then re-lock it 4794 * above. release_folio() could have come in between that and cleared 4795 * folio private, but left the page in the mapping. Set the page mapped 4796 * here to make sure it's properly set for the subpage stuff. 4797 */ 4798 ret = set_folio_extent_mapped(folio); 4799 if (ret < 0) 4800 goto out_unlock; 4801 4802 folio_wait_writeback(folio); 4803 4804 lock_extent(io_tree, block_start, block_end, &cached_state); 4805 4806 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4807 if (ordered) { 4808 unlock_extent(io_tree, block_start, block_end, &cached_state); 4809 folio_unlock(folio); 4810 folio_put(folio); 4811 btrfs_start_ordered_extent(ordered); 4812 btrfs_put_ordered_extent(ordered); 4813 goto again; 4814 } 4815 4816 clear_extent_bit(&inode->io_tree, block_start, block_end, 4817 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4818 &cached_state); 4819 4820 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4821 &cached_state); 4822 if (ret) { 4823 unlock_extent(io_tree, block_start, block_end, &cached_state); 4824 goto out_unlock; 4825 } 4826 4827 if (offset != blocksize) { 4828 if (!len) 4829 len = blocksize - offset; 4830 if (front) 4831 folio_zero_range(folio, block_start - folio_pos(folio), 4832 offset); 4833 else 4834 folio_zero_range(folio, 4835 (block_start - folio_pos(folio)) + offset, 4836 len); 4837 } 4838 btrfs_folio_clear_checked(fs_info, folio, block_start, 4839 block_end + 1 - block_start); 4840 btrfs_folio_set_dirty(fs_info, folio, block_start, 4841 block_end + 1 - block_start); 4842 unlock_extent(io_tree, block_start, block_end, &cached_state); 4843 4844 if (only_release_metadata) 4845 set_extent_bit(&inode->io_tree, block_start, block_end, 4846 EXTENT_NORESERVE, NULL); 4847 4848 out_unlock: 4849 if (ret) { 4850 if (only_release_metadata) 4851 btrfs_delalloc_release_metadata(inode, blocksize, true); 4852 else 4853 btrfs_delalloc_release_space(inode, data_reserved, 4854 block_start, blocksize, true); 4855 } 4856 btrfs_delalloc_release_extents(inode, blocksize); 4857 folio_unlock(folio); 4858 folio_put(folio); 4859 out: 4860 if (only_release_metadata) 4861 btrfs_check_nocow_unlock(inode); 4862 extent_changeset_free(data_reserved); 4863 return ret; 4864 } 4865 4866 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len) 4867 { 4868 struct btrfs_root *root = inode->root; 4869 struct btrfs_fs_info *fs_info = root->fs_info; 4870 struct btrfs_trans_handle *trans; 4871 struct btrfs_drop_extents_args drop_args = { 0 }; 4872 int ret; 4873 4874 /* 4875 * If NO_HOLES is enabled, we don't need to do anything. 4876 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4877 * or btrfs_update_inode() will be called, which guarantee that the next 4878 * fsync will know this inode was changed and needs to be logged. 4879 */ 4880 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4881 return 0; 4882 4883 /* 4884 * 1 - for the one we're dropping 4885 * 1 - for the one we're adding 4886 * 1 - for updating the inode. 4887 */ 4888 trans = btrfs_start_transaction(root, 3); 4889 if (IS_ERR(trans)) 4890 return PTR_ERR(trans); 4891 4892 drop_args.start = offset; 4893 drop_args.end = offset + len; 4894 drop_args.drop_cache = true; 4895 4896 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4897 if (ret) { 4898 btrfs_abort_transaction(trans, ret); 4899 btrfs_end_transaction(trans); 4900 return ret; 4901 } 4902 4903 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4904 if (ret) { 4905 btrfs_abort_transaction(trans, ret); 4906 } else { 4907 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4908 btrfs_update_inode(trans, inode); 4909 } 4910 btrfs_end_transaction(trans); 4911 return ret; 4912 } 4913 4914 /* 4915 * This function puts in dummy file extents for the area we're creating a hole 4916 * for. So if we are truncating this file to a larger size we need to insert 4917 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4918 * the range between oldsize and size 4919 */ 4920 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4921 { 4922 struct btrfs_root *root = inode->root; 4923 struct btrfs_fs_info *fs_info = root->fs_info; 4924 struct extent_io_tree *io_tree = &inode->io_tree; 4925 struct extent_map *em = NULL; 4926 struct extent_state *cached_state = NULL; 4927 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4928 u64 block_end = ALIGN(size, fs_info->sectorsize); 4929 u64 last_byte; 4930 u64 cur_offset; 4931 u64 hole_size; 4932 int ret = 0; 4933 4934 /* 4935 * If our size started in the middle of a block we need to zero out the 4936 * rest of the block before we expand the i_size, otherwise we could 4937 * expose stale data. 4938 */ 4939 ret = btrfs_truncate_block(inode, oldsize, 0, 0); 4940 if (ret) 4941 return ret; 4942 4943 if (size <= hole_start) 4944 return 0; 4945 4946 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4947 &cached_state); 4948 cur_offset = hole_start; 4949 while (1) { 4950 em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset); 4951 if (IS_ERR(em)) { 4952 ret = PTR_ERR(em); 4953 em = NULL; 4954 break; 4955 } 4956 last_byte = min(extent_map_end(em), block_end); 4957 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4958 hole_size = last_byte - cur_offset; 4959 4960 if (!(em->flags & EXTENT_FLAG_PREALLOC)) { 4961 struct extent_map *hole_em; 4962 4963 ret = maybe_insert_hole(inode, cur_offset, hole_size); 4964 if (ret) 4965 break; 4966 4967 ret = btrfs_inode_set_file_extent_range(inode, 4968 cur_offset, hole_size); 4969 if (ret) 4970 break; 4971 4972 hole_em = alloc_extent_map(); 4973 if (!hole_em) { 4974 btrfs_drop_extent_map_range(inode, cur_offset, 4975 cur_offset + hole_size - 1, 4976 false); 4977 btrfs_set_inode_full_sync(inode); 4978 goto next; 4979 } 4980 hole_em->start = cur_offset; 4981 hole_em->len = hole_size; 4982 4983 hole_em->disk_bytenr = EXTENT_MAP_HOLE; 4984 hole_em->disk_num_bytes = 0; 4985 hole_em->ram_bytes = hole_size; 4986 hole_em->generation = btrfs_get_fs_generation(fs_info); 4987 4988 ret = btrfs_replace_extent_map_range(inode, hole_em, true); 4989 free_extent_map(hole_em); 4990 } else { 4991 ret = btrfs_inode_set_file_extent_range(inode, 4992 cur_offset, hole_size); 4993 if (ret) 4994 break; 4995 } 4996 next: 4997 free_extent_map(em); 4998 em = NULL; 4999 cur_offset = last_byte; 5000 if (cur_offset >= block_end) 5001 break; 5002 } 5003 free_extent_map(em); 5004 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 5005 return ret; 5006 } 5007 5008 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5009 { 5010 struct btrfs_root *root = BTRFS_I(inode)->root; 5011 struct btrfs_trans_handle *trans; 5012 loff_t oldsize = i_size_read(inode); 5013 loff_t newsize = attr->ia_size; 5014 int mask = attr->ia_valid; 5015 int ret; 5016 5017 /* 5018 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5019 * special case where we need to update the times despite not having 5020 * these flags set. For all other operations the VFS set these flags 5021 * explicitly if it wants a timestamp update. 5022 */ 5023 if (newsize != oldsize) { 5024 inode_inc_iversion(inode); 5025 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 5026 inode_set_mtime_to_ts(inode, 5027 inode_set_ctime_current(inode)); 5028 } 5029 } 5030 5031 if (newsize > oldsize) { 5032 /* 5033 * Don't do an expanding truncate while snapshotting is ongoing. 5034 * This is to ensure the snapshot captures a fully consistent 5035 * state of this file - if the snapshot captures this expanding 5036 * truncation, it must capture all writes that happened before 5037 * this truncation. 5038 */ 5039 btrfs_drew_write_lock(&root->snapshot_lock); 5040 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 5041 if (ret) { 5042 btrfs_drew_write_unlock(&root->snapshot_lock); 5043 return ret; 5044 } 5045 5046 trans = btrfs_start_transaction(root, 1); 5047 if (IS_ERR(trans)) { 5048 btrfs_drew_write_unlock(&root->snapshot_lock); 5049 return PTR_ERR(trans); 5050 } 5051 5052 i_size_write(inode, newsize); 5053 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 5054 pagecache_isize_extended(inode, oldsize, newsize); 5055 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 5056 btrfs_drew_write_unlock(&root->snapshot_lock); 5057 btrfs_end_transaction(trans); 5058 } else { 5059 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 5060 5061 if (btrfs_is_zoned(fs_info)) { 5062 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 5063 ALIGN(newsize, fs_info->sectorsize), 5064 (u64)-1); 5065 if (ret) 5066 return ret; 5067 } 5068 5069 /* 5070 * We're truncating a file that used to have good data down to 5071 * zero. Make sure any new writes to the file get on disk 5072 * on close. 5073 */ 5074 if (newsize == 0) 5075 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5076 &BTRFS_I(inode)->runtime_flags); 5077 5078 truncate_setsize(inode, newsize); 5079 5080 inode_dio_wait(inode); 5081 5082 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5083 if (ret && inode->i_nlink) { 5084 int err; 5085 5086 /* 5087 * Truncate failed, so fix up the in-memory size. We 5088 * adjusted disk_i_size down as we removed extents, so 5089 * wait for disk_i_size to be stable and then update the 5090 * in-memory size to match. 5091 */ 5092 err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 5093 if (err) 5094 return err; 5095 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5096 } 5097 } 5098 5099 return ret; 5100 } 5101 5102 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5103 struct iattr *attr) 5104 { 5105 struct inode *inode = d_inode(dentry); 5106 struct btrfs_root *root = BTRFS_I(inode)->root; 5107 int err; 5108 5109 if (btrfs_root_readonly(root)) 5110 return -EROFS; 5111 5112 err = setattr_prepare(idmap, dentry, attr); 5113 if (err) 5114 return err; 5115 5116 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5117 err = btrfs_setsize(inode, attr); 5118 if (err) 5119 return err; 5120 } 5121 5122 if (attr->ia_valid) { 5123 setattr_copy(idmap, inode, attr); 5124 inode_inc_iversion(inode); 5125 err = btrfs_dirty_inode(BTRFS_I(inode)); 5126 5127 if (!err && attr->ia_valid & ATTR_MODE) 5128 err = posix_acl_chmod(idmap, dentry, inode->i_mode); 5129 } 5130 5131 return err; 5132 } 5133 5134 /* 5135 * While truncating the inode pages during eviction, we get the VFS 5136 * calling btrfs_invalidate_folio() against each folio of the inode. This 5137 * is slow because the calls to btrfs_invalidate_folio() result in a 5138 * huge amount of calls to lock_extent() and clear_extent_bit(), 5139 * which keep merging and splitting extent_state structures over and over, 5140 * wasting lots of time. 5141 * 5142 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5143 * skip all those expensive operations on a per folio basis and do only 5144 * the ordered io finishing, while we release here the extent_map and 5145 * extent_state structures, without the excessive merging and splitting. 5146 */ 5147 static void evict_inode_truncate_pages(struct inode *inode) 5148 { 5149 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5150 struct rb_node *node; 5151 5152 ASSERT(inode->i_state & I_FREEING); 5153 truncate_inode_pages_final(&inode->i_data); 5154 5155 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5156 5157 /* 5158 * Keep looping until we have no more ranges in the io tree. 5159 * We can have ongoing bios started by readahead that have 5160 * their endio callback (extent_io.c:end_bio_extent_readpage) 5161 * still in progress (unlocked the pages in the bio but did not yet 5162 * unlocked the ranges in the io tree). Therefore this means some 5163 * ranges can still be locked and eviction started because before 5164 * submitting those bios, which are executed by a separate task (work 5165 * queue kthread), inode references (inode->i_count) were not taken 5166 * (which would be dropped in the end io callback of each bio). 5167 * Therefore here we effectively end up waiting for those bios and 5168 * anyone else holding locked ranges without having bumped the inode's 5169 * reference count - if we don't do it, when they access the inode's 5170 * io_tree to unlock a range it may be too late, leading to an 5171 * use-after-free issue. 5172 */ 5173 spin_lock(&io_tree->lock); 5174 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5175 struct extent_state *state; 5176 struct extent_state *cached_state = NULL; 5177 u64 start; 5178 u64 end; 5179 unsigned state_flags; 5180 5181 node = rb_first(&io_tree->state); 5182 state = rb_entry(node, struct extent_state, rb_node); 5183 start = state->start; 5184 end = state->end; 5185 state_flags = state->state; 5186 spin_unlock(&io_tree->lock); 5187 5188 lock_extent(io_tree, start, end, &cached_state); 5189 5190 /* 5191 * If still has DELALLOC flag, the extent didn't reach disk, 5192 * and its reserved space won't be freed by delayed_ref. 5193 * So we need to free its reserved space here. 5194 * (Refer to comment in btrfs_invalidate_folio, case 2) 5195 * 5196 * Note, end is the bytenr of last byte, so we need + 1 here. 5197 */ 5198 if (state_flags & EXTENT_DELALLOC) 5199 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5200 end - start + 1, NULL); 5201 5202 clear_extent_bit(io_tree, start, end, 5203 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5204 &cached_state); 5205 5206 cond_resched(); 5207 spin_lock(&io_tree->lock); 5208 } 5209 spin_unlock(&io_tree->lock); 5210 } 5211 5212 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5213 struct btrfs_block_rsv *rsv) 5214 { 5215 struct btrfs_fs_info *fs_info = root->fs_info; 5216 struct btrfs_trans_handle *trans; 5217 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5218 int ret; 5219 5220 /* 5221 * Eviction should be taking place at some place safe because of our 5222 * delayed iputs. However the normal flushing code will run delayed 5223 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5224 * 5225 * We reserve the delayed_refs_extra here again because we can't use 5226 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5227 * above. We reserve our extra bit here because we generate a ton of 5228 * delayed refs activity by truncating. 5229 * 5230 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5231 * if we fail to make this reservation we can re-try without the 5232 * delayed_refs_extra so we can make some forward progress. 5233 */ 5234 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5235 BTRFS_RESERVE_FLUSH_EVICT); 5236 if (ret) { 5237 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5238 BTRFS_RESERVE_FLUSH_EVICT); 5239 if (ret) { 5240 btrfs_warn(fs_info, 5241 "could not allocate space for delete; will truncate on mount"); 5242 return ERR_PTR(-ENOSPC); 5243 } 5244 delayed_refs_extra = 0; 5245 } 5246 5247 trans = btrfs_join_transaction(root); 5248 if (IS_ERR(trans)) 5249 return trans; 5250 5251 if (delayed_refs_extra) { 5252 trans->block_rsv = &fs_info->trans_block_rsv; 5253 trans->bytes_reserved = delayed_refs_extra; 5254 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5255 delayed_refs_extra, true); 5256 } 5257 return trans; 5258 } 5259 5260 void btrfs_evict_inode(struct inode *inode) 5261 { 5262 struct btrfs_fs_info *fs_info; 5263 struct btrfs_trans_handle *trans; 5264 struct btrfs_root *root = BTRFS_I(inode)->root; 5265 struct btrfs_block_rsv *rsv = NULL; 5266 int ret; 5267 5268 trace_btrfs_inode_evict(inode); 5269 5270 if (!root) { 5271 fsverity_cleanup_inode(inode); 5272 clear_inode(inode); 5273 return; 5274 } 5275 5276 fs_info = inode_to_fs_info(inode); 5277 evict_inode_truncate_pages(inode); 5278 5279 if (inode->i_nlink && 5280 ((btrfs_root_refs(&root->root_item) != 0 && 5281 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) || 5282 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5283 goto out; 5284 5285 if (is_bad_inode(inode)) 5286 goto out; 5287 5288 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5289 goto out; 5290 5291 if (inode->i_nlink > 0) { 5292 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5293 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID); 5294 goto out; 5295 } 5296 5297 /* 5298 * This makes sure the inode item in tree is uptodate and the space for 5299 * the inode update is released. 5300 */ 5301 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5302 if (ret) 5303 goto out; 5304 5305 /* 5306 * This drops any pending insert or delete operations we have for this 5307 * inode. We could have a delayed dir index deletion queued up, but 5308 * we're removing the inode completely so that'll be taken care of in 5309 * the truncate. 5310 */ 5311 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5312 5313 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5314 if (!rsv) 5315 goto out; 5316 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5317 rsv->failfast = true; 5318 5319 btrfs_i_size_write(BTRFS_I(inode), 0); 5320 5321 while (1) { 5322 struct btrfs_truncate_control control = { 5323 .inode = BTRFS_I(inode), 5324 .ino = btrfs_ino(BTRFS_I(inode)), 5325 .new_size = 0, 5326 .min_type = 0, 5327 }; 5328 5329 trans = evict_refill_and_join(root, rsv); 5330 if (IS_ERR(trans)) 5331 goto out; 5332 5333 trans->block_rsv = rsv; 5334 5335 ret = btrfs_truncate_inode_items(trans, root, &control); 5336 trans->block_rsv = &fs_info->trans_block_rsv; 5337 btrfs_end_transaction(trans); 5338 /* 5339 * We have not added new delayed items for our inode after we 5340 * have flushed its delayed items, so no need to throttle on 5341 * delayed items. However we have modified extent buffers. 5342 */ 5343 btrfs_btree_balance_dirty_nodelay(fs_info); 5344 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5345 goto out; 5346 else if (!ret) 5347 break; 5348 } 5349 5350 /* 5351 * Errors here aren't a big deal, it just means we leave orphan items in 5352 * the tree. They will be cleaned up on the next mount. If the inode 5353 * number gets reused, cleanup deletes the orphan item without doing 5354 * anything, and unlink reuses the existing orphan item. 5355 * 5356 * If it turns out that we are dropping too many of these, we might want 5357 * to add a mechanism for retrying these after a commit. 5358 */ 5359 trans = evict_refill_and_join(root, rsv); 5360 if (!IS_ERR(trans)) { 5361 trans->block_rsv = rsv; 5362 btrfs_orphan_del(trans, BTRFS_I(inode)); 5363 trans->block_rsv = &fs_info->trans_block_rsv; 5364 btrfs_end_transaction(trans); 5365 } 5366 5367 out: 5368 btrfs_free_block_rsv(fs_info, rsv); 5369 /* 5370 * If we didn't successfully delete, the orphan item will still be in 5371 * the tree and we'll retry on the next mount. Again, we might also want 5372 * to retry these periodically in the future. 5373 */ 5374 btrfs_remove_delayed_node(BTRFS_I(inode)); 5375 fsverity_cleanup_inode(inode); 5376 clear_inode(inode); 5377 } 5378 5379 /* 5380 * Return the key found in the dir entry in the location pointer, fill @type 5381 * with BTRFS_FT_*, and return 0. 5382 * 5383 * If no dir entries were found, returns -ENOENT. 5384 * If found a corrupted location in dir entry, returns -EUCLEAN. 5385 */ 5386 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5387 struct btrfs_key *location, u8 *type) 5388 { 5389 struct btrfs_dir_item *di; 5390 struct btrfs_path *path; 5391 struct btrfs_root *root = dir->root; 5392 int ret = 0; 5393 struct fscrypt_name fname; 5394 5395 path = btrfs_alloc_path(); 5396 if (!path) 5397 return -ENOMEM; 5398 5399 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5400 if (ret < 0) 5401 goto out; 5402 /* 5403 * fscrypt_setup_filename() should never return a positive value, but 5404 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5405 */ 5406 ASSERT(ret == 0); 5407 5408 /* This needs to handle no-key deletions later on */ 5409 5410 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5411 &fname.disk_name, 0); 5412 if (IS_ERR_OR_NULL(di)) { 5413 ret = di ? PTR_ERR(di) : -ENOENT; 5414 goto out; 5415 } 5416 5417 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5418 if (location->type != BTRFS_INODE_ITEM_KEY && 5419 location->type != BTRFS_ROOT_ITEM_KEY) { 5420 ret = -EUCLEAN; 5421 btrfs_warn(root->fs_info, 5422 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5423 __func__, fname.disk_name.name, btrfs_ino(dir), 5424 location->objectid, location->type, location->offset); 5425 } 5426 if (!ret) 5427 *type = btrfs_dir_ftype(path->nodes[0], di); 5428 out: 5429 fscrypt_free_filename(&fname); 5430 btrfs_free_path(path); 5431 return ret; 5432 } 5433 5434 /* 5435 * when we hit a tree root in a directory, the btrfs part of the inode 5436 * needs to be changed to reflect the root directory of the tree root. This 5437 * is kind of like crossing a mount point. 5438 */ 5439 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5440 struct btrfs_inode *dir, 5441 struct dentry *dentry, 5442 struct btrfs_key *location, 5443 struct btrfs_root **sub_root) 5444 { 5445 struct btrfs_path *path; 5446 struct btrfs_root *new_root; 5447 struct btrfs_root_ref *ref; 5448 struct extent_buffer *leaf; 5449 struct btrfs_key key; 5450 int ret; 5451 int err = 0; 5452 struct fscrypt_name fname; 5453 5454 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5455 if (ret) 5456 return ret; 5457 5458 path = btrfs_alloc_path(); 5459 if (!path) { 5460 err = -ENOMEM; 5461 goto out; 5462 } 5463 5464 err = -ENOENT; 5465 key.objectid = btrfs_root_id(dir->root); 5466 key.type = BTRFS_ROOT_REF_KEY; 5467 key.offset = location->objectid; 5468 5469 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5470 if (ret) { 5471 if (ret < 0) 5472 err = ret; 5473 goto out; 5474 } 5475 5476 leaf = path->nodes[0]; 5477 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5478 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5479 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5480 goto out; 5481 5482 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5483 (unsigned long)(ref + 1), fname.disk_name.len); 5484 if (ret) 5485 goto out; 5486 5487 btrfs_release_path(path); 5488 5489 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5490 if (IS_ERR(new_root)) { 5491 err = PTR_ERR(new_root); 5492 goto out; 5493 } 5494 5495 *sub_root = new_root; 5496 location->objectid = btrfs_root_dirid(&new_root->root_item); 5497 location->type = BTRFS_INODE_ITEM_KEY; 5498 location->offset = 0; 5499 err = 0; 5500 out: 5501 btrfs_free_path(path); 5502 fscrypt_free_filename(&fname); 5503 return err; 5504 } 5505 5506 5507 5508 static void btrfs_del_inode_from_root(struct btrfs_inode *inode) 5509 { 5510 struct btrfs_root *root = inode->root; 5511 struct btrfs_inode *entry; 5512 bool empty = false; 5513 5514 xa_lock(&root->inodes); 5515 entry = __xa_erase(&root->inodes, btrfs_ino(inode)); 5516 if (entry == inode) 5517 empty = xa_empty(&root->inodes); 5518 xa_unlock(&root->inodes); 5519 5520 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5521 xa_lock(&root->inodes); 5522 empty = xa_empty(&root->inodes); 5523 xa_unlock(&root->inodes); 5524 if (empty) 5525 btrfs_add_dead_root(root); 5526 } 5527 } 5528 5529 5530 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5531 { 5532 struct btrfs_iget_args *args = p; 5533 5534 btrfs_set_inode_number(BTRFS_I(inode), args->ino); 5535 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5536 5537 if (args->root && args->root == args->root->fs_info->tree_root && 5538 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5539 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5540 &BTRFS_I(inode)->runtime_flags); 5541 return 0; 5542 } 5543 5544 static int btrfs_find_actor(struct inode *inode, void *opaque) 5545 { 5546 struct btrfs_iget_args *args = opaque; 5547 5548 return args->ino == btrfs_ino(BTRFS_I(inode)) && 5549 args->root == BTRFS_I(inode)->root; 5550 } 5551 5552 static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root) 5553 { 5554 struct inode *inode; 5555 struct btrfs_iget_args args; 5556 unsigned long hashval = btrfs_inode_hash(ino, root); 5557 5558 args.ino = ino; 5559 args.root = root; 5560 5561 inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor, 5562 btrfs_init_locked_inode, 5563 (void *)&args); 5564 return inode; 5565 } 5566 5567 /* 5568 * Get an inode object given its inode number and corresponding root. Path is 5569 * preallocated to prevent recursing back to iget through allocator. 5570 */ 5571 struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root, 5572 struct btrfs_path *path) 5573 { 5574 struct inode *inode; 5575 int ret; 5576 5577 inode = btrfs_iget_locked(ino, root); 5578 if (!inode) 5579 return ERR_PTR(-ENOMEM); 5580 5581 if (!(inode->i_state & I_NEW)) 5582 return inode; 5583 5584 ret = btrfs_read_locked_inode(inode, path); 5585 if (ret) 5586 return ERR_PTR(ret); 5587 5588 unlock_new_inode(inode); 5589 return inode; 5590 } 5591 5592 /* 5593 * Get an inode object given its inode number and corresponding root. 5594 */ 5595 struct inode *btrfs_iget(u64 ino, struct btrfs_root *root) 5596 { 5597 struct inode *inode; 5598 struct btrfs_path *path; 5599 int ret; 5600 5601 inode = btrfs_iget_locked(ino, root); 5602 if (!inode) 5603 return ERR_PTR(-ENOMEM); 5604 5605 if (!(inode->i_state & I_NEW)) 5606 return inode; 5607 5608 path = btrfs_alloc_path(); 5609 if (!path) 5610 return ERR_PTR(-ENOMEM); 5611 5612 ret = btrfs_read_locked_inode(inode, path); 5613 btrfs_free_path(path); 5614 if (ret) 5615 return ERR_PTR(ret); 5616 5617 unlock_new_inode(inode); 5618 return inode; 5619 } 5620 5621 static struct inode *new_simple_dir(struct inode *dir, 5622 struct btrfs_key *key, 5623 struct btrfs_root *root) 5624 { 5625 struct timespec64 ts; 5626 struct inode *inode = new_inode(dir->i_sb); 5627 5628 if (!inode) 5629 return ERR_PTR(-ENOMEM); 5630 5631 BTRFS_I(inode)->root = btrfs_grab_root(root); 5632 BTRFS_I(inode)->ref_root_id = key->objectid; 5633 set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags); 5634 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5635 5636 btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID); 5637 /* 5638 * We only need lookup, the rest is read-only and there's no inode 5639 * associated with the dentry 5640 */ 5641 inode->i_op = &simple_dir_inode_operations; 5642 inode->i_opflags &= ~IOP_XATTR; 5643 inode->i_fop = &simple_dir_operations; 5644 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5645 5646 ts = inode_set_ctime_current(inode); 5647 inode_set_mtime_to_ts(inode, ts); 5648 inode_set_atime_to_ts(inode, inode_get_atime(dir)); 5649 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 5650 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 5651 5652 inode->i_uid = dir->i_uid; 5653 inode->i_gid = dir->i_gid; 5654 5655 return inode; 5656 } 5657 5658 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5659 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5660 static_assert(BTRFS_FT_DIR == FT_DIR); 5661 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5662 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5663 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5664 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5665 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5666 5667 static inline u8 btrfs_inode_type(struct inode *inode) 5668 { 5669 return fs_umode_to_ftype(inode->i_mode); 5670 } 5671 5672 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5673 { 5674 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 5675 struct inode *inode; 5676 struct btrfs_root *root = BTRFS_I(dir)->root; 5677 struct btrfs_root *sub_root = root; 5678 struct btrfs_key location = { 0 }; 5679 u8 di_type = 0; 5680 int ret = 0; 5681 5682 if (dentry->d_name.len > BTRFS_NAME_LEN) 5683 return ERR_PTR(-ENAMETOOLONG); 5684 5685 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5686 if (ret < 0) 5687 return ERR_PTR(ret); 5688 5689 if (location.type == BTRFS_INODE_ITEM_KEY) { 5690 inode = btrfs_iget(location.objectid, root); 5691 if (IS_ERR(inode)) 5692 return inode; 5693 5694 /* Do extra check against inode mode with di_type */ 5695 if (btrfs_inode_type(inode) != di_type) { 5696 btrfs_crit(fs_info, 5697 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5698 inode->i_mode, btrfs_inode_type(inode), 5699 di_type); 5700 iput(inode); 5701 return ERR_PTR(-EUCLEAN); 5702 } 5703 return inode; 5704 } 5705 5706 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5707 &location, &sub_root); 5708 if (ret < 0) { 5709 if (ret != -ENOENT) 5710 inode = ERR_PTR(ret); 5711 else 5712 inode = new_simple_dir(dir, &location, root); 5713 } else { 5714 inode = btrfs_iget(location.objectid, sub_root); 5715 btrfs_put_root(sub_root); 5716 5717 if (IS_ERR(inode)) 5718 return inode; 5719 5720 down_read(&fs_info->cleanup_work_sem); 5721 if (!sb_rdonly(inode->i_sb)) 5722 ret = btrfs_orphan_cleanup(sub_root); 5723 up_read(&fs_info->cleanup_work_sem); 5724 if (ret) { 5725 iput(inode); 5726 inode = ERR_PTR(ret); 5727 } 5728 } 5729 5730 return inode; 5731 } 5732 5733 static int btrfs_dentry_delete(const struct dentry *dentry) 5734 { 5735 struct btrfs_root *root; 5736 struct inode *inode = d_inode(dentry); 5737 5738 if (!inode && !IS_ROOT(dentry)) 5739 inode = d_inode(dentry->d_parent); 5740 5741 if (inode) { 5742 root = BTRFS_I(inode)->root; 5743 if (btrfs_root_refs(&root->root_item) == 0) 5744 return 1; 5745 5746 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5747 return 1; 5748 } 5749 return 0; 5750 } 5751 5752 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5753 unsigned int flags) 5754 { 5755 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5756 5757 if (inode == ERR_PTR(-ENOENT)) 5758 inode = NULL; 5759 return d_splice_alias(inode, dentry); 5760 } 5761 5762 /* 5763 * Find the highest existing sequence number in a directory and then set the 5764 * in-memory index_cnt variable to the first free sequence number. 5765 */ 5766 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5767 { 5768 struct btrfs_root *root = inode->root; 5769 struct btrfs_key key, found_key; 5770 struct btrfs_path *path; 5771 struct extent_buffer *leaf; 5772 int ret; 5773 5774 key.objectid = btrfs_ino(inode); 5775 key.type = BTRFS_DIR_INDEX_KEY; 5776 key.offset = (u64)-1; 5777 5778 path = btrfs_alloc_path(); 5779 if (!path) 5780 return -ENOMEM; 5781 5782 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5783 if (ret < 0) 5784 goto out; 5785 /* FIXME: we should be able to handle this */ 5786 if (ret == 0) 5787 goto out; 5788 ret = 0; 5789 5790 if (path->slots[0] == 0) { 5791 inode->index_cnt = BTRFS_DIR_START_INDEX; 5792 goto out; 5793 } 5794 5795 path->slots[0]--; 5796 5797 leaf = path->nodes[0]; 5798 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5799 5800 if (found_key.objectid != btrfs_ino(inode) || 5801 found_key.type != BTRFS_DIR_INDEX_KEY) { 5802 inode->index_cnt = BTRFS_DIR_START_INDEX; 5803 goto out; 5804 } 5805 5806 inode->index_cnt = found_key.offset + 1; 5807 out: 5808 btrfs_free_path(path); 5809 return ret; 5810 } 5811 5812 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5813 { 5814 int ret = 0; 5815 5816 btrfs_inode_lock(dir, 0); 5817 if (dir->index_cnt == (u64)-1) { 5818 ret = btrfs_inode_delayed_dir_index_count(dir); 5819 if (ret) { 5820 ret = btrfs_set_inode_index_count(dir); 5821 if (ret) 5822 goto out; 5823 } 5824 } 5825 5826 /* index_cnt is the index number of next new entry, so decrement it. */ 5827 *index = dir->index_cnt - 1; 5828 out: 5829 btrfs_inode_unlock(dir, 0); 5830 5831 return ret; 5832 } 5833 5834 /* 5835 * All this infrastructure exists because dir_emit can fault, and we are holding 5836 * the tree lock when doing readdir. For now just allocate a buffer and copy 5837 * our information into that, and then dir_emit from the buffer. This is 5838 * similar to what NFS does, only we don't keep the buffer around in pagecache 5839 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5840 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5841 * tree lock. 5842 */ 5843 static int btrfs_opendir(struct inode *inode, struct file *file) 5844 { 5845 struct btrfs_file_private *private; 5846 u64 last_index; 5847 int ret; 5848 5849 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5850 if (ret) 5851 return ret; 5852 5853 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5854 if (!private) 5855 return -ENOMEM; 5856 private->last_index = last_index; 5857 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5858 if (!private->filldir_buf) { 5859 kfree(private); 5860 return -ENOMEM; 5861 } 5862 file->private_data = private; 5863 return 0; 5864 } 5865 5866 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence) 5867 { 5868 struct btrfs_file_private *private = file->private_data; 5869 int ret; 5870 5871 ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)), 5872 &private->last_index); 5873 if (ret) 5874 return ret; 5875 5876 return generic_file_llseek(file, offset, whence); 5877 } 5878 5879 struct dir_entry { 5880 u64 ino; 5881 u64 offset; 5882 unsigned type; 5883 int name_len; 5884 }; 5885 5886 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5887 { 5888 while (entries--) { 5889 struct dir_entry *entry = addr; 5890 char *name = (char *)(entry + 1); 5891 5892 ctx->pos = get_unaligned(&entry->offset); 5893 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5894 get_unaligned(&entry->ino), 5895 get_unaligned(&entry->type))) 5896 return 1; 5897 addr += sizeof(struct dir_entry) + 5898 get_unaligned(&entry->name_len); 5899 ctx->pos++; 5900 } 5901 return 0; 5902 } 5903 5904 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5905 { 5906 struct inode *inode = file_inode(file); 5907 struct btrfs_root *root = BTRFS_I(inode)->root; 5908 struct btrfs_file_private *private = file->private_data; 5909 struct btrfs_dir_item *di; 5910 struct btrfs_key key; 5911 struct btrfs_key found_key; 5912 struct btrfs_path *path; 5913 void *addr; 5914 LIST_HEAD(ins_list); 5915 LIST_HEAD(del_list); 5916 int ret; 5917 char *name_ptr; 5918 int name_len; 5919 int entries = 0; 5920 int total_len = 0; 5921 bool put = false; 5922 struct btrfs_key location; 5923 5924 if (!dir_emit_dots(file, ctx)) 5925 return 0; 5926 5927 path = btrfs_alloc_path(); 5928 if (!path) 5929 return -ENOMEM; 5930 5931 addr = private->filldir_buf; 5932 path->reada = READA_FORWARD; 5933 5934 put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index, 5935 &ins_list, &del_list); 5936 5937 again: 5938 key.type = BTRFS_DIR_INDEX_KEY; 5939 key.offset = ctx->pos; 5940 key.objectid = btrfs_ino(BTRFS_I(inode)); 5941 5942 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5943 struct dir_entry *entry; 5944 struct extent_buffer *leaf = path->nodes[0]; 5945 u8 ftype; 5946 5947 if (found_key.objectid != key.objectid) 5948 break; 5949 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5950 break; 5951 if (found_key.offset < ctx->pos) 5952 continue; 5953 if (found_key.offset > private->last_index) 5954 break; 5955 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5956 continue; 5957 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5958 name_len = btrfs_dir_name_len(leaf, di); 5959 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5960 PAGE_SIZE) { 5961 btrfs_release_path(path); 5962 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5963 if (ret) 5964 goto nopos; 5965 addr = private->filldir_buf; 5966 entries = 0; 5967 total_len = 0; 5968 goto again; 5969 } 5970 5971 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 5972 entry = addr; 5973 name_ptr = (char *)(entry + 1); 5974 read_extent_buffer(leaf, name_ptr, 5975 (unsigned long)(di + 1), name_len); 5976 put_unaligned(name_len, &entry->name_len); 5977 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 5978 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5979 put_unaligned(location.objectid, &entry->ino); 5980 put_unaligned(found_key.offset, &entry->offset); 5981 entries++; 5982 addr += sizeof(struct dir_entry) + name_len; 5983 total_len += sizeof(struct dir_entry) + name_len; 5984 } 5985 /* Catch error encountered during iteration */ 5986 if (ret < 0) 5987 goto err; 5988 5989 btrfs_release_path(path); 5990 5991 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5992 if (ret) 5993 goto nopos; 5994 5995 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5996 if (ret) 5997 goto nopos; 5998 5999 /* 6000 * Stop new entries from being returned after we return the last 6001 * entry. 6002 * 6003 * New directory entries are assigned a strictly increasing 6004 * offset. This means that new entries created during readdir 6005 * are *guaranteed* to be seen in the future by that readdir. 6006 * This has broken buggy programs which operate on names as 6007 * they're returned by readdir. Until we reuse freed offsets 6008 * we have this hack to stop new entries from being returned 6009 * under the assumption that they'll never reach this huge 6010 * offset. 6011 * 6012 * This is being careful not to overflow 32bit loff_t unless the 6013 * last entry requires it because doing so has broken 32bit apps 6014 * in the past. 6015 */ 6016 if (ctx->pos >= INT_MAX) 6017 ctx->pos = LLONG_MAX; 6018 else 6019 ctx->pos = INT_MAX; 6020 nopos: 6021 ret = 0; 6022 err: 6023 if (put) 6024 btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list); 6025 btrfs_free_path(path); 6026 return ret; 6027 } 6028 6029 /* 6030 * This is somewhat expensive, updating the tree every time the 6031 * inode changes. But, it is most likely to find the inode in cache. 6032 * FIXME, needs more benchmarking...there are no reasons other than performance 6033 * to keep or drop this code. 6034 */ 6035 static int btrfs_dirty_inode(struct btrfs_inode *inode) 6036 { 6037 struct btrfs_root *root = inode->root; 6038 struct btrfs_fs_info *fs_info = root->fs_info; 6039 struct btrfs_trans_handle *trans; 6040 int ret; 6041 6042 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 6043 return 0; 6044 6045 trans = btrfs_join_transaction(root); 6046 if (IS_ERR(trans)) 6047 return PTR_ERR(trans); 6048 6049 ret = btrfs_update_inode(trans, inode); 6050 if (ret == -ENOSPC || ret == -EDQUOT) { 6051 /* whoops, lets try again with the full transaction */ 6052 btrfs_end_transaction(trans); 6053 trans = btrfs_start_transaction(root, 1); 6054 if (IS_ERR(trans)) 6055 return PTR_ERR(trans); 6056 6057 ret = btrfs_update_inode(trans, inode); 6058 } 6059 btrfs_end_transaction(trans); 6060 if (inode->delayed_node) 6061 btrfs_balance_delayed_items(fs_info); 6062 6063 return ret; 6064 } 6065 6066 /* 6067 * This is a copy of file_update_time. We need this so we can return error on 6068 * ENOSPC for updating the inode in the case of file write and mmap writes. 6069 */ 6070 static int btrfs_update_time(struct inode *inode, int flags) 6071 { 6072 struct btrfs_root *root = BTRFS_I(inode)->root; 6073 bool dirty; 6074 6075 if (btrfs_root_readonly(root)) 6076 return -EROFS; 6077 6078 dirty = inode_update_timestamps(inode, flags); 6079 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6080 } 6081 6082 /* 6083 * helper to find a free sequence number in a given directory. This current 6084 * code is very simple, later versions will do smarter things in the btree 6085 */ 6086 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6087 { 6088 int ret = 0; 6089 6090 if (dir->index_cnt == (u64)-1) { 6091 ret = btrfs_inode_delayed_dir_index_count(dir); 6092 if (ret) { 6093 ret = btrfs_set_inode_index_count(dir); 6094 if (ret) 6095 return ret; 6096 } 6097 } 6098 6099 *index = dir->index_cnt; 6100 dir->index_cnt++; 6101 6102 return ret; 6103 } 6104 6105 static int btrfs_insert_inode_locked(struct inode *inode) 6106 { 6107 struct btrfs_iget_args args; 6108 6109 args.ino = btrfs_ino(BTRFS_I(inode)); 6110 args.root = BTRFS_I(inode)->root; 6111 6112 return insert_inode_locked4(inode, 6113 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6114 btrfs_find_actor, &args); 6115 } 6116 6117 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6118 unsigned int *trans_num_items) 6119 { 6120 struct inode *dir = args->dir; 6121 struct inode *inode = args->inode; 6122 int ret; 6123 6124 if (!args->orphan) { 6125 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6126 &args->fname); 6127 if (ret) 6128 return ret; 6129 } 6130 6131 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6132 if (ret) { 6133 fscrypt_free_filename(&args->fname); 6134 return ret; 6135 } 6136 6137 /* 1 to add inode item */ 6138 *trans_num_items = 1; 6139 /* 1 to add compression property */ 6140 if (BTRFS_I(dir)->prop_compress) 6141 (*trans_num_items)++; 6142 /* 1 to add default ACL xattr */ 6143 if (args->default_acl) 6144 (*trans_num_items)++; 6145 /* 1 to add access ACL xattr */ 6146 if (args->acl) 6147 (*trans_num_items)++; 6148 #ifdef CONFIG_SECURITY 6149 /* 1 to add LSM xattr */ 6150 if (dir->i_security) 6151 (*trans_num_items)++; 6152 #endif 6153 if (args->orphan) { 6154 /* 1 to add orphan item */ 6155 (*trans_num_items)++; 6156 } else { 6157 /* 6158 * 1 to add dir item 6159 * 1 to add dir index 6160 * 1 to update parent inode item 6161 * 6162 * No need for 1 unit for the inode ref item because it is 6163 * inserted in a batch together with the inode item at 6164 * btrfs_create_new_inode(). 6165 */ 6166 *trans_num_items += 3; 6167 } 6168 return 0; 6169 } 6170 6171 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6172 { 6173 posix_acl_release(args->acl); 6174 posix_acl_release(args->default_acl); 6175 fscrypt_free_filename(&args->fname); 6176 } 6177 6178 /* 6179 * Inherit flags from the parent inode. 6180 * 6181 * Currently only the compression flags and the cow flags are inherited. 6182 */ 6183 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6184 { 6185 unsigned int flags; 6186 6187 flags = dir->flags; 6188 6189 if (flags & BTRFS_INODE_NOCOMPRESS) { 6190 inode->flags &= ~BTRFS_INODE_COMPRESS; 6191 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6192 } else if (flags & BTRFS_INODE_COMPRESS) { 6193 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6194 inode->flags |= BTRFS_INODE_COMPRESS; 6195 } 6196 6197 if (flags & BTRFS_INODE_NODATACOW) { 6198 inode->flags |= BTRFS_INODE_NODATACOW; 6199 if (S_ISREG(inode->vfs_inode.i_mode)) 6200 inode->flags |= BTRFS_INODE_NODATASUM; 6201 } 6202 6203 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6204 } 6205 6206 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6207 struct btrfs_new_inode_args *args) 6208 { 6209 struct timespec64 ts; 6210 struct inode *dir = args->dir; 6211 struct inode *inode = args->inode; 6212 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6213 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6214 struct btrfs_root *root; 6215 struct btrfs_inode_item *inode_item; 6216 struct btrfs_path *path; 6217 u64 objectid; 6218 struct btrfs_inode_ref *ref; 6219 struct btrfs_key key[2]; 6220 u32 sizes[2]; 6221 struct btrfs_item_batch batch; 6222 unsigned long ptr; 6223 int ret; 6224 bool xa_reserved = false; 6225 6226 path = btrfs_alloc_path(); 6227 if (!path) 6228 return -ENOMEM; 6229 6230 if (!args->subvol) 6231 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6232 root = BTRFS_I(inode)->root; 6233 6234 ret = btrfs_init_file_extent_tree(BTRFS_I(inode)); 6235 if (ret) 6236 goto out; 6237 6238 ret = btrfs_get_free_objectid(root, &objectid); 6239 if (ret) 6240 goto out; 6241 btrfs_set_inode_number(BTRFS_I(inode), objectid); 6242 6243 ret = xa_reserve(&root->inodes, objectid, GFP_NOFS); 6244 if (ret) 6245 goto out; 6246 xa_reserved = true; 6247 6248 if (args->orphan) { 6249 /* 6250 * O_TMPFILE, set link count to 0, so that after this point, we 6251 * fill in an inode item with the correct link count. 6252 */ 6253 set_nlink(inode, 0); 6254 } else { 6255 trace_btrfs_inode_request(dir); 6256 6257 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6258 if (ret) 6259 goto out; 6260 } 6261 6262 if (S_ISDIR(inode->i_mode)) 6263 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6264 6265 BTRFS_I(inode)->generation = trans->transid; 6266 inode->i_generation = BTRFS_I(inode)->generation; 6267 6268 /* 6269 * We don't have any capability xattrs set here yet, shortcut any 6270 * queries for the xattrs here. If we add them later via the inode 6271 * security init path or any other path this flag will be cleared. 6272 */ 6273 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags); 6274 6275 /* 6276 * Subvolumes don't inherit flags from their parent directory. 6277 * Originally this was probably by accident, but we probably can't 6278 * change it now without compatibility issues. 6279 */ 6280 if (!args->subvol) 6281 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6282 6283 if (S_ISREG(inode->i_mode)) { 6284 if (btrfs_test_opt(fs_info, NODATASUM)) 6285 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6286 if (btrfs_test_opt(fs_info, NODATACOW)) 6287 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6288 BTRFS_INODE_NODATASUM; 6289 } 6290 6291 ret = btrfs_insert_inode_locked(inode); 6292 if (ret < 0) { 6293 if (!args->orphan) 6294 BTRFS_I(dir)->index_cnt--; 6295 goto out; 6296 } 6297 6298 /* 6299 * We could have gotten an inode number from somebody who was fsynced 6300 * and then removed in this same transaction, so let's just set full 6301 * sync since it will be a full sync anyway and this will blow away the 6302 * old info in the log. 6303 */ 6304 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6305 6306 key[0].objectid = objectid; 6307 key[0].type = BTRFS_INODE_ITEM_KEY; 6308 key[0].offset = 0; 6309 6310 sizes[0] = sizeof(struct btrfs_inode_item); 6311 6312 if (!args->orphan) { 6313 /* 6314 * Start new inodes with an inode_ref. This is slightly more 6315 * efficient for small numbers of hard links since they will 6316 * be packed into one item. Extended refs will kick in if we 6317 * add more hard links than can fit in the ref item. 6318 */ 6319 key[1].objectid = objectid; 6320 key[1].type = BTRFS_INODE_REF_KEY; 6321 if (args->subvol) { 6322 key[1].offset = objectid; 6323 sizes[1] = 2 + sizeof(*ref); 6324 } else { 6325 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6326 sizes[1] = name->len + sizeof(*ref); 6327 } 6328 } 6329 6330 batch.keys = &key[0]; 6331 batch.data_sizes = &sizes[0]; 6332 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6333 batch.nr = args->orphan ? 1 : 2; 6334 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6335 if (ret != 0) { 6336 btrfs_abort_transaction(trans, ret); 6337 goto discard; 6338 } 6339 6340 ts = simple_inode_init_ts(inode); 6341 BTRFS_I(inode)->i_otime_sec = ts.tv_sec; 6342 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec; 6343 6344 /* 6345 * We're going to fill the inode item now, so at this point the inode 6346 * must be fully initialized. 6347 */ 6348 6349 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6350 struct btrfs_inode_item); 6351 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6352 sizeof(*inode_item)); 6353 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6354 6355 if (!args->orphan) { 6356 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6357 struct btrfs_inode_ref); 6358 ptr = (unsigned long)(ref + 1); 6359 if (args->subvol) { 6360 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6361 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6362 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6363 } else { 6364 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6365 name->len); 6366 btrfs_set_inode_ref_index(path->nodes[0], ref, 6367 BTRFS_I(inode)->dir_index); 6368 write_extent_buffer(path->nodes[0], name->name, ptr, 6369 name->len); 6370 } 6371 } 6372 6373 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 6374 /* 6375 * We don't need the path anymore, plus inheriting properties, adding 6376 * ACLs, security xattrs, orphan item or adding the link, will result in 6377 * allocating yet another path. So just free our path. 6378 */ 6379 btrfs_free_path(path); 6380 path = NULL; 6381 6382 if (args->subvol) { 6383 struct inode *parent; 6384 6385 /* 6386 * Subvolumes inherit properties from their parent subvolume, 6387 * not the directory they were created in. 6388 */ 6389 parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root); 6390 if (IS_ERR(parent)) { 6391 ret = PTR_ERR(parent); 6392 } else { 6393 ret = btrfs_inode_inherit_props(trans, inode, parent); 6394 iput(parent); 6395 } 6396 } else { 6397 ret = btrfs_inode_inherit_props(trans, inode, dir); 6398 } 6399 if (ret) { 6400 btrfs_err(fs_info, 6401 "error inheriting props for ino %llu (root %llu): %d", 6402 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret); 6403 } 6404 6405 /* 6406 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6407 * probably a bug. 6408 */ 6409 if (!args->subvol) { 6410 ret = btrfs_init_inode_security(trans, args); 6411 if (ret) { 6412 btrfs_abort_transaction(trans, ret); 6413 goto discard; 6414 } 6415 } 6416 6417 ret = btrfs_add_inode_to_root(BTRFS_I(inode), false); 6418 if (WARN_ON(ret)) { 6419 /* Shouldn't happen, we used xa_reserve() before. */ 6420 btrfs_abort_transaction(trans, ret); 6421 goto discard; 6422 } 6423 6424 trace_btrfs_inode_new(inode); 6425 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6426 6427 btrfs_update_root_times(trans, root); 6428 6429 if (args->orphan) { 6430 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6431 } else { 6432 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6433 0, BTRFS_I(inode)->dir_index); 6434 } 6435 if (ret) { 6436 btrfs_abort_transaction(trans, ret); 6437 goto discard; 6438 } 6439 6440 return 0; 6441 6442 discard: 6443 /* 6444 * discard_new_inode() calls iput(), but the caller owns the reference 6445 * to the inode. 6446 */ 6447 ihold(inode); 6448 discard_new_inode(inode); 6449 out: 6450 if (xa_reserved) 6451 xa_release(&root->inodes, objectid); 6452 6453 btrfs_free_path(path); 6454 return ret; 6455 } 6456 6457 /* 6458 * utility function to add 'inode' into 'parent_inode' with 6459 * a give name and a given sequence number. 6460 * if 'add_backref' is true, also insert a backref from the 6461 * inode to the parent directory. 6462 */ 6463 int btrfs_add_link(struct btrfs_trans_handle *trans, 6464 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6465 const struct fscrypt_str *name, int add_backref, u64 index) 6466 { 6467 int ret = 0; 6468 struct btrfs_key key; 6469 struct btrfs_root *root = parent_inode->root; 6470 u64 ino = btrfs_ino(inode); 6471 u64 parent_ino = btrfs_ino(parent_inode); 6472 6473 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6474 memcpy(&key, &inode->root->root_key, sizeof(key)); 6475 } else { 6476 key.objectid = ino; 6477 key.type = BTRFS_INODE_ITEM_KEY; 6478 key.offset = 0; 6479 } 6480 6481 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6482 ret = btrfs_add_root_ref(trans, key.objectid, 6483 btrfs_root_id(root), parent_ino, 6484 index, name); 6485 } else if (add_backref) { 6486 ret = btrfs_insert_inode_ref(trans, root, name, 6487 ino, parent_ino, index); 6488 } 6489 6490 /* Nothing to clean up yet */ 6491 if (ret) 6492 return ret; 6493 6494 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6495 btrfs_inode_type(&inode->vfs_inode), index); 6496 if (ret == -EEXIST || ret == -EOVERFLOW) 6497 goto fail_dir_item; 6498 else if (ret) { 6499 btrfs_abort_transaction(trans, ret); 6500 return ret; 6501 } 6502 6503 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6504 name->len * 2); 6505 inode_inc_iversion(&parent_inode->vfs_inode); 6506 /* 6507 * If we are replaying a log tree, we do not want to update the mtime 6508 * and ctime of the parent directory with the current time, since the 6509 * log replay procedure is responsible for setting them to their correct 6510 * values (the ones it had when the fsync was done). 6511 */ 6512 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) 6513 inode_set_mtime_to_ts(&parent_inode->vfs_inode, 6514 inode_set_ctime_current(&parent_inode->vfs_inode)); 6515 6516 ret = btrfs_update_inode(trans, parent_inode); 6517 if (ret) 6518 btrfs_abort_transaction(trans, ret); 6519 return ret; 6520 6521 fail_dir_item: 6522 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6523 u64 local_index; 6524 int err; 6525 err = btrfs_del_root_ref(trans, key.objectid, 6526 btrfs_root_id(root), parent_ino, 6527 &local_index, name); 6528 if (err) 6529 btrfs_abort_transaction(trans, err); 6530 } else if (add_backref) { 6531 u64 local_index; 6532 int err; 6533 6534 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6535 &local_index); 6536 if (err) 6537 btrfs_abort_transaction(trans, err); 6538 } 6539 6540 /* Return the original error code */ 6541 return ret; 6542 } 6543 6544 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6545 struct inode *inode) 6546 { 6547 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 6548 struct btrfs_root *root = BTRFS_I(dir)->root; 6549 struct btrfs_new_inode_args new_inode_args = { 6550 .dir = dir, 6551 .dentry = dentry, 6552 .inode = inode, 6553 }; 6554 unsigned int trans_num_items; 6555 struct btrfs_trans_handle *trans; 6556 int err; 6557 6558 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6559 if (err) 6560 goto out_inode; 6561 6562 trans = btrfs_start_transaction(root, trans_num_items); 6563 if (IS_ERR(trans)) { 6564 err = PTR_ERR(trans); 6565 goto out_new_inode_args; 6566 } 6567 6568 err = btrfs_create_new_inode(trans, &new_inode_args); 6569 if (!err) 6570 d_instantiate_new(dentry, inode); 6571 6572 btrfs_end_transaction(trans); 6573 btrfs_btree_balance_dirty(fs_info); 6574 out_new_inode_args: 6575 btrfs_new_inode_args_destroy(&new_inode_args); 6576 out_inode: 6577 if (err) 6578 iput(inode); 6579 return err; 6580 } 6581 6582 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6583 struct dentry *dentry, umode_t mode, dev_t rdev) 6584 { 6585 struct inode *inode; 6586 6587 inode = new_inode(dir->i_sb); 6588 if (!inode) 6589 return -ENOMEM; 6590 inode_init_owner(idmap, inode, dir, mode); 6591 inode->i_op = &btrfs_special_inode_operations; 6592 init_special_inode(inode, inode->i_mode, rdev); 6593 return btrfs_create_common(dir, dentry, inode); 6594 } 6595 6596 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6597 struct dentry *dentry, umode_t mode, bool excl) 6598 { 6599 struct inode *inode; 6600 6601 inode = new_inode(dir->i_sb); 6602 if (!inode) 6603 return -ENOMEM; 6604 inode_init_owner(idmap, inode, dir, mode); 6605 inode->i_fop = &btrfs_file_operations; 6606 inode->i_op = &btrfs_file_inode_operations; 6607 inode->i_mapping->a_ops = &btrfs_aops; 6608 return btrfs_create_common(dir, dentry, inode); 6609 } 6610 6611 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6612 struct dentry *dentry) 6613 { 6614 struct btrfs_trans_handle *trans = NULL; 6615 struct btrfs_root *root = BTRFS_I(dir)->root; 6616 struct inode *inode = d_inode(old_dentry); 6617 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 6618 struct fscrypt_name fname; 6619 u64 index; 6620 int err; 6621 int drop_inode = 0; 6622 6623 /* do not allow sys_link's with other subvols of the same device */ 6624 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root)) 6625 return -EXDEV; 6626 6627 if (inode->i_nlink >= BTRFS_LINK_MAX) 6628 return -EMLINK; 6629 6630 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6631 if (err) 6632 goto fail; 6633 6634 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6635 if (err) 6636 goto fail; 6637 6638 /* 6639 * 2 items for inode and inode ref 6640 * 2 items for dir items 6641 * 1 item for parent inode 6642 * 1 item for orphan item deletion if O_TMPFILE 6643 */ 6644 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6645 if (IS_ERR(trans)) { 6646 err = PTR_ERR(trans); 6647 trans = NULL; 6648 goto fail; 6649 } 6650 6651 /* There are several dir indexes for this inode, clear the cache. */ 6652 BTRFS_I(inode)->dir_index = 0ULL; 6653 inc_nlink(inode); 6654 inode_inc_iversion(inode); 6655 inode_set_ctime_current(inode); 6656 ihold(inode); 6657 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6658 6659 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6660 &fname.disk_name, 1, index); 6661 6662 if (err) { 6663 drop_inode = 1; 6664 } else { 6665 struct dentry *parent = dentry->d_parent; 6666 6667 err = btrfs_update_inode(trans, BTRFS_I(inode)); 6668 if (err) 6669 goto fail; 6670 if (inode->i_nlink == 1) { 6671 /* 6672 * If new hard link count is 1, it's a file created 6673 * with open(2) O_TMPFILE flag. 6674 */ 6675 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6676 if (err) 6677 goto fail; 6678 } 6679 d_instantiate(dentry, inode); 6680 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6681 } 6682 6683 fail: 6684 fscrypt_free_filename(&fname); 6685 if (trans) 6686 btrfs_end_transaction(trans); 6687 if (drop_inode) { 6688 inode_dec_link_count(inode); 6689 iput(inode); 6690 } 6691 btrfs_btree_balance_dirty(fs_info); 6692 return err; 6693 } 6694 6695 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6696 struct dentry *dentry, umode_t mode) 6697 { 6698 struct inode *inode; 6699 6700 inode = new_inode(dir->i_sb); 6701 if (!inode) 6702 return -ENOMEM; 6703 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6704 inode->i_op = &btrfs_dir_inode_operations; 6705 inode->i_fop = &btrfs_dir_file_operations; 6706 return btrfs_create_common(dir, dentry, inode); 6707 } 6708 6709 static noinline int uncompress_inline(struct btrfs_path *path, 6710 struct folio *folio, 6711 struct btrfs_file_extent_item *item) 6712 { 6713 int ret; 6714 struct extent_buffer *leaf = path->nodes[0]; 6715 char *tmp; 6716 size_t max_size; 6717 unsigned long inline_size; 6718 unsigned long ptr; 6719 int compress_type; 6720 6721 compress_type = btrfs_file_extent_compression(leaf, item); 6722 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6723 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6724 tmp = kmalloc(inline_size, GFP_NOFS); 6725 if (!tmp) 6726 return -ENOMEM; 6727 ptr = btrfs_file_extent_inline_start(item); 6728 6729 read_extent_buffer(leaf, tmp, ptr, inline_size); 6730 6731 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6732 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size, 6733 max_size); 6734 6735 /* 6736 * decompression code contains a memset to fill in any space between the end 6737 * of the uncompressed data and the end of max_size in case the decompressed 6738 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6739 * the end of an inline extent and the beginning of the next block, so we 6740 * cover that region here. 6741 */ 6742 6743 if (max_size < PAGE_SIZE) 6744 folio_zero_range(folio, max_size, PAGE_SIZE - max_size); 6745 kfree(tmp); 6746 return ret; 6747 } 6748 6749 static int read_inline_extent(struct btrfs_path *path, struct folio *folio) 6750 { 6751 struct btrfs_file_extent_item *fi; 6752 void *kaddr; 6753 size_t copy_size; 6754 6755 if (!folio || folio_test_uptodate(folio)) 6756 return 0; 6757 6758 ASSERT(folio_pos(folio) == 0); 6759 6760 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6761 struct btrfs_file_extent_item); 6762 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6763 return uncompress_inline(path, folio, fi); 6764 6765 copy_size = min_t(u64, PAGE_SIZE, 6766 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6767 kaddr = kmap_local_folio(folio, 0); 6768 read_extent_buffer(path->nodes[0], kaddr, 6769 btrfs_file_extent_inline_start(fi), copy_size); 6770 kunmap_local(kaddr); 6771 if (copy_size < PAGE_SIZE) 6772 folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size); 6773 return 0; 6774 } 6775 6776 /* 6777 * Lookup the first extent overlapping a range in a file. 6778 * 6779 * @inode: file to search in 6780 * @page: page to read extent data into if the extent is inline 6781 * @start: file offset 6782 * @len: length of range starting at @start 6783 * 6784 * Return the first &struct extent_map which overlaps the given range, reading 6785 * it from the B-tree and caching it if necessary. Note that there may be more 6786 * extents which overlap the given range after the returned extent_map. 6787 * 6788 * If @page is not NULL and the extent is inline, this also reads the extent 6789 * data directly into the page and marks the extent up to date in the io_tree. 6790 * 6791 * Return: ERR_PTR on error, non-NULL extent_map on success. 6792 */ 6793 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6794 struct folio *folio, u64 start, u64 len) 6795 { 6796 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6797 int ret = 0; 6798 u64 extent_start = 0; 6799 u64 extent_end = 0; 6800 u64 objectid = btrfs_ino(inode); 6801 int extent_type = -1; 6802 struct btrfs_path *path = NULL; 6803 struct btrfs_root *root = inode->root; 6804 struct btrfs_file_extent_item *item; 6805 struct extent_buffer *leaf; 6806 struct btrfs_key found_key; 6807 struct extent_map *em = NULL; 6808 struct extent_map_tree *em_tree = &inode->extent_tree; 6809 6810 read_lock(&em_tree->lock); 6811 em = lookup_extent_mapping(em_tree, start, len); 6812 read_unlock(&em_tree->lock); 6813 6814 if (em) { 6815 if (em->start > start || em->start + em->len <= start) 6816 free_extent_map(em); 6817 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio) 6818 free_extent_map(em); 6819 else 6820 goto out; 6821 } 6822 em = alloc_extent_map(); 6823 if (!em) { 6824 ret = -ENOMEM; 6825 goto out; 6826 } 6827 em->start = EXTENT_MAP_HOLE; 6828 em->disk_bytenr = EXTENT_MAP_HOLE; 6829 em->len = (u64)-1; 6830 6831 path = btrfs_alloc_path(); 6832 if (!path) { 6833 ret = -ENOMEM; 6834 goto out; 6835 } 6836 6837 /* Chances are we'll be called again, so go ahead and do readahead */ 6838 path->reada = READA_FORWARD; 6839 6840 /* 6841 * The same explanation in load_free_space_cache applies here as well, 6842 * we only read when we're loading the free space cache, and at that 6843 * point the commit_root has everything we need. 6844 */ 6845 if (btrfs_is_free_space_inode(inode)) { 6846 path->search_commit_root = 1; 6847 path->skip_locking = 1; 6848 } 6849 6850 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6851 if (ret < 0) { 6852 goto out; 6853 } else if (ret > 0) { 6854 if (path->slots[0] == 0) 6855 goto not_found; 6856 path->slots[0]--; 6857 ret = 0; 6858 } 6859 6860 leaf = path->nodes[0]; 6861 item = btrfs_item_ptr(leaf, path->slots[0], 6862 struct btrfs_file_extent_item); 6863 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6864 if (found_key.objectid != objectid || 6865 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6866 /* 6867 * If we backup past the first extent we want to move forward 6868 * and see if there is an extent in front of us, otherwise we'll 6869 * say there is a hole for our whole search range which can 6870 * cause problems. 6871 */ 6872 extent_end = start; 6873 goto next; 6874 } 6875 6876 extent_type = btrfs_file_extent_type(leaf, item); 6877 extent_start = found_key.offset; 6878 extent_end = btrfs_file_extent_end(path); 6879 if (extent_type == BTRFS_FILE_EXTENT_REG || 6880 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6881 /* Only regular file could have regular/prealloc extent */ 6882 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6883 ret = -EUCLEAN; 6884 btrfs_crit(fs_info, 6885 "regular/prealloc extent found for non-regular inode %llu", 6886 btrfs_ino(inode)); 6887 goto out; 6888 } 6889 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6890 extent_start); 6891 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6892 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6893 path->slots[0], 6894 extent_start); 6895 } 6896 next: 6897 if (start >= extent_end) { 6898 path->slots[0]++; 6899 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6900 ret = btrfs_next_leaf(root, path); 6901 if (ret < 0) 6902 goto out; 6903 else if (ret > 0) 6904 goto not_found; 6905 6906 leaf = path->nodes[0]; 6907 } 6908 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6909 if (found_key.objectid != objectid || 6910 found_key.type != BTRFS_EXTENT_DATA_KEY) 6911 goto not_found; 6912 if (start + len <= found_key.offset) 6913 goto not_found; 6914 if (start > found_key.offset) 6915 goto next; 6916 6917 /* New extent overlaps with existing one */ 6918 em->start = start; 6919 em->len = found_key.offset - start; 6920 em->disk_bytenr = EXTENT_MAP_HOLE; 6921 goto insert; 6922 } 6923 6924 btrfs_extent_item_to_extent_map(inode, path, item, em); 6925 6926 if (extent_type == BTRFS_FILE_EXTENT_REG || 6927 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6928 goto insert; 6929 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6930 /* 6931 * Inline extent can only exist at file offset 0. This is 6932 * ensured by tree-checker and inline extent creation path. 6933 * Thus all members representing file offsets should be zero. 6934 */ 6935 ASSERT(extent_start == 0); 6936 ASSERT(em->start == 0); 6937 6938 /* 6939 * btrfs_extent_item_to_extent_map() should have properly 6940 * initialized em members already. 6941 * 6942 * Other members are not utilized for inline extents. 6943 */ 6944 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE); 6945 ASSERT(em->len == fs_info->sectorsize); 6946 6947 ret = read_inline_extent(path, folio); 6948 if (ret < 0) 6949 goto out; 6950 goto insert; 6951 } 6952 not_found: 6953 em->start = start; 6954 em->len = len; 6955 em->disk_bytenr = EXTENT_MAP_HOLE; 6956 insert: 6957 ret = 0; 6958 btrfs_release_path(path); 6959 if (em->start > start || extent_map_end(em) <= start) { 6960 btrfs_err(fs_info, 6961 "bad extent! em: [%llu %llu] passed [%llu %llu]", 6962 em->start, em->len, start, len); 6963 ret = -EIO; 6964 goto out; 6965 } 6966 6967 write_lock(&em_tree->lock); 6968 ret = btrfs_add_extent_mapping(inode, &em, start, len); 6969 write_unlock(&em_tree->lock); 6970 out: 6971 btrfs_free_path(path); 6972 6973 trace_btrfs_get_extent(root, inode, em); 6974 6975 if (ret) { 6976 free_extent_map(em); 6977 return ERR_PTR(ret); 6978 } 6979 return em; 6980 } 6981 6982 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 6983 { 6984 struct btrfs_block_group *block_group; 6985 bool readonly = false; 6986 6987 block_group = btrfs_lookup_block_group(fs_info, bytenr); 6988 if (!block_group || block_group->ro) 6989 readonly = true; 6990 if (block_group) 6991 btrfs_put_block_group(block_group); 6992 return readonly; 6993 } 6994 6995 /* 6996 * Check if we can do nocow write into the range [@offset, @offset + @len) 6997 * 6998 * @offset: File offset 6999 * @len: The length to write, will be updated to the nocow writeable 7000 * range 7001 * @orig_start: (optional) Return the original file offset of the file extent 7002 * @orig_len: (optional) Return the original on-disk length of the file extent 7003 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7004 * @strict: if true, omit optimizations that might force us into unnecessary 7005 * cow. e.g., don't trust generation number. 7006 * 7007 * Return: 7008 * >0 and update @len if we can do nocow write 7009 * 0 if we can't do nocow write 7010 * <0 if error happened 7011 * 7012 * NOTE: This only checks the file extents, caller is responsible to wait for 7013 * any ordered extents. 7014 */ 7015 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7016 struct btrfs_file_extent *file_extent, 7017 bool nowait, bool strict) 7018 { 7019 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 7020 struct can_nocow_file_extent_args nocow_args = { 0 }; 7021 struct btrfs_path *path; 7022 int ret; 7023 struct extent_buffer *leaf; 7024 struct btrfs_root *root = BTRFS_I(inode)->root; 7025 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7026 struct btrfs_file_extent_item *fi; 7027 struct btrfs_key key; 7028 int found_type; 7029 7030 path = btrfs_alloc_path(); 7031 if (!path) 7032 return -ENOMEM; 7033 path->nowait = nowait; 7034 7035 ret = btrfs_lookup_file_extent(NULL, root, path, 7036 btrfs_ino(BTRFS_I(inode)), offset, 0); 7037 if (ret < 0) 7038 goto out; 7039 7040 if (ret == 1) { 7041 if (path->slots[0] == 0) { 7042 /* can't find the item, must cow */ 7043 ret = 0; 7044 goto out; 7045 } 7046 path->slots[0]--; 7047 } 7048 ret = 0; 7049 leaf = path->nodes[0]; 7050 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7051 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7052 key.type != BTRFS_EXTENT_DATA_KEY) { 7053 /* not our file or wrong item type, must cow */ 7054 goto out; 7055 } 7056 7057 if (key.offset > offset) { 7058 /* Wrong offset, must cow */ 7059 goto out; 7060 } 7061 7062 if (btrfs_file_extent_end(path) <= offset) 7063 goto out; 7064 7065 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7066 found_type = btrfs_file_extent_type(leaf, fi); 7067 7068 nocow_args.start = offset; 7069 nocow_args.end = offset + *len - 1; 7070 nocow_args.strict = strict; 7071 nocow_args.free_path = true; 7072 7073 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7074 /* can_nocow_file_extent() has freed the path. */ 7075 path = NULL; 7076 7077 if (ret != 1) { 7078 /* Treat errors as not being able to NOCOW. */ 7079 ret = 0; 7080 goto out; 7081 } 7082 7083 ret = 0; 7084 if (btrfs_extent_readonly(fs_info, 7085 nocow_args.file_extent.disk_bytenr + 7086 nocow_args.file_extent.offset)) 7087 goto out; 7088 7089 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7090 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7091 u64 range_end; 7092 7093 range_end = round_up(offset + nocow_args.file_extent.num_bytes, 7094 root->fs_info->sectorsize) - 1; 7095 ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC); 7096 if (ret) { 7097 ret = -EAGAIN; 7098 goto out; 7099 } 7100 } 7101 7102 if (file_extent) 7103 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent)); 7104 7105 *len = nocow_args.file_extent.num_bytes; 7106 ret = 1; 7107 out: 7108 btrfs_free_path(path); 7109 return ret; 7110 } 7111 7112 /* The callers of this must take lock_extent() */ 7113 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start, 7114 const struct btrfs_file_extent *file_extent, 7115 int type) 7116 { 7117 struct extent_map *em; 7118 int ret; 7119 7120 /* 7121 * Note the missing NOCOW type. 7122 * 7123 * For pure NOCOW writes, we should not create an io extent map, but 7124 * just reusing the existing one. 7125 * Only PREALLOC writes (NOCOW write into preallocated range) can 7126 * create an io extent map. 7127 */ 7128 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7129 type == BTRFS_ORDERED_COMPRESSED || 7130 type == BTRFS_ORDERED_REGULAR); 7131 7132 switch (type) { 7133 case BTRFS_ORDERED_PREALLOC: 7134 /* We're only referring part of a larger preallocated extent. */ 7135 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes); 7136 break; 7137 case BTRFS_ORDERED_REGULAR: 7138 /* COW results a new extent matching our file extent size. */ 7139 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes); 7140 ASSERT(file_extent->ram_bytes == file_extent->num_bytes); 7141 7142 /* Since it's a new extent, we should not have any offset. */ 7143 ASSERT(file_extent->offset == 0); 7144 break; 7145 case BTRFS_ORDERED_COMPRESSED: 7146 /* Must be compressed. */ 7147 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE); 7148 7149 /* 7150 * Encoded write can make us to refer to part of the 7151 * uncompressed extent. 7152 */ 7153 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes); 7154 break; 7155 } 7156 7157 em = alloc_extent_map(); 7158 if (!em) 7159 return ERR_PTR(-ENOMEM); 7160 7161 em->start = start; 7162 em->len = file_extent->num_bytes; 7163 em->disk_bytenr = file_extent->disk_bytenr; 7164 em->disk_num_bytes = file_extent->disk_num_bytes; 7165 em->ram_bytes = file_extent->ram_bytes; 7166 em->generation = -1; 7167 em->offset = file_extent->offset; 7168 em->flags |= EXTENT_FLAG_PINNED; 7169 if (type == BTRFS_ORDERED_COMPRESSED) 7170 extent_map_set_compression(em, file_extent->compression); 7171 7172 ret = btrfs_replace_extent_map_range(inode, em, true); 7173 if (ret) { 7174 free_extent_map(em); 7175 return ERR_PTR(ret); 7176 } 7177 7178 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7179 return em; 7180 } 7181 7182 /* 7183 * For release_folio() and invalidate_folio() we have a race window where 7184 * folio_end_writeback() is called but the subpage spinlock is not yet released. 7185 * If we continue to release/invalidate the page, we could cause use-after-free 7186 * for subpage spinlock. So this function is to spin and wait for subpage 7187 * spinlock. 7188 */ 7189 static void wait_subpage_spinlock(struct folio *folio) 7190 { 7191 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); 7192 struct btrfs_subpage *subpage; 7193 7194 if (!btrfs_is_subpage(fs_info, folio->mapping)) 7195 return; 7196 7197 ASSERT(folio_test_private(folio) && folio_get_private(folio)); 7198 subpage = folio_get_private(folio); 7199 7200 /* 7201 * This may look insane as we just acquire the spinlock and release it, 7202 * without doing anything. But we just want to make sure no one is 7203 * still holding the subpage spinlock. 7204 * And since the page is not dirty nor writeback, and we have page 7205 * locked, the only possible way to hold a spinlock is from the endio 7206 * function to clear page writeback. 7207 * 7208 * Here we just acquire the spinlock so that all existing callers 7209 * should exit and we're safe to release/invalidate the page. 7210 */ 7211 spin_lock_irq(&subpage->lock); 7212 spin_unlock_irq(&subpage->lock); 7213 } 7214 7215 static int btrfs_launder_folio(struct folio *folio) 7216 { 7217 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio), 7218 PAGE_SIZE, NULL); 7219 } 7220 7221 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7222 { 7223 if (try_release_extent_mapping(folio, gfp_flags)) { 7224 wait_subpage_spinlock(folio); 7225 clear_folio_extent_mapped(folio); 7226 return true; 7227 } 7228 return false; 7229 } 7230 7231 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7232 { 7233 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 7234 return false; 7235 return __btrfs_release_folio(folio, gfp_flags); 7236 } 7237 7238 #ifdef CONFIG_MIGRATION 7239 static int btrfs_migrate_folio(struct address_space *mapping, 7240 struct folio *dst, struct folio *src, 7241 enum migrate_mode mode) 7242 { 7243 int ret = filemap_migrate_folio(mapping, dst, src, mode); 7244 7245 if (ret != MIGRATEPAGE_SUCCESS) 7246 return ret; 7247 7248 if (folio_test_ordered(src)) { 7249 folio_clear_ordered(src); 7250 folio_set_ordered(dst); 7251 } 7252 7253 return MIGRATEPAGE_SUCCESS; 7254 } 7255 #else 7256 #define btrfs_migrate_folio NULL 7257 #endif 7258 7259 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 7260 size_t length) 7261 { 7262 struct btrfs_inode *inode = folio_to_inode(folio); 7263 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7264 struct extent_io_tree *tree = &inode->io_tree; 7265 struct extent_state *cached_state = NULL; 7266 u64 page_start = folio_pos(folio); 7267 u64 page_end = page_start + folio_size(folio) - 1; 7268 u64 cur; 7269 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 7270 7271 /* 7272 * We have folio locked so no new ordered extent can be created on this 7273 * page, nor bio can be submitted for this folio. 7274 * 7275 * But already submitted bio can still be finished on this folio. 7276 * Furthermore, endio function won't skip folio which has Ordered 7277 * already cleared, so it's possible for endio and 7278 * invalidate_folio to do the same ordered extent accounting twice 7279 * on one folio. 7280 * 7281 * So here we wait for any submitted bios to finish, so that we won't 7282 * do double ordered extent accounting on the same folio. 7283 */ 7284 folio_wait_writeback(folio); 7285 wait_subpage_spinlock(folio); 7286 7287 /* 7288 * For subpage case, we have call sites like 7289 * btrfs_punch_hole_lock_range() which passes range not aligned to 7290 * sectorsize. 7291 * If the range doesn't cover the full folio, we don't need to and 7292 * shouldn't clear page extent mapped, as folio->private can still 7293 * record subpage dirty bits for other part of the range. 7294 * 7295 * For cases that invalidate the full folio even the range doesn't 7296 * cover the full folio, like invalidating the last folio, we're 7297 * still safe to wait for ordered extent to finish. 7298 */ 7299 if (!(offset == 0 && length == folio_size(folio))) { 7300 btrfs_release_folio(folio, GFP_NOFS); 7301 return; 7302 } 7303 7304 if (!inode_evicting) 7305 lock_extent(tree, page_start, page_end, &cached_state); 7306 7307 cur = page_start; 7308 while (cur < page_end) { 7309 struct btrfs_ordered_extent *ordered; 7310 u64 range_end; 7311 u32 range_len; 7312 u32 extra_flags = 0; 7313 7314 ordered = btrfs_lookup_first_ordered_range(inode, cur, 7315 page_end + 1 - cur); 7316 if (!ordered) { 7317 range_end = page_end; 7318 /* 7319 * No ordered extent covering this range, we are safe 7320 * to delete all extent states in the range. 7321 */ 7322 extra_flags = EXTENT_CLEAR_ALL_BITS; 7323 goto next; 7324 } 7325 if (ordered->file_offset > cur) { 7326 /* 7327 * There is a range between [cur, oe->file_offset) not 7328 * covered by any ordered extent. 7329 * We are safe to delete all extent states, and handle 7330 * the ordered extent in the next iteration. 7331 */ 7332 range_end = ordered->file_offset - 1; 7333 extra_flags = EXTENT_CLEAR_ALL_BITS; 7334 goto next; 7335 } 7336 7337 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 7338 page_end); 7339 ASSERT(range_end + 1 - cur < U32_MAX); 7340 range_len = range_end + 1 - cur; 7341 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) { 7342 /* 7343 * If Ordered is cleared, it means endio has 7344 * already been executed for the range. 7345 * We can't delete the extent states as 7346 * btrfs_finish_ordered_io() may still use some of them. 7347 */ 7348 goto next; 7349 } 7350 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len); 7351 7352 /* 7353 * IO on this page will never be started, so we need to account 7354 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 7355 * here, must leave that up for the ordered extent completion. 7356 * 7357 * This will also unlock the range for incoming 7358 * btrfs_finish_ordered_io(). 7359 */ 7360 if (!inode_evicting) 7361 clear_extent_bit(tree, cur, range_end, 7362 EXTENT_DELALLOC | 7363 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 7364 EXTENT_DEFRAG, &cached_state); 7365 7366 spin_lock_irq(&inode->ordered_tree_lock); 7367 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 7368 ordered->truncated_len = min(ordered->truncated_len, 7369 cur - ordered->file_offset); 7370 spin_unlock_irq(&inode->ordered_tree_lock); 7371 7372 /* 7373 * If the ordered extent has finished, we're safe to delete all 7374 * the extent states of the range, otherwise 7375 * btrfs_finish_ordered_io() will get executed by endio for 7376 * other pages, so we can't delete extent states. 7377 */ 7378 if (btrfs_dec_test_ordered_pending(inode, &ordered, 7379 cur, range_end + 1 - cur)) { 7380 btrfs_finish_ordered_io(ordered); 7381 /* 7382 * The ordered extent has finished, now we're again 7383 * safe to delete all extent states of the range. 7384 */ 7385 extra_flags = EXTENT_CLEAR_ALL_BITS; 7386 } 7387 next: 7388 if (ordered) 7389 btrfs_put_ordered_extent(ordered); 7390 /* 7391 * Qgroup reserved space handler 7392 * Sector(s) here will be either: 7393 * 7394 * 1) Already written to disk or bio already finished 7395 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 7396 * Qgroup will be handled by its qgroup_record then. 7397 * btrfs_qgroup_free_data() call will do nothing here. 7398 * 7399 * 2) Not written to disk yet 7400 * Then btrfs_qgroup_free_data() call will clear the 7401 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 7402 * reserved data space. 7403 * Since the IO will never happen for this page. 7404 */ 7405 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); 7406 if (!inode_evicting) { 7407 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 7408 EXTENT_DELALLOC | EXTENT_UPTODATE | 7409 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 7410 extra_flags, &cached_state); 7411 } 7412 cur = range_end + 1; 7413 } 7414 /* 7415 * We have iterated through all ordered extents of the page, the page 7416 * should not have Ordered anymore, or the above iteration 7417 * did something wrong. 7418 */ 7419 ASSERT(!folio_test_ordered(folio)); 7420 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio)); 7421 if (!inode_evicting) 7422 __btrfs_release_folio(folio, GFP_NOFS); 7423 clear_folio_extent_mapped(folio); 7424 } 7425 7426 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) 7427 { 7428 struct btrfs_truncate_control control = { 7429 .inode = inode, 7430 .ino = btrfs_ino(inode), 7431 .min_type = BTRFS_EXTENT_DATA_KEY, 7432 .clear_extent_range = true, 7433 }; 7434 struct btrfs_root *root = inode->root; 7435 struct btrfs_fs_info *fs_info = root->fs_info; 7436 struct btrfs_block_rsv *rsv; 7437 int ret; 7438 struct btrfs_trans_handle *trans; 7439 u64 mask = fs_info->sectorsize - 1; 7440 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 7441 7442 if (!skip_writeback) { 7443 ret = btrfs_wait_ordered_range(inode, 7444 inode->vfs_inode.i_size & (~mask), 7445 (u64)-1); 7446 if (ret) 7447 return ret; 7448 } 7449 7450 /* 7451 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 7452 * things going on here: 7453 * 7454 * 1) We need to reserve space to update our inode. 7455 * 7456 * 2) We need to have something to cache all the space that is going to 7457 * be free'd up by the truncate operation, but also have some slack 7458 * space reserved in case it uses space during the truncate (thank you 7459 * very much snapshotting). 7460 * 7461 * And we need these to be separate. The fact is we can use a lot of 7462 * space doing the truncate, and we have no earthly idea how much space 7463 * we will use, so we need the truncate reservation to be separate so it 7464 * doesn't end up using space reserved for updating the inode. We also 7465 * need to be able to stop the transaction and start a new one, which 7466 * means we need to be able to update the inode several times, and we 7467 * have no idea of knowing how many times that will be, so we can't just 7468 * reserve 1 item for the entirety of the operation, so that has to be 7469 * done separately as well. 7470 * 7471 * So that leaves us with 7472 * 7473 * 1) rsv - for the truncate reservation, which we will steal from the 7474 * transaction reservation. 7475 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 7476 * updating the inode. 7477 */ 7478 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 7479 if (!rsv) 7480 return -ENOMEM; 7481 rsv->size = min_size; 7482 rsv->failfast = true; 7483 7484 /* 7485 * 1 for the truncate slack space 7486 * 1 for updating the inode. 7487 */ 7488 trans = btrfs_start_transaction(root, 2); 7489 if (IS_ERR(trans)) { 7490 ret = PTR_ERR(trans); 7491 goto out; 7492 } 7493 7494 /* Migrate the slack space for the truncate to our reserve */ 7495 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 7496 min_size, false); 7497 /* 7498 * We have reserved 2 metadata units when we started the transaction and 7499 * min_size matches 1 unit, so this should never fail, but if it does, 7500 * it's not critical we just fail truncation. 7501 */ 7502 if (WARN_ON(ret)) { 7503 btrfs_end_transaction(trans); 7504 goto out; 7505 } 7506 7507 trans->block_rsv = rsv; 7508 7509 while (1) { 7510 struct extent_state *cached_state = NULL; 7511 const u64 new_size = inode->vfs_inode.i_size; 7512 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 7513 7514 control.new_size = new_size; 7515 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 7516 /* 7517 * We want to drop from the next block forward in case this new 7518 * size is not block aligned since we will be keeping the last 7519 * block of the extent just the way it is. 7520 */ 7521 btrfs_drop_extent_map_range(inode, 7522 ALIGN(new_size, fs_info->sectorsize), 7523 (u64)-1, false); 7524 7525 ret = btrfs_truncate_inode_items(trans, root, &control); 7526 7527 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 7528 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 7529 7530 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 7531 7532 trans->block_rsv = &fs_info->trans_block_rsv; 7533 if (ret != -ENOSPC && ret != -EAGAIN) 7534 break; 7535 7536 ret = btrfs_update_inode(trans, inode); 7537 if (ret) 7538 break; 7539 7540 btrfs_end_transaction(trans); 7541 btrfs_btree_balance_dirty(fs_info); 7542 7543 trans = btrfs_start_transaction(root, 2); 7544 if (IS_ERR(trans)) { 7545 ret = PTR_ERR(trans); 7546 trans = NULL; 7547 break; 7548 } 7549 7550 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 7551 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 7552 rsv, min_size, false); 7553 /* 7554 * We have reserved 2 metadata units when we started the 7555 * transaction and min_size matches 1 unit, so this should never 7556 * fail, but if it does, it's not critical we just fail truncation. 7557 */ 7558 if (WARN_ON(ret)) 7559 break; 7560 7561 trans->block_rsv = rsv; 7562 } 7563 7564 /* 7565 * We can't call btrfs_truncate_block inside a trans handle as we could 7566 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 7567 * know we've truncated everything except the last little bit, and can 7568 * do btrfs_truncate_block and then update the disk_i_size. 7569 */ 7570 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 7571 btrfs_end_transaction(trans); 7572 btrfs_btree_balance_dirty(fs_info); 7573 7574 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 7575 if (ret) 7576 goto out; 7577 trans = btrfs_start_transaction(root, 1); 7578 if (IS_ERR(trans)) { 7579 ret = PTR_ERR(trans); 7580 goto out; 7581 } 7582 btrfs_inode_safe_disk_i_size_write(inode, 0); 7583 } 7584 7585 if (trans) { 7586 int ret2; 7587 7588 trans->block_rsv = &fs_info->trans_block_rsv; 7589 ret2 = btrfs_update_inode(trans, inode); 7590 if (ret2 && !ret) 7591 ret = ret2; 7592 7593 ret2 = btrfs_end_transaction(trans); 7594 if (ret2 && !ret) 7595 ret = ret2; 7596 btrfs_btree_balance_dirty(fs_info); 7597 } 7598 out: 7599 btrfs_free_block_rsv(fs_info, rsv); 7600 /* 7601 * So if we truncate and then write and fsync we normally would just 7602 * write the extents that changed, which is a problem if we need to 7603 * first truncate that entire inode. So set this flag so we write out 7604 * all of the extents in the inode to the sync log so we're completely 7605 * safe. 7606 * 7607 * If no extents were dropped or trimmed we don't need to force the next 7608 * fsync to truncate all the inode's items from the log and re-log them 7609 * all. This means the truncate operation did not change the file size, 7610 * or changed it to a smaller size but there was only an implicit hole 7611 * between the old i_size and the new i_size, and there were no prealloc 7612 * extents beyond i_size to drop. 7613 */ 7614 if (control.extents_found > 0) 7615 btrfs_set_inode_full_sync(inode); 7616 7617 return ret; 7618 } 7619 7620 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 7621 struct inode *dir) 7622 { 7623 struct inode *inode; 7624 7625 inode = new_inode(dir->i_sb); 7626 if (inode) { 7627 /* 7628 * Subvolumes don't inherit the sgid bit or the parent's gid if 7629 * the parent's sgid bit is set. This is probably a bug. 7630 */ 7631 inode_init_owner(idmap, inode, NULL, 7632 S_IFDIR | (~current_umask() & S_IRWXUGO)); 7633 inode->i_op = &btrfs_dir_inode_operations; 7634 inode->i_fop = &btrfs_dir_file_operations; 7635 } 7636 return inode; 7637 } 7638 7639 struct inode *btrfs_alloc_inode(struct super_block *sb) 7640 { 7641 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 7642 struct btrfs_inode *ei; 7643 struct inode *inode; 7644 7645 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 7646 if (!ei) 7647 return NULL; 7648 7649 ei->root = NULL; 7650 ei->generation = 0; 7651 ei->last_trans = 0; 7652 ei->last_sub_trans = 0; 7653 ei->logged_trans = 0; 7654 ei->delalloc_bytes = 0; 7655 ei->new_delalloc_bytes = 0; 7656 ei->defrag_bytes = 0; 7657 ei->disk_i_size = 0; 7658 ei->flags = 0; 7659 ei->ro_flags = 0; 7660 /* 7661 * ->index_cnt will be properly initialized later when creating a new 7662 * inode (btrfs_create_new_inode()) or when reading an existing inode 7663 * from disk (btrfs_read_locked_inode()). 7664 */ 7665 ei->csum_bytes = 0; 7666 ei->dir_index = 0; 7667 ei->last_unlink_trans = 0; 7668 ei->last_reflink_trans = 0; 7669 ei->last_log_commit = 0; 7670 7671 spin_lock_init(&ei->lock); 7672 ei->outstanding_extents = 0; 7673 if (sb->s_magic != BTRFS_TEST_MAGIC) 7674 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 7675 BTRFS_BLOCK_RSV_DELALLOC); 7676 ei->runtime_flags = 0; 7677 ei->prop_compress = BTRFS_COMPRESS_NONE; 7678 ei->defrag_compress = BTRFS_COMPRESS_NONE; 7679 7680 ei->delayed_node = NULL; 7681 7682 ei->i_otime_sec = 0; 7683 ei->i_otime_nsec = 0; 7684 7685 inode = &ei->vfs_inode; 7686 extent_map_tree_init(&ei->extent_tree); 7687 7688 /* This io tree sets the valid inode. */ 7689 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 7690 ei->io_tree.inode = ei; 7691 7692 ei->file_extent_tree = NULL; 7693 7694 mutex_init(&ei->log_mutex); 7695 spin_lock_init(&ei->ordered_tree_lock); 7696 ei->ordered_tree = RB_ROOT; 7697 ei->ordered_tree_last = NULL; 7698 INIT_LIST_HEAD(&ei->delalloc_inodes); 7699 INIT_LIST_HEAD(&ei->delayed_iput); 7700 init_rwsem(&ei->i_mmap_lock); 7701 7702 return inode; 7703 } 7704 7705 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 7706 void btrfs_test_destroy_inode(struct inode *inode) 7707 { 7708 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 7709 kfree(BTRFS_I(inode)->file_extent_tree); 7710 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7711 } 7712 #endif 7713 7714 void btrfs_free_inode(struct inode *inode) 7715 { 7716 kfree(BTRFS_I(inode)->file_extent_tree); 7717 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 7718 } 7719 7720 void btrfs_destroy_inode(struct inode *vfs_inode) 7721 { 7722 struct btrfs_ordered_extent *ordered; 7723 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 7724 struct btrfs_root *root = inode->root; 7725 bool freespace_inode; 7726 7727 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 7728 WARN_ON(vfs_inode->i_data.nrpages); 7729 WARN_ON(inode->block_rsv.reserved); 7730 WARN_ON(inode->block_rsv.size); 7731 WARN_ON(inode->outstanding_extents); 7732 if (!S_ISDIR(vfs_inode->i_mode)) { 7733 WARN_ON(inode->delalloc_bytes); 7734 WARN_ON(inode->new_delalloc_bytes); 7735 WARN_ON(inode->csum_bytes); 7736 } 7737 if (!root || !btrfs_is_data_reloc_root(root)) 7738 WARN_ON(inode->defrag_bytes); 7739 7740 /* 7741 * This can happen where we create an inode, but somebody else also 7742 * created the same inode and we need to destroy the one we already 7743 * created. 7744 */ 7745 if (!root) 7746 return; 7747 7748 /* 7749 * If this is a free space inode do not take the ordered extents lockdep 7750 * map. 7751 */ 7752 freespace_inode = btrfs_is_free_space_inode(inode); 7753 7754 while (1) { 7755 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 7756 if (!ordered) 7757 break; 7758 else { 7759 btrfs_err(root->fs_info, 7760 "found ordered extent %llu %llu on inode cleanup", 7761 ordered->file_offset, ordered->num_bytes); 7762 7763 if (!freespace_inode) 7764 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); 7765 7766 btrfs_remove_ordered_extent(inode, ordered); 7767 btrfs_put_ordered_extent(ordered); 7768 btrfs_put_ordered_extent(ordered); 7769 } 7770 } 7771 btrfs_qgroup_check_reserved_leak(inode); 7772 btrfs_del_inode_from_root(inode); 7773 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 7774 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 7775 btrfs_put_root(inode->root); 7776 } 7777 7778 int btrfs_drop_inode(struct inode *inode) 7779 { 7780 struct btrfs_root *root = BTRFS_I(inode)->root; 7781 7782 if (root == NULL) 7783 return 1; 7784 7785 /* the snap/subvol tree is on deleting */ 7786 if (btrfs_root_refs(&root->root_item) == 0) 7787 return 1; 7788 else 7789 return generic_drop_inode(inode); 7790 } 7791 7792 static void init_once(void *foo) 7793 { 7794 struct btrfs_inode *ei = foo; 7795 7796 inode_init_once(&ei->vfs_inode); 7797 } 7798 7799 void __cold btrfs_destroy_cachep(void) 7800 { 7801 /* 7802 * Make sure all delayed rcu free inodes are flushed before we 7803 * destroy cache. 7804 */ 7805 rcu_barrier(); 7806 kmem_cache_destroy(btrfs_inode_cachep); 7807 } 7808 7809 int __init btrfs_init_cachep(void) 7810 { 7811 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 7812 sizeof(struct btrfs_inode), 0, 7813 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, 7814 init_once); 7815 if (!btrfs_inode_cachep) 7816 return -ENOMEM; 7817 7818 return 0; 7819 } 7820 7821 static int btrfs_getattr(struct mnt_idmap *idmap, 7822 const struct path *path, struct kstat *stat, 7823 u32 request_mask, unsigned int flags) 7824 { 7825 u64 delalloc_bytes; 7826 u64 inode_bytes; 7827 struct inode *inode = d_inode(path->dentry); 7828 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; 7829 u32 bi_flags = BTRFS_I(inode)->flags; 7830 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 7831 7832 stat->result_mask |= STATX_BTIME; 7833 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec; 7834 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec; 7835 if (bi_flags & BTRFS_INODE_APPEND) 7836 stat->attributes |= STATX_ATTR_APPEND; 7837 if (bi_flags & BTRFS_INODE_COMPRESS) 7838 stat->attributes |= STATX_ATTR_COMPRESSED; 7839 if (bi_flags & BTRFS_INODE_IMMUTABLE) 7840 stat->attributes |= STATX_ATTR_IMMUTABLE; 7841 if (bi_flags & BTRFS_INODE_NODUMP) 7842 stat->attributes |= STATX_ATTR_NODUMP; 7843 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 7844 stat->attributes |= STATX_ATTR_VERITY; 7845 7846 stat->attributes_mask |= (STATX_ATTR_APPEND | 7847 STATX_ATTR_COMPRESSED | 7848 STATX_ATTR_IMMUTABLE | 7849 STATX_ATTR_NODUMP); 7850 7851 generic_fillattr(idmap, request_mask, inode, stat); 7852 stat->dev = BTRFS_I(inode)->root->anon_dev; 7853 7854 stat->subvol = BTRFS_I(inode)->root->root_key.objectid; 7855 stat->result_mask |= STATX_SUBVOL; 7856 7857 spin_lock(&BTRFS_I(inode)->lock); 7858 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 7859 inode_bytes = inode_get_bytes(inode); 7860 spin_unlock(&BTRFS_I(inode)->lock); 7861 stat->blocks = (ALIGN(inode_bytes, blocksize) + 7862 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT; 7863 return 0; 7864 } 7865 7866 static int btrfs_rename_exchange(struct inode *old_dir, 7867 struct dentry *old_dentry, 7868 struct inode *new_dir, 7869 struct dentry *new_dentry) 7870 { 7871 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 7872 struct btrfs_trans_handle *trans; 7873 unsigned int trans_num_items; 7874 struct btrfs_root *root = BTRFS_I(old_dir)->root; 7875 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 7876 struct inode *new_inode = new_dentry->d_inode; 7877 struct inode *old_inode = old_dentry->d_inode; 7878 struct btrfs_rename_ctx old_rename_ctx; 7879 struct btrfs_rename_ctx new_rename_ctx; 7880 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 7881 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 7882 u64 old_idx = 0; 7883 u64 new_idx = 0; 7884 int ret; 7885 int ret2; 7886 bool need_abort = false; 7887 struct fscrypt_name old_fname, new_fname; 7888 struct fscrypt_str *old_name, *new_name; 7889 7890 /* 7891 * For non-subvolumes allow exchange only within one subvolume, in the 7892 * same inode namespace. Two subvolumes (represented as directory) can 7893 * be exchanged as they're a logical link and have a fixed inode number. 7894 */ 7895 if (root != dest && 7896 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 7897 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 7898 return -EXDEV; 7899 7900 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 7901 if (ret) 7902 return ret; 7903 7904 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 7905 if (ret) { 7906 fscrypt_free_filename(&old_fname); 7907 return ret; 7908 } 7909 7910 old_name = &old_fname.disk_name; 7911 new_name = &new_fname.disk_name; 7912 7913 /* close the race window with snapshot create/destroy ioctl */ 7914 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 7915 new_ino == BTRFS_FIRST_FREE_OBJECTID) 7916 down_read(&fs_info->subvol_sem); 7917 7918 /* 7919 * For each inode: 7920 * 1 to remove old dir item 7921 * 1 to remove old dir index 7922 * 1 to add new dir item 7923 * 1 to add new dir index 7924 * 1 to update parent inode 7925 * 7926 * If the parents are the same, we only need to account for one 7927 */ 7928 trans_num_items = (old_dir == new_dir ? 9 : 10); 7929 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 7930 /* 7931 * 1 to remove old root ref 7932 * 1 to remove old root backref 7933 * 1 to add new root ref 7934 * 1 to add new root backref 7935 */ 7936 trans_num_items += 4; 7937 } else { 7938 /* 7939 * 1 to update inode item 7940 * 1 to remove old inode ref 7941 * 1 to add new inode ref 7942 */ 7943 trans_num_items += 3; 7944 } 7945 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 7946 trans_num_items += 4; 7947 else 7948 trans_num_items += 3; 7949 trans = btrfs_start_transaction(root, trans_num_items); 7950 if (IS_ERR(trans)) { 7951 ret = PTR_ERR(trans); 7952 goto out_notrans; 7953 } 7954 7955 if (dest != root) { 7956 ret = btrfs_record_root_in_trans(trans, dest); 7957 if (ret) 7958 goto out_fail; 7959 } 7960 7961 /* 7962 * We need to find a free sequence number both in the source and 7963 * in the destination directory for the exchange. 7964 */ 7965 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 7966 if (ret) 7967 goto out_fail; 7968 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 7969 if (ret) 7970 goto out_fail; 7971 7972 BTRFS_I(old_inode)->dir_index = 0ULL; 7973 BTRFS_I(new_inode)->dir_index = 0ULL; 7974 7975 /* Reference for the source. */ 7976 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 7977 /* force full log commit if subvolume involved. */ 7978 btrfs_set_log_full_commit(trans); 7979 } else { 7980 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 7981 btrfs_ino(BTRFS_I(new_dir)), 7982 old_idx); 7983 if (ret) 7984 goto out_fail; 7985 need_abort = true; 7986 } 7987 7988 /* And now for the dest. */ 7989 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 7990 /* force full log commit if subvolume involved. */ 7991 btrfs_set_log_full_commit(trans); 7992 } else { 7993 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 7994 btrfs_ino(BTRFS_I(old_dir)), 7995 new_idx); 7996 if (ret) { 7997 if (need_abort) 7998 btrfs_abort_transaction(trans, ret); 7999 goto out_fail; 8000 } 8001 } 8002 8003 /* Update inode version and ctime/mtime. */ 8004 inode_inc_iversion(old_dir); 8005 inode_inc_iversion(new_dir); 8006 inode_inc_iversion(old_inode); 8007 inode_inc_iversion(new_inode); 8008 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 8009 8010 if (old_dentry->d_parent != new_dentry->d_parent) { 8011 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8012 BTRFS_I(old_inode), true); 8013 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8014 BTRFS_I(new_inode), true); 8015 } 8016 8017 /* src is a subvolume */ 8018 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8019 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8020 } else { /* src is an inode */ 8021 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8022 BTRFS_I(old_dentry->d_inode), 8023 old_name, &old_rename_ctx); 8024 if (!ret) 8025 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8026 } 8027 if (ret) { 8028 btrfs_abort_transaction(trans, ret); 8029 goto out_fail; 8030 } 8031 8032 /* dest is a subvolume */ 8033 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8034 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8035 } else { /* dest is an inode */ 8036 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8037 BTRFS_I(new_dentry->d_inode), 8038 new_name, &new_rename_ctx); 8039 if (!ret) 8040 ret = btrfs_update_inode(trans, BTRFS_I(new_inode)); 8041 } 8042 if (ret) { 8043 btrfs_abort_transaction(trans, ret); 8044 goto out_fail; 8045 } 8046 8047 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8048 new_name, 0, old_idx); 8049 if (ret) { 8050 btrfs_abort_transaction(trans, ret); 8051 goto out_fail; 8052 } 8053 8054 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 8055 old_name, 0, new_idx); 8056 if (ret) { 8057 btrfs_abort_transaction(trans, ret); 8058 goto out_fail; 8059 } 8060 8061 if (old_inode->i_nlink == 1) 8062 BTRFS_I(old_inode)->dir_index = old_idx; 8063 if (new_inode->i_nlink == 1) 8064 BTRFS_I(new_inode)->dir_index = new_idx; 8065 8066 /* 8067 * Now pin the logs of the roots. We do it to ensure that no other task 8068 * can sync the logs while we are in progress with the rename, because 8069 * that could result in an inconsistency in case any of the inodes that 8070 * are part of this rename operation were logged before. 8071 */ 8072 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8073 btrfs_pin_log_trans(root); 8074 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8075 btrfs_pin_log_trans(dest); 8076 8077 /* Do the log updates for all inodes. */ 8078 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8079 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8080 old_rename_ctx.index, new_dentry->d_parent); 8081 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8082 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 8083 new_rename_ctx.index, old_dentry->d_parent); 8084 8085 /* Now unpin the logs. */ 8086 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8087 btrfs_end_log_trans(root); 8088 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8089 btrfs_end_log_trans(dest); 8090 out_fail: 8091 ret2 = btrfs_end_transaction(trans); 8092 ret = ret ? ret : ret2; 8093 out_notrans: 8094 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 8095 old_ino == BTRFS_FIRST_FREE_OBJECTID) 8096 up_read(&fs_info->subvol_sem); 8097 8098 fscrypt_free_filename(&new_fname); 8099 fscrypt_free_filename(&old_fname); 8100 return ret; 8101 } 8102 8103 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap, 8104 struct inode *dir) 8105 { 8106 struct inode *inode; 8107 8108 inode = new_inode(dir->i_sb); 8109 if (inode) { 8110 inode_init_owner(idmap, inode, dir, 8111 S_IFCHR | WHITEOUT_MODE); 8112 inode->i_op = &btrfs_special_inode_operations; 8113 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 8114 } 8115 return inode; 8116 } 8117 8118 static int btrfs_rename(struct mnt_idmap *idmap, 8119 struct inode *old_dir, struct dentry *old_dentry, 8120 struct inode *new_dir, struct dentry *new_dentry, 8121 unsigned int flags) 8122 { 8123 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir); 8124 struct btrfs_new_inode_args whiteout_args = { 8125 .dir = old_dir, 8126 .dentry = old_dentry, 8127 }; 8128 struct btrfs_trans_handle *trans; 8129 unsigned int trans_num_items; 8130 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8131 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8132 struct inode *new_inode = d_inode(new_dentry); 8133 struct inode *old_inode = d_inode(old_dentry); 8134 struct btrfs_rename_ctx rename_ctx; 8135 u64 index = 0; 8136 int ret; 8137 int ret2; 8138 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8139 struct fscrypt_name old_fname, new_fname; 8140 8141 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 8142 return -EPERM; 8143 8144 /* we only allow rename subvolume link between subvolumes */ 8145 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8146 return -EXDEV; 8147 8148 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 8149 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 8150 return -ENOTEMPTY; 8151 8152 if (S_ISDIR(old_inode->i_mode) && new_inode && 8153 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 8154 return -ENOTEMPTY; 8155 8156 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8157 if (ret) 8158 return ret; 8159 8160 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8161 if (ret) { 8162 fscrypt_free_filename(&old_fname); 8163 return ret; 8164 } 8165 8166 /* check for collisions, even if the name isn't there */ 8167 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 8168 if (ret) { 8169 if (ret == -EEXIST) { 8170 /* we shouldn't get 8171 * eexist without a new_inode */ 8172 if (WARN_ON(!new_inode)) { 8173 goto out_fscrypt_names; 8174 } 8175 } else { 8176 /* maybe -EOVERFLOW */ 8177 goto out_fscrypt_names; 8178 } 8179 } 8180 ret = 0; 8181 8182 /* 8183 * we're using rename to replace one file with another. Start IO on it 8184 * now so we don't add too much work to the end of the transaction 8185 */ 8186 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 8187 filemap_flush(old_inode->i_mapping); 8188 8189 if (flags & RENAME_WHITEOUT) { 8190 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 8191 if (!whiteout_args.inode) { 8192 ret = -ENOMEM; 8193 goto out_fscrypt_names; 8194 } 8195 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 8196 if (ret) 8197 goto out_whiteout_inode; 8198 } else { 8199 /* 1 to update the old parent inode. */ 8200 trans_num_items = 1; 8201 } 8202 8203 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8204 /* Close the race window with snapshot create/destroy ioctl */ 8205 down_read(&fs_info->subvol_sem); 8206 /* 8207 * 1 to remove old root ref 8208 * 1 to remove old root backref 8209 * 1 to add new root ref 8210 * 1 to add new root backref 8211 */ 8212 trans_num_items += 4; 8213 } else { 8214 /* 8215 * 1 to update inode 8216 * 1 to remove old inode ref 8217 * 1 to add new inode ref 8218 */ 8219 trans_num_items += 3; 8220 } 8221 /* 8222 * 1 to remove old dir item 8223 * 1 to remove old dir index 8224 * 1 to add new dir item 8225 * 1 to add new dir index 8226 */ 8227 trans_num_items += 4; 8228 /* 1 to update new parent inode if it's not the same as the old parent */ 8229 if (new_dir != old_dir) 8230 trans_num_items++; 8231 if (new_inode) { 8232 /* 8233 * 1 to update inode 8234 * 1 to remove inode ref 8235 * 1 to remove dir item 8236 * 1 to remove dir index 8237 * 1 to possibly add orphan item 8238 */ 8239 trans_num_items += 5; 8240 } 8241 trans = btrfs_start_transaction(root, trans_num_items); 8242 if (IS_ERR(trans)) { 8243 ret = PTR_ERR(trans); 8244 goto out_notrans; 8245 } 8246 8247 if (dest != root) { 8248 ret = btrfs_record_root_in_trans(trans, dest); 8249 if (ret) 8250 goto out_fail; 8251 } 8252 8253 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 8254 if (ret) 8255 goto out_fail; 8256 8257 BTRFS_I(old_inode)->dir_index = 0ULL; 8258 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 8259 /* force full log commit if subvolume involved. */ 8260 btrfs_set_log_full_commit(trans); 8261 } else { 8262 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 8263 old_ino, btrfs_ino(BTRFS_I(new_dir)), 8264 index); 8265 if (ret) 8266 goto out_fail; 8267 } 8268 8269 inode_inc_iversion(old_dir); 8270 inode_inc_iversion(new_dir); 8271 inode_inc_iversion(old_inode); 8272 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 8273 8274 if (old_dentry->d_parent != new_dentry->d_parent) 8275 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8276 BTRFS_I(old_inode), true); 8277 8278 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 8279 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8280 } else { 8281 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8282 BTRFS_I(d_inode(old_dentry)), 8283 &old_fname.disk_name, &rename_ctx); 8284 if (!ret) 8285 ret = btrfs_update_inode(trans, BTRFS_I(old_inode)); 8286 } 8287 if (ret) { 8288 btrfs_abort_transaction(trans, ret); 8289 goto out_fail; 8290 } 8291 8292 if (new_inode) { 8293 inode_inc_iversion(new_inode); 8294 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 8295 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 8296 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8297 BUG_ON(new_inode->i_nlink == 0); 8298 } else { 8299 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8300 BTRFS_I(d_inode(new_dentry)), 8301 &new_fname.disk_name); 8302 } 8303 if (!ret && new_inode->i_nlink == 0) 8304 ret = btrfs_orphan_add(trans, 8305 BTRFS_I(d_inode(new_dentry))); 8306 if (ret) { 8307 btrfs_abort_transaction(trans, ret); 8308 goto out_fail; 8309 } 8310 } 8311 8312 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8313 &new_fname.disk_name, 0, index); 8314 if (ret) { 8315 btrfs_abort_transaction(trans, ret); 8316 goto out_fail; 8317 } 8318 8319 if (old_inode->i_nlink == 1) 8320 BTRFS_I(old_inode)->dir_index = index; 8321 8322 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8323 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8324 rename_ctx.index, new_dentry->d_parent); 8325 8326 if (flags & RENAME_WHITEOUT) { 8327 ret = btrfs_create_new_inode(trans, &whiteout_args); 8328 if (ret) { 8329 btrfs_abort_transaction(trans, ret); 8330 goto out_fail; 8331 } else { 8332 unlock_new_inode(whiteout_args.inode); 8333 iput(whiteout_args.inode); 8334 whiteout_args.inode = NULL; 8335 } 8336 } 8337 out_fail: 8338 ret2 = btrfs_end_transaction(trans); 8339 ret = ret ? ret : ret2; 8340 out_notrans: 8341 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 8342 up_read(&fs_info->subvol_sem); 8343 if (flags & RENAME_WHITEOUT) 8344 btrfs_new_inode_args_destroy(&whiteout_args); 8345 out_whiteout_inode: 8346 if (flags & RENAME_WHITEOUT) 8347 iput(whiteout_args.inode); 8348 out_fscrypt_names: 8349 fscrypt_free_filename(&old_fname); 8350 fscrypt_free_filename(&new_fname); 8351 return ret; 8352 } 8353 8354 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 8355 struct dentry *old_dentry, struct inode *new_dir, 8356 struct dentry *new_dentry, unsigned int flags) 8357 { 8358 int ret; 8359 8360 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 8361 return -EINVAL; 8362 8363 if (flags & RENAME_EXCHANGE) 8364 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 8365 new_dentry); 8366 else 8367 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 8368 new_dentry, flags); 8369 8370 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 8371 8372 return ret; 8373 } 8374 8375 struct btrfs_delalloc_work { 8376 struct inode *inode; 8377 struct completion completion; 8378 struct list_head list; 8379 struct btrfs_work work; 8380 }; 8381 8382 static void btrfs_run_delalloc_work(struct btrfs_work *work) 8383 { 8384 struct btrfs_delalloc_work *delalloc_work; 8385 struct inode *inode; 8386 8387 delalloc_work = container_of(work, struct btrfs_delalloc_work, 8388 work); 8389 inode = delalloc_work->inode; 8390 filemap_flush(inode->i_mapping); 8391 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8392 &BTRFS_I(inode)->runtime_flags)) 8393 filemap_flush(inode->i_mapping); 8394 8395 iput(inode); 8396 complete(&delalloc_work->completion); 8397 } 8398 8399 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 8400 { 8401 struct btrfs_delalloc_work *work; 8402 8403 work = kmalloc(sizeof(*work), GFP_NOFS); 8404 if (!work) 8405 return NULL; 8406 8407 init_completion(&work->completion); 8408 INIT_LIST_HEAD(&work->list); 8409 work->inode = inode; 8410 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL); 8411 8412 return work; 8413 } 8414 8415 /* 8416 * some fairly slow code that needs optimization. This walks the list 8417 * of all the inodes with pending delalloc and forces them to disk. 8418 */ 8419 static int start_delalloc_inodes(struct btrfs_root *root, 8420 struct writeback_control *wbc, bool snapshot, 8421 bool in_reclaim_context) 8422 { 8423 struct btrfs_inode *binode; 8424 struct inode *inode; 8425 struct btrfs_delalloc_work *work, *next; 8426 LIST_HEAD(works); 8427 LIST_HEAD(splice); 8428 int ret = 0; 8429 bool full_flush = wbc->nr_to_write == LONG_MAX; 8430 8431 mutex_lock(&root->delalloc_mutex); 8432 spin_lock(&root->delalloc_lock); 8433 list_splice_init(&root->delalloc_inodes, &splice); 8434 while (!list_empty(&splice)) { 8435 binode = list_entry(splice.next, struct btrfs_inode, 8436 delalloc_inodes); 8437 8438 list_move_tail(&binode->delalloc_inodes, 8439 &root->delalloc_inodes); 8440 8441 if (in_reclaim_context && 8442 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 8443 continue; 8444 8445 inode = igrab(&binode->vfs_inode); 8446 if (!inode) { 8447 cond_resched_lock(&root->delalloc_lock); 8448 continue; 8449 } 8450 spin_unlock(&root->delalloc_lock); 8451 8452 if (snapshot) 8453 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 8454 &binode->runtime_flags); 8455 if (full_flush) { 8456 work = btrfs_alloc_delalloc_work(inode); 8457 if (!work) { 8458 iput(inode); 8459 ret = -ENOMEM; 8460 goto out; 8461 } 8462 list_add_tail(&work->list, &works); 8463 btrfs_queue_work(root->fs_info->flush_workers, 8464 &work->work); 8465 } else { 8466 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 8467 btrfs_add_delayed_iput(BTRFS_I(inode)); 8468 if (ret || wbc->nr_to_write <= 0) 8469 goto out; 8470 } 8471 cond_resched(); 8472 spin_lock(&root->delalloc_lock); 8473 } 8474 spin_unlock(&root->delalloc_lock); 8475 8476 out: 8477 list_for_each_entry_safe(work, next, &works, list) { 8478 list_del_init(&work->list); 8479 wait_for_completion(&work->completion); 8480 kfree(work); 8481 } 8482 8483 if (!list_empty(&splice)) { 8484 spin_lock(&root->delalloc_lock); 8485 list_splice_tail(&splice, &root->delalloc_inodes); 8486 spin_unlock(&root->delalloc_lock); 8487 } 8488 mutex_unlock(&root->delalloc_mutex); 8489 return ret; 8490 } 8491 8492 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 8493 { 8494 struct writeback_control wbc = { 8495 .nr_to_write = LONG_MAX, 8496 .sync_mode = WB_SYNC_NONE, 8497 .range_start = 0, 8498 .range_end = LLONG_MAX, 8499 }; 8500 struct btrfs_fs_info *fs_info = root->fs_info; 8501 8502 if (BTRFS_FS_ERROR(fs_info)) 8503 return -EROFS; 8504 8505 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 8506 } 8507 8508 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 8509 bool in_reclaim_context) 8510 { 8511 struct writeback_control wbc = { 8512 .nr_to_write = nr, 8513 .sync_mode = WB_SYNC_NONE, 8514 .range_start = 0, 8515 .range_end = LLONG_MAX, 8516 }; 8517 struct btrfs_root *root; 8518 LIST_HEAD(splice); 8519 int ret; 8520 8521 if (BTRFS_FS_ERROR(fs_info)) 8522 return -EROFS; 8523 8524 mutex_lock(&fs_info->delalloc_root_mutex); 8525 spin_lock(&fs_info->delalloc_root_lock); 8526 list_splice_init(&fs_info->delalloc_roots, &splice); 8527 while (!list_empty(&splice)) { 8528 /* 8529 * Reset nr_to_write here so we know that we're doing a full 8530 * flush. 8531 */ 8532 if (nr == LONG_MAX) 8533 wbc.nr_to_write = LONG_MAX; 8534 8535 root = list_first_entry(&splice, struct btrfs_root, 8536 delalloc_root); 8537 root = btrfs_grab_root(root); 8538 BUG_ON(!root); 8539 list_move_tail(&root->delalloc_root, 8540 &fs_info->delalloc_roots); 8541 spin_unlock(&fs_info->delalloc_root_lock); 8542 8543 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 8544 btrfs_put_root(root); 8545 if (ret < 0 || wbc.nr_to_write <= 0) 8546 goto out; 8547 spin_lock(&fs_info->delalloc_root_lock); 8548 } 8549 spin_unlock(&fs_info->delalloc_root_lock); 8550 8551 ret = 0; 8552 out: 8553 if (!list_empty(&splice)) { 8554 spin_lock(&fs_info->delalloc_root_lock); 8555 list_splice_tail(&splice, &fs_info->delalloc_roots); 8556 spin_unlock(&fs_info->delalloc_root_lock); 8557 } 8558 mutex_unlock(&fs_info->delalloc_root_mutex); 8559 return ret; 8560 } 8561 8562 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 8563 struct dentry *dentry, const char *symname) 8564 { 8565 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 8566 struct btrfs_trans_handle *trans; 8567 struct btrfs_root *root = BTRFS_I(dir)->root; 8568 struct btrfs_path *path; 8569 struct btrfs_key key; 8570 struct inode *inode; 8571 struct btrfs_new_inode_args new_inode_args = { 8572 .dir = dir, 8573 .dentry = dentry, 8574 }; 8575 unsigned int trans_num_items; 8576 int err; 8577 int name_len; 8578 int datasize; 8579 unsigned long ptr; 8580 struct btrfs_file_extent_item *ei; 8581 struct extent_buffer *leaf; 8582 8583 name_len = strlen(symname); 8584 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 8585 return -ENAMETOOLONG; 8586 8587 inode = new_inode(dir->i_sb); 8588 if (!inode) 8589 return -ENOMEM; 8590 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 8591 inode->i_op = &btrfs_symlink_inode_operations; 8592 inode_nohighmem(inode); 8593 inode->i_mapping->a_ops = &btrfs_aops; 8594 btrfs_i_size_write(BTRFS_I(inode), name_len); 8595 inode_set_bytes(inode, name_len); 8596 8597 new_inode_args.inode = inode; 8598 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 8599 if (err) 8600 goto out_inode; 8601 /* 1 additional item for the inline extent */ 8602 trans_num_items++; 8603 8604 trans = btrfs_start_transaction(root, trans_num_items); 8605 if (IS_ERR(trans)) { 8606 err = PTR_ERR(trans); 8607 goto out_new_inode_args; 8608 } 8609 8610 err = btrfs_create_new_inode(trans, &new_inode_args); 8611 if (err) 8612 goto out; 8613 8614 path = btrfs_alloc_path(); 8615 if (!path) { 8616 err = -ENOMEM; 8617 btrfs_abort_transaction(trans, err); 8618 discard_new_inode(inode); 8619 inode = NULL; 8620 goto out; 8621 } 8622 key.objectid = btrfs_ino(BTRFS_I(inode)); 8623 key.offset = 0; 8624 key.type = BTRFS_EXTENT_DATA_KEY; 8625 datasize = btrfs_file_extent_calc_inline_size(name_len); 8626 err = btrfs_insert_empty_item(trans, root, path, &key, 8627 datasize); 8628 if (err) { 8629 btrfs_abort_transaction(trans, err); 8630 btrfs_free_path(path); 8631 discard_new_inode(inode); 8632 inode = NULL; 8633 goto out; 8634 } 8635 leaf = path->nodes[0]; 8636 ei = btrfs_item_ptr(leaf, path->slots[0], 8637 struct btrfs_file_extent_item); 8638 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 8639 btrfs_set_file_extent_type(leaf, ei, 8640 BTRFS_FILE_EXTENT_INLINE); 8641 btrfs_set_file_extent_encryption(leaf, ei, 0); 8642 btrfs_set_file_extent_compression(leaf, ei, 0); 8643 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 8644 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 8645 8646 ptr = btrfs_file_extent_inline_start(ei); 8647 write_extent_buffer(leaf, symname, ptr, name_len); 8648 btrfs_mark_buffer_dirty(trans, leaf); 8649 btrfs_free_path(path); 8650 8651 d_instantiate_new(dentry, inode); 8652 err = 0; 8653 out: 8654 btrfs_end_transaction(trans); 8655 btrfs_btree_balance_dirty(fs_info); 8656 out_new_inode_args: 8657 btrfs_new_inode_args_destroy(&new_inode_args); 8658 out_inode: 8659 if (err) 8660 iput(inode); 8661 return err; 8662 } 8663 8664 static struct btrfs_trans_handle *insert_prealloc_file_extent( 8665 struct btrfs_trans_handle *trans_in, 8666 struct btrfs_inode *inode, 8667 struct btrfs_key *ins, 8668 u64 file_offset) 8669 { 8670 struct btrfs_file_extent_item stack_fi; 8671 struct btrfs_replace_extent_info extent_info; 8672 struct btrfs_trans_handle *trans = trans_in; 8673 struct btrfs_path *path; 8674 u64 start = ins->objectid; 8675 u64 len = ins->offset; 8676 u64 qgroup_released = 0; 8677 int ret; 8678 8679 memset(&stack_fi, 0, sizeof(stack_fi)); 8680 8681 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 8682 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 8683 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 8684 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 8685 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 8686 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 8687 /* Encryption and other encoding is reserved and all 0 */ 8688 8689 ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released); 8690 if (ret < 0) 8691 return ERR_PTR(ret); 8692 8693 if (trans) { 8694 ret = insert_reserved_file_extent(trans, inode, 8695 file_offset, &stack_fi, 8696 true, qgroup_released); 8697 if (ret) 8698 goto free_qgroup; 8699 return trans; 8700 } 8701 8702 extent_info.disk_offset = start; 8703 extent_info.disk_len = len; 8704 extent_info.data_offset = 0; 8705 extent_info.data_len = len; 8706 extent_info.file_offset = file_offset; 8707 extent_info.extent_buf = (char *)&stack_fi; 8708 extent_info.is_new_extent = true; 8709 extent_info.update_times = true; 8710 extent_info.qgroup_reserved = qgroup_released; 8711 extent_info.insertions = 0; 8712 8713 path = btrfs_alloc_path(); 8714 if (!path) { 8715 ret = -ENOMEM; 8716 goto free_qgroup; 8717 } 8718 8719 ret = btrfs_replace_file_extents(inode, path, file_offset, 8720 file_offset + len - 1, &extent_info, 8721 &trans); 8722 btrfs_free_path(path); 8723 if (ret) 8724 goto free_qgroup; 8725 return trans; 8726 8727 free_qgroup: 8728 /* 8729 * We have released qgroup data range at the beginning of the function, 8730 * and normally qgroup_released bytes will be freed when committing 8731 * transaction. 8732 * But if we error out early, we have to free what we have released 8733 * or we leak qgroup data reservation. 8734 */ 8735 btrfs_qgroup_free_refroot(inode->root->fs_info, 8736 btrfs_root_id(inode->root), qgroup_released, 8737 BTRFS_QGROUP_RSV_DATA); 8738 return ERR_PTR(ret); 8739 } 8740 8741 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 8742 u64 start, u64 num_bytes, u64 min_size, 8743 loff_t actual_len, u64 *alloc_hint, 8744 struct btrfs_trans_handle *trans) 8745 { 8746 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 8747 struct extent_map *em; 8748 struct btrfs_root *root = BTRFS_I(inode)->root; 8749 struct btrfs_key ins; 8750 u64 cur_offset = start; 8751 u64 clear_offset = start; 8752 u64 i_size; 8753 u64 cur_bytes; 8754 u64 last_alloc = (u64)-1; 8755 int ret = 0; 8756 bool own_trans = true; 8757 u64 end = start + num_bytes - 1; 8758 8759 if (trans) 8760 own_trans = false; 8761 while (num_bytes > 0) { 8762 cur_bytes = min_t(u64, num_bytes, SZ_256M); 8763 cur_bytes = max(cur_bytes, min_size); 8764 /* 8765 * If we are severely fragmented we could end up with really 8766 * small allocations, so if the allocator is returning small 8767 * chunks lets make its job easier by only searching for those 8768 * sized chunks. 8769 */ 8770 cur_bytes = min(cur_bytes, last_alloc); 8771 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 8772 min_size, 0, *alloc_hint, &ins, 1, 0); 8773 if (ret) 8774 break; 8775 8776 /* 8777 * We've reserved this space, and thus converted it from 8778 * ->bytes_may_use to ->bytes_reserved. Any error that happens 8779 * from here on out we will only need to clear our reservation 8780 * for the remaining unreserved area, so advance our 8781 * clear_offset by our extent size. 8782 */ 8783 clear_offset += ins.offset; 8784 8785 last_alloc = ins.offset; 8786 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 8787 &ins, cur_offset); 8788 /* 8789 * Now that we inserted the prealloc extent we can finally 8790 * decrement the number of reservations in the block group. 8791 * If we did it before, we could race with relocation and have 8792 * relocation miss the reserved extent, making it fail later. 8793 */ 8794 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 8795 if (IS_ERR(trans)) { 8796 ret = PTR_ERR(trans); 8797 btrfs_free_reserved_extent(fs_info, ins.objectid, 8798 ins.offset, 0); 8799 break; 8800 } 8801 8802 em = alloc_extent_map(); 8803 if (!em) { 8804 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 8805 cur_offset + ins.offset - 1, false); 8806 btrfs_set_inode_full_sync(BTRFS_I(inode)); 8807 goto next; 8808 } 8809 8810 em->start = cur_offset; 8811 em->len = ins.offset; 8812 em->disk_bytenr = ins.objectid; 8813 em->offset = 0; 8814 em->disk_num_bytes = ins.offset; 8815 em->ram_bytes = ins.offset; 8816 em->flags |= EXTENT_FLAG_PREALLOC; 8817 em->generation = trans->transid; 8818 8819 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 8820 free_extent_map(em); 8821 next: 8822 num_bytes -= ins.offset; 8823 cur_offset += ins.offset; 8824 *alloc_hint = ins.objectid + ins.offset; 8825 8826 inode_inc_iversion(inode); 8827 inode_set_ctime_current(inode); 8828 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 8829 if (!(mode & FALLOC_FL_KEEP_SIZE) && 8830 (actual_len > inode->i_size) && 8831 (cur_offset > inode->i_size)) { 8832 if (cur_offset > actual_len) 8833 i_size = actual_len; 8834 else 8835 i_size = cur_offset; 8836 i_size_write(inode, i_size); 8837 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 8838 } 8839 8840 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 8841 8842 if (ret) { 8843 btrfs_abort_transaction(trans, ret); 8844 if (own_trans) 8845 btrfs_end_transaction(trans); 8846 break; 8847 } 8848 8849 if (own_trans) { 8850 btrfs_end_transaction(trans); 8851 trans = NULL; 8852 } 8853 } 8854 if (clear_offset < end) 8855 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 8856 end - clear_offset + 1); 8857 return ret; 8858 } 8859 8860 int btrfs_prealloc_file_range(struct inode *inode, int mode, 8861 u64 start, u64 num_bytes, u64 min_size, 8862 loff_t actual_len, u64 *alloc_hint) 8863 { 8864 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 8865 min_size, actual_len, alloc_hint, 8866 NULL); 8867 } 8868 8869 int btrfs_prealloc_file_range_trans(struct inode *inode, 8870 struct btrfs_trans_handle *trans, int mode, 8871 u64 start, u64 num_bytes, u64 min_size, 8872 loff_t actual_len, u64 *alloc_hint) 8873 { 8874 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 8875 min_size, actual_len, alloc_hint, trans); 8876 } 8877 8878 static int btrfs_permission(struct mnt_idmap *idmap, 8879 struct inode *inode, int mask) 8880 { 8881 struct btrfs_root *root = BTRFS_I(inode)->root; 8882 umode_t mode = inode->i_mode; 8883 8884 if (mask & MAY_WRITE && 8885 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 8886 if (btrfs_root_readonly(root)) 8887 return -EROFS; 8888 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 8889 return -EACCES; 8890 } 8891 return generic_permission(idmap, inode, mask); 8892 } 8893 8894 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 8895 struct file *file, umode_t mode) 8896 { 8897 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); 8898 struct btrfs_trans_handle *trans; 8899 struct btrfs_root *root = BTRFS_I(dir)->root; 8900 struct inode *inode; 8901 struct btrfs_new_inode_args new_inode_args = { 8902 .dir = dir, 8903 .dentry = file->f_path.dentry, 8904 .orphan = true, 8905 }; 8906 unsigned int trans_num_items; 8907 int ret; 8908 8909 inode = new_inode(dir->i_sb); 8910 if (!inode) 8911 return -ENOMEM; 8912 inode_init_owner(idmap, inode, dir, mode); 8913 inode->i_fop = &btrfs_file_operations; 8914 inode->i_op = &btrfs_file_inode_operations; 8915 inode->i_mapping->a_ops = &btrfs_aops; 8916 8917 new_inode_args.inode = inode; 8918 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 8919 if (ret) 8920 goto out_inode; 8921 8922 trans = btrfs_start_transaction(root, trans_num_items); 8923 if (IS_ERR(trans)) { 8924 ret = PTR_ERR(trans); 8925 goto out_new_inode_args; 8926 } 8927 8928 ret = btrfs_create_new_inode(trans, &new_inode_args); 8929 8930 /* 8931 * We set number of links to 0 in btrfs_create_new_inode(), and here we 8932 * set it to 1 because d_tmpfile() will issue a warning if the count is 8933 * 0, through: 8934 * 8935 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 8936 */ 8937 set_nlink(inode, 1); 8938 8939 if (!ret) { 8940 d_tmpfile(file, inode); 8941 unlock_new_inode(inode); 8942 mark_inode_dirty(inode); 8943 } 8944 8945 btrfs_end_transaction(trans); 8946 btrfs_btree_balance_dirty(fs_info); 8947 out_new_inode_args: 8948 btrfs_new_inode_args_destroy(&new_inode_args); 8949 out_inode: 8950 if (ret) 8951 iput(inode); 8952 return finish_open_simple(file, ret); 8953 } 8954 8955 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 8956 int compress_type) 8957 { 8958 switch (compress_type) { 8959 case BTRFS_COMPRESS_NONE: 8960 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 8961 case BTRFS_COMPRESS_ZLIB: 8962 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 8963 case BTRFS_COMPRESS_LZO: 8964 /* 8965 * The LZO format depends on the sector size. 64K is the maximum 8966 * sector size that we support. 8967 */ 8968 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 8969 return -EINVAL; 8970 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 8971 (fs_info->sectorsize_bits - 12); 8972 case BTRFS_COMPRESS_ZSTD: 8973 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 8974 default: 8975 return -EUCLEAN; 8976 } 8977 } 8978 8979 static ssize_t btrfs_encoded_read_inline( 8980 struct kiocb *iocb, 8981 struct iov_iter *iter, u64 start, 8982 u64 lockend, 8983 struct extent_state **cached_state, 8984 u64 extent_start, size_t count, 8985 struct btrfs_ioctl_encoded_io_args *encoded, 8986 bool *unlocked) 8987 { 8988 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 8989 struct btrfs_root *root = inode->root; 8990 struct btrfs_fs_info *fs_info = root->fs_info; 8991 struct extent_io_tree *io_tree = &inode->io_tree; 8992 struct btrfs_path *path; 8993 struct extent_buffer *leaf; 8994 struct btrfs_file_extent_item *item; 8995 u64 ram_bytes; 8996 unsigned long ptr; 8997 void *tmp; 8998 ssize_t ret; 8999 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9000 9001 path = btrfs_alloc_path(); 9002 if (!path) { 9003 ret = -ENOMEM; 9004 goto out; 9005 } 9006 9007 path->nowait = nowait; 9008 9009 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9010 extent_start, 0); 9011 if (ret) { 9012 if (ret > 0) { 9013 /* The extent item disappeared? */ 9014 ret = -EIO; 9015 } 9016 goto out; 9017 } 9018 leaf = path->nodes[0]; 9019 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9020 9021 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9022 ptr = btrfs_file_extent_inline_start(item); 9023 9024 encoded->len = min_t(u64, extent_start + ram_bytes, 9025 inode->vfs_inode.i_size) - iocb->ki_pos; 9026 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9027 btrfs_file_extent_compression(leaf, item)); 9028 if (ret < 0) 9029 goto out; 9030 encoded->compression = ret; 9031 if (encoded->compression) { 9032 size_t inline_size; 9033 9034 inline_size = btrfs_file_extent_inline_item_len(leaf, 9035 path->slots[0]); 9036 if (inline_size > count) { 9037 ret = -ENOBUFS; 9038 goto out; 9039 } 9040 count = inline_size; 9041 encoded->unencoded_len = ram_bytes; 9042 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9043 } else { 9044 count = min_t(u64, count, encoded->len); 9045 encoded->len = count; 9046 encoded->unencoded_len = count; 9047 ptr += iocb->ki_pos - extent_start; 9048 } 9049 9050 tmp = kmalloc(count, GFP_NOFS); 9051 if (!tmp) { 9052 ret = -ENOMEM; 9053 goto out; 9054 } 9055 read_extent_buffer(leaf, tmp, ptr, count); 9056 btrfs_release_path(path); 9057 unlock_extent(io_tree, start, lockend, cached_state); 9058 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9059 *unlocked = true; 9060 9061 ret = copy_to_iter(tmp, count, iter); 9062 if (ret != count) 9063 ret = -EFAULT; 9064 kfree(tmp); 9065 out: 9066 btrfs_free_path(path); 9067 return ret; 9068 } 9069 9070 struct btrfs_encoded_read_private { 9071 wait_queue_head_t wait; 9072 void *uring_ctx; 9073 atomic_t pending; 9074 blk_status_t status; 9075 }; 9076 9077 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9078 { 9079 struct btrfs_encoded_read_private *priv = bbio->private; 9080 9081 if (bbio->bio.bi_status) { 9082 /* 9083 * The memory barrier implied by the atomic_dec_return() here 9084 * pairs with the memory barrier implied by the 9085 * atomic_dec_return() or io_wait_event() in 9086 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9087 * write is observed before the load of status in 9088 * btrfs_encoded_read_regular_fill_pages(). 9089 */ 9090 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9091 } 9092 if (atomic_dec_return(&priv->pending) == 0) { 9093 int err = blk_status_to_errno(READ_ONCE(priv->status)); 9094 9095 if (priv->uring_ctx) { 9096 btrfs_uring_read_extent_endio(priv->uring_ctx, err); 9097 kfree(priv); 9098 } else { 9099 wake_up(&priv->wait); 9100 } 9101 } 9102 bio_put(&bbio->bio); 9103 } 9104 9105 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9106 u64 disk_bytenr, u64 disk_io_size, 9107 struct page **pages, void *uring_ctx) 9108 { 9109 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9110 struct btrfs_encoded_read_private *priv; 9111 unsigned long i = 0; 9112 struct btrfs_bio *bbio; 9113 int ret; 9114 9115 priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS); 9116 if (!priv) 9117 return -ENOMEM; 9118 9119 init_waitqueue_head(&priv->wait); 9120 atomic_set(&priv->pending, 1); 9121 priv->status = 0; 9122 priv->uring_ctx = uring_ctx; 9123 9124 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9125 btrfs_encoded_read_endio, priv); 9126 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9127 bbio->inode = inode; 9128 9129 do { 9130 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 9131 9132 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 9133 atomic_inc(&priv->pending); 9134 btrfs_submit_bbio(bbio, 0); 9135 9136 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9137 btrfs_encoded_read_endio, priv); 9138 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9139 bbio->inode = inode; 9140 continue; 9141 } 9142 9143 i++; 9144 disk_bytenr += bytes; 9145 disk_io_size -= bytes; 9146 } while (disk_io_size); 9147 9148 atomic_inc(&priv->pending); 9149 btrfs_submit_bbio(bbio, 0); 9150 9151 if (uring_ctx) { 9152 if (atomic_dec_return(&priv->pending) == 0) { 9153 ret = blk_status_to_errno(READ_ONCE(priv->status)); 9154 btrfs_uring_read_extent_endio(uring_ctx, ret); 9155 kfree(priv); 9156 return ret; 9157 } 9158 9159 return -EIOCBQUEUED; 9160 } else { 9161 if (atomic_dec_return(&priv->pending) != 0) 9162 io_wait_event(priv->wait, !atomic_read(&priv->pending)); 9163 /* See btrfs_encoded_read_endio() for ordering. */ 9164 ret = blk_status_to_errno(READ_ONCE(priv->status)); 9165 kfree(priv); 9166 return ret; 9167 } 9168 } 9169 9170 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter, 9171 u64 start, u64 lockend, 9172 struct extent_state **cached_state, 9173 u64 disk_bytenr, u64 disk_io_size, 9174 size_t count, bool compressed, bool *unlocked) 9175 { 9176 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9177 struct extent_io_tree *io_tree = &inode->io_tree; 9178 struct page **pages; 9179 unsigned long nr_pages, i; 9180 u64 cur; 9181 size_t page_offset; 9182 ssize_t ret; 9183 9184 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9185 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9186 if (!pages) 9187 return -ENOMEM; 9188 ret = btrfs_alloc_page_array(nr_pages, pages, false); 9189 if (ret) { 9190 ret = -ENOMEM; 9191 goto out; 9192 } 9193 9194 ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr, 9195 disk_io_size, pages, NULL); 9196 if (ret) 9197 goto out; 9198 9199 unlock_extent(io_tree, start, lockend, cached_state); 9200 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9201 *unlocked = true; 9202 9203 if (compressed) { 9204 i = 0; 9205 page_offset = 0; 9206 } else { 9207 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 9208 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 9209 } 9210 cur = 0; 9211 while (cur < count) { 9212 size_t bytes = min_t(size_t, count - cur, 9213 PAGE_SIZE - page_offset); 9214 9215 if (copy_page_to_iter(pages[i], page_offset, bytes, 9216 iter) != bytes) { 9217 ret = -EFAULT; 9218 goto out; 9219 } 9220 i++; 9221 cur += bytes; 9222 page_offset = 0; 9223 } 9224 ret = count; 9225 out: 9226 for (i = 0; i < nr_pages; i++) { 9227 if (pages[i]) 9228 __free_page(pages[i]); 9229 } 9230 kfree(pages); 9231 return ret; 9232 } 9233 9234 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 9235 struct btrfs_ioctl_encoded_io_args *encoded, 9236 struct extent_state **cached_state, 9237 u64 *disk_bytenr, u64 *disk_io_size) 9238 { 9239 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9240 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9241 struct extent_io_tree *io_tree = &inode->io_tree; 9242 ssize_t ret; 9243 size_t count = iov_iter_count(iter); 9244 u64 start, lockend; 9245 struct extent_map *em; 9246 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); 9247 bool unlocked = false; 9248 9249 file_accessed(iocb->ki_filp); 9250 9251 ret = btrfs_inode_lock(inode, 9252 BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0)); 9253 if (ret) 9254 return ret; 9255 9256 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 9257 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9258 return 0; 9259 } 9260 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 9261 /* 9262 * We don't know how long the extent containing iocb->ki_pos is, but if 9263 * it's compressed we know that it won't be longer than this. 9264 */ 9265 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 9266 9267 if (nowait) { 9268 struct btrfs_ordered_extent *ordered; 9269 9270 if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping, 9271 start, lockend)) { 9272 ret = -EAGAIN; 9273 goto out_unlock_inode; 9274 } 9275 9276 if (!try_lock_extent(io_tree, start, lockend, cached_state)) { 9277 ret = -EAGAIN; 9278 goto out_unlock_inode; 9279 } 9280 9281 ordered = btrfs_lookup_ordered_range(inode, start, 9282 lockend - start + 1); 9283 if (ordered) { 9284 btrfs_put_ordered_extent(ordered); 9285 unlock_extent(io_tree, start, lockend, cached_state); 9286 ret = -EAGAIN; 9287 goto out_unlock_inode; 9288 } 9289 } else { 9290 for (;;) { 9291 struct btrfs_ordered_extent *ordered; 9292 9293 ret = btrfs_wait_ordered_range(inode, start, 9294 lockend - start + 1); 9295 if (ret) 9296 goto out_unlock_inode; 9297 9298 lock_extent(io_tree, start, lockend, cached_state); 9299 ordered = btrfs_lookup_ordered_range(inode, start, 9300 lockend - start + 1); 9301 if (!ordered) 9302 break; 9303 btrfs_put_ordered_extent(ordered); 9304 unlock_extent(io_tree, start, lockend, cached_state); 9305 cond_resched(); 9306 } 9307 } 9308 9309 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1); 9310 if (IS_ERR(em)) { 9311 ret = PTR_ERR(em); 9312 goto out_unlock_extent; 9313 } 9314 9315 if (em->disk_bytenr == EXTENT_MAP_INLINE) { 9316 u64 extent_start = em->start; 9317 9318 /* 9319 * For inline extents we get everything we need out of the 9320 * extent item. 9321 */ 9322 free_extent_map(em); 9323 em = NULL; 9324 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 9325 cached_state, extent_start, 9326 count, encoded, &unlocked); 9327 goto out_unlock_extent; 9328 } 9329 9330 /* 9331 * We only want to return up to EOF even if the extent extends beyond 9332 * that. 9333 */ 9334 encoded->len = min_t(u64, extent_map_end(em), 9335 inode->vfs_inode.i_size) - iocb->ki_pos; 9336 if (em->disk_bytenr == EXTENT_MAP_HOLE || 9337 (em->flags & EXTENT_FLAG_PREALLOC)) { 9338 *disk_bytenr = EXTENT_MAP_HOLE; 9339 count = min_t(u64, count, encoded->len); 9340 encoded->len = count; 9341 encoded->unencoded_len = count; 9342 } else if (extent_map_is_compressed(em)) { 9343 *disk_bytenr = em->disk_bytenr; 9344 /* 9345 * Bail if the buffer isn't large enough to return the whole 9346 * compressed extent. 9347 */ 9348 if (em->disk_num_bytes > count) { 9349 ret = -ENOBUFS; 9350 goto out_em; 9351 } 9352 *disk_io_size = em->disk_num_bytes; 9353 count = em->disk_num_bytes; 9354 encoded->unencoded_len = em->ram_bytes; 9355 encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset); 9356 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9357 extent_map_compression(em)); 9358 if (ret < 0) 9359 goto out_em; 9360 encoded->compression = ret; 9361 } else { 9362 *disk_bytenr = extent_map_block_start(em) + (start - em->start); 9363 if (encoded->len > count) 9364 encoded->len = count; 9365 /* 9366 * Don't read beyond what we locked. This also limits the page 9367 * allocations that we'll do. 9368 */ 9369 *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 9370 count = start + *disk_io_size - iocb->ki_pos; 9371 encoded->len = count; 9372 encoded->unencoded_len = count; 9373 *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize); 9374 } 9375 free_extent_map(em); 9376 em = NULL; 9377 9378 if (*disk_bytenr == EXTENT_MAP_HOLE) { 9379 unlock_extent(io_tree, start, lockend, cached_state); 9380 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9381 unlocked = true; 9382 ret = iov_iter_zero(count, iter); 9383 if (ret != count) 9384 ret = -EFAULT; 9385 } else { 9386 ret = -EIOCBQUEUED; 9387 goto out_unlock_extent; 9388 } 9389 9390 out_em: 9391 free_extent_map(em); 9392 out_unlock_extent: 9393 /* Leave inode and extent locked if we need to do a read. */ 9394 if (!unlocked && ret != -EIOCBQUEUED) 9395 unlock_extent(io_tree, start, lockend, cached_state); 9396 out_unlock_inode: 9397 if (!unlocked && ret != -EIOCBQUEUED) 9398 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9399 return ret; 9400 } 9401 9402 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 9403 const struct btrfs_ioctl_encoded_io_args *encoded) 9404 { 9405 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9406 struct btrfs_root *root = inode->root; 9407 struct btrfs_fs_info *fs_info = root->fs_info; 9408 struct extent_io_tree *io_tree = &inode->io_tree; 9409 struct extent_changeset *data_reserved = NULL; 9410 struct extent_state *cached_state = NULL; 9411 struct btrfs_ordered_extent *ordered; 9412 struct btrfs_file_extent file_extent; 9413 int compression; 9414 size_t orig_count; 9415 u64 start, end; 9416 u64 num_bytes, ram_bytes, disk_num_bytes; 9417 unsigned long nr_folios, i; 9418 struct folio **folios; 9419 struct btrfs_key ins; 9420 bool extent_reserved = false; 9421 struct extent_map *em; 9422 ssize_t ret; 9423 9424 switch (encoded->compression) { 9425 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 9426 compression = BTRFS_COMPRESS_ZLIB; 9427 break; 9428 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 9429 compression = BTRFS_COMPRESS_ZSTD; 9430 break; 9431 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 9432 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 9433 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 9434 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 9435 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 9436 /* The sector size must match for LZO. */ 9437 if (encoded->compression - 9438 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 9439 fs_info->sectorsize_bits) 9440 return -EINVAL; 9441 compression = BTRFS_COMPRESS_LZO; 9442 break; 9443 default: 9444 return -EINVAL; 9445 } 9446 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 9447 return -EINVAL; 9448 9449 /* 9450 * Compressed extents should always have checksums, so error out if we 9451 * have a NOCOW file or inode was created while mounted with NODATASUM. 9452 */ 9453 if (inode->flags & BTRFS_INODE_NODATASUM) 9454 return -EINVAL; 9455 9456 orig_count = iov_iter_count(from); 9457 9458 /* The extent size must be sane. */ 9459 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 9460 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 9461 return -EINVAL; 9462 9463 /* 9464 * The compressed data must be smaller than the decompressed data. 9465 * 9466 * It's of course possible for data to compress to larger or the same 9467 * size, but the buffered I/O path falls back to no compression for such 9468 * data, and we don't want to break any assumptions by creating these 9469 * extents. 9470 * 9471 * Note that this is less strict than the current check we have that the 9472 * compressed data must be at least one sector smaller than the 9473 * decompressed data. We only want to enforce the weaker requirement 9474 * from old kernels that it is at least one byte smaller. 9475 */ 9476 if (orig_count >= encoded->unencoded_len) 9477 return -EINVAL; 9478 9479 /* The extent must start on a sector boundary. */ 9480 start = iocb->ki_pos; 9481 if (!IS_ALIGNED(start, fs_info->sectorsize)) 9482 return -EINVAL; 9483 9484 /* 9485 * The extent must end on a sector boundary. However, we allow a write 9486 * which ends at or extends i_size to have an unaligned length; we round 9487 * up the extent size and set i_size to the unaligned end. 9488 */ 9489 if (start + encoded->len < inode->vfs_inode.i_size && 9490 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 9491 return -EINVAL; 9492 9493 /* Finally, the offset in the unencoded data must be sector-aligned. */ 9494 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 9495 return -EINVAL; 9496 9497 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 9498 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 9499 end = start + num_bytes - 1; 9500 9501 /* 9502 * If the extent cannot be inline, the compressed data on disk must be 9503 * sector-aligned. For convenience, we extend it with zeroes if it 9504 * isn't. 9505 */ 9506 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 9507 nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 9508 folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT); 9509 if (!folios) 9510 return -ENOMEM; 9511 for (i = 0; i < nr_folios; i++) { 9512 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 9513 char *kaddr; 9514 9515 folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0); 9516 if (!folios[i]) { 9517 ret = -ENOMEM; 9518 goto out_folios; 9519 } 9520 kaddr = kmap_local_folio(folios[i], 0); 9521 if (copy_from_iter(kaddr, bytes, from) != bytes) { 9522 kunmap_local(kaddr); 9523 ret = -EFAULT; 9524 goto out_folios; 9525 } 9526 if (bytes < PAGE_SIZE) 9527 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 9528 kunmap_local(kaddr); 9529 } 9530 9531 for (;;) { 9532 struct btrfs_ordered_extent *ordered; 9533 9534 ret = btrfs_wait_ordered_range(inode, start, num_bytes); 9535 if (ret) 9536 goto out_folios; 9537 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 9538 start >> PAGE_SHIFT, 9539 end >> PAGE_SHIFT); 9540 if (ret) 9541 goto out_folios; 9542 lock_extent(io_tree, start, end, &cached_state); 9543 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 9544 if (!ordered && 9545 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 9546 break; 9547 if (ordered) 9548 btrfs_put_ordered_extent(ordered); 9549 unlock_extent(io_tree, start, end, &cached_state); 9550 cond_resched(); 9551 } 9552 9553 /* 9554 * We don't use the higher-level delalloc space functions because our 9555 * num_bytes and disk_num_bytes are different. 9556 */ 9557 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 9558 if (ret) 9559 goto out_unlock; 9560 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 9561 if (ret) 9562 goto out_free_data_space; 9563 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 9564 false); 9565 if (ret) 9566 goto out_qgroup_free_data; 9567 9568 /* Try an inline extent first. */ 9569 if (encoded->unencoded_len == encoded->len && 9570 encoded->unencoded_offset == 0 && 9571 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) { 9572 ret = __cow_file_range_inline(inode, encoded->len, 9573 orig_count, compression, folios[0], 9574 true); 9575 if (ret <= 0) { 9576 if (ret == 0) 9577 ret = orig_count; 9578 goto out_delalloc_release; 9579 } 9580 } 9581 9582 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 9583 disk_num_bytes, 0, 0, &ins, 1, 1); 9584 if (ret) 9585 goto out_delalloc_release; 9586 extent_reserved = true; 9587 9588 file_extent.disk_bytenr = ins.objectid; 9589 file_extent.disk_num_bytes = ins.offset; 9590 file_extent.num_bytes = num_bytes; 9591 file_extent.ram_bytes = ram_bytes; 9592 file_extent.offset = encoded->unencoded_offset; 9593 file_extent.compression = compression; 9594 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED); 9595 if (IS_ERR(em)) { 9596 ret = PTR_ERR(em); 9597 goto out_free_reserved; 9598 } 9599 free_extent_map(em); 9600 9601 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, 9602 (1 << BTRFS_ORDERED_ENCODED) | 9603 (1 << BTRFS_ORDERED_COMPRESSED)); 9604 if (IS_ERR(ordered)) { 9605 btrfs_drop_extent_map_range(inode, start, end, false); 9606 ret = PTR_ERR(ordered); 9607 goto out_free_reserved; 9608 } 9609 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9610 9611 if (start + encoded->len > inode->vfs_inode.i_size) 9612 i_size_write(&inode->vfs_inode, start + encoded->len); 9613 9614 unlock_extent(io_tree, start, end, &cached_state); 9615 9616 btrfs_delalloc_release_extents(inode, num_bytes); 9617 9618 btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false); 9619 ret = orig_count; 9620 goto out; 9621 9622 out_free_reserved: 9623 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9624 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 9625 out_delalloc_release: 9626 btrfs_delalloc_release_extents(inode, num_bytes); 9627 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 9628 out_qgroup_free_data: 9629 if (ret < 0) 9630 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL); 9631 out_free_data_space: 9632 /* 9633 * If btrfs_reserve_extent() succeeded, then we already decremented 9634 * bytes_may_use. 9635 */ 9636 if (!extent_reserved) 9637 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 9638 out_unlock: 9639 unlock_extent(io_tree, start, end, &cached_state); 9640 out_folios: 9641 for (i = 0; i < nr_folios; i++) { 9642 if (folios[i]) 9643 folio_put(folios[i]); 9644 } 9645 kvfree(folios); 9646 out: 9647 if (ret >= 0) 9648 iocb->ki_pos += encoded->len; 9649 return ret; 9650 } 9651 9652 #ifdef CONFIG_SWAP 9653 /* 9654 * Add an entry indicating a block group or device which is pinned by a 9655 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 9656 * negative errno on failure. 9657 */ 9658 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 9659 bool is_block_group) 9660 { 9661 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9662 struct btrfs_swapfile_pin *sp, *entry; 9663 struct rb_node **p; 9664 struct rb_node *parent = NULL; 9665 9666 sp = kmalloc(sizeof(*sp), GFP_NOFS); 9667 if (!sp) 9668 return -ENOMEM; 9669 sp->ptr = ptr; 9670 sp->inode = inode; 9671 sp->is_block_group = is_block_group; 9672 sp->bg_extent_count = 1; 9673 9674 spin_lock(&fs_info->swapfile_pins_lock); 9675 p = &fs_info->swapfile_pins.rb_node; 9676 while (*p) { 9677 parent = *p; 9678 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 9679 if (sp->ptr < entry->ptr || 9680 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 9681 p = &(*p)->rb_left; 9682 } else if (sp->ptr > entry->ptr || 9683 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 9684 p = &(*p)->rb_right; 9685 } else { 9686 if (is_block_group) 9687 entry->bg_extent_count++; 9688 spin_unlock(&fs_info->swapfile_pins_lock); 9689 kfree(sp); 9690 return 1; 9691 } 9692 } 9693 rb_link_node(&sp->node, parent, p); 9694 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 9695 spin_unlock(&fs_info->swapfile_pins_lock); 9696 return 0; 9697 } 9698 9699 /* Free all of the entries pinned by this swapfile. */ 9700 static void btrfs_free_swapfile_pins(struct inode *inode) 9701 { 9702 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 9703 struct btrfs_swapfile_pin *sp; 9704 struct rb_node *node, *next; 9705 9706 spin_lock(&fs_info->swapfile_pins_lock); 9707 node = rb_first(&fs_info->swapfile_pins); 9708 while (node) { 9709 next = rb_next(node); 9710 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 9711 if (sp->inode == inode) { 9712 rb_erase(&sp->node, &fs_info->swapfile_pins); 9713 if (sp->is_block_group) { 9714 btrfs_dec_block_group_swap_extents(sp->ptr, 9715 sp->bg_extent_count); 9716 btrfs_put_block_group(sp->ptr); 9717 } 9718 kfree(sp); 9719 } 9720 node = next; 9721 } 9722 spin_unlock(&fs_info->swapfile_pins_lock); 9723 } 9724 9725 struct btrfs_swap_info { 9726 u64 start; 9727 u64 block_start; 9728 u64 block_len; 9729 u64 lowest_ppage; 9730 u64 highest_ppage; 9731 unsigned long nr_pages; 9732 int nr_extents; 9733 }; 9734 9735 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 9736 struct btrfs_swap_info *bsi) 9737 { 9738 unsigned long nr_pages; 9739 unsigned long max_pages; 9740 u64 first_ppage, first_ppage_reported, next_ppage; 9741 int ret; 9742 9743 /* 9744 * Our swapfile may have had its size extended after the swap header was 9745 * written. In that case activating the swapfile should not go beyond 9746 * the max size set in the swap header. 9747 */ 9748 if (bsi->nr_pages >= sis->max) 9749 return 0; 9750 9751 max_pages = sis->max - bsi->nr_pages; 9752 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 9753 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 9754 9755 if (first_ppage >= next_ppage) 9756 return 0; 9757 nr_pages = next_ppage - first_ppage; 9758 nr_pages = min(nr_pages, max_pages); 9759 9760 first_ppage_reported = first_ppage; 9761 if (bsi->start == 0) 9762 first_ppage_reported++; 9763 if (bsi->lowest_ppage > first_ppage_reported) 9764 bsi->lowest_ppage = first_ppage_reported; 9765 if (bsi->highest_ppage < (next_ppage - 1)) 9766 bsi->highest_ppage = next_ppage - 1; 9767 9768 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 9769 if (ret < 0) 9770 return ret; 9771 bsi->nr_extents += ret; 9772 bsi->nr_pages += nr_pages; 9773 return 0; 9774 } 9775 9776 static void btrfs_swap_deactivate(struct file *file) 9777 { 9778 struct inode *inode = file_inode(file); 9779 9780 btrfs_free_swapfile_pins(inode); 9781 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 9782 } 9783 9784 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 9785 sector_t *span) 9786 { 9787 struct inode *inode = file_inode(file); 9788 struct btrfs_root *root = BTRFS_I(inode)->root; 9789 struct btrfs_fs_info *fs_info = root->fs_info; 9790 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 9791 struct extent_state *cached_state = NULL; 9792 struct extent_map *em = NULL; 9793 struct btrfs_chunk_map *map = NULL; 9794 struct btrfs_device *device = NULL; 9795 struct btrfs_swap_info bsi = { 9796 .lowest_ppage = (sector_t)-1ULL, 9797 }; 9798 int ret = 0; 9799 u64 isize; 9800 u64 start; 9801 9802 /* 9803 * If the swap file was just created, make sure delalloc is done. If the 9804 * file changes again after this, the user is doing something stupid and 9805 * we don't really care. 9806 */ 9807 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 9808 if (ret) 9809 return ret; 9810 9811 /* 9812 * The inode is locked, so these flags won't change after we check them. 9813 */ 9814 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 9815 btrfs_warn(fs_info, "swapfile must not be compressed"); 9816 return -EINVAL; 9817 } 9818 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 9819 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 9820 return -EINVAL; 9821 } 9822 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 9823 btrfs_warn(fs_info, "swapfile must not be checksummed"); 9824 return -EINVAL; 9825 } 9826 9827 /* 9828 * Balance or device remove/replace/resize can move stuff around from 9829 * under us. The exclop protection makes sure they aren't running/won't 9830 * run concurrently while we are mapping the swap extents, and 9831 * fs_info->swapfile_pins prevents them from running while the swap 9832 * file is active and moving the extents. Note that this also prevents 9833 * a concurrent device add which isn't actually necessary, but it's not 9834 * really worth the trouble to allow it. 9835 */ 9836 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 9837 btrfs_warn(fs_info, 9838 "cannot activate swapfile while exclusive operation is running"); 9839 return -EBUSY; 9840 } 9841 9842 /* 9843 * Prevent snapshot creation while we are activating the swap file. 9844 * We do not want to race with snapshot creation. If snapshot creation 9845 * already started before we bumped nr_swapfiles from 0 to 1 and 9846 * completes before the first write into the swap file after it is 9847 * activated, than that write would fallback to COW. 9848 */ 9849 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { 9850 btrfs_exclop_finish(fs_info); 9851 btrfs_warn(fs_info, 9852 "cannot activate swapfile because snapshot creation is in progress"); 9853 return -EINVAL; 9854 } 9855 /* 9856 * Snapshots can create extents which require COW even if NODATACOW is 9857 * set. We use this counter to prevent snapshots. We must increment it 9858 * before walking the extents because we don't want a concurrent 9859 * snapshot to run after we've already checked the extents. 9860 * 9861 * It is possible that subvolume is marked for deletion but still not 9862 * removed yet. To prevent this race, we check the root status before 9863 * activating the swapfile. 9864 */ 9865 spin_lock(&root->root_item_lock); 9866 if (btrfs_root_dead(root)) { 9867 spin_unlock(&root->root_item_lock); 9868 9869 btrfs_exclop_finish(fs_info); 9870 btrfs_warn(fs_info, 9871 "cannot activate swapfile because subvolume %llu is being deleted", 9872 btrfs_root_id(root)); 9873 return -EPERM; 9874 } 9875 atomic_inc(&root->nr_swapfiles); 9876 spin_unlock(&root->root_item_lock); 9877 9878 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); 9879 9880 lock_extent(io_tree, 0, isize - 1, &cached_state); 9881 start = 0; 9882 while (start < isize) { 9883 u64 logical_block_start, physical_block_start; 9884 struct btrfs_block_group *bg; 9885 u64 len = isize - start; 9886 9887 em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len); 9888 if (IS_ERR(em)) { 9889 ret = PTR_ERR(em); 9890 goto out; 9891 } 9892 9893 if (em->disk_bytenr == EXTENT_MAP_HOLE) { 9894 btrfs_warn(fs_info, "swapfile must not have holes"); 9895 ret = -EINVAL; 9896 goto out; 9897 } 9898 if (em->disk_bytenr == EXTENT_MAP_INLINE) { 9899 /* 9900 * It's unlikely we'll ever actually find ourselves 9901 * here, as a file small enough to fit inline won't be 9902 * big enough to store more than the swap header, but in 9903 * case something changes in the future, let's catch it 9904 * here rather than later. 9905 */ 9906 btrfs_warn(fs_info, "swapfile must not be inline"); 9907 ret = -EINVAL; 9908 goto out; 9909 } 9910 if (extent_map_is_compressed(em)) { 9911 btrfs_warn(fs_info, "swapfile must not be compressed"); 9912 ret = -EINVAL; 9913 goto out; 9914 } 9915 9916 logical_block_start = extent_map_block_start(em) + (start - em->start); 9917 len = min(len, em->len - (start - em->start)); 9918 free_extent_map(em); 9919 em = NULL; 9920 9921 ret = can_nocow_extent(inode, start, &len, NULL, false, true); 9922 if (ret < 0) { 9923 goto out; 9924 } else if (ret) { 9925 ret = 0; 9926 } else { 9927 btrfs_warn(fs_info, 9928 "swapfile must not be copy-on-write"); 9929 ret = -EINVAL; 9930 goto out; 9931 } 9932 9933 map = btrfs_get_chunk_map(fs_info, logical_block_start, len); 9934 if (IS_ERR(map)) { 9935 ret = PTR_ERR(map); 9936 goto out; 9937 } 9938 9939 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 9940 btrfs_warn(fs_info, 9941 "swapfile must have single data profile"); 9942 ret = -EINVAL; 9943 goto out; 9944 } 9945 9946 if (device == NULL) { 9947 device = map->stripes[0].dev; 9948 ret = btrfs_add_swapfile_pin(inode, device, false); 9949 if (ret == 1) 9950 ret = 0; 9951 else if (ret) 9952 goto out; 9953 } else if (device != map->stripes[0].dev) { 9954 btrfs_warn(fs_info, "swapfile must be on one device"); 9955 ret = -EINVAL; 9956 goto out; 9957 } 9958 9959 physical_block_start = (map->stripes[0].physical + 9960 (logical_block_start - map->start)); 9961 len = min(len, map->chunk_len - (logical_block_start - map->start)); 9962 btrfs_free_chunk_map(map); 9963 map = NULL; 9964 9965 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 9966 if (!bg) { 9967 btrfs_warn(fs_info, 9968 "could not find block group containing swapfile"); 9969 ret = -EINVAL; 9970 goto out; 9971 } 9972 9973 if (!btrfs_inc_block_group_swap_extents(bg)) { 9974 btrfs_warn(fs_info, 9975 "block group for swapfile at %llu is read-only%s", 9976 bg->start, 9977 atomic_read(&fs_info->scrubs_running) ? 9978 " (scrub running)" : ""); 9979 btrfs_put_block_group(bg); 9980 ret = -EINVAL; 9981 goto out; 9982 } 9983 9984 ret = btrfs_add_swapfile_pin(inode, bg, true); 9985 if (ret) { 9986 btrfs_put_block_group(bg); 9987 if (ret == 1) 9988 ret = 0; 9989 else 9990 goto out; 9991 } 9992 9993 if (bsi.block_len && 9994 bsi.block_start + bsi.block_len == physical_block_start) { 9995 bsi.block_len += len; 9996 } else { 9997 if (bsi.block_len) { 9998 ret = btrfs_add_swap_extent(sis, &bsi); 9999 if (ret) 10000 goto out; 10001 } 10002 bsi.start = start; 10003 bsi.block_start = physical_block_start; 10004 bsi.block_len = len; 10005 } 10006 10007 start += len; 10008 } 10009 10010 if (bsi.block_len) 10011 ret = btrfs_add_swap_extent(sis, &bsi); 10012 10013 out: 10014 if (!IS_ERR_OR_NULL(em)) 10015 free_extent_map(em); 10016 if (!IS_ERR_OR_NULL(map)) 10017 btrfs_free_chunk_map(map); 10018 10019 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10020 10021 if (ret) 10022 btrfs_swap_deactivate(file); 10023 10024 btrfs_drew_write_unlock(&root->snapshot_lock); 10025 10026 btrfs_exclop_finish(fs_info); 10027 10028 if (ret) 10029 return ret; 10030 10031 if (device) 10032 sis->bdev = device->bdev; 10033 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10034 sis->max = bsi.nr_pages; 10035 sis->pages = bsi.nr_pages - 1; 10036 sis->highest_bit = bsi.nr_pages - 1; 10037 return bsi.nr_extents; 10038 } 10039 #else 10040 static void btrfs_swap_deactivate(struct file *file) 10041 { 10042 } 10043 10044 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10045 sector_t *span) 10046 { 10047 return -EOPNOTSUPP; 10048 } 10049 #endif 10050 10051 /* 10052 * Update the number of bytes used in the VFS' inode. When we replace extents in 10053 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10054 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10055 * always get a correct value. 10056 */ 10057 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10058 const u64 add_bytes, 10059 const u64 del_bytes) 10060 { 10061 if (add_bytes == del_bytes) 10062 return; 10063 10064 spin_lock(&inode->lock); 10065 if (del_bytes > 0) 10066 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10067 if (add_bytes > 0) 10068 inode_add_bytes(&inode->vfs_inode, add_bytes); 10069 spin_unlock(&inode->lock); 10070 } 10071 10072 /* 10073 * Verify that there are no ordered extents for a given file range. 10074 * 10075 * @inode: The target inode. 10076 * @start: Start offset of the file range, should be sector size aligned. 10077 * @end: End offset (inclusive) of the file range, its value +1 should be 10078 * sector size aligned. 10079 * 10080 * This should typically be used for cases where we locked an inode's VFS lock in 10081 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10082 * we have flushed all delalloc in the range, we have waited for all ordered 10083 * extents in the range to complete and finally we have locked the file range in 10084 * the inode's io_tree. 10085 */ 10086 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) 10087 { 10088 struct btrfs_root *root = inode->root; 10089 struct btrfs_ordered_extent *ordered; 10090 10091 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 10092 return; 10093 10094 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); 10095 if (ordered) { 10096 btrfs_err(root->fs_info, 10097 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", 10098 start, end, btrfs_ino(inode), btrfs_root_id(root), 10099 ordered->file_offset, 10100 ordered->file_offset + ordered->num_bytes - 1); 10101 btrfs_put_ordered_extent(ordered); 10102 } 10103 10104 ASSERT(ordered == NULL); 10105 } 10106 10107 /* 10108 * Find the first inode with a minimum number. 10109 * 10110 * @root: The root to search for. 10111 * @min_ino: The minimum inode number. 10112 * 10113 * Find the first inode in the @root with a number >= @min_ino and return it. 10114 * Returns NULL if no such inode found. 10115 */ 10116 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino) 10117 { 10118 struct btrfs_inode *inode; 10119 unsigned long from = min_ino; 10120 10121 xa_lock(&root->inodes); 10122 while (true) { 10123 inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); 10124 if (!inode) 10125 break; 10126 if (igrab(&inode->vfs_inode)) 10127 break; 10128 10129 from = btrfs_ino(inode) + 1; 10130 cond_resched_lock(&root->inodes.xa_lock); 10131 } 10132 xa_unlock(&root->inodes); 10133 10134 return inode; 10135 } 10136 10137 static const struct inode_operations btrfs_dir_inode_operations = { 10138 .getattr = btrfs_getattr, 10139 .lookup = btrfs_lookup, 10140 .create = btrfs_create, 10141 .unlink = btrfs_unlink, 10142 .link = btrfs_link, 10143 .mkdir = btrfs_mkdir, 10144 .rmdir = btrfs_rmdir, 10145 .rename = btrfs_rename2, 10146 .symlink = btrfs_symlink, 10147 .setattr = btrfs_setattr, 10148 .mknod = btrfs_mknod, 10149 .listxattr = btrfs_listxattr, 10150 .permission = btrfs_permission, 10151 .get_inode_acl = btrfs_get_acl, 10152 .set_acl = btrfs_set_acl, 10153 .update_time = btrfs_update_time, 10154 .tmpfile = btrfs_tmpfile, 10155 .fileattr_get = btrfs_fileattr_get, 10156 .fileattr_set = btrfs_fileattr_set, 10157 }; 10158 10159 static const struct file_operations btrfs_dir_file_operations = { 10160 .llseek = btrfs_dir_llseek, 10161 .read = generic_read_dir, 10162 .iterate_shared = btrfs_real_readdir, 10163 .open = btrfs_opendir, 10164 .unlocked_ioctl = btrfs_ioctl, 10165 #ifdef CONFIG_COMPAT 10166 .compat_ioctl = btrfs_compat_ioctl, 10167 #endif 10168 .release = btrfs_release_file, 10169 .fsync = btrfs_sync_file, 10170 }; 10171 10172 /* 10173 * btrfs doesn't support the bmap operation because swapfiles 10174 * use bmap to make a mapping of extents in the file. They assume 10175 * these extents won't change over the life of the file and they 10176 * use the bmap result to do IO directly to the drive. 10177 * 10178 * the btrfs bmap call would return logical addresses that aren't 10179 * suitable for IO and they also will change frequently as COW 10180 * operations happen. So, swapfile + btrfs == corruption. 10181 * 10182 * For now we're avoiding this by dropping bmap. 10183 */ 10184 static const struct address_space_operations btrfs_aops = { 10185 .read_folio = btrfs_read_folio, 10186 .writepages = btrfs_writepages, 10187 .readahead = btrfs_readahead, 10188 .invalidate_folio = btrfs_invalidate_folio, 10189 .launder_folio = btrfs_launder_folio, 10190 .release_folio = btrfs_release_folio, 10191 .migrate_folio = btrfs_migrate_folio, 10192 .dirty_folio = filemap_dirty_folio, 10193 .error_remove_folio = generic_error_remove_folio, 10194 .swap_activate = btrfs_swap_activate, 10195 .swap_deactivate = btrfs_swap_deactivate, 10196 }; 10197 10198 static const struct inode_operations btrfs_file_inode_operations = { 10199 .getattr = btrfs_getattr, 10200 .setattr = btrfs_setattr, 10201 .listxattr = btrfs_listxattr, 10202 .permission = btrfs_permission, 10203 .fiemap = btrfs_fiemap, 10204 .get_inode_acl = btrfs_get_acl, 10205 .set_acl = btrfs_set_acl, 10206 .update_time = btrfs_update_time, 10207 .fileattr_get = btrfs_fileattr_get, 10208 .fileattr_set = btrfs_fileattr_set, 10209 }; 10210 static const struct inode_operations btrfs_special_inode_operations = { 10211 .getattr = btrfs_getattr, 10212 .setattr = btrfs_setattr, 10213 .permission = btrfs_permission, 10214 .listxattr = btrfs_listxattr, 10215 .get_inode_acl = btrfs_get_acl, 10216 .set_acl = btrfs_set_acl, 10217 .update_time = btrfs_update_time, 10218 }; 10219 static const struct inode_operations btrfs_symlink_inode_operations = { 10220 .get_link = page_get_link, 10221 .getattr = btrfs_getattr, 10222 .setattr = btrfs_setattr, 10223 .permission = btrfs_permission, 10224 .listxattr = btrfs_listxattr, 10225 .update_time = btrfs_update_time, 10226 }; 10227 10228 const struct dentry_operations btrfs_dentry_operations = { 10229 .d_delete = btrfs_dentry_delete, 10230 }; 10231