1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <crypto/hash.h> 7 #include <linux/kernel.h> 8 #include <linux/bio.h> 9 #include <linux/blk-cgroup.h> 10 #include <linux/file.h> 11 #include <linux/fs.h> 12 #include <linux/pagemap.h> 13 #include <linux/highmem.h> 14 #include <linux/time.h> 15 #include <linux/init.h> 16 #include <linux/string.h> 17 #include <linux/backing-dev.h> 18 #include <linux/writeback.h> 19 #include <linux/compat.h> 20 #include <linux/xattr.h> 21 #include <linux/posix_acl.h> 22 #include <linux/falloc.h> 23 #include <linux/slab.h> 24 #include <linux/ratelimit.h> 25 #include <linux/btrfs.h> 26 #include <linux/blkdev.h> 27 #include <linux/posix_acl_xattr.h> 28 #include <linux/uio.h> 29 #include <linux/magic.h> 30 #include <linux/iversion.h> 31 #include <linux/swap.h> 32 #include <linux/migrate.h> 33 #include <linux/sched/mm.h> 34 #include <linux/iomap.h> 35 #include <asm/unaligned.h> 36 #include <linux/fsverity.h> 37 #include "misc.h" 38 #include "ctree.h" 39 #include "disk-io.h" 40 #include "transaction.h" 41 #include "btrfs_inode.h" 42 #include "print-tree.h" 43 #include "ordered-data.h" 44 #include "xattr.h" 45 #include "tree-log.h" 46 #include "bio.h" 47 #include "compression.h" 48 #include "locking.h" 49 #include "free-space-cache.h" 50 #include "props.h" 51 #include "qgroup.h" 52 #include "delalloc-space.h" 53 #include "block-group.h" 54 #include "space-info.h" 55 #include "zoned.h" 56 #include "subpage.h" 57 #include "inode-item.h" 58 #include "fs.h" 59 #include "accessors.h" 60 #include "extent-tree.h" 61 #include "root-tree.h" 62 #include "defrag.h" 63 #include "dir-item.h" 64 #include "file-item.h" 65 #include "uuid-tree.h" 66 #include "ioctl.h" 67 #include "file.h" 68 #include "acl.h" 69 #include "relocation.h" 70 #include "verity.h" 71 #include "super.h" 72 #include "orphan.h" 73 #include "backref.h" 74 75 struct btrfs_iget_args { 76 u64 ino; 77 struct btrfs_root *root; 78 }; 79 80 struct btrfs_dio_data { 81 ssize_t submitted; 82 struct extent_changeset *data_reserved; 83 struct btrfs_ordered_extent *ordered; 84 bool data_space_reserved; 85 bool nocow_done; 86 }; 87 88 struct btrfs_dio_private { 89 /* Range of I/O */ 90 u64 file_offset; 91 u32 bytes; 92 93 /* This must be last */ 94 struct btrfs_bio bbio; 95 }; 96 97 static struct bio_set btrfs_dio_bioset; 98 99 struct btrfs_rename_ctx { 100 /* Output field. Stores the index number of the old directory entry. */ 101 u64 index; 102 }; 103 104 /* 105 * Used by data_reloc_print_warning_inode() to pass needed info for filename 106 * resolution and output of error message. 107 */ 108 struct data_reloc_warn { 109 struct btrfs_path path; 110 struct btrfs_fs_info *fs_info; 111 u64 extent_item_size; 112 u64 logical; 113 int mirror_num; 114 }; 115 116 static const struct inode_operations btrfs_dir_inode_operations; 117 static const struct inode_operations btrfs_symlink_inode_operations; 118 static const struct inode_operations btrfs_special_inode_operations; 119 static const struct inode_operations btrfs_file_inode_operations; 120 static const struct address_space_operations btrfs_aops; 121 static const struct file_operations btrfs_dir_file_operations; 122 123 static struct kmem_cache *btrfs_inode_cachep; 124 125 static int btrfs_setsize(struct inode *inode, struct iattr *attr); 126 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback); 127 128 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 129 struct page *locked_page, u64 start, 130 u64 end, struct writeback_control *wbc, 131 bool pages_dirty); 132 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 133 u64 len, u64 orig_start, u64 block_start, 134 u64 block_len, u64 orig_block_len, 135 u64 ram_bytes, int compress_type, 136 int type); 137 138 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, 139 u64 root, void *warn_ctx) 140 { 141 struct data_reloc_warn *warn = warn_ctx; 142 struct btrfs_fs_info *fs_info = warn->fs_info; 143 struct extent_buffer *eb; 144 struct btrfs_inode_item *inode_item; 145 struct inode_fs_paths *ipath = NULL; 146 struct btrfs_root *local_root; 147 struct btrfs_key key; 148 unsigned int nofs_flag; 149 u32 nlink; 150 int ret; 151 152 local_root = btrfs_get_fs_root(fs_info, root, true); 153 if (IS_ERR(local_root)) { 154 ret = PTR_ERR(local_root); 155 goto err; 156 } 157 158 /* This makes the path point to (inum INODE_ITEM ioff). */ 159 key.objectid = inum; 160 key.type = BTRFS_INODE_ITEM_KEY; 161 key.offset = 0; 162 163 ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0); 164 if (ret) { 165 btrfs_put_root(local_root); 166 btrfs_release_path(&warn->path); 167 goto err; 168 } 169 170 eb = warn->path.nodes[0]; 171 inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item); 172 nlink = btrfs_inode_nlink(eb, inode_item); 173 btrfs_release_path(&warn->path); 174 175 nofs_flag = memalloc_nofs_save(); 176 ipath = init_ipath(4096, local_root, &warn->path); 177 memalloc_nofs_restore(nofs_flag); 178 if (IS_ERR(ipath)) { 179 btrfs_put_root(local_root); 180 ret = PTR_ERR(ipath); 181 ipath = NULL; 182 /* 183 * -ENOMEM, not a critical error, just output an generic error 184 * without filename. 185 */ 186 btrfs_warn(fs_info, 187 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu", 188 warn->logical, warn->mirror_num, root, inum, offset); 189 return ret; 190 } 191 ret = paths_from_inode(inum, ipath); 192 if (ret < 0) 193 goto err; 194 195 /* 196 * We deliberately ignore the bit ipath might have been too small to 197 * hold all of the paths here 198 */ 199 for (int i = 0; i < ipath->fspath->elem_cnt; i++) { 200 btrfs_warn(fs_info, 201 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)", 202 warn->logical, warn->mirror_num, root, inum, offset, 203 fs_info->sectorsize, nlink, 204 (char *)(unsigned long)ipath->fspath->val[i]); 205 } 206 207 btrfs_put_root(local_root); 208 free_ipath(ipath); 209 return 0; 210 211 err: 212 btrfs_warn(fs_info, 213 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d", 214 warn->logical, warn->mirror_num, root, inum, offset, ret); 215 216 free_ipath(ipath); 217 return ret; 218 } 219 220 /* 221 * Do extra user-friendly error output (e.g. lookup all the affected files). 222 * 223 * Return true if we succeeded doing the backref lookup. 224 * Return false if such lookup failed, and has to fallback to the old error message. 225 */ 226 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off, 227 const u8 *csum, const u8 *csum_expected, 228 int mirror_num) 229 { 230 struct btrfs_fs_info *fs_info = inode->root->fs_info; 231 struct btrfs_path path = { 0 }; 232 struct btrfs_key found_key = { 0 }; 233 struct extent_buffer *eb; 234 struct btrfs_extent_item *ei; 235 const u32 csum_size = fs_info->csum_size; 236 u64 logical; 237 u64 flags; 238 u32 item_size; 239 int ret; 240 241 mutex_lock(&fs_info->reloc_mutex); 242 logical = btrfs_get_reloc_bg_bytenr(fs_info); 243 mutex_unlock(&fs_info->reloc_mutex); 244 245 if (logical == U64_MAX) { 246 btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation"); 247 btrfs_warn_rl(fs_info, 248 "csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 249 inode->root->root_key.objectid, btrfs_ino(inode), file_off, 250 CSUM_FMT_VALUE(csum_size, csum), 251 CSUM_FMT_VALUE(csum_size, csum_expected), 252 mirror_num); 253 return; 254 } 255 256 logical += file_off; 257 btrfs_warn_rl(fs_info, 258 "csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 259 inode->root->root_key.objectid, 260 btrfs_ino(inode), file_off, logical, 261 CSUM_FMT_VALUE(csum_size, csum), 262 CSUM_FMT_VALUE(csum_size, csum_expected), 263 mirror_num); 264 265 ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags); 266 if (ret < 0) { 267 btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d", 268 logical, ret); 269 return; 270 } 271 eb = path.nodes[0]; 272 ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item); 273 item_size = btrfs_item_size(eb, path.slots[0]); 274 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 275 unsigned long ptr = 0; 276 u64 ref_root; 277 u8 ref_level; 278 279 while (true) { 280 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, 281 item_size, &ref_root, 282 &ref_level); 283 if (ret < 0) { 284 btrfs_warn_rl(fs_info, 285 "failed to resolve tree backref for logical %llu: %d", 286 logical, ret); 287 break; 288 } 289 if (ret > 0) 290 break; 291 292 btrfs_warn_rl(fs_info, 293 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu", 294 logical, mirror_num, 295 (ref_level ? "node" : "leaf"), 296 ref_level, ref_root); 297 } 298 btrfs_release_path(&path); 299 } else { 300 struct btrfs_backref_walk_ctx ctx = { 0 }; 301 struct data_reloc_warn reloc_warn = { 0 }; 302 303 btrfs_release_path(&path); 304 305 ctx.bytenr = found_key.objectid; 306 ctx.extent_item_pos = logical - found_key.objectid; 307 ctx.fs_info = fs_info; 308 309 reloc_warn.logical = logical; 310 reloc_warn.extent_item_size = found_key.offset; 311 reloc_warn.mirror_num = mirror_num; 312 reloc_warn.fs_info = fs_info; 313 314 iterate_extent_inodes(&ctx, true, 315 data_reloc_print_warning_inode, &reloc_warn); 316 } 317 } 318 319 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode, 320 u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num) 321 { 322 struct btrfs_root *root = inode->root; 323 const u32 csum_size = root->fs_info->csum_size; 324 325 /* For data reloc tree, it's better to do a backref lookup instead. */ 326 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 327 return print_data_reloc_error(inode, logical_start, csum, 328 csum_expected, mirror_num); 329 330 /* Output without objectid, which is more meaningful */ 331 if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) { 332 btrfs_warn_rl(root->fs_info, 333 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 334 root->root_key.objectid, btrfs_ino(inode), 335 logical_start, 336 CSUM_FMT_VALUE(csum_size, csum), 337 CSUM_FMT_VALUE(csum_size, csum_expected), 338 mirror_num); 339 } else { 340 btrfs_warn_rl(root->fs_info, 341 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d", 342 root->root_key.objectid, btrfs_ino(inode), 343 logical_start, 344 CSUM_FMT_VALUE(csum_size, csum), 345 CSUM_FMT_VALUE(csum_size, csum_expected), 346 mirror_num); 347 } 348 } 349 350 /* 351 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed 352 * 353 * ilock_flags can have the following bit set: 354 * 355 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode 356 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt 357 * return -EAGAIN 358 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock 359 */ 360 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags) 361 { 362 if (ilock_flags & BTRFS_ILOCK_SHARED) { 363 if (ilock_flags & BTRFS_ILOCK_TRY) { 364 if (!inode_trylock_shared(&inode->vfs_inode)) 365 return -EAGAIN; 366 else 367 return 0; 368 } 369 inode_lock_shared(&inode->vfs_inode); 370 } else { 371 if (ilock_flags & BTRFS_ILOCK_TRY) { 372 if (!inode_trylock(&inode->vfs_inode)) 373 return -EAGAIN; 374 else 375 return 0; 376 } 377 inode_lock(&inode->vfs_inode); 378 } 379 if (ilock_flags & BTRFS_ILOCK_MMAP) 380 down_write(&inode->i_mmap_lock); 381 return 0; 382 } 383 384 /* 385 * btrfs_inode_unlock - unock inode i_rwsem 386 * 387 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock() 388 * to decide whether the lock acquired is shared or exclusive. 389 */ 390 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags) 391 { 392 if (ilock_flags & BTRFS_ILOCK_MMAP) 393 up_write(&inode->i_mmap_lock); 394 if (ilock_flags & BTRFS_ILOCK_SHARED) 395 inode_unlock_shared(&inode->vfs_inode); 396 else 397 inode_unlock(&inode->vfs_inode); 398 } 399 400 /* 401 * Cleanup all submitted ordered extents in specified range to handle errors 402 * from the btrfs_run_delalloc_range() callback. 403 * 404 * NOTE: caller must ensure that when an error happens, it can not call 405 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING 406 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata 407 * to be released, which we want to happen only when finishing the ordered 408 * extent (btrfs_finish_ordered_io()). 409 */ 410 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, 411 struct page *locked_page, 412 u64 offset, u64 bytes) 413 { 414 unsigned long index = offset >> PAGE_SHIFT; 415 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; 416 u64 page_start = 0, page_end = 0; 417 struct page *page; 418 419 if (locked_page) { 420 page_start = page_offset(locked_page); 421 page_end = page_start + PAGE_SIZE - 1; 422 } 423 424 while (index <= end_index) { 425 /* 426 * For locked page, we will call btrfs_mark_ordered_io_finished 427 * through btrfs_mark_ordered_io_finished() on it 428 * in run_delalloc_range() for the error handling, which will 429 * clear page Ordered and run the ordered extent accounting. 430 * 431 * Here we can't just clear the Ordered bit, or 432 * btrfs_mark_ordered_io_finished() would skip the accounting 433 * for the page range, and the ordered extent will never finish. 434 */ 435 if (locked_page && index == (page_start >> PAGE_SHIFT)) { 436 index++; 437 continue; 438 } 439 page = find_get_page(inode->vfs_inode.i_mapping, index); 440 index++; 441 if (!page) 442 continue; 443 444 /* 445 * Here we just clear all Ordered bits for every page in the 446 * range, then btrfs_mark_ordered_io_finished() will handle 447 * the ordered extent accounting for the range. 448 */ 449 btrfs_page_clamp_clear_ordered(inode->root->fs_info, page, 450 offset, bytes); 451 put_page(page); 452 } 453 454 if (locked_page) { 455 /* The locked page covers the full range, nothing needs to be done */ 456 if (bytes + offset <= page_start + PAGE_SIZE) 457 return; 458 /* 459 * In case this page belongs to the delalloc range being 460 * instantiated then skip it, since the first page of a range is 461 * going to be properly cleaned up by the caller of 462 * run_delalloc_range 463 */ 464 if (page_start >= offset && page_end <= (offset + bytes - 1)) { 465 bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE; 466 offset = page_offset(locked_page) + PAGE_SIZE; 467 } 468 } 469 470 return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false); 471 } 472 473 static int btrfs_dirty_inode(struct btrfs_inode *inode); 474 475 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 476 struct btrfs_new_inode_args *args) 477 { 478 int err; 479 480 if (args->default_acl) { 481 err = __btrfs_set_acl(trans, args->inode, args->default_acl, 482 ACL_TYPE_DEFAULT); 483 if (err) 484 return err; 485 } 486 if (args->acl) { 487 err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); 488 if (err) 489 return err; 490 } 491 if (!args->default_acl && !args->acl) 492 cache_no_acl(args->inode); 493 return btrfs_xattr_security_init(trans, args->inode, args->dir, 494 &args->dentry->d_name); 495 } 496 497 /* 498 * this does all the hard work for inserting an inline extent into 499 * the btree. The caller should have done a btrfs_drop_extents so that 500 * no overlapping inline items exist in the btree 501 */ 502 static int insert_inline_extent(struct btrfs_trans_handle *trans, 503 struct btrfs_path *path, 504 struct btrfs_inode *inode, bool extent_inserted, 505 size_t size, size_t compressed_size, 506 int compress_type, 507 struct page **compressed_pages, 508 bool update_i_size) 509 { 510 struct btrfs_root *root = inode->root; 511 struct extent_buffer *leaf; 512 struct page *page = NULL; 513 char *kaddr; 514 unsigned long ptr; 515 struct btrfs_file_extent_item *ei; 516 int ret; 517 size_t cur_size = size; 518 u64 i_size; 519 520 ASSERT((compressed_size > 0 && compressed_pages) || 521 (compressed_size == 0 && !compressed_pages)); 522 523 if (compressed_size && compressed_pages) 524 cur_size = compressed_size; 525 526 if (!extent_inserted) { 527 struct btrfs_key key; 528 size_t datasize; 529 530 key.objectid = btrfs_ino(inode); 531 key.offset = 0; 532 key.type = BTRFS_EXTENT_DATA_KEY; 533 534 datasize = btrfs_file_extent_calc_inline_size(cur_size); 535 ret = btrfs_insert_empty_item(trans, root, path, &key, 536 datasize); 537 if (ret) 538 goto fail; 539 } 540 leaf = path->nodes[0]; 541 ei = btrfs_item_ptr(leaf, path->slots[0], 542 struct btrfs_file_extent_item); 543 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 544 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 545 btrfs_set_file_extent_encryption(leaf, ei, 0); 546 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 547 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 548 ptr = btrfs_file_extent_inline_start(ei); 549 550 if (compress_type != BTRFS_COMPRESS_NONE) { 551 struct page *cpage; 552 int i = 0; 553 while (compressed_size > 0) { 554 cpage = compressed_pages[i]; 555 cur_size = min_t(unsigned long, compressed_size, 556 PAGE_SIZE); 557 558 kaddr = kmap_local_page(cpage); 559 write_extent_buffer(leaf, kaddr, ptr, cur_size); 560 kunmap_local(kaddr); 561 562 i++; 563 ptr += cur_size; 564 compressed_size -= cur_size; 565 } 566 btrfs_set_file_extent_compression(leaf, ei, 567 compress_type); 568 } else { 569 page = find_get_page(inode->vfs_inode.i_mapping, 0); 570 btrfs_set_file_extent_compression(leaf, ei, 0); 571 kaddr = kmap_local_page(page); 572 write_extent_buffer(leaf, kaddr, ptr, size); 573 kunmap_local(kaddr); 574 put_page(page); 575 } 576 btrfs_mark_buffer_dirty(leaf); 577 btrfs_release_path(path); 578 579 /* 580 * We align size to sectorsize for inline extents just for simplicity 581 * sake. 582 */ 583 ret = btrfs_inode_set_file_extent_range(inode, 0, 584 ALIGN(size, root->fs_info->sectorsize)); 585 if (ret) 586 goto fail; 587 588 /* 589 * We're an inline extent, so nobody can extend the file past i_size 590 * without locking a page we already have locked. 591 * 592 * We must do any i_size and inode updates before we unlock the pages. 593 * Otherwise we could end up racing with unlink. 594 */ 595 i_size = i_size_read(&inode->vfs_inode); 596 if (update_i_size && size > i_size) { 597 i_size_write(&inode->vfs_inode, size); 598 i_size = size; 599 } 600 inode->disk_i_size = i_size; 601 602 fail: 603 return ret; 604 } 605 606 607 /* 608 * conditionally insert an inline extent into the file. This 609 * does the checks required to make sure the data is small enough 610 * to fit as an inline extent. 611 */ 612 static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size, 613 size_t compressed_size, 614 int compress_type, 615 struct page **compressed_pages, 616 bool update_i_size) 617 { 618 struct btrfs_drop_extents_args drop_args = { 0 }; 619 struct btrfs_root *root = inode->root; 620 struct btrfs_fs_info *fs_info = root->fs_info; 621 struct btrfs_trans_handle *trans; 622 u64 data_len = (compressed_size ?: size); 623 int ret; 624 struct btrfs_path *path; 625 626 /* 627 * We can create an inline extent if it ends at or beyond the current 628 * i_size, is no larger than a sector (decompressed), and the (possibly 629 * compressed) data fits in a leaf and the configured maximum inline 630 * size. 631 */ 632 if (size < i_size_read(&inode->vfs_inode) || 633 size > fs_info->sectorsize || 634 data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) || 635 data_len > fs_info->max_inline) 636 return 1; 637 638 path = btrfs_alloc_path(); 639 if (!path) 640 return -ENOMEM; 641 642 trans = btrfs_join_transaction(root); 643 if (IS_ERR(trans)) { 644 btrfs_free_path(path); 645 return PTR_ERR(trans); 646 } 647 trans->block_rsv = &inode->block_rsv; 648 649 drop_args.path = path; 650 drop_args.start = 0; 651 drop_args.end = fs_info->sectorsize; 652 drop_args.drop_cache = true; 653 drop_args.replace_extent = true; 654 drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len); 655 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 656 if (ret) { 657 btrfs_abort_transaction(trans, ret); 658 goto out; 659 } 660 661 ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted, 662 size, compressed_size, compress_type, 663 compressed_pages, update_i_size); 664 if (ret && ret != -ENOSPC) { 665 btrfs_abort_transaction(trans, ret); 666 goto out; 667 } else if (ret == -ENOSPC) { 668 ret = 1; 669 goto out; 670 } 671 672 btrfs_update_inode_bytes(inode, size, drop_args.bytes_found); 673 ret = btrfs_update_inode(trans, root, inode); 674 if (ret && ret != -ENOSPC) { 675 btrfs_abort_transaction(trans, ret); 676 goto out; 677 } else if (ret == -ENOSPC) { 678 ret = 1; 679 goto out; 680 } 681 682 btrfs_set_inode_full_sync(inode); 683 out: 684 /* 685 * Don't forget to free the reserved space, as for inlined extent 686 * it won't count as data extent, free them directly here. 687 * And at reserve time, it's always aligned to page size, so 688 * just free one page here. 689 */ 690 btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE); 691 btrfs_free_path(path); 692 btrfs_end_transaction(trans); 693 return ret; 694 } 695 696 struct async_extent { 697 u64 start; 698 u64 ram_size; 699 u64 compressed_size; 700 struct page **pages; 701 unsigned long nr_pages; 702 int compress_type; 703 struct list_head list; 704 }; 705 706 struct async_chunk { 707 struct btrfs_inode *inode; 708 struct page *locked_page; 709 u64 start; 710 u64 end; 711 blk_opf_t write_flags; 712 struct list_head extents; 713 struct cgroup_subsys_state *blkcg_css; 714 struct btrfs_work work; 715 struct async_cow *async_cow; 716 }; 717 718 struct async_cow { 719 atomic_t num_chunks; 720 struct async_chunk chunks[]; 721 }; 722 723 static noinline int add_async_extent(struct async_chunk *cow, 724 u64 start, u64 ram_size, 725 u64 compressed_size, 726 struct page **pages, 727 unsigned long nr_pages, 728 int compress_type) 729 { 730 struct async_extent *async_extent; 731 732 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 733 BUG_ON(!async_extent); /* -ENOMEM */ 734 async_extent->start = start; 735 async_extent->ram_size = ram_size; 736 async_extent->compressed_size = compressed_size; 737 async_extent->pages = pages; 738 async_extent->nr_pages = nr_pages; 739 async_extent->compress_type = compress_type; 740 list_add_tail(&async_extent->list, &cow->extents); 741 return 0; 742 } 743 744 /* 745 * Check if the inode needs to be submitted to compression, based on mount 746 * options, defragmentation, properties or heuristics. 747 */ 748 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, 749 u64 end) 750 { 751 struct btrfs_fs_info *fs_info = inode->root->fs_info; 752 753 if (!btrfs_inode_can_compress(inode)) { 754 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), 755 KERN_ERR "BTRFS: unexpected compression for ino %llu\n", 756 btrfs_ino(inode)); 757 return 0; 758 } 759 /* 760 * Special check for subpage. 761 * 762 * We lock the full page then run each delalloc range in the page, thus 763 * for the following case, we will hit some subpage specific corner case: 764 * 765 * 0 32K 64K 766 * | |///////| |///////| 767 * \- A \- B 768 * 769 * In above case, both range A and range B will try to unlock the full 770 * page [0, 64K), causing the one finished later will have page 771 * unlocked already, triggering various page lock requirement BUG_ON()s. 772 * 773 * So here we add an artificial limit that subpage compression can only 774 * if the range is fully page aligned. 775 * 776 * In theory we only need to ensure the first page is fully covered, but 777 * the tailing partial page will be locked until the full compression 778 * finishes, delaying the write of other range. 779 * 780 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range 781 * first to prevent any submitted async extent to unlock the full page. 782 * By this, we can ensure for subpage case that only the last async_cow 783 * will unlock the full page. 784 */ 785 if (fs_info->sectorsize < PAGE_SIZE) { 786 if (!PAGE_ALIGNED(start) || 787 !PAGE_ALIGNED(end + 1)) 788 return 0; 789 } 790 791 /* force compress */ 792 if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) 793 return 1; 794 /* defrag ioctl */ 795 if (inode->defrag_compress) 796 return 1; 797 /* bad compression ratios */ 798 if (inode->flags & BTRFS_INODE_NOCOMPRESS) 799 return 0; 800 if (btrfs_test_opt(fs_info, COMPRESS) || 801 inode->flags & BTRFS_INODE_COMPRESS || 802 inode->prop_compress) 803 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); 804 return 0; 805 } 806 807 static inline void inode_should_defrag(struct btrfs_inode *inode, 808 u64 start, u64 end, u64 num_bytes, u32 small_write) 809 { 810 /* If this is a small write inside eof, kick off a defrag */ 811 if (num_bytes < small_write && 812 (start > 0 || end + 1 < inode->disk_i_size)) 813 btrfs_add_inode_defrag(NULL, inode, small_write); 814 } 815 816 /* 817 * Work queue call back to started compression on a file and pages. 818 * 819 * This is done inside an ordered work queue, and the compression is spread 820 * across many cpus. The actual IO submission is step two, and the ordered work 821 * queue takes care of making sure that happens in the same order things were 822 * put onto the queue by writepages and friends. 823 * 824 * If this code finds it can't get good compression, it puts an entry onto the 825 * work queue to write the uncompressed bytes. This makes sure that both 826 * compressed inodes and uncompressed inodes are written in the same order that 827 * the flusher thread sent them down. 828 */ 829 static void compress_file_range(struct btrfs_work *work) 830 { 831 struct async_chunk *async_chunk = 832 container_of(work, struct async_chunk, work); 833 struct btrfs_inode *inode = async_chunk->inode; 834 struct btrfs_fs_info *fs_info = inode->root->fs_info; 835 struct address_space *mapping = inode->vfs_inode.i_mapping; 836 u64 blocksize = fs_info->sectorsize; 837 u64 start = async_chunk->start; 838 u64 end = async_chunk->end; 839 u64 actual_end; 840 u64 i_size; 841 int ret = 0; 842 struct page **pages; 843 unsigned long nr_pages; 844 unsigned long total_compressed = 0; 845 unsigned long total_in = 0; 846 unsigned int poff; 847 int i; 848 int compress_type = fs_info->compress_type; 849 850 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); 851 852 /* 853 * We need to call clear_page_dirty_for_io on each page in the range. 854 * Otherwise applications with the file mmap'd can wander in and change 855 * the page contents while we are compressing them. 856 */ 857 extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); 858 859 /* 860 * We need to save i_size before now because it could change in between 861 * us evaluating the size and assigning it. This is because we lock and 862 * unlock the page in truncate and fallocate, and then modify the i_size 863 * later on. 864 * 865 * The barriers are to emulate READ_ONCE, remove that once i_size_read 866 * does that for us. 867 */ 868 barrier(); 869 i_size = i_size_read(&inode->vfs_inode); 870 barrier(); 871 actual_end = min_t(u64, i_size, end + 1); 872 again: 873 pages = NULL; 874 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; 875 nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES); 876 877 /* 878 * we don't want to send crud past the end of i_size through 879 * compression, that's just a waste of CPU time. So, if the 880 * end of the file is before the start of our current 881 * requested range of bytes, we bail out to the uncompressed 882 * cleanup code that can deal with all of this. 883 * 884 * It isn't really the fastest way to fix things, but this is a 885 * very uncommon corner. 886 */ 887 if (actual_end <= start) 888 goto cleanup_and_bail_uncompressed; 889 890 total_compressed = actual_end - start; 891 892 /* 893 * Skip compression for a small file range(<=blocksize) that 894 * isn't an inline extent, since it doesn't save disk space at all. 895 */ 896 if (total_compressed <= blocksize && 897 (start > 0 || end + 1 < inode->disk_i_size)) 898 goto cleanup_and_bail_uncompressed; 899 900 /* 901 * For subpage case, we require full page alignment for the sector 902 * aligned range. 903 * Thus we must also check against @actual_end, not just @end. 904 */ 905 if (blocksize < PAGE_SIZE) { 906 if (!PAGE_ALIGNED(start) || 907 !PAGE_ALIGNED(round_up(actual_end, blocksize))) 908 goto cleanup_and_bail_uncompressed; 909 } 910 911 total_compressed = min_t(unsigned long, total_compressed, 912 BTRFS_MAX_UNCOMPRESSED); 913 total_in = 0; 914 ret = 0; 915 916 /* 917 * We do compression for mount -o compress and when the inode has not 918 * been flagged as NOCOMPRESS. This flag can change at any time if we 919 * discover bad compression ratios. 920 */ 921 if (!inode_need_compress(inode, start, end)) 922 goto cleanup_and_bail_uncompressed; 923 924 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 925 if (!pages) { 926 /* 927 * Memory allocation failure is not a fatal error, we can fall 928 * back to uncompressed code. 929 */ 930 goto cleanup_and_bail_uncompressed; 931 } 932 933 if (inode->defrag_compress) 934 compress_type = inode->defrag_compress; 935 else if (inode->prop_compress) 936 compress_type = inode->prop_compress; 937 938 /* Compression level is applied here. */ 939 ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4), 940 mapping, start, pages, &nr_pages, &total_in, 941 &total_compressed); 942 if (ret) 943 goto mark_incompressible; 944 945 /* 946 * Zero the tail end of the last page, as we might be sending it down 947 * to disk. 948 */ 949 poff = offset_in_page(total_compressed); 950 if (poff) 951 memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff); 952 953 /* 954 * Try to create an inline extent. 955 * 956 * If we didn't compress the entire range, try to create an uncompressed 957 * inline extent, else a compressed one. 958 * 959 * Check cow_file_range() for why we don't even try to create inline 960 * extent for the subpage case. 961 */ 962 if (start == 0 && fs_info->sectorsize == PAGE_SIZE) { 963 if (total_in < actual_end) { 964 ret = cow_file_range_inline(inode, actual_end, 0, 965 BTRFS_COMPRESS_NONE, NULL, 966 false); 967 } else { 968 ret = cow_file_range_inline(inode, actual_end, 969 total_compressed, 970 compress_type, pages, 971 false); 972 } 973 if (ret <= 0) { 974 unsigned long clear_flags = EXTENT_DELALLOC | 975 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | 976 EXTENT_DO_ACCOUNTING; 977 978 if (ret < 0) 979 mapping_set_error(mapping, -EIO); 980 981 /* 982 * inline extent creation worked or returned error, 983 * we don't need to create any more async work items. 984 * Unlock and free up our temp pages. 985 * 986 * We use DO_ACCOUNTING here because we need the 987 * delalloc_release_metadata to be done _after_ we drop 988 * our outstanding extent for clearing delalloc for this 989 * range. 990 */ 991 extent_clear_unlock_delalloc(inode, start, end, 992 NULL, 993 clear_flags, 994 PAGE_UNLOCK | 995 PAGE_START_WRITEBACK | 996 PAGE_END_WRITEBACK); 997 goto free_pages; 998 } 999 } 1000 1001 /* 1002 * We aren't doing an inline extent. Round the compressed size up to a 1003 * block size boundary so the allocator does sane things. 1004 */ 1005 total_compressed = ALIGN(total_compressed, blocksize); 1006 1007 /* 1008 * One last check to make sure the compression is really a win, compare 1009 * the page count read with the blocks on disk, compression must free at 1010 * least one sector. 1011 */ 1012 total_in = round_up(total_in, fs_info->sectorsize); 1013 if (total_compressed + blocksize > total_in) 1014 goto mark_incompressible; 1015 1016 /* 1017 * The async work queues will take care of doing actual allocation on 1018 * disk for these compressed pages, and will submit the bios. 1019 */ 1020 add_async_extent(async_chunk, start, total_in, total_compressed, pages, 1021 nr_pages, compress_type); 1022 if (start + total_in < end) { 1023 start += total_in; 1024 cond_resched(); 1025 goto again; 1026 } 1027 return; 1028 1029 mark_incompressible: 1030 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) 1031 inode->flags |= BTRFS_INODE_NOCOMPRESS; 1032 cleanup_and_bail_uncompressed: 1033 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, 1034 BTRFS_COMPRESS_NONE); 1035 free_pages: 1036 if (pages) { 1037 for (i = 0; i < nr_pages; i++) { 1038 WARN_ON(pages[i]->mapping); 1039 put_page(pages[i]); 1040 } 1041 kfree(pages); 1042 } 1043 } 1044 1045 static void free_async_extent_pages(struct async_extent *async_extent) 1046 { 1047 int i; 1048 1049 if (!async_extent->pages) 1050 return; 1051 1052 for (i = 0; i < async_extent->nr_pages; i++) { 1053 WARN_ON(async_extent->pages[i]->mapping); 1054 put_page(async_extent->pages[i]); 1055 } 1056 kfree(async_extent->pages); 1057 async_extent->nr_pages = 0; 1058 async_extent->pages = NULL; 1059 } 1060 1061 static void submit_uncompressed_range(struct btrfs_inode *inode, 1062 struct async_extent *async_extent, 1063 struct page *locked_page) 1064 { 1065 u64 start = async_extent->start; 1066 u64 end = async_extent->start + async_extent->ram_size - 1; 1067 int ret; 1068 struct writeback_control wbc = { 1069 .sync_mode = WB_SYNC_ALL, 1070 .range_start = start, 1071 .range_end = end, 1072 .no_cgroup_owner = 1, 1073 }; 1074 1075 wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); 1076 ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false); 1077 wbc_detach_inode(&wbc); 1078 if (ret < 0) { 1079 btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1); 1080 if (locked_page) { 1081 const u64 page_start = page_offset(locked_page); 1082 1083 set_page_writeback(locked_page); 1084 end_page_writeback(locked_page); 1085 btrfs_mark_ordered_io_finished(inode, locked_page, 1086 page_start, PAGE_SIZE, 1087 !ret); 1088 btrfs_page_clear_uptodate(inode->root->fs_info, 1089 locked_page, page_start, 1090 PAGE_SIZE); 1091 mapping_set_error(locked_page->mapping, ret); 1092 unlock_page(locked_page); 1093 } 1094 } 1095 } 1096 1097 static void submit_one_async_extent(struct async_chunk *async_chunk, 1098 struct async_extent *async_extent, 1099 u64 *alloc_hint) 1100 { 1101 struct btrfs_inode *inode = async_chunk->inode; 1102 struct extent_io_tree *io_tree = &inode->io_tree; 1103 struct btrfs_root *root = inode->root; 1104 struct btrfs_fs_info *fs_info = root->fs_info; 1105 struct btrfs_ordered_extent *ordered; 1106 struct btrfs_key ins; 1107 struct page *locked_page = NULL; 1108 struct extent_map *em; 1109 int ret = 0; 1110 u64 start = async_extent->start; 1111 u64 end = async_extent->start + async_extent->ram_size - 1; 1112 1113 if (async_chunk->blkcg_css) 1114 kthread_associate_blkcg(async_chunk->blkcg_css); 1115 1116 /* 1117 * If async_chunk->locked_page is in the async_extent range, we need to 1118 * handle it. 1119 */ 1120 if (async_chunk->locked_page) { 1121 u64 locked_page_start = page_offset(async_chunk->locked_page); 1122 u64 locked_page_end = locked_page_start + PAGE_SIZE - 1; 1123 1124 if (!(start >= locked_page_end || end <= locked_page_start)) 1125 locked_page = async_chunk->locked_page; 1126 } 1127 lock_extent(io_tree, start, end, NULL); 1128 1129 if (async_extent->compress_type == BTRFS_COMPRESS_NONE) { 1130 submit_uncompressed_range(inode, async_extent, locked_page); 1131 goto done; 1132 } 1133 1134 ret = btrfs_reserve_extent(root, async_extent->ram_size, 1135 async_extent->compressed_size, 1136 async_extent->compressed_size, 1137 0, *alloc_hint, &ins, 1, 1); 1138 if (ret) { 1139 /* 1140 * Here we used to try again by going back to non-compressed 1141 * path for ENOSPC. But we can't reserve space even for 1142 * compressed size, how could it work for uncompressed size 1143 * which requires larger size? So here we directly go error 1144 * path. 1145 */ 1146 goto out_free; 1147 } 1148 1149 /* Here we're doing allocation and writeback of the compressed pages */ 1150 em = create_io_em(inode, start, 1151 async_extent->ram_size, /* len */ 1152 start, /* orig_start */ 1153 ins.objectid, /* block_start */ 1154 ins.offset, /* block_len */ 1155 ins.offset, /* orig_block_len */ 1156 async_extent->ram_size, /* ram_bytes */ 1157 async_extent->compress_type, 1158 BTRFS_ORDERED_COMPRESSED); 1159 if (IS_ERR(em)) { 1160 ret = PTR_ERR(em); 1161 goto out_free_reserve; 1162 } 1163 free_extent_map(em); 1164 1165 ordered = btrfs_alloc_ordered_extent(inode, start, /* file_offset */ 1166 async_extent->ram_size, /* num_bytes */ 1167 async_extent->ram_size, /* ram_bytes */ 1168 ins.objectid, /* disk_bytenr */ 1169 ins.offset, /* disk_num_bytes */ 1170 0, /* offset */ 1171 1 << BTRFS_ORDERED_COMPRESSED, 1172 async_extent->compress_type); 1173 if (IS_ERR(ordered)) { 1174 btrfs_drop_extent_map_range(inode, start, end, false); 1175 ret = PTR_ERR(ordered); 1176 goto out_free_reserve; 1177 } 1178 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1179 1180 /* Clear dirty, set writeback and unlock the pages. */ 1181 extent_clear_unlock_delalloc(inode, start, end, 1182 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 1183 PAGE_UNLOCK | PAGE_START_WRITEBACK); 1184 btrfs_submit_compressed_write(ordered, 1185 async_extent->pages, /* compressed_pages */ 1186 async_extent->nr_pages, 1187 async_chunk->write_flags, true); 1188 *alloc_hint = ins.objectid + ins.offset; 1189 done: 1190 if (async_chunk->blkcg_css) 1191 kthread_associate_blkcg(NULL); 1192 kfree(async_extent); 1193 return; 1194 1195 out_free_reserve: 1196 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1197 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1198 out_free: 1199 mapping_set_error(inode->vfs_inode.i_mapping, -EIO); 1200 extent_clear_unlock_delalloc(inode, start, end, 1201 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 1202 EXTENT_DELALLOC_NEW | 1203 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 1204 PAGE_UNLOCK | PAGE_START_WRITEBACK | 1205 PAGE_END_WRITEBACK); 1206 free_async_extent_pages(async_extent); 1207 if (async_chunk->blkcg_css) 1208 kthread_associate_blkcg(NULL); 1209 btrfs_debug(fs_info, 1210 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d", 1211 root->root_key.objectid, btrfs_ino(inode), start, 1212 async_extent->ram_size, ret); 1213 kfree(async_extent); 1214 } 1215 1216 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, 1217 u64 num_bytes) 1218 { 1219 struct extent_map_tree *em_tree = &inode->extent_tree; 1220 struct extent_map *em; 1221 u64 alloc_hint = 0; 1222 1223 read_lock(&em_tree->lock); 1224 em = search_extent_mapping(em_tree, start, num_bytes); 1225 if (em) { 1226 /* 1227 * if block start isn't an actual block number then find the 1228 * first block in this inode and use that as a hint. If that 1229 * block is also bogus then just don't worry about it. 1230 */ 1231 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 1232 free_extent_map(em); 1233 em = search_extent_mapping(em_tree, 0, 0); 1234 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) 1235 alloc_hint = em->block_start; 1236 if (em) 1237 free_extent_map(em); 1238 } else { 1239 alloc_hint = em->block_start; 1240 free_extent_map(em); 1241 } 1242 } 1243 read_unlock(&em_tree->lock); 1244 1245 return alloc_hint; 1246 } 1247 1248 /* 1249 * when extent_io.c finds a delayed allocation range in the file, 1250 * the call backs end up in this code. The basic idea is to 1251 * allocate extents on disk for the range, and create ordered data structs 1252 * in ram to track those extents. 1253 * 1254 * locked_page is the page that writepage had locked already. We use 1255 * it to make sure we don't do extra locks or unlocks. 1256 * 1257 * When this function fails, it unlocks all pages except @locked_page. 1258 * 1259 * When this function successfully creates an inline extent, it returns 1 and 1260 * unlocks all pages including locked_page and starts I/O on them. 1261 * (In reality inline extents are limited to a single page, so locked_page is 1262 * the only page handled anyway). 1263 * 1264 * When this function succeed and creates a normal extent, the page locking 1265 * status depends on the passed in flags: 1266 * 1267 * - If @keep_locked is set, all pages are kept locked. 1268 * - Else all pages except for @locked_page are unlocked. 1269 * 1270 * When a failure happens in the second or later iteration of the 1271 * while-loop, the ordered extents created in previous iterations are kept 1272 * intact. So, the caller must clean them up by calling 1273 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for 1274 * example. 1275 */ 1276 static noinline int cow_file_range(struct btrfs_inode *inode, 1277 struct page *locked_page, u64 start, u64 end, 1278 u64 *done_offset, 1279 bool keep_locked, bool no_inline) 1280 { 1281 struct btrfs_root *root = inode->root; 1282 struct btrfs_fs_info *fs_info = root->fs_info; 1283 u64 alloc_hint = 0; 1284 u64 orig_start = start; 1285 u64 num_bytes; 1286 unsigned long ram_size; 1287 u64 cur_alloc_size = 0; 1288 u64 min_alloc_size; 1289 u64 blocksize = fs_info->sectorsize; 1290 struct btrfs_key ins; 1291 struct extent_map *em; 1292 unsigned clear_bits; 1293 unsigned long page_ops; 1294 bool extent_reserved = false; 1295 int ret = 0; 1296 1297 if (btrfs_is_free_space_inode(inode)) { 1298 ret = -EINVAL; 1299 goto out_unlock; 1300 } 1301 1302 num_bytes = ALIGN(end - start + 1, blocksize); 1303 num_bytes = max(blocksize, num_bytes); 1304 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); 1305 1306 inode_should_defrag(inode, start, end, num_bytes, SZ_64K); 1307 1308 /* 1309 * Due to the page size limit, for subpage we can only trigger the 1310 * writeback for the dirty sectors of page, that means data writeback 1311 * is doing more writeback than what we want. 1312 * 1313 * This is especially unexpected for some call sites like fallocate, 1314 * where we only increase i_size after everything is done. 1315 * This means we can trigger inline extent even if we didn't want to. 1316 * So here we skip inline extent creation completely. 1317 */ 1318 if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) { 1319 u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode), 1320 end + 1); 1321 1322 /* lets try to make an inline extent */ 1323 ret = cow_file_range_inline(inode, actual_end, 0, 1324 BTRFS_COMPRESS_NONE, NULL, false); 1325 if (ret == 0) { 1326 /* 1327 * We use DO_ACCOUNTING here because we need the 1328 * delalloc_release_metadata to be run _after_ we drop 1329 * our outstanding extent for clearing delalloc for this 1330 * range. 1331 */ 1332 extent_clear_unlock_delalloc(inode, start, end, 1333 locked_page, 1334 EXTENT_LOCKED | EXTENT_DELALLOC | 1335 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | 1336 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 1337 PAGE_START_WRITEBACK | PAGE_END_WRITEBACK); 1338 /* 1339 * locked_page is locked by the caller of 1340 * writepage_delalloc(), not locked by 1341 * __process_pages_contig(). 1342 * 1343 * We can't let __process_pages_contig() to unlock it, 1344 * as it doesn't have any subpage::writers recorded. 1345 * 1346 * Here we manually unlock the page, since the caller 1347 * can't determine if it's an inline extent or a 1348 * compressed extent. 1349 */ 1350 unlock_page(locked_page); 1351 ret = 1; 1352 goto done; 1353 } else if (ret < 0) { 1354 goto out_unlock; 1355 } 1356 } 1357 1358 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); 1359 1360 /* 1361 * Relocation relies on the relocated extents to have exactly the same 1362 * size as the original extents. Normally writeback for relocation data 1363 * extents follows a NOCOW path because relocation preallocates the 1364 * extents. However, due to an operation such as scrub turning a block 1365 * group to RO mode, it may fallback to COW mode, so we must make sure 1366 * an extent allocated during COW has exactly the requested size and can 1367 * not be split into smaller extents, otherwise relocation breaks and 1368 * fails during the stage where it updates the bytenr of file extent 1369 * items. 1370 */ 1371 if (btrfs_is_data_reloc_root(root)) 1372 min_alloc_size = num_bytes; 1373 else 1374 min_alloc_size = fs_info->sectorsize; 1375 1376 while (num_bytes > 0) { 1377 struct btrfs_ordered_extent *ordered; 1378 1379 cur_alloc_size = num_bytes; 1380 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, 1381 min_alloc_size, 0, alloc_hint, 1382 &ins, 1, 1); 1383 if (ret == -EAGAIN) { 1384 /* 1385 * btrfs_reserve_extent only returns -EAGAIN for zoned 1386 * file systems, which is an indication that there are 1387 * no active zones to allocate from at the moment. 1388 * 1389 * If this is the first loop iteration, wait for at 1390 * least one zone to finish before retrying the 1391 * allocation. Otherwise ask the caller to write out 1392 * the already allocated blocks before coming back to 1393 * us, or return -ENOSPC if it can't handle retries. 1394 */ 1395 ASSERT(btrfs_is_zoned(fs_info)); 1396 if (start == orig_start) { 1397 wait_on_bit_io(&inode->root->fs_info->flags, 1398 BTRFS_FS_NEED_ZONE_FINISH, 1399 TASK_UNINTERRUPTIBLE); 1400 continue; 1401 } 1402 if (done_offset) { 1403 *done_offset = start - 1; 1404 return 0; 1405 } 1406 ret = -ENOSPC; 1407 } 1408 if (ret < 0) 1409 goto out_unlock; 1410 cur_alloc_size = ins.offset; 1411 extent_reserved = true; 1412 1413 ram_size = ins.offset; 1414 em = create_io_em(inode, start, ins.offset, /* len */ 1415 start, /* orig_start */ 1416 ins.objectid, /* block_start */ 1417 ins.offset, /* block_len */ 1418 ins.offset, /* orig_block_len */ 1419 ram_size, /* ram_bytes */ 1420 BTRFS_COMPRESS_NONE, /* compress_type */ 1421 BTRFS_ORDERED_REGULAR /* type */); 1422 if (IS_ERR(em)) { 1423 ret = PTR_ERR(em); 1424 goto out_reserve; 1425 } 1426 free_extent_map(em); 1427 1428 ordered = btrfs_alloc_ordered_extent(inode, start, ram_size, 1429 ram_size, ins.objectid, cur_alloc_size, 1430 0, 1 << BTRFS_ORDERED_REGULAR, 1431 BTRFS_COMPRESS_NONE); 1432 if (IS_ERR(ordered)) { 1433 ret = PTR_ERR(ordered); 1434 goto out_drop_extent_cache; 1435 } 1436 1437 if (btrfs_is_data_reloc_root(root)) { 1438 ret = btrfs_reloc_clone_csums(ordered); 1439 1440 /* 1441 * Only drop cache here, and process as normal. 1442 * 1443 * We must not allow extent_clear_unlock_delalloc() 1444 * at out_unlock label to free meta of this ordered 1445 * extent, as its meta should be freed by 1446 * btrfs_finish_ordered_io(). 1447 * 1448 * So we must continue until @start is increased to 1449 * skip current ordered extent. 1450 */ 1451 if (ret) 1452 btrfs_drop_extent_map_range(inode, start, 1453 start + ram_size - 1, 1454 false); 1455 } 1456 btrfs_put_ordered_extent(ordered); 1457 1458 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1459 1460 /* 1461 * We're not doing compressed IO, don't unlock the first page 1462 * (which the caller expects to stay locked), don't clear any 1463 * dirty bits and don't set any writeback bits 1464 * 1465 * Do set the Ordered (Private2) bit so we know this page was 1466 * properly setup for writepage. 1467 */ 1468 page_ops = (keep_locked ? 0 : PAGE_UNLOCK); 1469 page_ops |= PAGE_SET_ORDERED; 1470 1471 extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, 1472 locked_page, 1473 EXTENT_LOCKED | EXTENT_DELALLOC, 1474 page_ops); 1475 if (num_bytes < cur_alloc_size) 1476 num_bytes = 0; 1477 else 1478 num_bytes -= cur_alloc_size; 1479 alloc_hint = ins.objectid + ins.offset; 1480 start += cur_alloc_size; 1481 extent_reserved = false; 1482 1483 /* 1484 * btrfs_reloc_clone_csums() error, since start is increased 1485 * extent_clear_unlock_delalloc() at out_unlock label won't 1486 * free metadata of current ordered extent, we're OK to exit. 1487 */ 1488 if (ret) 1489 goto out_unlock; 1490 } 1491 done: 1492 if (done_offset) 1493 *done_offset = end; 1494 return ret; 1495 1496 out_drop_extent_cache: 1497 btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false); 1498 out_reserve: 1499 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1500 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1501 out_unlock: 1502 /* 1503 * Now, we have three regions to clean up: 1504 * 1505 * |-------(1)----|---(2)---|-------------(3)----------| 1506 * `- orig_start `- start `- start + cur_alloc_size `- end 1507 * 1508 * We process each region below. 1509 */ 1510 1511 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 1512 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; 1513 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK; 1514 1515 /* 1516 * For the range (1). We have already instantiated the ordered extents 1517 * for this region. They are cleaned up by 1518 * btrfs_cleanup_ordered_extents() in e.g, 1519 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are 1520 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW | 1521 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup 1522 * function. 1523 * 1524 * However, in case of @keep_locked, we still need to unlock the pages 1525 * (except @locked_page) to ensure all the pages are unlocked. 1526 */ 1527 if (keep_locked && orig_start < start) { 1528 if (!locked_page) 1529 mapping_set_error(inode->vfs_inode.i_mapping, ret); 1530 extent_clear_unlock_delalloc(inode, orig_start, start - 1, 1531 locked_page, 0, page_ops); 1532 } 1533 1534 /* 1535 * For the range (2). If we reserved an extent for our delalloc range 1536 * (or a subrange) and failed to create the respective ordered extent, 1537 * then it means that when we reserved the extent we decremented the 1538 * extent's size from the data space_info's bytes_may_use counter and 1539 * incremented the space_info's bytes_reserved counter by the same 1540 * amount. We must make sure extent_clear_unlock_delalloc() does not try 1541 * to decrement again the data space_info's bytes_may_use counter, 1542 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV. 1543 */ 1544 if (extent_reserved) { 1545 extent_clear_unlock_delalloc(inode, start, 1546 start + cur_alloc_size - 1, 1547 locked_page, 1548 clear_bits, 1549 page_ops); 1550 start += cur_alloc_size; 1551 } 1552 1553 /* 1554 * For the range (3). We never touched the region. In addition to the 1555 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data 1556 * space_info's bytes_may_use counter, reserved in 1557 * btrfs_check_data_free_space(). 1558 */ 1559 if (start < end) { 1560 clear_bits |= EXTENT_CLEAR_DATA_RESV; 1561 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1562 clear_bits, page_ops); 1563 } 1564 return ret; 1565 } 1566 1567 /* 1568 * Phase two of compressed writeback. This is the ordered portion of the code, 1569 * which only gets called in the order the work was queued. We walk all the 1570 * async extents created by compress_file_range and send them down to the disk. 1571 */ 1572 static noinline void submit_compressed_extents(struct btrfs_work *work) 1573 { 1574 struct async_chunk *async_chunk = container_of(work, struct async_chunk, 1575 work); 1576 struct btrfs_fs_info *fs_info = btrfs_work_owner(work); 1577 struct async_extent *async_extent; 1578 unsigned long nr_pages; 1579 u64 alloc_hint = 0; 1580 1581 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> 1582 PAGE_SHIFT; 1583 1584 while (!list_empty(&async_chunk->extents)) { 1585 async_extent = list_entry(async_chunk->extents.next, 1586 struct async_extent, list); 1587 list_del(&async_extent->list); 1588 submit_one_async_extent(async_chunk, async_extent, &alloc_hint); 1589 } 1590 1591 /* atomic_sub_return implies a barrier */ 1592 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < 1593 5 * SZ_1M) 1594 cond_wake_up_nomb(&fs_info->async_submit_wait); 1595 } 1596 1597 static noinline void async_cow_free(struct btrfs_work *work) 1598 { 1599 struct async_chunk *async_chunk; 1600 struct async_cow *async_cow; 1601 1602 async_chunk = container_of(work, struct async_chunk, work); 1603 btrfs_add_delayed_iput(async_chunk->inode); 1604 if (async_chunk->blkcg_css) 1605 css_put(async_chunk->blkcg_css); 1606 1607 async_cow = async_chunk->async_cow; 1608 if (atomic_dec_and_test(&async_cow->num_chunks)) 1609 kvfree(async_cow); 1610 } 1611 1612 static bool run_delalloc_compressed(struct btrfs_inode *inode, 1613 struct page *locked_page, u64 start, 1614 u64 end, struct writeback_control *wbc) 1615 { 1616 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1617 struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc); 1618 struct async_cow *ctx; 1619 struct async_chunk *async_chunk; 1620 unsigned long nr_pages; 1621 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); 1622 int i; 1623 unsigned nofs_flag; 1624 const blk_opf_t write_flags = wbc_to_write_flags(wbc); 1625 1626 nofs_flag = memalloc_nofs_save(); 1627 ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL); 1628 memalloc_nofs_restore(nofs_flag); 1629 if (!ctx) 1630 return false; 1631 1632 unlock_extent(&inode->io_tree, start, end, NULL); 1633 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); 1634 1635 async_chunk = ctx->chunks; 1636 atomic_set(&ctx->num_chunks, num_chunks); 1637 1638 for (i = 0; i < num_chunks; i++) { 1639 u64 cur_end = min(end, start + SZ_512K - 1); 1640 1641 /* 1642 * igrab is called higher up in the call chain, take only the 1643 * lightweight reference for the callback lifetime 1644 */ 1645 ihold(&inode->vfs_inode); 1646 async_chunk[i].async_cow = ctx; 1647 async_chunk[i].inode = inode; 1648 async_chunk[i].start = start; 1649 async_chunk[i].end = cur_end; 1650 async_chunk[i].write_flags = write_flags; 1651 INIT_LIST_HEAD(&async_chunk[i].extents); 1652 1653 /* 1654 * The locked_page comes all the way from writepage and its 1655 * the original page we were actually given. As we spread 1656 * this large delalloc region across multiple async_chunk 1657 * structs, only the first struct needs a pointer to locked_page 1658 * 1659 * This way we don't need racey decisions about who is supposed 1660 * to unlock it. 1661 */ 1662 if (locked_page) { 1663 /* 1664 * Depending on the compressibility, the pages might or 1665 * might not go through async. We want all of them to 1666 * be accounted against wbc once. Let's do it here 1667 * before the paths diverge. wbc accounting is used 1668 * only for foreign writeback detection and doesn't 1669 * need full accuracy. Just account the whole thing 1670 * against the first page. 1671 */ 1672 wbc_account_cgroup_owner(wbc, locked_page, 1673 cur_end - start); 1674 async_chunk[i].locked_page = locked_page; 1675 locked_page = NULL; 1676 } else { 1677 async_chunk[i].locked_page = NULL; 1678 } 1679 1680 if (blkcg_css != blkcg_root_css) { 1681 css_get(blkcg_css); 1682 async_chunk[i].blkcg_css = blkcg_css; 1683 async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT; 1684 } else { 1685 async_chunk[i].blkcg_css = NULL; 1686 } 1687 1688 btrfs_init_work(&async_chunk[i].work, compress_file_range, 1689 submit_compressed_extents, async_cow_free); 1690 1691 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); 1692 atomic_add(nr_pages, &fs_info->async_delalloc_pages); 1693 1694 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); 1695 1696 start = cur_end + 1; 1697 } 1698 return true; 1699 } 1700 1701 /* 1702 * Run the delalloc range from start to end, and write back any dirty pages 1703 * covered by the range. 1704 */ 1705 static noinline int run_delalloc_cow(struct btrfs_inode *inode, 1706 struct page *locked_page, u64 start, 1707 u64 end, struct writeback_control *wbc, 1708 bool pages_dirty) 1709 { 1710 u64 done_offset = end; 1711 int ret; 1712 1713 while (start <= end) { 1714 ret = cow_file_range(inode, locked_page, start, end, &done_offset, 1715 true, false); 1716 if (ret) 1717 return ret; 1718 extent_write_locked_range(&inode->vfs_inode, locked_page, start, 1719 done_offset, wbc, pages_dirty); 1720 start = done_offset + 1; 1721 } 1722 1723 return 1; 1724 } 1725 1726 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, 1727 u64 bytenr, u64 num_bytes, bool nowait) 1728 { 1729 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr); 1730 struct btrfs_ordered_sum *sums; 1731 int ret; 1732 LIST_HEAD(list); 1733 1734 ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1, 1735 &list, 0, nowait); 1736 if (ret == 0 && list_empty(&list)) 1737 return 0; 1738 1739 while (!list_empty(&list)) { 1740 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 1741 list_del(&sums->list); 1742 kfree(sums); 1743 } 1744 if (ret < 0) 1745 return ret; 1746 return 1; 1747 } 1748 1749 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page, 1750 const u64 start, const u64 end) 1751 { 1752 const bool is_space_ino = btrfs_is_free_space_inode(inode); 1753 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root); 1754 const u64 range_bytes = end + 1 - start; 1755 struct extent_io_tree *io_tree = &inode->io_tree; 1756 u64 range_start = start; 1757 u64 count; 1758 int ret; 1759 1760 /* 1761 * If EXTENT_NORESERVE is set it means that when the buffered write was 1762 * made we had not enough available data space and therefore we did not 1763 * reserve data space for it, since we though we could do NOCOW for the 1764 * respective file range (either there is prealloc extent or the inode 1765 * has the NOCOW bit set). 1766 * 1767 * However when we need to fallback to COW mode (because for example the 1768 * block group for the corresponding extent was turned to RO mode by a 1769 * scrub or relocation) we need to do the following: 1770 * 1771 * 1) We increment the bytes_may_use counter of the data space info. 1772 * If COW succeeds, it allocates a new data extent and after doing 1773 * that it decrements the space info's bytes_may_use counter and 1774 * increments its bytes_reserved counter by the same amount (we do 1775 * this at btrfs_add_reserved_bytes()). So we need to increment the 1776 * bytes_may_use counter to compensate (when space is reserved at 1777 * buffered write time, the bytes_may_use counter is incremented); 1778 * 1779 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1780 * that if the COW path fails for any reason, it decrements (through 1781 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1782 * data space info, which we incremented in the step above. 1783 * 1784 * If we need to fallback to cow and the inode corresponds to a free 1785 * space cache inode or an inode of the data relocation tree, we must 1786 * also increment bytes_may_use of the data space_info for the same 1787 * reason. Space caches and relocated data extents always get a prealloc 1788 * extent for them, however scrub or balance may have set the block 1789 * group that contains that extent to RO mode and therefore force COW 1790 * when starting writeback. 1791 */ 1792 count = count_range_bits(io_tree, &range_start, end, range_bytes, 1793 EXTENT_NORESERVE, 0, NULL); 1794 if (count > 0 || is_space_ino || is_reloc_ino) { 1795 u64 bytes = count; 1796 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1797 struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1798 1799 if (is_space_ino || is_reloc_ino) 1800 bytes = range_bytes; 1801 1802 spin_lock(&sinfo->lock); 1803 btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); 1804 spin_unlock(&sinfo->lock); 1805 1806 if (count > 0) 1807 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1808 NULL); 1809 } 1810 1811 /* 1812 * Don't try to create inline extents, as a mix of inline extent that 1813 * is written out and unlocked directly and a normal NOCOW extent 1814 * doesn't work. 1815 */ 1816 ret = cow_file_range(inode, locked_page, start, end, NULL, false, true); 1817 ASSERT(ret != 1); 1818 return ret; 1819 } 1820 1821 struct can_nocow_file_extent_args { 1822 /* Input fields. */ 1823 1824 /* Start file offset of the range we want to NOCOW. */ 1825 u64 start; 1826 /* End file offset (inclusive) of the range we want to NOCOW. */ 1827 u64 end; 1828 bool writeback_path; 1829 bool strict; 1830 /* 1831 * Free the path passed to can_nocow_file_extent() once it's not needed 1832 * anymore. 1833 */ 1834 bool free_path; 1835 1836 /* Output fields. Only set when can_nocow_file_extent() returns 1. */ 1837 1838 u64 disk_bytenr; 1839 u64 disk_num_bytes; 1840 u64 extent_offset; 1841 /* Number of bytes that can be written to in NOCOW mode. */ 1842 u64 num_bytes; 1843 }; 1844 1845 /* 1846 * Check if we can NOCOW the file extent that the path points to. 1847 * This function may return with the path released, so the caller should check 1848 * if path->nodes[0] is NULL or not if it needs to use the path afterwards. 1849 * 1850 * Returns: < 0 on error 1851 * 0 if we can not NOCOW 1852 * 1 if we can NOCOW 1853 */ 1854 static int can_nocow_file_extent(struct btrfs_path *path, 1855 struct btrfs_key *key, 1856 struct btrfs_inode *inode, 1857 struct can_nocow_file_extent_args *args) 1858 { 1859 const bool is_freespace_inode = btrfs_is_free_space_inode(inode); 1860 struct extent_buffer *leaf = path->nodes[0]; 1861 struct btrfs_root *root = inode->root; 1862 struct btrfs_file_extent_item *fi; 1863 u64 extent_end; 1864 u8 extent_type; 1865 int can_nocow = 0; 1866 int ret = 0; 1867 bool nowait = path->nowait; 1868 1869 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 1870 extent_type = btrfs_file_extent_type(leaf, fi); 1871 1872 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 1873 goto out; 1874 1875 /* Can't access these fields unless we know it's not an inline extent. */ 1876 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1877 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1878 args->extent_offset = btrfs_file_extent_offset(leaf, fi); 1879 1880 if (!(inode->flags & BTRFS_INODE_NODATACOW) && 1881 extent_type == BTRFS_FILE_EXTENT_REG) 1882 goto out; 1883 1884 /* 1885 * If the extent was created before the generation where the last snapshot 1886 * for its subvolume was created, then this implies the extent is shared, 1887 * hence we must COW. 1888 */ 1889 if (!args->strict && 1890 btrfs_file_extent_generation(leaf, fi) <= 1891 btrfs_root_last_snapshot(&root->root_item)) 1892 goto out; 1893 1894 /* An explicit hole, must COW. */ 1895 if (args->disk_bytenr == 0) 1896 goto out; 1897 1898 /* Compressed/encrypted/encoded extents must be COWed. */ 1899 if (btrfs_file_extent_compression(leaf, fi) || 1900 btrfs_file_extent_encryption(leaf, fi) || 1901 btrfs_file_extent_other_encoding(leaf, fi)) 1902 goto out; 1903 1904 extent_end = btrfs_file_extent_end(path); 1905 1906 /* 1907 * The following checks can be expensive, as they need to take other 1908 * locks and do btree or rbtree searches, so release the path to avoid 1909 * blocking other tasks for too long. 1910 */ 1911 btrfs_release_path(path); 1912 1913 ret = btrfs_cross_ref_exist(root, btrfs_ino(inode), 1914 key->offset - args->extent_offset, 1915 args->disk_bytenr, args->strict, path); 1916 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1917 if (ret != 0) 1918 goto out; 1919 1920 if (args->free_path) { 1921 /* 1922 * We don't need the path anymore, plus through the 1923 * csum_exist_in_range() call below we will end up allocating 1924 * another path. So free the path to avoid unnecessary extra 1925 * memory usage. 1926 */ 1927 btrfs_free_path(path); 1928 path = NULL; 1929 } 1930 1931 /* If there are pending snapshots for this root, we must COW. */ 1932 if (args->writeback_path && !is_freespace_inode && 1933 atomic_read(&root->snapshot_force_cow)) 1934 goto out; 1935 1936 args->disk_bytenr += args->extent_offset; 1937 args->disk_bytenr += args->start - key->offset; 1938 args->num_bytes = min(args->end + 1, extent_end) - args->start; 1939 1940 /* 1941 * Force COW if csums exist in the range. This ensures that csums for a 1942 * given extent are either valid or do not exist. 1943 */ 1944 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes, 1945 nowait); 1946 WARN_ON_ONCE(ret > 0 && is_freespace_inode); 1947 if (ret != 0) 1948 goto out; 1949 1950 can_nocow = 1; 1951 out: 1952 if (args->free_path && path) 1953 btrfs_free_path(path); 1954 1955 return ret < 0 ? ret : can_nocow; 1956 } 1957 1958 /* 1959 * when nowcow writeback call back. This checks for snapshots or COW copies 1960 * of the extents that exist in the file, and COWs the file as required. 1961 * 1962 * If no cow copies or snapshots exist, we write directly to the existing 1963 * blocks on disk 1964 */ 1965 static noinline int run_delalloc_nocow(struct btrfs_inode *inode, 1966 struct page *locked_page, 1967 const u64 start, const u64 end) 1968 { 1969 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1970 struct btrfs_root *root = inode->root; 1971 struct btrfs_path *path; 1972 u64 cow_start = (u64)-1; 1973 u64 cur_offset = start; 1974 int ret; 1975 bool check_prev = true; 1976 u64 ino = btrfs_ino(inode); 1977 struct can_nocow_file_extent_args nocow_args = { 0 }; 1978 1979 /* 1980 * Normally on a zoned device we're only doing COW writes, but in case 1981 * of relocation on a zoned filesystem serializes I/O so that we're only 1982 * writing sequentially and can end up here as well. 1983 */ 1984 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root)); 1985 1986 path = btrfs_alloc_path(); 1987 if (!path) { 1988 ret = -ENOMEM; 1989 goto error; 1990 } 1991 1992 nocow_args.end = end; 1993 nocow_args.writeback_path = true; 1994 1995 while (1) { 1996 struct btrfs_block_group *nocow_bg = NULL; 1997 struct btrfs_ordered_extent *ordered; 1998 struct btrfs_key found_key; 1999 struct btrfs_file_extent_item *fi; 2000 struct extent_buffer *leaf; 2001 u64 extent_end; 2002 u64 ram_bytes; 2003 u64 nocow_end; 2004 int extent_type; 2005 bool is_prealloc; 2006 2007 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 2008 cur_offset, 0); 2009 if (ret < 0) 2010 goto error; 2011 2012 /* 2013 * If there is no extent for our range when doing the initial 2014 * search, then go back to the previous slot as it will be the 2015 * one containing the search offset 2016 */ 2017 if (ret > 0 && path->slots[0] > 0 && check_prev) { 2018 leaf = path->nodes[0]; 2019 btrfs_item_key_to_cpu(leaf, &found_key, 2020 path->slots[0] - 1); 2021 if (found_key.objectid == ino && 2022 found_key.type == BTRFS_EXTENT_DATA_KEY) 2023 path->slots[0]--; 2024 } 2025 check_prev = false; 2026 next_slot: 2027 /* Go to next leaf if we have exhausted the current one */ 2028 leaf = path->nodes[0]; 2029 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 2030 ret = btrfs_next_leaf(root, path); 2031 if (ret < 0) 2032 goto error; 2033 if (ret > 0) 2034 break; 2035 leaf = path->nodes[0]; 2036 } 2037 2038 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2039 2040 /* Didn't find anything for our INO */ 2041 if (found_key.objectid > ino) 2042 break; 2043 /* 2044 * Keep searching until we find an EXTENT_ITEM or there are no 2045 * more extents for this inode 2046 */ 2047 if (WARN_ON_ONCE(found_key.objectid < ino) || 2048 found_key.type < BTRFS_EXTENT_DATA_KEY) { 2049 path->slots[0]++; 2050 goto next_slot; 2051 } 2052 2053 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 2054 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 2055 found_key.offset > end) 2056 break; 2057 2058 /* 2059 * If the found extent starts after requested offset, then 2060 * adjust extent_end to be right before this extent begins 2061 */ 2062 if (found_key.offset > cur_offset) { 2063 extent_end = found_key.offset; 2064 extent_type = 0; 2065 goto must_cow; 2066 } 2067 2068 /* 2069 * Found extent which begins before our range and potentially 2070 * intersect it 2071 */ 2072 fi = btrfs_item_ptr(leaf, path->slots[0], 2073 struct btrfs_file_extent_item); 2074 extent_type = btrfs_file_extent_type(leaf, fi); 2075 /* If this is triggered then we have a memory corruption. */ 2076 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES); 2077 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) { 2078 ret = -EUCLEAN; 2079 goto error; 2080 } 2081 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 2082 extent_end = btrfs_file_extent_end(path); 2083 2084 /* 2085 * If the extent we got ends before our current offset, skip to 2086 * the next extent. 2087 */ 2088 if (extent_end <= cur_offset) { 2089 path->slots[0]++; 2090 goto next_slot; 2091 } 2092 2093 nocow_args.start = cur_offset; 2094 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args); 2095 if (ret < 0) 2096 goto error; 2097 if (ret == 0) 2098 goto must_cow; 2099 2100 ret = 0; 2101 nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr); 2102 if (!nocow_bg) { 2103 must_cow: 2104 /* 2105 * If we can't perform NOCOW writeback for the range, 2106 * then record the beginning of the range that needs to 2107 * be COWed. It will be written out before the next 2108 * NOCOW range if we find one, or when exiting this 2109 * loop. 2110 */ 2111 if (cow_start == (u64)-1) 2112 cow_start = cur_offset; 2113 cur_offset = extent_end; 2114 if (cur_offset > end) 2115 break; 2116 if (!path->nodes[0]) 2117 continue; 2118 path->slots[0]++; 2119 goto next_slot; 2120 } 2121 2122 /* 2123 * COW range from cow_start to found_key.offset - 1. As the key 2124 * will contain the beginning of the first extent that can be 2125 * NOCOW, following one which needs to be COW'ed 2126 */ 2127 if (cow_start != (u64)-1) { 2128 ret = fallback_to_cow(inode, locked_page, 2129 cow_start, found_key.offset - 1); 2130 cow_start = (u64)-1; 2131 if (ret) { 2132 btrfs_dec_nocow_writers(nocow_bg); 2133 goto error; 2134 } 2135 } 2136 2137 nocow_end = cur_offset + nocow_args.num_bytes - 1; 2138 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC; 2139 if (is_prealloc) { 2140 u64 orig_start = found_key.offset - nocow_args.extent_offset; 2141 struct extent_map *em; 2142 2143 em = create_io_em(inode, cur_offset, nocow_args.num_bytes, 2144 orig_start, 2145 nocow_args.disk_bytenr, /* block_start */ 2146 nocow_args.num_bytes, /* block_len */ 2147 nocow_args.disk_num_bytes, /* orig_block_len */ 2148 ram_bytes, BTRFS_COMPRESS_NONE, 2149 BTRFS_ORDERED_PREALLOC); 2150 if (IS_ERR(em)) { 2151 btrfs_dec_nocow_writers(nocow_bg); 2152 ret = PTR_ERR(em); 2153 goto error; 2154 } 2155 free_extent_map(em); 2156 } 2157 2158 ordered = btrfs_alloc_ordered_extent(inode, cur_offset, 2159 nocow_args.num_bytes, nocow_args.num_bytes, 2160 nocow_args.disk_bytenr, nocow_args.num_bytes, 0, 2161 is_prealloc 2162 ? (1 << BTRFS_ORDERED_PREALLOC) 2163 : (1 << BTRFS_ORDERED_NOCOW), 2164 BTRFS_COMPRESS_NONE); 2165 btrfs_dec_nocow_writers(nocow_bg); 2166 if (IS_ERR(ordered)) { 2167 if (is_prealloc) { 2168 btrfs_drop_extent_map_range(inode, cur_offset, 2169 nocow_end, false); 2170 } 2171 ret = PTR_ERR(ordered); 2172 goto error; 2173 } 2174 2175 if (btrfs_is_data_reloc_root(root)) 2176 /* 2177 * Error handled later, as we must prevent 2178 * extent_clear_unlock_delalloc() in error handler 2179 * from freeing metadata of created ordered extent. 2180 */ 2181 ret = btrfs_reloc_clone_csums(ordered); 2182 btrfs_put_ordered_extent(ordered); 2183 2184 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end, 2185 locked_page, EXTENT_LOCKED | 2186 EXTENT_DELALLOC | 2187 EXTENT_CLEAR_DATA_RESV, 2188 PAGE_UNLOCK | PAGE_SET_ORDERED); 2189 2190 cur_offset = extent_end; 2191 2192 /* 2193 * btrfs_reloc_clone_csums() error, now we're OK to call error 2194 * handler, as metadata for created ordered extent will only 2195 * be freed by btrfs_finish_ordered_io(). 2196 */ 2197 if (ret) 2198 goto error; 2199 if (cur_offset > end) 2200 break; 2201 } 2202 btrfs_release_path(path); 2203 2204 if (cur_offset <= end && cow_start == (u64)-1) 2205 cow_start = cur_offset; 2206 2207 if (cow_start != (u64)-1) { 2208 cur_offset = end; 2209 ret = fallback_to_cow(inode, locked_page, cow_start, end); 2210 cow_start = (u64)-1; 2211 if (ret) 2212 goto error; 2213 } 2214 2215 btrfs_free_path(path); 2216 return 0; 2217 2218 error: 2219 /* 2220 * If an error happened while a COW region is outstanding, cur_offset 2221 * needs to be reset to cow_start to ensure the COW region is unlocked 2222 * as well. 2223 */ 2224 if (cow_start != (u64)-1) 2225 cur_offset = cow_start; 2226 if (cur_offset < end) 2227 extent_clear_unlock_delalloc(inode, cur_offset, end, 2228 locked_page, EXTENT_LOCKED | 2229 EXTENT_DELALLOC | EXTENT_DEFRAG | 2230 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 2231 PAGE_START_WRITEBACK | 2232 PAGE_END_WRITEBACK); 2233 btrfs_free_path(path); 2234 return ret; 2235 } 2236 2237 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) 2238 { 2239 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { 2240 if (inode->defrag_bytes && 2241 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 2242 0, NULL)) 2243 return false; 2244 return true; 2245 } 2246 return false; 2247 } 2248 2249 /* 2250 * Function to process delayed allocation (create CoW) for ranges which are 2251 * being touched for the first time. 2252 */ 2253 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, 2254 u64 start, u64 end, struct writeback_control *wbc) 2255 { 2256 const bool zoned = btrfs_is_zoned(inode->root->fs_info); 2257 int ret; 2258 2259 /* 2260 * The range must cover part of the @locked_page, or a return of 1 2261 * can confuse the caller. 2262 */ 2263 ASSERT(!(end <= page_offset(locked_page) || 2264 start >= page_offset(locked_page) + PAGE_SIZE)); 2265 2266 if (should_nocow(inode, start, end)) { 2267 ret = run_delalloc_nocow(inode, locked_page, start, end); 2268 goto out; 2269 } 2270 2271 if (btrfs_inode_can_compress(inode) && 2272 inode_need_compress(inode, start, end) && 2273 run_delalloc_compressed(inode, locked_page, start, end, wbc)) 2274 return 1; 2275 2276 if (zoned) 2277 ret = run_delalloc_cow(inode, locked_page, start, end, wbc, 2278 true); 2279 else 2280 ret = cow_file_range(inode, locked_page, start, end, NULL, 2281 false, false); 2282 2283 out: 2284 if (ret < 0) 2285 btrfs_cleanup_ordered_extents(inode, locked_page, start, 2286 end - start + 1); 2287 return ret; 2288 } 2289 2290 void btrfs_split_delalloc_extent(struct btrfs_inode *inode, 2291 struct extent_state *orig, u64 split) 2292 { 2293 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2294 u64 size; 2295 2296 /* not delalloc, ignore it */ 2297 if (!(orig->state & EXTENT_DELALLOC)) 2298 return; 2299 2300 size = orig->end - orig->start + 1; 2301 if (size > fs_info->max_extent_size) { 2302 u32 num_extents; 2303 u64 new_size; 2304 2305 /* 2306 * See the explanation in btrfs_merge_delalloc_extent, the same 2307 * applies here, just in reverse. 2308 */ 2309 new_size = orig->end - split + 1; 2310 num_extents = count_max_extents(fs_info, new_size); 2311 new_size = split - orig->start; 2312 num_extents += count_max_extents(fs_info, new_size); 2313 if (count_max_extents(fs_info, size) >= num_extents) 2314 return; 2315 } 2316 2317 spin_lock(&inode->lock); 2318 btrfs_mod_outstanding_extents(inode, 1); 2319 spin_unlock(&inode->lock); 2320 } 2321 2322 /* 2323 * Handle merged delayed allocation extents so we can keep track of new extents 2324 * that are just merged onto old extents, such as when we are doing sequential 2325 * writes, so we can properly account for the metadata space we'll need. 2326 */ 2327 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new, 2328 struct extent_state *other) 2329 { 2330 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2331 u64 new_size, old_size; 2332 u32 num_extents; 2333 2334 /* not delalloc, ignore it */ 2335 if (!(other->state & EXTENT_DELALLOC)) 2336 return; 2337 2338 if (new->start > other->start) 2339 new_size = new->end - other->start + 1; 2340 else 2341 new_size = other->end - new->start + 1; 2342 2343 /* we're not bigger than the max, unreserve the space and go */ 2344 if (new_size <= fs_info->max_extent_size) { 2345 spin_lock(&inode->lock); 2346 btrfs_mod_outstanding_extents(inode, -1); 2347 spin_unlock(&inode->lock); 2348 return; 2349 } 2350 2351 /* 2352 * We have to add up either side to figure out how many extents were 2353 * accounted for before we merged into one big extent. If the number of 2354 * extents we accounted for is <= the amount we need for the new range 2355 * then we can return, otherwise drop. Think of it like this 2356 * 2357 * [ 4k][MAX_SIZE] 2358 * 2359 * So we've grown the extent by a MAX_SIZE extent, this would mean we 2360 * need 2 outstanding extents, on one side we have 1 and the other side 2361 * we have 1 so they are == and we can return. But in this case 2362 * 2363 * [MAX_SIZE+4k][MAX_SIZE+4k] 2364 * 2365 * Each range on their own accounts for 2 extents, but merged together 2366 * they are only 3 extents worth of accounting, so we need to drop in 2367 * this case. 2368 */ 2369 old_size = other->end - other->start + 1; 2370 num_extents = count_max_extents(fs_info, old_size); 2371 old_size = new->end - new->start + 1; 2372 num_extents += count_max_extents(fs_info, old_size); 2373 if (count_max_extents(fs_info, new_size) >= num_extents) 2374 return; 2375 2376 spin_lock(&inode->lock); 2377 btrfs_mod_outstanding_extents(inode, -1); 2378 spin_unlock(&inode->lock); 2379 } 2380 2381 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 2382 struct btrfs_inode *inode) 2383 { 2384 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2385 2386 spin_lock(&root->delalloc_lock); 2387 if (list_empty(&inode->delalloc_inodes)) { 2388 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); 2389 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); 2390 root->nr_delalloc_inodes++; 2391 if (root->nr_delalloc_inodes == 1) { 2392 spin_lock(&fs_info->delalloc_root_lock); 2393 BUG_ON(!list_empty(&root->delalloc_root)); 2394 list_add_tail(&root->delalloc_root, 2395 &fs_info->delalloc_roots); 2396 spin_unlock(&fs_info->delalloc_root_lock); 2397 } 2398 } 2399 spin_unlock(&root->delalloc_lock); 2400 } 2401 2402 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 2403 struct btrfs_inode *inode) 2404 { 2405 struct btrfs_fs_info *fs_info = root->fs_info; 2406 2407 if (!list_empty(&inode->delalloc_inodes)) { 2408 list_del_init(&inode->delalloc_inodes); 2409 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2410 &inode->runtime_flags); 2411 root->nr_delalloc_inodes--; 2412 if (!root->nr_delalloc_inodes) { 2413 ASSERT(list_empty(&root->delalloc_inodes)); 2414 spin_lock(&fs_info->delalloc_root_lock); 2415 BUG_ON(list_empty(&root->delalloc_root)); 2416 list_del_init(&root->delalloc_root); 2417 spin_unlock(&fs_info->delalloc_root_lock); 2418 } 2419 } 2420 } 2421 2422 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 2423 struct btrfs_inode *inode) 2424 { 2425 spin_lock(&root->delalloc_lock); 2426 __btrfs_del_delalloc_inode(root, inode); 2427 spin_unlock(&root->delalloc_lock); 2428 } 2429 2430 /* 2431 * Properly track delayed allocation bytes in the inode and to maintain the 2432 * list of inodes that have pending delalloc work to be done. 2433 */ 2434 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state, 2435 u32 bits) 2436 { 2437 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2438 2439 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC)) 2440 WARN_ON(1); 2441 /* 2442 * set_bit and clear bit hooks normally require _irqsave/restore 2443 * but in this case, we are only testing for the DELALLOC 2444 * bit, which is only set or cleared with irqs on 2445 */ 2446 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2447 struct btrfs_root *root = inode->root; 2448 u64 len = state->end + 1 - state->start; 2449 u32 num_extents = count_max_extents(fs_info, len); 2450 bool do_list = !btrfs_is_free_space_inode(inode); 2451 2452 spin_lock(&inode->lock); 2453 btrfs_mod_outstanding_extents(inode, num_extents); 2454 spin_unlock(&inode->lock); 2455 2456 /* For sanity tests */ 2457 if (btrfs_is_testing(fs_info)) 2458 return; 2459 2460 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 2461 fs_info->delalloc_batch); 2462 spin_lock(&inode->lock); 2463 inode->delalloc_bytes += len; 2464 if (bits & EXTENT_DEFRAG) 2465 inode->defrag_bytes += len; 2466 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2467 &inode->runtime_flags)) 2468 btrfs_add_delalloc_inodes(root, inode); 2469 spin_unlock(&inode->lock); 2470 } 2471 2472 if (!(state->state & EXTENT_DELALLOC_NEW) && 2473 (bits & EXTENT_DELALLOC_NEW)) { 2474 spin_lock(&inode->lock); 2475 inode->new_delalloc_bytes += state->end + 1 - state->start; 2476 spin_unlock(&inode->lock); 2477 } 2478 } 2479 2480 /* 2481 * Once a range is no longer delalloc this function ensures that proper 2482 * accounting happens. 2483 */ 2484 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, 2485 struct extent_state *state, u32 bits) 2486 { 2487 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2488 u64 len = state->end + 1 - state->start; 2489 u32 num_extents = count_max_extents(fs_info, len); 2490 2491 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { 2492 spin_lock(&inode->lock); 2493 inode->defrag_bytes -= len; 2494 spin_unlock(&inode->lock); 2495 } 2496 2497 /* 2498 * set_bit and clear bit hooks normally require _irqsave/restore 2499 * but in this case, we are only testing for the DELALLOC 2500 * bit, which is only set or cleared with irqs on 2501 */ 2502 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 2503 struct btrfs_root *root = inode->root; 2504 bool do_list = !btrfs_is_free_space_inode(inode); 2505 2506 spin_lock(&inode->lock); 2507 btrfs_mod_outstanding_extents(inode, -num_extents); 2508 spin_unlock(&inode->lock); 2509 2510 /* 2511 * We don't reserve metadata space for space cache inodes so we 2512 * don't need to call delalloc_release_metadata if there is an 2513 * error. 2514 */ 2515 if (bits & EXTENT_CLEAR_META_RESV && 2516 root != fs_info->tree_root) 2517 btrfs_delalloc_release_metadata(inode, len, false); 2518 2519 /* For sanity tests. */ 2520 if (btrfs_is_testing(fs_info)) 2521 return; 2522 2523 if (!btrfs_is_data_reloc_root(root) && 2524 do_list && !(state->state & EXTENT_NORESERVE) && 2525 (bits & EXTENT_CLEAR_DATA_RESV)) 2526 btrfs_free_reserved_data_space_noquota(fs_info, len); 2527 2528 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 2529 fs_info->delalloc_batch); 2530 spin_lock(&inode->lock); 2531 inode->delalloc_bytes -= len; 2532 if (do_list && inode->delalloc_bytes == 0 && 2533 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 2534 &inode->runtime_flags)) 2535 btrfs_del_delalloc_inode(root, inode); 2536 spin_unlock(&inode->lock); 2537 } 2538 2539 if ((state->state & EXTENT_DELALLOC_NEW) && 2540 (bits & EXTENT_DELALLOC_NEW)) { 2541 spin_lock(&inode->lock); 2542 ASSERT(inode->new_delalloc_bytes >= len); 2543 inode->new_delalloc_bytes -= len; 2544 if (bits & EXTENT_ADD_INODE_BYTES) 2545 inode_add_bytes(&inode->vfs_inode, len); 2546 spin_unlock(&inode->lock); 2547 } 2548 } 2549 2550 static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, 2551 struct btrfs_ordered_extent *ordered) 2552 { 2553 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 2554 u64 len = bbio->bio.bi_iter.bi_size; 2555 struct btrfs_ordered_extent *new; 2556 int ret; 2557 2558 /* Must always be called for the beginning of an ordered extent. */ 2559 if (WARN_ON_ONCE(start != ordered->disk_bytenr)) 2560 return -EINVAL; 2561 2562 /* No need to split if the ordered extent covers the entire bio. */ 2563 if (ordered->disk_num_bytes == len) { 2564 refcount_inc(&ordered->refs); 2565 bbio->ordered = ordered; 2566 return 0; 2567 } 2568 2569 /* 2570 * Don't split the extent_map for NOCOW extents, as we're writing into 2571 * a pre-existing one. 2572 */ 2573 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 2574 ret = split_extent_map(bbio->inode, bbio->file_offset, 2575 ordered->num_bytes, len, 2576 ordered->disk_bytenr); 2577 if (ret) 2578 return ret; 2579 } 2580 2581 new = btrfs_split_ordered_extent(ordered, len); 2582 if (IS_ERR(new)) 2583 return PTR_ERR(new); 2584 bbio->ordered = new; 2585 return 0; 2586 } 2587 2588 /* 2589 * given a list of ordered sums record them in the inode. This happens 2590 * at IO completion time based on sums calculated at bio submission time. 2591 */ 2592 static int add_pending_csums(struct btrfs_trans_handle *trans, 2593 struct list_head *list) 2594 { 2595 struct btrfs_ordered_sum *sum; 2596 struct btrfs_root *csum_root = NULL; 2597 int ret; 2598 2599 list_for_each_entry(sum, list, list) { 2600 trans->adding_csums = true; 2601 if (!csum_root) 2602 csum_root = btrfs_csum_root(trans->fs_info, 2603 sum->logical); 2604 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2605 trans->adding_csums = false; 2606 if (ret) 2607 return ret; 2608 } 2609 return 0; 2610 } 2611 2612 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, 2613 const u64 start, 2614 const u64 len, 2615 struct extent_state **cached_state) 2616 { 2617 u64 search_start = start; 2618 const u64 end = start + len - 1; 2619 2620 while (search_start < end) { 2621 const u64 search_len = end - search_start + 1; 2622 struct extent_map *em; 2623 u64 em_len; 2624 int ret = 0; 2625 2626 em = btrfs_get_extent(inode, NULL, 0, search_start, search_len); 2627 if (IS_ERR(em)) 2628 return PTR_ERR(em); 2629 2630 if (em->block_start != EXTENT_MAP_HOLE) 2631 goto next; 2632 2633 em_len = em->len; 2634 if (em->start < search_start) 2635 em_len -= search_start - em->start; 2636 if (em_len > search_len) 2637 em_len = search_len; 2638 2639 ret = set_extent_bit(&inode->io_tree, search_start, 2640 search_start + em_len - 1, 2641 EXTENT_DELALLOC_NEW, cached_state); 2642 next: 2643 search_start = extent_map_end(em); 2644 free_extent_map(em); 2645 if (ret) 2646 return ret; 2647 } 2648 return 0; 2649 } 2650 2651 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 2652 unsigned int extra_bits, 2653 struct extent_state **cached_state) 2654 { 2655 WARN_ON(PAGE_ALIGNED(end)); 2656 2657 if (start >= i_size_read(&inode->vfs_inode) && 2658 !(inode->flags & BTRFS_INODE_PREALLOC)) { 2659 /* 2660 * There can't be any extents following eof in this case so just 2661 * set the delalloc new bit for the range directly. 2662 */ 2663 extra_bits |= EXTENT_DELALLOC_NEW; 2664 } else { 2665 int ret; 2666 2667 ret = btrfs_find_new_delalloc_bytes(inode, start, 2668 end + 1 - start, 2669 cached_state); 2670 if (ret) 2671 return ret; 2672 } 2673 2674 return set_extent_bit(&inode->io_tree, start, end, 2675 EXTENT_DELALLOC | extra_bits, cached_state); 2676 } 2677 2678 /* see btrfs_writepage_start_hook for details on why this is required */ 2679 struct btrfs_writepage_fixup { 2680 struct page *page; 2681 struct btrfs_inode *inode; 2682 struct btrfs_work work; 2683 }; 2684 2685 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2686 { 2687 struct btrfs_writepage_fixup *fixup = 2688 container_of(work, struct btrfs_writepage_fixup, work); 2689 struct btrfs_ordered_extent *ordered; 2690 struct extent_state *cached_state = NULL; 2691 struct extent_changeset *data_reserved = NULL; 2692 struct page *page = fixup->page; 2693 struct btrfs_inode *inode = fixup->inode; 2694 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2695 u64 page_start = page_offset(page); 2696 u64 page_end = page_offset(page) + PAGE_SIZE - 1; 2697 int ret = 0; 2698 bool free_delalloc_space = true; 2699 2700 /* 2701 * This is similar to page_mkwrite, we need to reserve the space before 2702 * we take the page lock. 2703 */ 2704 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2705 PAGE_SIZE); 2706 again: 2707 lock_page(page); 2708 2709 /* 2710 * Before we queued this fixup, we took a reference on the page. 2711 * page->mapping may go NULL, but it shouldn't be moved to a different 2712 * address space. 2713 */ 2714 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2715 /* 2716 * Unfortunately this is a little tricky, either 2717 * 2718 * 1) We got here and our page had already been dealt with and 2719 * we reserved our space, thus ret == 0, so we need to just 2720 * drop our space reservation and bail. This can happen the 2721 * first time we come into the fixup worker, or could happen 2722 * while waiting for the ordered extent. 2723 * 2) Our page was already dealt with, but we happened to get an 2724 * ENOSPC above from the btrfs_delalloc_reserve_space. In 2725 * this case we obviously don't have anything to release, but 2726 * because the page was already dealt with we don't want to 2727 * mark the page with an error, so make sure we're resetting 2728 * ret to 0. This is why we have this check _before_ the ret 2729 * check, because we do not want to have a surprise ENOSPC 2730 * when the page was already properly dealt with. 2731 */ 2732 if (!ret) { 2733 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2734 btrfs_delalloc_release_space(inode, data_reserved, 2735 page_start, PAGE_SIZE, 2736 true); 2737 } 2738 ret = 0; 2739 goto out_page; 2740 } 2741 2742 /* 2743 * We can't mess with the page state unless it is locked, so now that 2744 * it is locked bail if we failed to make our space reservation. 2745 */ 2746 if (ret) 2747 goto out_page; 2748 2749 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2750 2751 /* already ordered? We're done */ 2752 if (PageOrdered(page)) 2753 goto out_reserved; 2754 2755 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); 2756 if (ordered) { 2757 unlock_extent(&inode->io_tree, page_start, page_end, 2758 &cached_state); 2759 unlock_page(page); 2760 btrfs_start_ordered_extent(ordered); 2761 btrfs_put_ordered_extent(ordered); 2762 goto again; 2763 } 2764 2765 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2766 &cached_state); 2767 if (ret) 2768 goto out_reserved; 2769 2770 /* 2771 * Everything went as planned, we're now the owner of a dirty page with 2772 * delayed allocation bits set and space reserved for our COW 2773 * destination. 2774 * 2775 * The page was dirty when we started, nothing should have cleaned it. 2776 */ 2777 BUG_ON(!PageDirty(page)); 2778 free_delalloc_space = false; 2779 out_reserved: 2780 btrfs_delalloc_release_extents(inode, PAGE_SIZE); 2781 if (free_delalloc_space) 2782 btrfs_delalloc_release_space(inode, data_reserved, page_start, 2783 PAGE_SIZE, true); 2784 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); 2785 out_page: 2786 if (ret) { 2787 /* 2788 * We hit ENOSPC or other errors. Update the mapping and page 2789 * to reflect the errors and clean the page. 2790 */ 2791 mapping_set_error(page->mapping, ret); 2792 btrfs_mark_ordered_io_finished(inode, page, page_start, 2793 PAGE_SIZE, !ret); 2794 btrfs_page_clear_uptodate(fs_info, page, page_start, PAGE_SIZE); 2795 clear_page_dirty_for_io(page); 2796 } 2797 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE); 2798 unlock_page(page); 2799 put_page(page); 2800 kfree(fixup); 2801 extent_changeset_free(data_reserved); 2802 /* 2803 * As a precaution, do a delayed iput in case it would be the last iput 2804 * that could need flushing space. Recursing back to fixup worker would 2805 * deadlock. 2806 */ 2807 btrfs_add_delayed_iput(inode); 2808 } 2809 2810 /* 2811 * There are a few paths in the higher layers of the kernel that directly 2812 * set the page dirty bit without asking the filesystem if it is a 2813 * good idea. This causes problems because we want to make sure COW 2814 * properly happens and the data=ordered rules are followed. 2815 * 2816 * In our case any range that doesn't have the ORDERED bit set 2817 * hasn't been properly setup for IO. We kick off an async process 2818 * to fix it up. The async helper will wait for ordered extents, set 2819 * the delalloc bit and make it safe to write the page. 2820 */ 2821 int btrfs_writepage_cow_fixup(struct page *page) 2822 { 2823 struct inode *inode = page->mapping->host; 2824 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2825 struct btrfs_writepage_fixup *fixup; 2826 2827 /* This page has ordered extent covering it already */ 2828 if (PageOrdered(page)) 2829 return 0; 2830 2831 /* 2832 * PageChecked is set below when we create a fixup worker for this page, 2833 * don't try to create another one if we're already PageChecked() 2834 * 2835 * The extent_io writepage code will redirty the page if we send back 2836 * EAGAIN. 2837 */ 2838 if (PageChecked(page)) 2839 return -EAGAIN; 2840 2841 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2842 if (!fixup) 2843 return -EAGAIN; 2844 2845 /* 2846 * We are already holding a reference to this inode from 2847 * write_cache_pages. We need to hold it because the space reservation 2848 * takes place outside of the page lock, and we can't trust 2849 * page->mapping outside of the page lock. 2850 */ 2851 ihold(inode); 2852 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE); 2853 get_page(page); 2854 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); 2855 fixup->page = page; 2856 fixup->inode = BTRFS_I(inode); 2857 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2858 2859 return -EAGAIN; 2860 } 2861 2862 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2863 struct btrfs_inode *inode, u64 file_pos, 2864 struct btrfs_file_extent_item *stack_fi, 2865 const bool update_inode_bytes, 2866 u64 qgroup_reserved) 2867 { 2868 struct btrfs_root *root = inode->root; 2869 const u64 sectorsize = root->fs_info->sectorsize; 2870 struct btrfs_path *path; 2871 struct extent_buffer *leaf; 2872 struct btrfs_key ins; 2873 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); 2874 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi); 2875 u64 offset = btrfs_stack_file_extent_offset(stack_fi); 2876 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi); 2877 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi); 2878 struct btrfs_drop_extents_args drop_args = { 0 }; 2879 int ret; 2880 2881 path = btrfs_alloc_path(); 2882 if (!path) 2883 return -ENOMEM; 2884 2885 /* 2886 * we may be replacing one extent in the tree with another. 2887 * The new extent is pinned in the extent map, and we don't want 2888 * to drop it from the cache until it is completely in the btree. 2889 * 2890 * So, tell btrfs_drop_extents to leave this extent in the cache. 2891 * the caller is expected to unpin it and allow it to be merged 2892 * with the others. 2893 */ 2894 drop_args.path = path; 2895 drop_args.start = file_pos; 2896 drop_args.end = file_pos + num_bytes; 2897 drop_args.replace_extent = true; 2898 drop_args.extent_item_size = sizeof(*stack_fi); 2899 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2900 if (ret) 2901 goto out; 2902 2903 if (!drop_args.extent_inserted) { 2904 ins.objectid = btrfs_ino(inode); 2905 ins.offset = file_pos; 2906 ins.type = BTRFS_EXTENT_DATA_KEY; 2907 2908 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2909 sizeof(*stack_fi)); 2910 if (ret) 2911 goto out; 2912 } 2913 leaf = path->nodes[0]; 2914 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); 2915 write_extent_buffer(leaf, stack_fi, 2916 btrfs_item_ptr_offset(leaf, path->slots[0]), 2917 sizeof(struct btrfs_file_extent_item)); 2918 2919 btrfs_mark_buffer_dirty(leaf); 2920 btrfs_release_path(path); 2921 2922 /* 2923 * If we dropped an inline extent here, we know the range where it is 2924 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the 2925 * number of bytes only for that range containing the inline extent. 2926 * The remaining of the range will be processed when clearning the 2927 * EXTENT_DELALLOC_BIT bit through the ordered extent completion. 2928 */ 2929 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) { 2930 u64 inline_size = round_down(drop_args.bytes_found, sectorsize); 2931 2932 inline_size = drop_args.bytes_found - inline_size; 2933 btrfs_update_inode_bytes(inode, sectorsize, inline_size); 2934 drop_args.bytes_found -= inline_size; 2935 num_bytes -= sectorsize; 2936 } 2937 2938 if (update_inode_bytes) 2939 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found); 2940 2941 ins.objectid = disk_bytenr; 2942 ins.offset = disk_num_bytes; 2943 ins.type = BTRFS_EXTENT_ITEM_KEY; 2944 2945 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes); 2946 if (ret) 2947 goto out; 2948 2949 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode), 2950 file_pos - offset, 2951 qgroup_reserved, &ins); 2952 out: 2953 btrfs_free_path(path); 2954 2955 return ret; 2956 } 2957 2958 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 2959 u64 start, u64 len) 2960 { 2961 struct btrfs_block_group *cache; 2962 2963 cache = btrfs_lookup_block_group(fs_info, start); 2964 ASSERT(cache); 2965 2966 spin_lock(&cache->lock); 2967 cache->delalloc_bytes -= len; 2968 spin_unlock(&cache->lock); 2969 2970 btrfs_put_block_group(cache); 2971 } 2972 2973 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans, 2974 struct btrfs_ordered_extent *oe) 2975 { 2976 struct btrfs_file_extent_item stack_fi; 2977 bool update_inode_bytes; 2978 u64 num_bytes = oe->num_bytes; 2979 u64 ram_bytes = oe->ram_bytes; 2980 2981 memset(&stack_fi, 0, sizeof(stack_fi)); 2982 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG); 2983 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); 2984 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, 2985 oe->disk_num_bytes); 2986 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); 2987 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { 2988 num_bytes = oe->truncated_len; 2989 ram_bytes = num_bytes; 2990 } 2991 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes); 2992 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes); 2993 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); 2994 /* Encryption and other encoding is reserved and all 0 */ 2995 2996 /* 2997 * For delalloc, when completing an ordered extent we update the inode's 2998 * bytes when clearing the range in the inode's io tree, so pass false 2999 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(), 3000 * except if the ordered extent was truncated. 3001 */ 3002 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || 3003 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || 3004 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); 3005 3006 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), 3007 oe->file_offset, &stack_fi, 3008 update_inode_bytes, oe->qgroup_rsv); 3009 } 3010 3011 /* 3012 * As ordered data IO finishes, this gets called so we can finish 3013 * an ordered extent if the range of bytes in the file it covers are 3014 * fully written. 3015 */ 3016 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) 3017 { 3018 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); 3019 struct btrfs_root *root = inode->root; 3020 struct btrfs_fs_info *fs_info = root->fs_info; 3021 struct btrfs_trans_handle *trans = NULL; 3022 struct extent_io_tree *io_tree = &inode->io_tree; 3023 struct extent_state *cached_state = NULL; 3024 u64 start, end; 3025 int compress_type = 0; 3026 int ret = 0; 3027 u64 logical_len = ordered_extent->num_bytes; 3028 bool freespace_inode; 3029 bool truncated = false; 3030 bool clear_reserved_extent = true; 3031 unsigned int clear_bits = EXTENT_DEFRAG; 3032 3033 start = ordered_extent->file_offset; 3034 end = start + ordered_extent->num_bytes - 1; 3035 3036 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3037 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3038 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && 3039 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) 3040 clear_bits |= EXTENT_DELALLOC_NEW; 3041 3042 freespace_inode = btrfs_is_free_space_inode(inode); 3043 if (!freespace_inode) 3044 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent); 3045 3046 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3047 ret = -EIO; 3048 goto out; 3049 } 3050 3051 if (btrfs_is_zoned(fs_info)) 3052 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, 3053 ordered_extent->disk_num_bytes); 3054 3055 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3056 truncated = true; 3057 logical_len = ordered_extent->truncated_len; 3058 /* Truncated the entire extent, don't bother adding */ 3059 if (!logical_len) 3060 goto out; 3061 } 3062 3063 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3064 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3065 3066 btrfs_inode_safe_disk_i_size_write(inode, 0); 3067 if (freespace_inode) 3068 trans = btrfs_join_transaction_spacecache(root); 3069 else 3070 trans = btrfs_join_transaction(root); 3071 if (IS_ERR(trans)) { 3072 ret = PTR_ERR(trans); 3073 trans = NULL; 3074 goto out; 3075 } 3076 trans->block_rsv = &inode->block_rsv; 3077 ret = btrfs_update_inode_fallback(trans, root, inode); 3078 if (ret) /* -ENOMEM or corruption */ 3079 btrfs_abort_transaction(trans, ret); 3080 goto out; 3081 } 3082 3083 clear_bits |= EXTENT_LOCKED; 3084 lock_extent(io_tree, start, end, &cached_state); 3085 3086 if (freespace_inode) 3087 trans = btrfs_join_transaction_spacecache(root); 3088 else 3089 trans = btrfs_join_transaction(root); 3090 if (IS_ERR(trans)) { 3091 ret = PTR_ERR(trans); 3092 trans = NULL; 3093 goto out; 3094 } 3095 3096 trans->block_rsv = &inode->block_rsv; 3097 3098 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3099 compress_type = ordered_extent->compress_type; 3100 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3101 BUG_ON(compress_type); 3102 ret = btrfs_mark_extent_written(trans, inode, 3103 ordered_extent->file_offset, 3104 ordered_extent->file_offset + 3105 logical_len); 3106 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, 3107 ordered_extent->disk_num_bytes); 3108 } else { 3109 BUG_ON(root == fs_info->tree_root); 3110 ret = insert_ordered_extent_file_extent(trans, ordered_extent); 3111 if (!ret) { 3112 clear_reserved_extent = false; 3113 btrfs_release_delalloc_bytes(fs_info, 3114 ordered_extent->disk_bytenr, 3115 ordered_extent->disk_num_bytes); 3116 } 3117 } 3118 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, 3119 ordered_extent->num_bytes, trans->transid); 3120 if (ret < 0) { 3121 btrfs_abort_transaction(trans, ret); 3122 goto out; 3123 } 3124 3125 ret = add_pending_csums(trans, &ordered_extent->list); 3126 if (ret) { 3127 btrfs_abort_transaction(trans, ret); 3128 goto out; 3129 } 3130 3131 /* 3132 * If this is a new delalloc range, clear its new delalloc flag to 3133 * update the inode's number of bytes. This needs to be done first 3134 * before updating the inode item. 3135 */ 3136 if ((clear_bits & EXTENT_DELALLOC_NEW) && 3137 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) 3138 clear_extent_bit(&inode->io_tree, start, end, 3139 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, 3140 &cached_state); 3141 3142 btrfs_inode_safe_disk_i_size_write(inode, 0); 3143 ret = btrfs_update_inode_fallback(trans, root, inode); 3144 if (ret) { /* -ENOMEM or corruption */ 3145 btrfs_abort_transaction(trans, ret); 3146 goto out; 3147 } 3148 ret = 0; 3149 out: 3150 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 3151 &cached_state); 3152 3153 if (trans) 3154 btrfs_end_transaction(trans); 3155 3156 if (ret || truncated) { 3157 u64 unwritten_start = start; 3158 3159 /* 3160 * If we failed to finish this ordered extent for any reason we 3161 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered 3162 * extent, and mark the inode with the error if it wasn't 3163 * already set. Any error during writeback would have already 3164 * set the mapping error, so we need to set it if we're the ones 3165 * marking this ordered extent as failed. 3166 */ 3167 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, 3168 &ordered_extent->flags)) 3169 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); 3170 3171 if (truncated) 3172 unwritten_start += logical_len; 3173 clear_extent_uptodate(io_tree, unwritten_start, end, NULL); 3174 3175 /* Drop extent maps for the part of the extent we didn't write. */ 3176 btrfs_drop_extent_map_range(inode, unwritten_start, end, false); 3177 3178 /* 3179 * If the ordered extent had an IOERR or something else went 3180 * wrong we need to return the space for this ordered extent 3181 * back to the allocator. We only free the extent in the 3182 * truncated case if we didn't write out the extent at all. 3183 * 3184 * If we made it past insert_reserved_file_extent before we 3185 * errored out then we don't need to do this as the accounting 3186 * has already been done. 3187 */ 3188 if ((ret || !logical_len) && 3189 clear_reserved_extent && 3190 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3191 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3192 /* 3193 * Discard the range before returning it back to the 3194 * free space pool 3195 */ 3196 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC)) 3197 btrfs_discard_extent(fs_info, 3198 ordered_extent->disk_bytenr, 3199 ordered_extent->disk_num_bytes, 3200 NULL); 3201 btrfs_free_reserved_extent(fs_info, 3202 ordered_extent->disk_bytenr, 3203 ordered_extent->disk_num_bytes, 1); 3204 /* 3205 * Actually free the qgroup rsv which was released when 3206 * the ordered extent was created. 3207 */ 3208 btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid, 3209 ordered_extent->qgroup_rsv, 3210 BTRFS_QGROUP_RSV_DATA); 3211 } 3212 } 3213 3214 /* 3215 * This needs to be done to make sure anybody waiting knows we are done 3216 * updating everything for this ordered extent. 3217 */ 3218 btrfs_remove_ordered_extent(inode, ordered_extent); 3219 3220 /* once for us */ 3221 btrfs_put_ordered_extent(ordered_extent); 3222 /* once for the tree */ 3223 btrfs_put_ordered_extent(ordered_extent); 3224 3225 return ret; 3226 } 3227 3228 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) 3229 { 3230 if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) && 3231 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) 3232 btrfs_finish_ordered_zoned(ordered); 3233 return btrfs_finish_one_ordered(ordered); 3234 } 3235 3236 /* 3237 * Verify the checksum for a single sector without any extra action that depend 3238 * on the type of I/O. 3239 */ 3240 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, 3241 u32 pgoff, u8 *csum, const u8 * const csum_expected) 3242 { 3243 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3244 char *kaddr; 3245 3246 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); 3247 3248 shash->tfm = fs_info->csum_shash; 3249 3250 kaddr = kmap_local_page(page) + pgoff; 3251 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); 3252 kunmap_local(kaddr); 3253 3254 if (memcmp(csum, csum_expected, fs_info->csum_size)) 3255 return -EIO; 3256 return 0; 3257 } 3258 3259 /* 3260 * Verify the checksum of a single data sector. 3261 * 3262 * @bbio: btrfs_io_bio which contains the csum 3263 * @dev: device the sector is on 3264 * @bio_offset: offset to the beginning of the bio (in bytes) 3265 * @bv: bio_vec to check 3266 * 3267 * Check if the checksum on a data block is valid. When a checksum mismatch is 3268 * detected, report the error and fill the corrupted range with zero. 3269 * 3270 * Return %true if the sector is ok or had no checksum to start with, else %false. 3271 */ 3272 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, 3273 u32 bio_offset, struct bio_vec *bv) 3274 { 3275 struct btrfs_inode *inode = bbio->inode; 3276 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3277 u64 file_offset = bbio->file_offset + bio_offset; 3278 u64 end = file_offset + bv->bv_len - 1; 3279 u8 *csum_expected; 3280 u8 csum[BTRFS_CSUM_SIZE]; 3281 3282 ASSERT(bv->bv_len == fs_info->sectorsize); 3283 3284 if (!bbio->csum) 3285 return true; 3286 3287 if (btrfs_is_data_reloc_root(inode->root) && 3288 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, 3289 1, NULL)) { 3290 /* Skip the range without csum for data reloc inode */ 3291 clear_extent_bits(&inode->io_tree, file_offset, end, 3292 EXTENT_NODATASUM); 3293 return true; 3294 } 3295 3296 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * 3297 fs_info->csum_size; 3298 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, 3299 csum_expected)) 3300 goto zeroit; 3301 return true; 3302 3303 zeroit: 3304 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected, 3305 bbio->mirror_num); 3306 if (dev) 3307 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); 3308 memzero_bvec(bv); 3309 return false; 3310 } 3311 3312 /* 3313 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3314 * 3315 * @inode: The inode we want to perform iput on 3316 * 3317 * This function uses the generic vfs_inode::i_count to track whether we should 3318 * just decrement it (in case it's > 1) or if this is the last iput then link 3319 * the inode to the delayed iput machinery. Delayed iputs are processed at 3320 * transaction commit time/superblock commit/cleaner kthread. 3321 */ 3322 void btrfs_add_delayed_iput(struct btrfs_inode *inode) 3323 { 3324 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3325 unsigned long flags; 3326 3327 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) 3328 return; 3329 3330 atomic_inc(&fs_info->nr_delayed_iputs); 3331 /* 3332 * Need to be irq safe here because we can be called from either an irq 3333 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq 3334 * context. 3335 */ 3336 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); 3337 ASSERT(list_empty(&inode->delayed_iput)); 3338 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); 3339 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); 3340 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3341 wake_up_process(fs_info->cleaner_kthread); 3342 } 3343 3344 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3345 struct btrfs_inode *inode) 3346 { 3347 list_del_init(&inode->delayed_iput); 3348 spin_unlock_irq(&fs_info->delayed_iput_lock); 3349 iput(&inode->vfs_inode); 3350 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3351 wake_up(&fs_info->delayed_iputs_wait); 3352 spin_lock_irq(&fs_info->delayed_iput_lock); 3353 } 3354 3355 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3356 struct btrfs_inode *inode) 3357 { 3358 if (!list_empty(&inode->delayed_iput)) { 3359 spin_lock_irq(&fs_info->delayed_iput_lock); 3360 if (!list_empty(&inode->delayed_iput)) 3361 run_delayed_iput_locked(fs_info, inode); 3362 spin_unlock_irq(&fs_info->delayed_iput_lock); 3363 } 3364 } 3365 3366 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3367 { 3368 /* 3369 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which 3370 * calls btrfs_add_delayed_iput() and that needs to lock 3371 * fs_info->delayed_iput_lock. So we need to disable irqs here to 3372 * prevent a deadlock. 3373 */ 3374 spin_lock_irq(&fs_info->delayed_iput_lock); 3375 while (!list_empty(&fs_info->delayed_iputs)) { 3376 struct btrfs_inode *inode; 3377 3378 inode = list_first_entry(&fs_info->delayed_iputs, 3379 struct btrfs_inode, delayed_iput); 3380 run_delayed_iput_locked(fs_info, inode); 3381 if (need_resched()) { 3382 spin_unlock_irq(&fs_info->delayed_iput_lock); 3383 cond_resched(); 3384 spin_lock_irq(&fs_info->delayed_iput_lock); 3385 } 3386 } 3387 spin_unlock_irq(&fs_info->delayed_iput_lock); 3388 } 3389 3390 /* 3391 * Wait for flushing all delayed iputs 3392 * 3393 * @fs_info: the filesystem 3394 * 3395 * This will wait on any delayed iputs that are currently running with KILLABLE 3396 * set. Once they are all done running we will return, unless we are killed in 3397 * which case we return EINTR. This helps in user operations like fallocate etc 3398 * that might get blocked on the iputs. 3399 * 3400 * Return EINTR if we were killed, 0 if nothing's pending 3401 */ 3402 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3403 { 3404 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3405 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3406 if (ret) 3407 return -EINTR; 3408 return 0; 3409 } 3410 3411 /* 3412 * This creates an orphan entry for the given inode in case something goes wrong 3413 * in the middle of an unlink. 3414 */ 3415 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3416 struct btrfs_inode *inode) 3417 { 3418 int ret; 3419 3420 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3421 if (ret && ret != -EEXIST) { 3422 btrfs_abort_transaction(trans, ret); 3423 return ret; 3424 } 3425 3426 return 0; 3427 } 3428 3429 /* 3430 * We have done the delete so we can go ahead and remove the orphan item for 3431 * this particular inode. 3432 */ 3433 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3434 struct btrfs_inode *inode) 3435 { 3436 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3437 } 3438 3439 /* 3440 * this cleans up any orphans that may be left on the list from the last use 3441 * of this root. 3442 */ 3443 int btrfs_orphan_cleanup(struct btrfs_root *root) 3444 { 3445 struct btrfs_fs_info *fs_info = root->fs_info; 3446 struct btrfs_path *path; 3447 struct extent_buffer *leaf; 3448 struct btrfs_key key, found_key; 3449 struct btrfs_trans_handle *trans; 3450 struct inode *inode; 3451 u64 last_objectid = 0; 3452 int ret = 0, nr_unlink = 0; 3453 3454 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) 3455 return 0; 3456 3457 path = btrfs_alloc_path(); 3458 if (!path) { 3459 ret = -ENOMEM; 3460 goto out; 3461 } 3462 path->reada = READA_BACK; 3463 3464 key.objectid = BTRFS_ORPHAN_OBJECTID; 3465 key.type = BTRFS_ORPHAN_ITEM_KEY; 3466 key.offset = (u64)-1; 3467 3468 while (1) { 3469 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3470 if (ret < 0) 3471 goto out; 3472 3473 /* 3474 * if ret == 0 means we found what we were searching for, which 3475 * is weird, but possible, so only screw with path if we didn't 3476 * find the key and see if we have stuff that matches 3477 */ 3478 if (ret > 0) { 3479 ret = 0; 3480 if (path->slots[0] == 0) 3481 break; 3482 path->slots[0]--; 3483 } 3484 3485 /* pull out the item */ 3486 leaf = path->nodes[0]; 3487 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3488 3489 /* make sure the item matches what we want */ 3490 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3491 break; 3492 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3493 break; 3494 3495 /* release the path since we're done with it */ 3496 btrfs_release_path(path); 3497 3498 /* 3499 * this is where we are basically btrfs_lookup, without the 3500 * crossing root thing. we store the inode number in the 3501 * offset of the orphan item. 3502 */ 3503 3504 if (found_key.offset == last_objectid) { 3505 /* 3506 * We found the same inode as before. This means we were 3507 * not able to remove its items via eviction triggered 3508 * by an iput(). A transaction abort may have happened, 3509 * due to -ENOSPC for example, so try to grab the error 3510 * that lead to a transaction abort, if any. 3511 */ 3512 btrfs_err(fs_info, 3513 "Error removing orphan entry, stopping orphan cleanup"); 3514 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; 3515 goto out; 3516 } 3517 3518 last_objectid = found_key.offset; 3519 3520 found_key.objectid = found_key.offset; 3521 found_key.type = BTRFS_INODE_ITEM_KEY; 3522 found_key.offset = 0; 3523 inode = btrfs_iget(fs_info->sb, last_objectid, root); 3524 if (IS_ERR(inode)) { 3525 ret = PTR_ERR(inode); 3526 inode = NULL; 3527 if (ret != -ENOENT) 3528 goto out; 3529 } 3530 3531 if (!inode && root == fs_info->tree_root) { 3532 struct btrfs_root *dead_root; 3533 int is_dead_root = 0; 3534 3535 /* 3536 * This is an orphan in the tree root. Currently these 3537 * could come from 2 sources: 3538 * a) a root (snapshot/subvolume) deletion in progress 3539 * b) a free space cache inode 3540 * We need to distinguish those two, as the orphan item 3541 * for a root must not get deleted before the deletion 3542 * of the snapshot/subvolume's tree completes. 3543 * 3544 * btrfs_find_orphan_roots() ran before us, which has 3545 * found all deleted roots and loaded them into 3546 * fs_info->fs_roots_radix. So here we can find if an 3547 * orphan item corresponds to a deleted root by looking 3548 * up the root from that radix tree. 3549 */ 3550 3551 spin_lock(&fs_info->fs_roots_radix_lock); 3552 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3553 (unsigned long)found_key.objectid); 3554 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3555 is_dead_root = 1; 3556 spin_unlock(&fs_info->fs_roots_radix_lock); 3557 3558 if (is_dead_root) { 3559 /* prevent this orphan from being found again */ 3560 key.offset = found_key.objectid - 1; 3561 continue; 3562 } 3563 3564 } 3565 3566 /* 3567 * If we have an inode with links, there are a couple of 3568 * possibilities: 3569 * 3570 * 1. We were halfway through creating fsverity metadata for the 3571 * file. In that case, the orphan item represents incomplete 3572 * fsverity metadata which must be cleaned up with 3573 * btrfs_drop_verity_items and deleting the orphan item. 3574 3575 * 2. Old kernels (before v3.12) used to create an 3576 * orphan item for truncate indicating that there were possibly 3577 * extent items past i_size that needed to be deleted. In v3.12, 3578 * truncate was changed to update i_size in sync with the extent 3579 * items, but the (useless) orphan item was still created. Since 3580 * v4.18, we don't create the orphan item for truncate at all. 3581 * 3582 * So, this item could mean that we need to do a truncate, but 3583 * only if this filesystem was last used on a pre-v3.12 kernel 3584 * and was not cleanly unmounted. The odds of that are quite 3585 * slim, and it's a pain to do the truncate now, so just delete 3586 * the orphan item. 3587 * 3588 * It's also possible that this orphan item was supposed to be 3589 * deleted but wasn't. The inode number may have been reused, 3590 * but either way, we can delete the orphan item. 3591 */ 3592 if (!inode || inode->i_nlink) { 3593 if (inode) { 3594 ret = btrfs_drop_verity_items(BTRFS_I(inode)); 3595 iput(inode); 3596 inode = NULL; 3597 if (ret) 3598 goto out; 3599 } 3600 trans = btrfs_start_transaction(root, 1); 3601 if (IS_ERR(trans)) { 3602 ret = PTR_ERR(trans); 3603 goto out; 3604 } 3605 btrfs_debug(fs_info, "auto deleting %Lu", 3606 found_key.objectid); 3607 ret = btrfs_del_orphan_item(trans, root, 3608 found_key.objectid); 3609 btrfs_end_transaction(trans); 3610 if (ret) 3611 goto out; 3612 continue; 3613 } 3614 3615 nr_unlink++; 3616 3617 /* this will do delete_inode and everything for us */ 3618 iput(inode); 3619 } 3620 /* release the path since we're done with it */ 3621 btrfs_release_path(path); 3622 3623 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3624 trans = btrfs_join_transaction(root); 3625 if (!IS_ERR(trans)) 3626 btrfs_end_transaction(trans); 3627 } 3628 3629 if (nr_unlink) 3630 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3631 3632 out: 3633 if (ret) 3634 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3635 btrfs_free_path(path); 3636 return ret; 3637 } 3638 3639 /* 3640 * very simple check to peek ahead in the leaf looking for xattrs. If we 3641 * don't find any xattrs, we know there can't be any acls. 3642 * 3643 * slot is the slot the inode is in, objectid is the objectid of the inode 3644 */ 3645 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3646 int slot, u64 objectid, 3647 int *first_xattr_slot) 3648 { 3649 u32 nritems = btrfs_header_nritems(leaf); 3650 struct btrfs_key found_key; 3651 static u64 xattr_access = 0; 3652 static u64 xattr_default = 0; 3653 int scanned = 0; 3654 3655 if (!xattr_access) { 3656 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3657 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3658 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3659 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3660 } 3661 3662 slot++; 3663 *first_xattr_slot = -1; 3664 while (slot < nritems) { 3665 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3666 3667 /* we found a different objectid, there must not be acls */ 3668 if (found_key.objectid != objectid) 3669 return 0; 3670 3671 /* we found an xattr, assume we've got an acl */ 3672 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3673 if (*first_xattr_slot == -1) 3674 *first_xattr_slot = slot; 3675 if (found_key.offset == xattr_access || 3676 found_key.offset == xattr_default) 3677 return 1; 3678 } 3679 3680 /* 3681 * we found a key greater than an xattr key, there can't 3682 * be any acls later on 3683 */ 3684 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3685 return 0; 3686 3687 slot++; 3688 scanned++; 3689 3690 /* 3691 * it goes inode, inode backrefs, xattrs, extents, 3692 * so if there are a ton of hard links to an inode there can 3693 * be a lot of backrefs. Don't waste time searching too hard, 3694 * this is just an optimization 3695 */ 3696 if (scanned >= 8) 3697 break; 3698 } 3699 /* we hit the end of the leaf before we found an xattr or 3700 * something larger than an xattr. We have to assume the inode 3701 * has acls 3702 */ 3703 if (*first_xattr_slot == -1) 3704 *first_xattr_slot = slot; 3705 return 1; 3706 } 3707 3708 /* 3709 * read an inode from the btree into the in-memory inode 3710 */ 3711 static int btrfs_read_locked_inode(struct inode *inode, 3712 struct btrfs_path *in_path) 3713 { 3714 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3715 struct btrfs_path *path = in_path; 3716 struct extent_buffer *leaf; 3717 struct btrfs_inode_item *inode_item; 3718 struct btrfs_root *root = BTRFS_I(inode)->root; 3719 struct btrfs_key location; 3720 unsigned long ptr; 3721 int maybe_acls; 3722 u32 rdev; 3723 int ret; 3724 bool filled = false; 3725 int first_xattr_slot; 3726 3727 ret = btrfs_fill_inode(inode, &rdev); 3728 if (!ret) 3729 filled = true; 3730 3731 if (!path) { 3732 path = btrfs_alloc_path(); 3733 if (!path) 3734 return -ENOMEM; 3735 } 3736 3737 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3738 3739 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3740 if (ret) { 3741 if (path != in_path) 3742 btrfs_free_path(path); 3743 return ret; 3744 } 3745 3746 leaf = path->nodes[0]; 3747 3748 if (filled) 3749 goto cache_index; 3750 3751 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3752 struct btrfs_inode_item); 3753 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3754 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3755 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3756 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3757 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3758 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0, 3759 round_up(i_size_read(inode), fs_info->sectorsize)); 3760 3761 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3762 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3763 3764 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3765 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3766 3767 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), 3768 btrfs_timespec_nsec(leaf, &inode_item->ctime)); 3769 3770 BTRFS_I(inode)->i_otime.tv_sec = 3771 btrfs_timespec_sec(leaf, &inode_item->otime); 3772 BTRFS_I(inode)->i_otime.tv_nsec = 3773 btrfs_timespec_nsec(leaf, &inode_item->otime); 3774 3775 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3776 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3777 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3778 3779 inode_set_iversion_queried(inode, 3780 btrfs_inode_sequence(leaf, inode_item)); 3781 inode->i_generation = BTRFS_I(inode)->generation; 3782 inode->i_rdev = 0; 3783 rdev = btrfs_inode_rdev(leaf, inode_item); 3784 3785 BTRFS_I(inode)->index_cnt = (u64)-1; 3786 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item), 3787 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); 3788 3789 cache_index: 3790 /* 3791 * If we were modified in the current generation and evicted from memory 3792 * and then re-read we need to do a full sync since we don't have any 3793 * idea about which extents were modified before we were evicted from 3794 * cache. 3795 * 3796 * This is required for both inode re-read from disk and delayed inode 3797 * in delayed_nodes_tree. 3798 */ 3799 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3800 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3801 &BTRFS_I(inode)->runtime_flags); 3802 3803 /* 3804 * We don't persist the id of the transaction where an unlink operation 3805 * against the inode was last made. So here we assume the inode might 3806 * have been evicted, and therefore the exact value of last_unlink_trans 3807 * lost, and set it to last_trans to avoid metadata inconsistencies 3808 * between the inode and its parent if the inode is fsync'ed and the log 3809 * replayed. For example, in the scenario: 3810 * 3811 * touch mydir/foo 3812 * ln mydir/foo mydir/bar 3813 * sync 3814 * unlink mydir/bar 3815 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3816 * xfs_io -c fsync mydir/foo 3817 * <power failure> 3818 * mount fs, triggers fsync log replay 3819 * 3820 * We must make sure that when we fsync our inode foo we also log its 3821 * parent inode, otherwise after log replay the parent still has the 3822 * dentry with the "bar" name but our inode foo has a link count of 1 3823 * and doesn't have an inode ref with the name "bar" anymore. 3824 * 3825 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3826 * but it guarantees correctness at the expense of occasional full 3827 * transaction commits on fsync if our inode is a directory, or if our 3828 * inode is not a directory, logging its parent unnecessarily. 3829 */ 3830 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3831 3832 /* 3833 * Same logic as for last_unlink_trans. We don't persist the generation 3834 * of the last transaction where this inode was used for a reflink 3835 * operation, so after eviction and reloading the inode we must be 3836 * pessimistic and assume the last transaction that modified the inode. 3837 */ 3838 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; 3839 3840 path->slots[0]++; 3841 if (inode->i_nlink != 1 || 3842 path->slots[0] >= btrfs_header_nritems(leaf)) 3843 goto cache_acl; 3844 3845 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3846 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3847 goto cache_acl; 3848 3849 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3850 if (location.type == BTRFS_INODE_REF_KEY) { 3851 struct btrfs_inode_ref *ref; 3852 3853 ref = (struct btrfs_inode_ref *)ptr; 3854 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3855 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3856 struct btrfs_inode_extref *extref; 3857 3858 extref = (struct btrfs_inode_extref *)ptr; 3859 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3860 extref); 3861 } 3862 cache_acl: 3863 /* 3864 * try to precache a NULL acl entry for files that don't have 3865 * any xattrs or acls 3866 */ 3867 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3868 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3869 if (first_xattr_slot != -1) { 3870 path->slots[0] = first_xattr_slot; 3871 ret = btrfs_load_inode_props(inode, path); 3872 if (ret) 3873 btrfs_err(fs_info, 3874 "error loading props for ino %llu (root %llu): %d", 3875 btrfs_ino(BTRFS_I(inode)), 3876 root->root_key.objectid, ret); 3877 } 3878 if (path != in_path) 3879 btrfs_free_path(path); 3880 3881 if (!maybe_acls) 3882 cache_no_acl(inode); 3883 3884 switch (inode->i_mode & S_IFMT) { 3885 case S_IFREG: 3886 inode->i_mapping->a_ops = &btrfs_aops; 3887 inode->i_fop = &btrfs_file_operations; 3888 inode->i_op = &btrfs_file_inode_operations; 3889 break; 3890 case S_IFDIR: 3891 inode->i_fop = &btrfs_dir_file_operations; 3892 inode->i_op = &btrfs_dir_inode_operations; 3893 break; 3894 case S_IFLNK: 3895 inode->i_op = &btrfs_symlink_inode_operations; 3896 inode_nohighmem(inode); 3897 inode->i_mapping->a_ops = &btrfs_aops; 3898 break; 3899 default: 3900 inode->i_op = &btrfs_special_inode_operations; 3901 init_special_inode(inode, inode->i_mode, rdev); 3902 break; 3903 } 3904 3905 btrfs_sync_inode_flags_to_i_flags(inode); 3906 return 0; 3907 } 3908 3909 /* 3910 * given a leaf and an inode, copy the inode fields into the leaf 3911 */ 3912 static void fill_inode_item(struct btrfs_trans_handle *trans, 3913 struct extent_buffer *leaf, 3914 struct btrfs_inode_item *item, 3915 struct inode *inode) 3916 { 3917 struct btrfs_map_token token; 3918 u64 flags; 3919 3920 btrfs_init_map_token(&token, leaf); 3921 3922 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3923 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3924 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 3925 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3926 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3927 3928 btrfs_set_token_timespec_sec(&token, &item->atime, 3929 inode->i_atime.tv_sec); 3930 btrfs_set_token_timespec_nsec(&token, &item->atime, 3931 inode->i_atime.tv_nsec); 3932 3933 btrfs_set_token_timespec_sec(&token, &item->mtime, 3934 inode->i_mtime.tv_sec); 3935 btrfs_set_token_timespec_nsec(&token, &item->mtime, 3936 inode->i_mtime.tv_nsec); 3937 3938 btrfs_set_token_timespec_sec(&token, &item->ctime, 3939 inode_get_ctime(inode).tv_sec); 3940 btrfs_set_token_timespec_nsec(&token, &item->ctime, 3941 inode_get_ctime(inode).tv_nsec); 3942 3943 btrfs_set_token_timespec_sec(&token, &item->otime, 3944 BTRFS_I(inode)->i_otime.tv_sec); 3945 btrfs_set_token_timespec_nsec(&token, &item->otime, 3946 BTRFS_I(inode)->i_otime.tv_nsec); 3947 3948 btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 3949 btrfs_set_token_inode_generation(&token, item, 3950 BTRFS_I(inode)->generation); 3951 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3952 btrfs_set_token_inode_transid(&token, item, trans->transid); 3953 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3954 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 3955 BTRFS_I(inode)->ro_flags); 3956 btrfs_set_token_inode_flags(&token, item, flags); 3957 btrfs_set_token_inode_block_group(&token, item, 0); 3958 } 3959 3960 /* 3961 * copy everything in the in-memory inode into the btree. 3962 */ 3963 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3964 struct btrfs_root *root, 3965 struct btrfs_inode *inode) 3966 { 3967 struct btrfs_inode_item *inode_item; 3968 struct btrfs_path *path; 3969 struct extent_buffer *leaf; 3970 int ret; 3971 3972 path = btrfs_alloc_path(); 3973 if (!path) 3974 return -ENOMEM; 3975 3976 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); 3977 if (ret) { 3978 if (ret > 0) 3979 ret = -ENOENT; 3980 goto failed; 3981 } 3982 3983 leaf = path->nodes[0]; 3984 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3985 struct btrfs_inode_item); 3986 3987 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); 3988 btrfs_mark_buffer_dirty(leaf); 3989 btrfs_set_inode_last_trans(trans, inode); 3990 ret = 0; 3991 failed: 3992 btrfs_free_path(path); 3993 return ret; 3994 } 3995 3996 /* 3997 * copy everything in the in-memory inode into the btree. 3998 */ 3999 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 4000 struct btrfs_root *root, 4001 struct btrfs_inode *inode) 4002 { 4003 struct btrfs_fs_info *fs_info = root->fs_info; 4004 int ret; 4005 4006 /* 4007 * If the inode is a free space inode, we can deadlock during commit 4008 * if we put it into the delayed code. 4009 * 4010 * The data relocation inode should also be directly updated 4011 * without delay 4012 */ 4013 if (!btrfs_is_free_space_inode(inode) 4014 && !btrfs_is_data_reloc_root(root) 4015 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 4016 btrfs_update_root_times(trans, root); 4017 4018 ret = btrfs_delayed_update_inode(trans, root, inode); 4019 if (!ret) 4020 btrfs_set_inode_last_trans(trans, inode); 4021 return ret; 4022 } 4023 4024 return btrfs_update_inode_item(trans, root, inode); 4025 } 4026 4027 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4028 struct btrfs_root *root, struct btrfs_inode *inode) 4029 { 4030 int ret; 4031 4032 ret = btrfs_update_inode(trans, root, inode); 4033 if (ret == -ENOSPC) 4034 return btrfs_update_inode_item(trans, root, inode); 4035 return ret; 4036 } 4037 4038 /* 4039 * unlink helper that gets used here in inode.c and in the tree logging 4040 * recovery code. It remove a link in a directory with a given name, and 4041 * also drops the back refs in the inode to the directory 4042 */ 4043 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4044 struct btrfs_inode *dir, 4045 struct btrfs_inode *inode, 4046 const struct fscrypt_str *name, 4047 struct btrfs_rename_ctx *rename_ctx) 4048 { 4049 struct btrfs_root *root = dir->root; 4050 struct btrfs_fs_info *fs_info = root->fs_info; 4051 struct btrfs_path *path; 4052 int ret = 0; 4053 struct btrfs_dir_item *di; 4054 u64 index; 4055 u64 ino = btrfs_ino(inode); 4056 u64 dir_ino = btrfs_ino(dir); 4057 4058 path = btrfs_alloc_path(); 4059 if (!path) { 4060 ret = -ENOMEM; 4061 goto out; 4062 } 4063 4064 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); 4065 if (IS_ERR_OR_NULL(di)) { 4066 ret = di ? PTR_ERR(di) : -ENOENT; 4067 goto err; 4068 } 4069 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4070 if (ret) 4071 goto err; 4072 btrfs_release_path(path); 4073 4074 /* 4075 * If we don't have dir index, we have to get it by looking up 4076 * the inode ref, since we get the inode ref, remove it directly, 4077 * it is unnecessary to do delayed deletion. 4078 * 4079 * But if we have dir index, needn't search inode ref to get it. 4080 * Since the inode ref is close to the inode item, it is better 4081 * that we delay to delete it, and just do this deletion when 4082 * we update the inode item. 4083 */ 4084 if (inode->dir_index) { 4085 ret = btrfs_delayed_delete_inode_ref(inode); 4086 if (!ret) { 4087 index = inode->dir_index; 4088 goto skip_backref; 4089 } 4090 } 4091 4092 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index); 4093 if (ret) { 4094 btrfs_info(fs_info, 4095 "failed to delete reference to %.*s, inode %llu parent %llu", 4096 name->len, name->name, ino, dir_ino); 4097 btrfs_abort_transaction(trans, ret); 4098 goto err; 4099 } 4100 skip_backref: 4101 if (rename_ctx) 4102 rename_ctx->index = index; 4103 4104 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4105 if (ret) { 4106 btrfs_abort_transaction(trans, ret); 4107 goto err; 4108 } 4109 4110 /* 4111 * If we are in a rename context, we don't need to update anything in the 4112 * log. That will be done later during the rename by btrfs_log_new_name(). 4113 * Besides that, doing it here would only cause extra unnecessary btree 4114 * operations on the log tree, increasing latency for applications. 4115 */ 4116 if (!rename_ctx) { 4117 btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino); 4118 btrfs_del_dir_entries_in_log(trans, root, name, dir, index); 4119 } 4120 4121 /* 4122 * If we have a pending delayed iput we could end up with the final iput 4123 * being run in btrfs-cleaner context. If we have enough of these built 4124 * up we can end up burning a lot of time in btrfs-cleaner without any 4125 * way to throttle the unlinks. Since we're currently holding a ref on 4126 * the inode we can run the delayed iput here without any issues as the 4127 * final iput won't be done until after we drop the ref we're currently 4128 * holding. 4129 */ 4130 btrfs_run_delayed_iput(fs_info, inode); 4131 err: 4132 btrfs_free_path(path); 4133 if (ret) 4134 goto out; 4135 4136 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); 4137 inode_inc_iversion(&inode->vfs_inode); 4138 inode_inc_iversion(&dir->vfs_inode); 4139 inode_set_ctime_current(&inode->vfs_inode); 4140 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); 4141 ret = btrfs_update_inode(trans, root, dir); 4142 out: 4143 return ret; 4144 } 4145 4146 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4147 struct btrfs_inode *dir, struct btrfs_inode *inode, 4148 const struct fscrypt_str *name) 4149 { 4150 int ret; 4151 4152 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL); 4153 if (!ret) { 4154 drop_nlink(&inode->vfs_inode); 4155 ret = btrfs_update_inode(trans, inode->root, inode); 4156 } 4157 return ret; 4158 } 4159 4160 /* 4161 * helper to start transaction for unlink and rmdir. 4162 * 4163 * unlink and rmdir are special in btrfs, they do not always free space, so 4164 * if we cannot make our reservations the normal way try and see if there is 4165 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4166 * allow the unlink to occur. 4167 */ 4168 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir) 4169 { 4170 struct btrfs_root *root = dir->root; 4171 4172 return btrfs_start_transaction_fallback_global_rsv(root, 4173 BTRFS_UNLINK_METADATA_UNITS); 4174 } 4175 4176 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4177 { 4178 struct btrfs_trans_handle *trans; 4179 struct inode *inode = d_inode(dentry); 4180 int ret; 4181 struct fscrypt_name fname; 4182 4183 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4184 if (ret) 4185 return ret; 4186 4187 /* This needs to handle no-key deletions later on */ 4188 4189 trans = __unlink_start_trans(BTRFS_I(dir)); 4190 if (IS_ERR(trans)) { 4191 ret = PTR_ERR(trans); 4192 goto fscrypt_free; 4193 } 4194 4195 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4196 false); 4197 4198 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4199 &fname.disk_name); 4200 if (ret) 4201 goto end_trans; 4202 4203 if (inode->i_nlink == 0) { 4204 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4205 if (ret) 4206 goto end_trans; 4207 } 4208 4209 end_trans: 4210 btrfs_end_transaction(trans); 4211 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); 4212 fscrypt_free: 4213 fscrypt_free_filename(&fname); 4214 return ret; 4215 } 4216 4217 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4218 struct btrfs_inode *dir, struct dentry *dentry) 4219 { 4220 struct btrfs_root *root = dir->root; 4221 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); 4222 struct btrfs_path *path; 4223 struct extent_buffer *leaf; 4224 struct btrfs_dir_item *di; 4225 struct btrfs_key key; 4226 u64 index; 4227 int ret; 4228 u64 objectid; 4229 u64 dir_ino = btrfs_ino(dir); 4230 struct fscrypt_name fname; 4231 4232 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 4233 if (ret) 4234 return ret; 4235 4236 /* This needs to handle no-key deletions later on */ 4237 4238 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) { 4239 objectid = inode->root->root_key.objectid; 4240 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4241 objectid = inode->location.objectid; 4242 } else { 4243 WARN_ON(1); 4244 fscrypt_free_filename(&fname); 4245 return -EINVAL; 4246 } 4247 4248 path = btrfs_alloc_path(); 4249 if (!path) { 4250 ret = -ENOMEM; 4251 goto out; 4252 } 4253 4254 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4255 &fname.disk_name, -1); 4256 if (IS_ERR_OR_NULL(di)) { 4257 ret = di ? PTR_ERR(di) : -ENOENT; 4258 goto out; 4259 } 4260 4261 leaf = path->nodes[0]; 4262 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4263 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4264 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4265 if (ret) { 4266 btrfs_abort_transaction(trans, ret); 4267 goto out; 4268 } 4269 btrfs_release_path(path); 4270 4271 /* 4272 * This is a placeholder inode for a subvolume we didn't have a 4273 * reference to at the time of the snapshot creation. In the meantime 4274 * we could have renamed the real subvol link into our snapshot, so 4275 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. 4276 * Instead simply lookup the dir_index_item for this entry so we can 4277 * remove it. Otherwise we know we have a ref to the root and we can 4278 * call btrfs_del_root_ref, and it _shouldn't_ fail. 4279 */ 4280 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) { 4281 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name); 4282 if (IS_ERR_OR_NULL(di)) { 4283 if (!di) 4284 ret = -ENOENT; 4285 else 4286 ret = PTR_ERR(di); 4287 btrfs_abort_transaction(trans, ret); 4288 goto out; 4289 } 4290 4291 leaf = path->nodes[0]; 4292 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4293 index = key.offset; 4294 btrfs_release_path(path); 4295 } else { 4296 ret = btrfs_del_root_ref(trans, objectid, 4297 root->root_key.objectid, dir_ino, 4298 &index, &fname.disk_name); 4299 if (ret) { 4300 btrfs_abort_transaction(trans, ret); 4301 goto out; 4302 } 4303 } 4304 4305 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4306 if (ret) { 4307 btrfs_abort_transaction(trans, ret); 4308 goto out; 4309 } 4310 4311 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); 4312 inode_inc_iversion(&dir->vfs_inode); 4313 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); 4314 ret = btrfs_update_inode_fallback(trans, root, dir); 4315 if (ret) 4316 btrfs_abort_transaction(trans, ret); 4317 out: 4318 btrfs_free_path(path); 4319 fscrypt_free_filename(&fname); 4320 return ret; 4321 } 4322 4323 /* 4324 * Helper to check if the subvolume references other subvolumes or if it's 4325 * default. 4326 */ 4327 static noinline int may_destroy_subvol(struct btrfs_root *root) 4328 { 4329 struct btrfs_fs_info *fs_info = root->fs_info; 4330 struct btrfs_path *path; 4331 struct btrfs_dir_item *di; 4332 struct btrfs_key key; 4333 struct fscrypt_str name = FSTR_INIT("default", 7); 4334 u64 dir_id; 4335 int ret; 4336 4337 path = btrfs_alloc_path(); 4338 if (!path) 4339 return -ENOMEM; 4340 4341 /* Make sure this root isn't set as the default subvol */ 4342 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4343 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4344 dir_id, &name, 0); 4345 if (di && !IS_ERR(di)) { 4346 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4347 if (key.objectid == root->root_key.objectid) { 4348 ret = -EPERM; 4349 btrfs_err(fs_info, 4350 "deleting default subvolume %llu is not allowed", 4351 key.objectid); 4352 goto out; 4353 } 4354 btrfs_release_path(path); 4355 } 4356 4357 key.objectid = root->root_key.objectid; 4358 key.type = BTRFS_ROOT_REF_KEY; 4359 key.offset = (u64)-1; 4360 4361 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4362 if (ret < 0) 4363 goto out; 4364 BUG_ON(ret == 0); 4365 4366 ret = 0; 4367 if (path->slots[0] > 0) { 4368 path->slots[0]--; 4369 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4370 if (key.objectid == root->root_key.objectid && 4371 key.type == BTRFS_ROOT_REF_KEY) 4372 ret = -ENOTEMPTY; 4373 } 4374 out: 4375 btrfs_free_path(path); 4376 return ret; 4377 } 4378 4379 /* Delete all dentries for inodes belonging to the root */ 4380 static void btrfs_prune_dentries(struct btrfs_root *root) 4381 { 4382 struct btrfs_fs_info *fs_info = root->fs_info; 4383 struct rb_node *node; 4384 struct rb_node *prev; 4385 struct btrfs_inode *entry; 4386 struct inode *inode; 4387 u64 objectid = 0; 4388 4389 if (!BTRFS_FS_ERROR(fs_info)) 4390 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4391 4392 spin_lock(&root->inode_lock); 4393 again: 4394 node = root->inode_tree.rb_node; 4395 prev = NULL; 4396 while (node) { 4397 prev = node; 4398 entry = rb_entry(node, struct btrfs_inode, rb_node); 4399 4400 if (objectid < btrfs_ino(entry)) 4401 node = node->rb_left; 4402 else if (objectid > btrfs_ino(entry)) 4403 node = node->rb_right; 4404 else 4405 break; 4406 } 4407 if (!node) { 4408 while (prev) { 4409 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4410 if (objectid <= btrfs_ino(entry)) { 4411 node = prev; 4412 break; 4413 } 4414 prev = rb_next(prev); 4415 } 4416 } 4417 while (node) { 4418 entry = rb_entry(node, struct btrfs_inode, rb_node); 4419 objectid = btrfs_ino(entry) + 1; 4420 inode = igrab(&entry->vfs_inode); 4421 if (inode) { 4422 spin_unlock(&root->inode_lock); 4423 if (atomic_read(&inode->i_count) > 1) 4424 d_prune_aliases(inode); 4425 /* 4426 * btrfs_drop_inode will have it removed from the inode 4427 * cache when its usage count hits zero. 4428 */ 4429 iput(inode); 4430 cond_resched(); 4431 spin_lock(&root->inode_lock); 4432 goto again; 4433 } 4434 4435 if (cond_resched_lock(&root->inode_lock)) 4436 goto again; 4437 4438 node = rb_next(node); 4439 } 4440 spin_unlock(&root->inode_lock); 4441 } 4442 4443 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry) 4444 { 4445 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4446 struct btrfs_root *root = dir->root; 4447 struct inode *inode = d_inode(dentry); 4448 struct btrfs_root *dest = BTRFS_I(inode)->root; 4449 struct btrfs_trans_handle *trans; 4450 struct btrfs_block_rsv block_rsv; 4451 u64 root_flags; 4452 int ret; 4453 4454 /* 4455 * Don't allow to delete a subvolume with send in progress. This is 4456 * inside the inode lock so the error handling that has to drop the bit 4457 * again is not run concurrently. 4458 */ 4459 spin_lock(&dest->root_item_lock); 4460 if (dest->send_in_progress) { 4461 spin_unlock(&dest->root_item_lock); 4462 btrfs_warn(fs_info, 4463 "attempt to delete subvolume %llu during send", 4464 dest->root_key.objectid); 4465 return -EPERM; 4466 } 4467 if (atomic_read(&dest->nr_swapfiles)) { 4468 spin_unlock(&dest->root_item_lock); 4469 btrfs_warn(fs_info, 4470 "attempt to delete subvolume %llu with active swapfile", 4471 root->root_key.objectid); 4472 return -EPERM; 4473 } 4474 root_flags = btrfs_root_flags(&dest->root_item); 4475 btrfs_set_root_flags(&dest->root_item, 4476 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4477 spin_unlock(&dest->root_item_lock); 4478 4479 down_write(&fs_info->subvol_sem); 4480 4481 ret = may_destroy_subvol(dest); 4482 if (ret) 4483 goto out_up_write; 4484 4485 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4486 /* 4487 * One for dir inode, 4488 * two for dir entries, 4489 * two for root ref/backref. 4490 */ 4491 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4492 if (ret) 4493 goto out_up_write; 4494 4495 trans = btrfs_start_transaction(root, 0); 4496 if (IS_ERR(trans)) { 4497 ret = PTR_ERR(trans); 4498 goto out_release; 4499 } 4500 trans->block_rsv = &block_rsv; 4501 trans->bytes_reserved = block_rsv.size; 4502 4503 btrfs_record_snapshot_destroy(trans, dir); 4504 4505 ret = btrfs_unlink_subvol(trans, dir, dentry); 4506 if (ret) { 4507 btrfs_abort_transaction(trans, ret); 4508 goto out_end_trans; 4509 } 4510 4511 ret = btrfs_record_root_in_trans(trans, dest); 4512 if (ret) { 4513 btrfs_abort_transaction(trans, ret); 4514 goto out_end_trans; 4515 } 4516 4517 memset(&dest->root_item.drop_progress, 0, 4518 sizeof(dest->root_item.drop_progress)); 4519 btrfs_set_root_drop_level(&dest->root_item, 0); 4520 btrfs_set_root_refs(&dest->root_item, 0); 4521 4522 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4523 ret = btrfs_insert_orphan_item(trans, 4524 fs_info->tree_root, 4525 dest->root_key.objectid); 4526 if (ret) { 4527 btrfs_abort_transaction(trans, ret); 4528 goto out_end_trans; 4529 } 4530 } 4531 4532 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4533 BTRFS_UUID_KEY_SUBVOL, 4534 dest->root_key.objectid); 4535 if (ret && ret != -ENOENT) { 4536 btrfs_abort_transaction(trans, ret); 4537 goto out_end_trans; 4538 } 4539 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4540 ret = btrfs_uuid_tree_remove(trans, 4541 dest->root_item.received_uuid, 4542 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4543 dest->root_key.objectid); 4544 if (ret && ret != -ENOENT) { 4545 btrfs_abort_transaction(trans, ret); 4546 goto out_end_trans; 4547 } 4548 } 4549 4550 free_anon_bdev(dest->anon_dev); 4551 dest->anon_dev = 0; 4552 out_end_trans: 4553 trans->block_rsv = NULL; 4554 trans->bytes_reserved = 0; 4555 ret = btrfs_end_transaction(trans); 4556 inode->i_flags |= S_DEAD; 4557 out_release: 4558 btrfs_subvolume_release_metadata(root, &block_rsv); 4559 out_up_write: 4560 up_write(&fs_info->subvol_sem); 4561 if (ret) { 4562 spin_lock(&dest->root_item_lock); 4563 root_flags = btrfs_root_flags(&dest->root_item); 4564 btrfs_set_root_flags(&dest->root_item, 4565 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4566 spin_unlock(&dest->root_item_lock); 4567 } else { 4568 d_invalidate(dentry); 4569 btrfs_prune_dentries(dest); 4570 ASSERT(dest->send_in_progress == 0); 4571 } 4572 4573 return ret; 4574 } 4575 4576 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4577 { 4578 struct inode *inode = d_inode(dentry); 4579 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 4580 int err = 0; 4581 struct btrfs_trans_handle *trans; 4582 u64 last_unlink_trans; 4583 struct fscrypt_name fname; 4584 4585 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4586 return -ENOTEMPTY; 4587 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) { 4588 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) { 4589 btrfs_err(fs_info, 4590 "extent tree v2 doesn't support snapshot deletion yet"); 4591 return -EOPNOTSUPP; 4592 } 4593 return btrfs_delete_subvolume(BTRFS_I(dir), dentry); 4594 } 4595 4596 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); 4597 if (err) 4598 return err; 4599 4600 /* This needs to handle no-key deletions later on */ 4601 4602 trans = __unlink_start_trans(BTRFS_I(dir)); 4603 if (IS_ERR(trans)) { 4604 err = PTR_ERR(trans); 4605 goto out_notrans; 4606 } 4607 4608 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4609 err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); 4610 goto out; 4611 } 4612 4613 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4614 if (err) 4615 goto out; 4616 4617 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4618 4619 /* now the directory is empty */ 4620 err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4621 &fname.disk_name); 4622 if (!err) { 4623 btrfs_i_size_write(BTRFS_I(inode), 0); 4624 /* 4625 * Propagate the last_unlink_trans value of the deleted dir to 4626 * its parent directory. This is to prevent an unrecoverable 4627 * log tree in the case we do something like this: 4628 * 1) create dir foo 4629 * 2) create snapshot under dir foo 4630 * 3) delete the snapshot 4631 * 4) rmdir foo 4632 * 5) mkdir foo 4633 * 6) fsync foo or some file inside foo 4634 */ 4635 if (last_unlink_trans >= trans->transid) 4636 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4637 } 4638 out: 4639 btrfs_end_transaction(trans); 4640 out_notrans: 4641 btrfs_btree_balance_dirty(fs_info); 4642 fscrypt_free_filename(&fname); 4643 4644 return err; 4645 } 4646 4647 /* 4648 * btrfs_truncate_block - read, zero a chunk and write a block 4649 * @inode - inode that we're zeroing 4650 * @from - the offset to start zeroing 4651 * @len - the length to zero, 0 to zero the entire range respective to the 4652 * offset 4653 * @front - zero up to the offset instead of from the offset on 4654 * 4655 * This will find the block for the "from" offset and cow the block and zero the 4656 * part we want to zero. This is used with truncate and hole punching. 4657 */ 4658 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, 4659 int front) 4660 { 4661 struct btrfs_fs_info *fs_info = inode->root->fs_info; 4662 struct address_space *mapping = inode->vfs_inode.i_mapping; 4663 struct extent_io_tree *io_tree = &inode->io_tree; 4664 struct btrfs_ordered_extent *ordered; 4665 struct extent_state *cached_state = NULL; 4666 struct extent_changeset *data_reserved = NULL; 4667 bool only_release_metadata = false; 4668 u32 blocksize = fs_info->sectorsize; 4669 pgoff_t index = from >> PAGE_SHIFT; 4670 unsigned offset = from & (blocksize - 1); 4671 struct page *page; 4672 gfp_t mask = btrfs_alloc_write_mask(mapping); 4673 size_t write_bytes = blocksize; 4674 int ret = 0; 4675 u64 block_start; 4676 u64 block_end; 4677 4678 if (IS_ALIGNED(offset, blocksize) && 4679 (!len || IS_ALIGNED(len, blocksize))) 4680 goto out; 4681 4682 block_start = round_down(from, blocksize); 4683 block_end = block_start + blocksize - 1; 4684 4685 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, 4686 blocksize, false); 4687 if (ret < 0) { 4688 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) { 4689 /* For nocow case, no need to reserve data space */ 4690 only_release_metadata = true; 4691 } else { 4692 goto out; 4693 } 4694 } 4695 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false); 4696 if (ret < 0) { 4697 if (!only_release_metadata) 4698 btrfs_free_reserved_data_space(inode, data_reserved, 4699 block_start, blocksize); 4700 goto out; 4701 } 4702 again: 4703 page = find_or_create_page(mapping, index, mask); 4704 if (!page) { 4705 btrfs_delalloc_release_space(inode, data_reserved, block_start, 4706 blocksize, true); 4707 btrfs_delalloc_release_extents(inode, blocksize); 4708 ret = -ENOMEM; 4709 goto out; 4710 } 4711 4712 if (!PageUptodate(page)) { 4713 ret = btrfs_read_folio(NULL, page_folio(page)); 4714 lock_page(page); 4715 if (page->mapping != mapping) { 4716 unlock_page(page); 4717 put_page(page); 4718 goto again; 4719 } 4720 if (!PageUptodate(page)) { 4721 ret = -EIO; 4722 goto out_unlock; 4723 } 4724 } 4725 4726 /* 4727 * We unlock the page after the io is completed and then re-lock it 4728 * above. release_folio() could have come in between that and cleared 4729 * PagePrivate(), but left the page in the mapping. Set the page mapped 4730 * here to make sure it's properly set for the subpage stuff. 4731 */ 4732 ret = set_page_extent_mapped(page); 4733 if (ret < 0) 4734 goto out_unlock; 4735 4736 wait_on_page_writeback(page); 4737 4738 lock_extent(io_tree, block_start, block_end, &cached_state); 4739 4740 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4741 if (ordered) { 4742 unlock_extent(io_tree, block_start, block_end, &cached_state); 4743 unlock_page(page); 4744 put_page(page); 4745 btrfs_start_ordered_extent(ordered); 4746 btrfs_put_ordered_extent(ordered); 4747 goto again; 4748 } 4749 4750 clear_extent_bit(&inode->io_tree, block_start, block_end, 4751 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4752 &cached_state); 4753 4754 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4755 &cached_state); 4756 if (ret) { 4757 unlock_extent(io_tree, block_start, block_end, &cached_state); 4758 goto out_unlock; 4759 } 4760 4761 if (offset != blocksize) { 4762 if (!len) 4763 len = blocksize - offset; 4764 if (front) 4765 memzero_page(page, (block_start - page_offset(page)), 4766 offset); 4767 else 4768 memzero_page(page, (block_start - page_offset(page)) + offset, 4769 len); 4770 } 4771 btrfs_page_clear_checked(fs_info, page, block_start, 4772 block_end + 1 - block_start); 4773 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); 4774 unlock_extent(io_tree, block_start, block_end, &cached_state); 4775 4776 if (only_release_metadata) 4777 set_extent_bit(&inode->io_tree, block_start, block_end, 4778 EXTENT_NORESERVE, NULL); 4779 4780 out_unlock: 4781 if (ret) { 4782 if (only_release_metadata) 4783 btrfs_delalloc_release_metadata(inode, blocksize, true); 4784 else 4785 btrfs_delalloc_release_space(inode, data_reserved, 4786 block_start, blocksize, true); 4787 } 4788 btrfs_delalloc_release_extents(inode, blocksize); 4789 unlock_page(page); 4790 put_page(page); 4791 out: 4792 if (only_release_metadata) 4793 btrfs_check_nocow_unlock(inode); 4794 extent_changeset_free(data_reserved); 4795 return ret; 4796 } 4797 4798 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode, 4799 u64 offset, u64 len) 4800 { 4801 struct btrfs_fs_info *fs_info = root->fs_info; 4802 struct btrfs_trans_handle *trans; 4803 struct btrfs_drop_extents_args drop_args = { 0 }; 4804 int ret; 4805 4806 /* 4807 * If NO_HOLES is enabled, we don't need to do anything. 4808 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans() 4809 * or btrfs_update_inode() will be called, which guarantee that the next 4810 * fsync will know this inode was changed and needs to be logged. 4811 */ 4812 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 4813 return 0; 4814 4815 /* 4816 * 1 - for the one we're dropping 4817 * 1 - for the one we're adding 4818 * 1 - for updating the inode. 4819 */ 4820 trans = btrfs_start_transaction(root, 3); 4821 if (IS_ERR(trans)) 4822 return PTR_ERR(trans); 4823 4824 drop_args.start = offset; 4825 drop_args.end = offset + len; 4826 drop_args.drop_cache = true; 4827 4828 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 4829 if (ret) { 4830 btrfs_abort_transaction(trans, ret); 4831 btrfs_end_transaction(trans); 4832 return ret; 4833 } 4834 4835 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len); 4836 if (ret) { 4837 btrfs_abort_transaction(trans, ret); 4838 } else { 4839 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found); 4840 btrfs_update_inode(trans, root, inode); 4841 } 4842 btrfs_end_transaction(trans); 4843 return ret; 4844 } 4845 4846 /* 4847 * This function puts in dummy file extents for the area we're creating a hole 4848 * for. So if we are truncating this file to a larger size we need to insert 4849 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 4850 * the range between oldsize and size 4851 */ 4852 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) 4853 { 4854 struct btrfs_root *root = inode->root; 4855 struct btrfs_fs_info *fs_info = root->fs_info; 4856 struct extent_io_tree *io_tree = &inode->io_tree; 4857 struct extent_map *em = NULL; 4858 struct extent_state *cached_state = NULL; 4859 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 4860 u64 block_end = ALIGN(size, fs_info->sectorsize); 4861 u64 last_byte; 4862 u64 cur_offset; 4863 u64 hole_size; 4864 int err = 0; 4865 4866 /* 4867 * If our size started in the middle of a block we need to zero out the 4868 * rest of the block before we expand the i_size, otherwise we could 4869 * expose stale data. 4870 */ 4871 err = btrfs_truncate_block(inode, oldsize, 0, 0); 4872 if (err) 4873 return err; 4874 4875 if (size <= hole_start) 4876 return 0; 4877 4878 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, 4879 &cached_state); 4880 cur_offset = hole_start; 4881 while (1) { 4882 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4883 block_end - cur_offset); 4884 if (IS_ERR(em)) { 4885 err = PTR_ERR(em); 4886 em = NULL; 4887 break; 4888 } 4889 last_byte = min(extent_map_end(em), block_end); 4890 last_byte = ALIGN(last_byte, fs_info->sectorsize); 4891 hole_size = last_byte - cur_offset; 4892 4893 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4894 struct extent_map *hole_em; 4895 4896 err = maybe_insert_hole(root, inode, cur_offset, 4897 hole_size); 4898 if (err) 4899 break; 4900 4901 err = btrfs_inode_set_file_extent_range(inode, 4902 cur_offset, hole_size); 4903 if (err) 4904 break; 4905 4906 hole_em = alloc_extent_map(); 4907 if (!hole_em) { 4908 btrfs_drop_extent_map_range(inode, cur_offset, 4909 cur_offset + hole_size - 1, 4910 false); 4911 btrfs_set_inode_full_sync(inode); 4912 goto next; 4913 } 4914 hole_em->start = cur_offset; 4915 hole_em->len = hole_size; 4916 hole_em->orig_start = cur_offset; 4917 4918 hole_em->block_start = EXTENT_MAP_HOLE; 4919 hole_em->block_len = 0; 4920 hole_em->orig_block_len = 0; 4921 hole_em->ram_bytes = hole_size; 4922 hole_em->compress_type = BTRFS_COMPRESS_NONE; 4923 hole_em->generation = fs_info->generation; 4924 4925 err = btrfs_replace_extent_map_range(inode, hole_em, true); 4926 free_extent_map(hole_em); 4927 } else { 4928 err = btrfs_inode_set_file_extent_range(inode, 4929 cur_offset, hole_size); 4930 if (err) 4931 break; 4932 } 4933 next: 4934 free_extent_map(em); 4935 em = NULL; 4936 cur_offset = last_byte; 4937 if (cur_offset >= block_end) 4938 break; 4939 } 4940 free_extent_map(em); 4941 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); 4942 return err; 4943 } 4944 4945 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4946 { 4947 struct btrfs_root *root = BTRFS_I(inode)->root; 4948 struct btrfs_trans_handle *trans; 4949 loff_t oldsize = i_size_read(inode); 4950 loff_t newsize = attr->ia_size; 4951 int mask = attr->ia_valid; 4952 int ret; 4953 4954 /* 4955 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 4956 * special case where we need to update the times despite not having 4957 * these flags set. For all other operations the VFS set these flags 4958 * explicitly if it wants a timestamp update. 4959 */ 4960 if (newsize != oldsize) { 4961 inode_inc_iversion(inode); 4962 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) { 4963 inode->i_mtime = inode_set_ctime_current(inode); 4964 } 4965 } 4966 4967 if (newsize > oldsize) { 4968 /* 4969 * Don't do an expanding truncate while snapshotting is ongoing. 4970 * This is to ensure the snapshot captures a fully consistent 4971 * state of this file - if the snapshot captures this expanding 4972 * truncation, it must capture all writes that happened before 4973 * this truncation. 4974 */ 4975 btrfs_drew_write_lock(&root->snapshot_lock); 4976 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize); 4977 if (ret) { 4978 btrfs_drew_write_unlock(&root->snapshot_lock); 4979 return ret; 4980 } 4981 4982 trans = btrfs_start_transaction(root, 1); 4983 if (IS_ERR(trans)) { 4984 btrfs_drew_write_unlock(&root->snapshot_lock); 4985 return PTR_ERR(trans); 4986 } 4987 4988 i_size_write(inode, newsize); 4989 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 4990 pagecache_isize_extended(inode, oldsize, newsize); 4991 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 4992 btrfs_drew_write_unlock(&root->snapshot_lock); 4993 btrfs_end_transaction(trans); 4994 } else { 4995 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 4996 4997 if (btrfs_is_zoned(fs_info)) { 4998 ret = btrfs_wait_ordered_range(inode, 4999 ALIGN(newsize, fs_info->sectorsize), 5000 (u64)-1); 5001 if (ret) 5002 return ret; 5003 } 5004 5005 /* 5006 * We're truncating a file that used to have good data down to 5007 * zero. Make sure any new writes to the file get on disk 5008 * on close. 5009 */ 5010 if (newsize == 0) 5011 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE, 5012 &BTRFS_I(inode)->runtime_flags); 5013 5014 truncate_setsize(inode, newsize); 5015 5016 inode_dio_wait(inode); 5017 5018 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize); 5019 if (ret && inode->i_nlink) { 5020 int err; 5021 5022 /* 5023 * Truncate failed, so fix up the in-memory size. We 5024 * adjusted disk_i_size down as we removed extents, so 5025 * wait for disk_i_size to be stable and then update the 5026 * in-memory size to match. 5027 */ 5028 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5029 if (err) 5030 return err; 5031 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5032 } 5033 } 5034 5035 return ret; 5036 } 5037 5038 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 5039 struct iattr *attr) 5040 { 5041 struct inode *inode = d_inode(dentry); 5042 struct btrfs_root *root = BTRFS_I(inode)->root; 5043 int err; 5044 5045 if (btrfs_root_readonly(root)) 5046 return -EROFS; 5047 5048 err = setattr_prepare(idmap, dentry, attr); 5049 if (err) 5050 return err; 5051 5052 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5053 err = btrfs_setsize(inode, attr); 5054 if (err) 5055 return err; 5056 } 5057 5058 if (attr->ia_valid) { 5059 setattr_copy(idmap, inode, attr); 5060 inode_inc_iversion(inode); 5061 err = btrfs_dirty_inode(BTRFS_I(inode)); 5062 5063 if (!err && attr->ia_valid & ATTR_MODE) 5064 err = posix_acl_chmod(idmap, dentry, inode->i_mode); 5065 } 5066 5067 return err; 5068 } 5069 5070 /* 5071 * While truncating the inode pages during eviction, we get the VFS 5072 * calling btrfs_invalidate_folio() against each folio of the inode. This 5073 * is slow because the calls to btrfs_invalidate_folio() result in a 5074 * huge amount of calls to lock_extent() and clear_extent_bit(), 5075 * which keep merging and splitting extent_state structures over and over, 5076 * wasting lots of time. 5077 * 5078 * Therefore if the inode is being evicted, let btrfs_invalidate_folio() 5079 * skip all those expensive operations on a per folio basis and do only 5080 * the ordered io finishing, while we release here the extent_map and 5081 * extent_state structures, without the excessive merging and splitting. 5082 */ 5083 static void evict_inode_truncate_pages(struct inode *inode) 5084 { 5085 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5086 struct rb_node *node; 5087 5088 ASSERT(inode->i_state & I_FREEING); 5089 truncate_inode_pages_final(&inode->i_data); 5090 5091 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 5092 5093 /* 5094 * Keep looping until we have no more ranges in the io tree. 5095 * We can have ongoing bios started by readahead that have 5096 * their endio callback (extent_io.c:end_bio_extent_readpage) 5097 * still in progress (unlocked the pages in the bio but did not yet 5098 * unlocked the ranges in the io tree). Therefore this means some 5099 * ranges can still be locked and eviction started because before 5100 * submitting those bios, which are executed by a separate task (work 5101 * queue kthread), inode references (inode->i_count) were not taken 5102 * (which would be dropped in the end io callback of each bio). 5103 * Therefore here we effectively end up waiting for those bios and 5104 * anyone else holding locked ranges without having bumped the inode's 5105 * reference count - if we don't do it, when they access the inode's 5106 * io_tree to unlock a range it may be too late, leading to an 5107 * use-after-free issue. 5108 */ 5109 spin_lock(&io_tree->lock); 5110 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5111 struct extent_state *state; 5112 struct extent_state *cached_state = NULL; 5113 u64 start; 5114 u64 end; 5115 unsigned state_flags; 5116 5117 node = rb_first(&io_tree->state); 5118 state = rb_entry(node, struct extent_state, rb_node); 5119 start = state->start; 5120 end = state->end; 5121 state_flags = state->state; 5122 spin_unlock(&io_tree->lock); 5123 5124 lock_extent(io_tree, start, end, &cached_state); 5125 5126 /* 5127 * If still has DELALLOC flag, the extent didn't reach disk, 5128 * and its reserved space won't be freed by delayed_ref. 5129 * So we need to free its reserved space here. 5130 * (Refer to comment in btrfs_invalidate_folio, case 2) 5131 * 5132 * Note, end is the bytenr of last byte, so we need + 1 here. 5133 */ 5134 if (state_flags & EXTENT_DELALLOC) 5135 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, 5136 end - start + 1); 5137 5138 clear_extent_bit(io_tree, start, end, 5139 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, 5140 &cached_state); 5141 5142 cond_resched(); 5143 spin_lock(&io_tree->lock); 5144 } 5145 spin_unlock(&io_tree->lock); 5146 } 5147 5148 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5149 struct btrfs_block_rsv *rsv) 5150 { 5151 struct btrfs_fs_info *fs_info = root->fs_info; 5152 struct btrfs_trans_handle *trans; 5153 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1); 5154 int ret; 5155 5156 /* 5157 * Eviction should be taking place at some place safe because of our 5158 * delayed iputs. However the normal flushing code will run delayed 5159 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5160 * 5161 * We reserve the delayed_refs_extra here again because we can't use 5162 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5163 * above. We reserve our extra bit here because we generate a ton of 5164 * delayed refs activity by truncating. 5165 * 5166 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can, 5167 * if we fail to make this reservation we can re-try without the 5168 * delayed_refs_extra so we can make some forward progress. 5169 */ 5170 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, 5171 BTRFS_RESERVE_FLUSH_EVICT); 5172 if (ret) { 5173 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, 5174 BTRFS_RESERVE_FLUSH_EVICT); 5175 if (ret) { 5176 btrfs_warn(fs_info, 5177 "could not allocate space for delete; will truncate on mount"); 5178 return ERR_PTR(-ENOSPC); 5179 } 5180 delayed_refs_extra = 0; 5181 } 5182 5183 trans = btrfs_join_transaction(root); 5184 if (IS_ERR(trans)) 5185 return trans; 5186 5187 if (delayed_refs_extra) { 5188 trans->block_rsv = &fs_info->trans_block_rsv; 5189 trans->bytes_reserved = delayed_refs_extra; 5190 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5191 delayed_refs_extra, true); 5192 } 5193 return trans; 5194 } 5195 5196 void btrfs_evict_inode(struct inode *inode) 5197 { 5198 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5199 struct btrfs_trans_handle *trans; 5200 struct btrfs_root *root = BTRFS_I(inode)->root; 5201 struct btrfs_block_rsv *rsv = NULL; 5202 int ret; 5203 5204 trace_btrfs_inode_evict(inode); 5205 5206 if (!root) { 5207 fsverity_cleanup_inode(inode); 5208 clear_inode(inode); 5209 return; 5210 } 5211 5212 evict_inode_truncate_pages(inode); 5213 5214 if (inode->i_nlink && 5215 ((btrfs_root_refs(&root->root_item) != 0 && 5216 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5217 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5218 goto out; 5219 5220 if (is_bad_inode(inode)) 5221 goto out; 5222 5223 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5224 goto out; 5225 5226 if (inode->i_nlink > 0) { 5227 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5228 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5229 goto out; 5230 } 5231 5232 /* 5233 * This makes sure the inode item in tree is uptodate and the space for 5234 * the inode update is released. 5235 */ 5236 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5237 if (ret) 5238 goto out; 5239 5240 /* 5241 * This drops any pending insert or delete operations we have for this 5242 * inode. We could have a delayed dir index deletion queued up, but 5243 * we're removing the inode completely so that'll be taken care of in 5244 * the truncate. 5245 */ 5246 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 5247 5248 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5249 if (!rsv) 5250 goto out; 5251 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5252 rsv->failfast = true; 5253 5254 btrfs_i_size_write(BTRFS_I(inode), 0); 5255 5256 while (1) { 5257 struct btrfs_truncate_control control = { 5258 .inode = BTRFS_I(inode), 5259 .ino = btrfs_ino(BTRFS_I(inode)), 5260 .new_size = 0, 5261 .min_type = 0, 5262 }; 5263 5264 trans = evict_refill_and_join(root, rsv); 5265 if (IS_ERR(trans)) 5266 goto out; 5267 5268 trans->block_rsv = rsv; 5269 5270 ret = btrfs_truncate_inode_items(trans, root, &control); 5271 trans->block_rsv = &fs_info->trans_block_rsv; 5272 btrfs_end_transaction(trans); 5273 /* 5274 * We have not added new delayed items for our inode after we 5275 * have flushed its delayed items, so no need to throttle on 5276 * delayed items. However we have modified extent buffers. 5277 */ 5278 btrfs_btree_balance_dirty_nodelay(fs_info); 5279 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5280 goto out; 5281 else if (!ret) 5282 break; 5283 } 5284 5285 /* 5286 * Errors here aren't a big deal, it just means we leave orphan items in 5287 * the tree. They will be cleaned up on the next mount. If the inode 5288 * number gets reused, cleanup deletes the orphan item without doing 5289 * anything, and unlink reuses the existing orphan item. 5290 * 5291 * If it turns out that we are dropping too many of these, we might want 5292 * to add a mechanism for retrying these after a commit. 5293 */ 5294 trans = evict_refill_and_join(root, rsv); 5295 if (!IS_ERR(trans)) { 5296 trans->block_rsv = rsv; 5297 btrfs_orphan_del(trans, BTRFS_I(inode)); 5298 trans->block_rsv = &fs_info->trans_block_rsv; 5299 btrfs_end_transaction(trans); 5300 } 5301 5302 out: 5303 btrfs_free_block_rsv(fs_info, rsv); 5304 /* 5305 * If we didn't successfully delete, the orphan item will still be in 5306 * the tree and we'll retry on the next mount. Again, we might also want 5307 * to retry these periodically in the future. 5308 */ 5309 btrfs_remove_delayed_node(BTRFS_I(inode)); 5310 fsverity_cleanup_inode(inode); 5311 clear_inode(inode); 5312 } 5313 5314 /* 5315 * Return the key found in the dir entry in the location pointer, fill @type 5316 * with BTRFS_FT_*, and return 0. 5317 * 5318 * If no dir entries were found, returns -ENOENT. 5319 * If found a corrupted location in dir entry, returns -EUCLEAN. 5320 */ 5321 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, 5322 struct btrfs_key *location, u8 *type) 5323 { 5324 struct btrfs_dir_item *di; 5325 struct btrfs_path *path; 5326 struct btrfs_root *root = dir->root; 5327 int ret = 0; 5328 struct fscrypt_name fname; 5329 5330 path = btrfs_alloc_path(); 5331 if (!path) 5332 return -ENOMEM; 5333 5334 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); 5335 if (ret < 0) 5336 goto out; 5337 /* 5338 * fscrypt_setup_filename() should never return a positive value, but 5339 * gcc on sparc/parisc thinks it can, so assert that doesn't happen. 5340 */ 5341 ASSERT(ret == 0); 5342 5343 /* This needs to handle no-key deletions later on */ 5344 5345 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), 5346 &fname.disk_name, 0); 5347 if (IS_ERR_OR_NULL(di)) { 5348 ret = di ? PTR_ERR(di) : -ENOENT; 5349 goto out; 5350 } 5351 5352 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5353 if (location->type != BTRFS_INODE_ITEM_KEY && 5354 location->type != BTRFS_ROOT_ITEM_KEY) { 5355 ret = -EUCLEAN; 5356 btrfs_warn(root->fs_info, 5357 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5358 __func__, fname.disk_name.name, btrfs_ino(dir), 5359 location->objectid, location->type, location->offset); 5360 } 5361 if (!ret) 5362 *type = btrfs_dir_ftype(path->nodes[0], di); 5363 out: 5364 fscrypt_free_filename(&fname); 5365 btrfs_free_path(path); 5366 return ret; 5367 } 5368 5369 /* 5370 * when we hit a tree root in a directory, the btrfs part of the inode 5371 * needs to be changed to reflect the root directory of the tree root. This 5372 * is kind of like crossing a mount point. 5373 */ 5374 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5375 struct btrfs_inode *dir, 5376 struct dentry *dentry, 5377 struct btrfs_key *location, 5378 struct btrfs_root **sub_root) 5379 { 5380 struct btrfs_path *path; 5381 struct btrfs_root *new_root; 5382 struct btrfs_root_ref *ref; 5383 struct extent_buffer *leaf; 5384 struct btrfs_key key; 5385 int ret; 5386 int err = 0; 5387 struct fscrypt_name fname; 5388 5389 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); 5390 if (ret) 5391 return ret; 5392 5393 path = btrfs_alloc_path(); 5394 if (!path) { 5395 err = -ENOMEM; 5396 goto out; 5397 } 5398 5399 err = -ENOENT; 5400 key.objectid = dir->root->root_key.objectid; 5401 key.type = BTRFS_ROOT_REF_KEY; 5402 key.offset = location->objectid; 5403 5404 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5405 if (ret) { 5406 if (ret < 0) 5407 err = ret; 5408 goto out; 5409 } 5410 5411 leaf = path->nodes[0]; 5412 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5413 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5414 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len) 5415 goto out; 5416 5417 ret = memcmp_extent_buffer(leaf, fname.disk_name.name, 5418 (unsigned long)(ref + 1), fname.disk_name.len); 5419 if (ret) 5420 goto out; 5421 5422 btrfs_release_path(path); 5423 5424 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5425 if (IS_ERR(new_root)) { 5426 err = PTR_ERR(new_root); 5427 goto out; 5428 } 5429 5430 *sub_root = new_root; 5431 location->objectid = btrfs_root_dirid(&new_root->root_item); 5432 location->type = BTRFS_INODE_ITEM_KEY; 5433 location->offset = 0; 5434 err = 0; 5435 out: 5436 btrfs_free_path(path); 5437 fscrypt_free_filename(&fname); 5438 return err; 5439 } 5440 5441 static void inode_tree_add(struct btrfs_inode *inode) 5442 { 5443 struct btrfs_root *root = inode->root; 5444 struct btrfs_inode *entry; 5445 struct rb_node **p; 5446 struct rb_node *parent; 5447 struct rb_node *new = &inode->rb_node; 5448 u64 ino = btrfs_ino(inode); 5449 5450 if (inode_unhashed(&inode->vfs_inode)) 5451 return; 5452 parent = NULL; 5453 spin_lock(&root->inode_lock); 5454 p = &root->inode_tree.rb_node; 5455 while (*p) { 5456 parent = *p; 5457 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5458 5459 if (ino < btrfs_ino(entry)) 5460 p = &parent->rb_left; 5461 else if (ino > btrfs_ino(entry)) 5462 p = &parent->rb_right; 5463 else { 5464 WARN_ON(!(entry->vfs_inode.i_state & 5465 (I_WILL_FREE | I_FREEING))); 5466 rb_replace_node(parent, new, &root->inode_tree); 5467 RB_CLEAR_NODE(parent); 5468 spin_unlock(&root->inode_lock); 5469 return; 5470 } 5471 } 5472 rb_link_node(new, parent, p); 5473 rb_insert_color(new, &root->inode_tree); 5474 spin_unlock(&root->inode_lock); 5475 } 5476 5477 static void inode_tree_del(struct btrfs_inode *inode) 5478 { 5479 struct btrfs_root *root = inode->root; 5480 int empty = 0; 5481 5482 spin_lock(&root->inode_lock); 5483 if (!RB_EMPTY_NODE(&inode->rb_node)) { 5484 rb_erase(&inode->rb_node, &root->inode_tree); 5485 RB_CLEAR_NODE(&inode->rb_node); 5486 empty = RB_EMPTY_ROOT(&root->inode_tree); 5487 } 5488 spin_unlock(&root->inode_lock); 5489 5490 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5491 spin_lock(&root->inode_lock); 5492 empty = RB_EMPTY_ROOT(&root->inode_tree); 5493 spin_unlock(&root->inode_lock); 5494 if (empty) 5495 btrfs_add_dead_root(root); 5496 } 5497 } 5498 5499 5500 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5501 { 5502 struct btrfs_iget_args *args = p; 5503 5504 inode->i_ino = args->ino; 5505 BTRFS_I(inode)->location.objectid = args->ino; 5506 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5507 BTRFS_I(inode)->location.offset = 0; 5508 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5509 BUG_ON(args->root && !BTRFS_I(inode)->root); 5510 5511 if (args->root && args->root == args->root->fs_info->tree_root && 5512 args->ino != BTRFS_BTREE_INODE_OBJECTID) 5513 set_bit(BTRFS_INODE_FREE_SPACE_INODE, 5514 &BTRFS_I(inode)->runtime_flags); 5515 return 0; 5516 } 5517 5518 static int btrfs_find_actor(struct inode *inode, void *opaque) 5519 { 5520 struct btrfs_iget_args *args = opaque; 5521 5522 return args->ino == BTRFS_I(inode)->location.objectid && 5523 args->root == BTRFS_I(inode)->root; 5524 } 5525 5526 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5527 struct btrfs_root *root) 5528 { 5529 struct inode *inode; 5530 struct btrfs_iget_args args; 5531 unsigned long hashval = btrfs_inode_hash(ino, root); 5532 5533 args.ino = ino; 5534 args.root = root; 5535 5536 inode = iget5_locked(s, hashval, btrfs_find_actor, 5537 btrfs_init_locked_inode, 5538 (void *)&args); 5539 return inode; 5540 } 5541 5542 /* 5543 * Get an inode object given its inode number and corresponding root. 5544 * Path can be preallocated to prevent recursing back to iget through 5545 * allocator. NULL is also valid but may require an additional allocation 5546 * later. 5547 */ 5548 struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5549 struct btrfs_root *root, struct btrfs_path *path) 5550 { 5551 struct inode *inode; 5552 5553 inode = btrfs_iget_locked(s, ino, root); 5554 if (!inode) 5555 return ERR_PTR(-ENOMEM); 5556 5557 if (inode->i_state & I_NEW) { 5558 int ret; 5559 5560 ret = btrfs_read_locked_inode(inode, path); 5561 if (!ret) { 5562 inode_tree_add(BTRFS_I(inode)); 5563 unlock_new_inode(inode); 5564 } else { 5565 iget_failed(inode); 5566 /* 5567 * ret > 0 can come from btrfs_search_slot called by 5568 * btrfs_read_locked_inode, this means the inode item 5569 * was not found. 5570 */ 5571 if (ret > 0) 5572 ret = -ENOENT; 5573 inode = ERR_PTR(ret); 5574 } 5575 } 5576 5577 return inode; 5578 } 5579 5580 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5581 { 5582 return btrfs_iget_path(s, ino, root, NULL); 5583 } 5584 5585 static struct inode *new_simple_dir(struct inode *dir, 5586 struct btrfs_key *key, 5587 struct btrfs_root *root) 5588 { 5589 struct inode *inode = new_inode(dir->i_sb); 5590 5591 if (!inode) 5592 return ERR_PTR(-ENOMEM); 5593 5594 BTRFS_I(inode)->root = btrfs_grab_root(root); 5595 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5596 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5597 5598 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5599 /* 5600 * We only need lookup, the rest is read-only and there's no inode 5601 * associated with the dentry 5602 */ 5603 inode->i_op = &simple_dir_inode_operations; 5604 inode->i_opflags &= ~IOP_XATTR; 5605 inode->i_fop = &simple_dir_operations; 5606 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5607 inode->i_mtime = inode_set_ctime_current(inode); 5608 inode->i_atime = dir->i_atime; 5609 BTRFS_I(inode)->i_otime = inode->i_mtime; 5610 inode->i_uid = dir->i_uid; 5611 inode->i_gid = dir->i_gid; 5612 5613 return inode; 5614 } 5615 5616 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN); 5617 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE); 5618 static_assert(BTRFS_FT_DIR == FT_DIR); 5619 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV); 5620 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV); 5621 static_assert(BTRFS_FT_FIFO == FT_FIFO); 5622 static_assert(BTRFS_FT_SOCK == FT_SOCK); 5623 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK); 5624 5625 static inline u8 btrfs_inode_type(struct inode *inode) 5626 { 5627 return fs_umode_to_ftype(inode->i_mode); 5628 } 5629 5630 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5631 { 5632 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5633 struct inode *inode; 5634 struct btrfs_root *root = BTRFS_I(dir)->root; 5635 struct btrfs_root *sub_root = root; 5636 struct btrfs_key location; 5637 u8 di_type = 0; 5638 int ret = 0; 5639 5640 if (dentry->d_name.len > BTRFS_NAME_LEN) 5641 return ERR_PTR(-ENAMETOOLONG); 5642 5643 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type); 5644 if (ret < 0) 5645 return ERR_PTR(ret); 5646 5647 if (location.type == BTRFS_INODE_ITEM_KEY) { 5648 inode = btrfs_iget(dir->i_sb, location.objectid, root); 5649 if (IS_ERR(inode)) 5650 return inode; 5651 5652 /* Do extra check against inode mode with di_type */ 5653 if (btrfs_inode_type(inode) != di_type) { 5654 btrfs_crit(fs_info, 5655 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5656 inode->i_mode, btrfs_inode_type(inode), 5657 di_type); 5658 iput(inode); 5659 return ERR_PTR(-EUCLEAN); 5660 } 5661 return inode; 5662 } 5663 5664 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry, 5665 &location, &sub_root); 5666 if (ret < 0) { 5667 if (ret != -ENOENT) 5668 inode = ERR_PTR(ret); 5669 else 5670 inode = new_simple_dir(dir, &location, root); 5671 } else { 5672 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5673 btrfs_put_root(sub_root); 5674 5675 if (IS_ERR(inode)) 5676 return inode; 5677 5678 down_read(&fs_info->cleanup_work_sem); 5679 if (!sb_rdonly(inode->i_sb)) 5680 ret = btrfs_orphan_cleanup(sub_root); 5681 up_read(&fs_info->cleanup_work_sem); 5682 if (ret) { 5683 iput(inode); 5684 inode = ERR_PTR(ret); 5685 } 5686 } 5687 5688 return inode; 5689 } 5690 5691 static int btrfs_dentry_delete(const struct dentry *dentry) 5692 { 5693 struct btrfs_root *root; 5694 struct inode *inode = d_inode(dentry); 5695 5696 if (!inode && !IS_ROOT(dentry)) 5697 inode = d_inode(dentry->d_parent); 5698 5699 if (inode) { 5700 root = BTRFS_I(inode)->root; 5701 if (btrfs_root_refs(&root->root_item) == 0) 5702 return 1; 5703 5704 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5705 return 1; 5706 } 5707 return 0; 5708 } 5709 5710 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5711 unsigned int flags) 5712 { 5713 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5714 5715 if (inode == ERR_PTR(-ENOENT)) 5716 inode = NULL; 5717 return d_splice_alias(inode, dentry); 5718 } 5719 5720 /* 5721 * Find the highest existing sequence number in a directory and then set the 5722 * in-memory index_cnt variable to the first free sequence number. 5723 */ 5724 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5725 { 5726 struct btrfs_root *root = inode->root; 5727 struct btrfs_key key, found_key; 5728 struct btrfs_path *path; 5729 struct extent_buffer *leaf; 5730 int ret; 5731 5732 key.objectid = btrfs_ino(inode); 5733 key.type = BTRFS_DIR_INDEX_KEY; 5734 key.offset = (u64)-1; 5735 5736 path = btrfs_alloc_path(); 5737 if (!path) 5738 return -ENOMEM; 5739 5740 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5741 if (ret < 0) 5742 goto out; 5743 /* FIXME: we should be able to handle this */ 5744 if (ret == 0) 5745 goto out; 5746 ret = 0; 5747 5748 if (path->slots[0] == 0) { 5749 inode->index_cnt = BTRFS_DIR_START_INDEX; 5750 goto out; 5751 } 5752 5753 path->slots[0]--; 5754 5755 leaf = path->nodes[0]; 5756 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5757 5758 if (found_key.objectid != btrfs_ino(inode) || 5759 found_key.type != BTRFS_DIR_INDEX_KEY) { 5760 inode->index_cnt = BTRFS_DIR_START_INDEX; 5761 goto out; 5762 } 5763 5764 inode->index_cnt = found_key.offset + 1; 5765 out: 5766 btrfs_free_path(path); 5767 return ret; 5768 } 5769 5770 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5771 { 5772 if (dir->index_cnt == (u64)-1) { 5773 int ret; 5774 5775 ret = btrfs_inode_delayed_dir_index_count(dir); 5776 if (ret) { 5777 ret = btrfs_set_inode_index_count(dir); 5778 if (ret) 5779 return ret; 5780 } 5781 } 5782 5783 *index = dir->index_cnt; 5784 5785 return 0; 5786 } 5787 5788 /* 5789 * All this infrastructure exists because dir_emit can fault, and we are holding 5790 * the tree lock when doing readdir. For now just allocate a buffer and copy 5791 * our information into that, and then dir_emit from the buffer. This is 5792 * similar to what NFS does, only we don't keep the buffer around in pagecache 5793 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5794 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5795 * tree lock. 5796 */ 5797 static int btrfs_opendir(struct inode *inode, struct file *file) 5798 { 5799 struct btrfs_file_private *private; 5800 u64 last_index; 5801 int ret; 5802 5803 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5804 if (ret) 5805 return ret; 5806 5807 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5808 if (!private) 5809 return -ENOMEM; 5810 private->last_index = last_index; 5811 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5812 if (!private->filldir_buf) { 5813 kfree(private); 5814 return -ENOMEM; 5815 } 5816 file->private_data = private; 5817 return 0; 5818 } 5819 5820 struct dir_entry { 5821 u64 ino; 5822 u64 offset; 5823 unsigned type; 5824 int name_len; 5825 }; 5826 5827 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5828 { 5829 while (entries--) { 5830 struct dir_entry *entry = addr; 5831 char *name = (char *)(entry + 1); 5832 5833 ctx->pos = get_unaligned(&entry->offset); 5834 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5835 get_unaligned(&entry->ino), 5836 get_unaligned(&entry->type))) 5837 return 1; 5838 addr += sizeof(struct dir_entry) + 5839 get_unaligned(&entry->name_len); 5840 ctx->pos++; 5841 } 5842 return 0; 5843 } 5844 5845 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5846 { 5847 struct inode *inode = file_inode(file); 5848 struct btrfs_root *root = BTRFS_I(inode)->root; 5849 struct btrfs_file_private *private = file->private_data; 5850 struct btrfs_dir_item *di; 5851 struct btrfs_key key; 5852 struct btrfs_key found_key; 5853 struct btrfs_path *path; 5854 void *addr; 5855 LIST_HEAD(ins_list); 5856 LIST_HEAD(del_list); 5857 int ret; 5858 char *name_ptr; 5859 int name_len; 5860 int entries = 0; 5861 int total_len = 0; 5862 bool put = false; 5863 struct btrfs_key location; 5864 5865 if (!dir_emit_dots(file, ctx)) 5866 return 0; 5867 5868 path = btrfs_alloc_path(); 5869 if (!path) 5870 return -ENOMEM; 5871 5872 addr = private->filldir_buf; 5873 path->reada = READA_FORWARD; 5874 5875 put = btrfs_readdir_get_delayed_items(inode, private->last_index, 5876 &ins_list, &del_list); 5877 5878 again: 5879 key.type = BTRFS_DIR_INDEX_KEY; 5880 key.offset = ctx->pos; 5881 key.objectid = btrfs_ino(BTRFS_I(inode)); 5882 5883 btrfs_for_each_slot(root, &key, &found_key, path, ret) { 5884 struct dir_entry *entry; 5885 struct extent_buffer *leaf = path->nodes[0]; 5886 u8 ftype; 5887 5888 if (found_key.objectid != key.objectid) 5889 break; 5890 if (found_key.type != BTRFS_DIR_INDEX_KEY) 5891 break; 5892 if (found_key.offset < ctx->pos) 5893 continue; 5894 if (found_key.offset > private->last_index) 5895 break; 5896 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 5897 continue; 5898 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5899 name_len = btrfs_dir_name_len(leaf, di); 5900 if ((total_len + sizeof(struct dir_entry) + name_len) >= 5901 PAGE_SIZE) { 5902 btrfs_release_path(path); 5903 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5904 if (ret) 5905 goto nopos; 5906 addr = private->filldir_buf; 5907 entries = 0; 5908 total_len = 0; 5909 goto again; 5910 } 5911 5912 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di)); 5913 entry = addr; 5914 name_ptr = (char *)(entry + 1); 5915 read_extent_buffer(leaf, name_ptr, 5916 (unsigned long)(di + 1), name_len); 5917 put_unaligned(name_len, &entry->name_len); 5918 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); 5919 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5920 put_unaligned(location.objectid, &entry->ino); 5921 put_unaligned(found_key.offset, &entry->offset); 5922 entries++; 5923 addr += sizeof(struct dir_entry) + name_len; 5924 total_len += sizeof(struct dir_entry) + name_len; 5925 } 5926 /* Catch error encountered during iteration */ 5927 if (ret < 0) 5928 goto err; 5929 5930 btrfs_release_path(path); 5931 5932 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 5933 if (ret) 5934 goto nopos; 5935 5936 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5937 if (ret) 5938 goto nopos; 5939 5940 /* 5941 * Stop new entries from being returned after we return the last 5942 * entry. 5943 * 5944 * New directory entries are assigned a strictly increasing 5945 * offset. This means that new entries created during readdir 5946 * are *guaranteed* to be seen in the future by that readdir. 5947 * This has broken buggy programs which operate on names as 5948 * they're returned by readdir. Until we re-use freed offsets 5949 * we have this hack to stop new entries from being returned 5950 * under the assumption that they'll never reach this huge 5951 * offset. 5952 * 5953 * This is being careful not to overflow 32bit loff_t unless the 5954 * last entry requires it because doing so has broken 32bit apps 5955 * in the past. 5956 */ 5957 if (ctx->pos >= INT_MAX) 5958 ctx->pos = LLONG_MAX; 5959 else 5960 ctx->pos = INT_MAX; 5961 nopos: 5962 ret = 0; 5963 err: 5964 if (put) 5965 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 5966 btrfs_free_path(path); 5967 return ret; 5968 } 5969 5970 /* 5971 * This is somewhat expensive, updating the tree every time the 5972 * inode changes. But, it is most likely to find the inode in cache. 5973 * FIXME, needs more benchmarking...there are no reasons other than performance 5974 * to keep or drop this code. 5975 */ 5976 static int btrfs_dirty_inode(struct btrfs_inode *inode) 5977 { 5978 struct btrfs_root *root = inode->root; 5979 struct btrfs_fs_info *fs_info = root->fs_info; 5980 struct btrfs_trans_handle *trans; 5981 int ret; 5982 5983 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) 5984 return 0; 5985 5986 trans = btrfs_join_transaction(root); 5987 if (IS_ERR(trans)) 5988 return PTR_ERR(trans); 5989 5990 ret = btrfs_update_inode(trans, root, inode); 5991 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { 5992 /* whoops, lets try again with the full transaction */ 5993 btrfs_end_transaction(trans); 5994 trans = btrfs_start_transaction(root, 1); 5995 if (IS_ERR(trans)) 5996 return PTR_ERR(trans); 5997 5998 ret = btrfs_update_inode(trans, root, inode); 5999 } 6000 btrfs_end_transaction(trans); 6001 if (inode->delayed_node) 6002 btrfs_balance_delayed_items(fs_info); 6003 6004 return ret; 6005 } 6006 6007 /* 6008 * This is a copy of file_update_time. We need this so we can return error on 6009 * ENOSPC for updating the inode in the case of file write and mmap writes. 6010 */ 6011 static int btrfs_update_time(struct inode *inode, int flags) 6012 { 6013 struct btrfs_root *root = BTRFS_I(inode)->root; 6014 bool dirty = flags & ~S_VERSION; 6015 6016 if (btrfs_root_readonly(root)) 6017 return -EROFS; 6018 6019 dirty = inode_update_timestamps(inode, flags); 6020 return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6021 } 6022 6023 /* 6024 * helper to find a free sequence number in a given directory. This current 6025 * code is very simple, later versions will do smarter things in the btree 6026 */ 6027 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6028 { 6029 int ret = 0; 6030 6031 if (dir->index_cnt == (u64)-1) { 6032 ret = btrfs_inode_delayed_dir_index_count(dir); 6033 if (ret) { 6034 ret = btrfs_set_inode_index_count(dir); 6035 if (ret) 6036 return ret; 6037 } 6038 } 6039 6040 *index = dir->index_cnt; 6041 dir->index_cnt++; 6042 6043 return ret; 6044 } 6045 6046 static int btrfs_insert_inode_locked(struct inode *inode) 6047 { 6048 struct btrfs_iget_args args; 6049 6050 args.ino = BTRFS_I(inode)->location.objectid; 6051 args.root = BTRFS_I(inode)->root; 6052 6053 return insert_inode_locked4(inode, 6054 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6055 btrfs_find_actor, &args); 6056 } 6057 6058 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, 6059 unsigned int *trans_num_items) 6060 { 6061 struct inode *dir = args->dir; 6062 struct inode *inode = args->inode; 6063 int ret; 6064 6065 if (!args->orphan) { 6066 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, 6067 &args->fname); 6068 if (ret) 6069 return ret; 6070 } 6071 6072 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); 6073 if (ret) { 6074 fscrypt_free_filename(&args->fname); 6075 return ret; 6076 } 6077 6078 /* 1 to add inode item */ 6079 *trans_num_items = 1; 6080 /* 1 to add compression property */ 6081 if (BTRFS_I(dir)->prop_compress) 6082 (*trans_num_items)++; 6083 /* 1 to add default ACL xattr */ 6084 if (args->default_acl) 6085 (*trans_num_items)++; 6086 /* 1 to add access ACL xattr */ 6087 if (args->acl) 6088 (*trans_num_items)++; 6089 #ifdef CONFIG_SECURITY 6090 /* 1 to add LSM xattr */ 6091 if (dir->i_security) 6092 (*trans_num_items)++; 6093 #endif 6094 if (args->orphan) { 6095 /* 1 to add orphan item */ 6096 (*trans_num_items)++; 6097 } else { 6098 /* 6099 * 1 to add dir item 6100 * 1 to add dir index 6101 * 1 to update parent inode item 6102 * 6103 * No need for 1 unit for the inode ref item because it is 6104 * inserted in a batch together with the inode item at 6105 * btrfs_create_new_inode(). 6106 */ 6107 *trans_num_items += 3; 6108 } 6109 return 0; 6110 } 6111 6112 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) 6113 { 6114 posix_acl_release(args->acl); 6115 posix_acl_release(args->default_acl); 6116 fscrypt_free_filename(&args->fname); 6117 } 6118 6119 /* 6120 * Inherit flags from the parent inode. 6121 * 6122 * Currently only the compression flags and the cow flags are inherited. 6123 */ 6124 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir) 6125 { 6126 unsigned int flags; 6127 6128 flags = dir->flags; 6129 6130 if (flags & BTRFS_INODE_NOCOMPRESS) { 6131 inode->flags &= ~BTRFS_INODE_COMPRESS; 6132 inode->flags |= BTRFS_INODE_NOCOMPRESS; 6133 } else if (flags & BTRFS_INODE_COMPRESS) { 6134 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 6135 inode->flags |= BTRFS_INODE_COMPRESS; 6136 } 6137 6138 if (flags & BTRFS_INODE_NODATACOW) { 6139 inode->flags |= BTRFS_INODE_NODATACOW; 6140 if (S_ISREG(inode->vfs_inode.i_mode)) 6141 inode->flags |= BTRFS_INODE_NODATASUM; 6142 } 6143 6144 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); 6145 } 6146 6147 int btrfs_create_new_inode(struct btrfs_trans_handle *trans, 6148 struct btrfs_new_inode_args *args) 6149 { 6150 struct inode *dir = args->dir; 6151 struct inode *inode = args->inode; 6152 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; 6153 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6154 struct btrfs_root *root; 6155 struct btrfs_inode_item *inode_item; 6156 struct btrfs_key *location; 6157 struct btrfs_path *path; 6158 u64 objectid; 6159 struct btrfs_inode_ref *ref; 6160 struct btrfs_key key[2]; 6161 u32 sizes[2]; 6162 struct btrfs_item_batch batch; 6163 unsigned long ptr; 6164 int ret; 6165 6166 path = btrfs_alloc_path(); 6167 if (!path) 6168 return -ENOMEM; 6169 6170 if (!args->subvol) 6171 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); 6172 root = BTRFS_I(inode)->root; 6173 6174 ret = btrfs_get_free_objectid(root, &objectid); 6175 if (ret) 6176 goto out; 6177 inode->i_ino = objectid; 6178 6179 if (args->orphan) { 6180 /* 6181 * O_TMPFILE, set link count to 0, so that after this point, we 6182 * fill in an inode item with the correct link count. 6183 */ 6184 set_nlink(inode, 0); 6185 } else { 6186 trace_btrfs_inode_request(dir); 6187 6188 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); 6189 if (ret) 6190 goto out; 6191 } 6192 /* index_cnt is ignored for everything but a dir. */ 6193 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; 6194 BTRFS_I(inode)->generation = trans->transid; 6195 inode->i_generation = BTRFS_I(inode)->generation; 6196 6197 /* 6198 * Subvolumes don't inherit flags from their parent directory. 6199 * Originally this was probably by accident, but we probably can't 6200 * change it now without compatibility issues. 6201 */ 6202 if (!args->subvol) 6203 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir)); 6204 6205 if (S_ISREG(inode->i_mode)) { 6206 if (btrfs_test_opt(fs_info, NODATASUM)) 6207 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6208 if (btrfs_test_opt(fs_info, NODATACOW)) 6209 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6210 BTRFS_INODE_NODATASUM; 6211 } 6212 6213 location = &BTRFS_I(inode)->location; 6214 location->objectid = objectid; 6215 location->offset = 0; 6216 location->type = BTRFS_INODE_ITEM_KEY; 6217 6218 ret = btrfs_insert_inode_locked(inode); 6219 if (ret < 0) { 6220 if (!args->orphan) 6221 BTRFS_I(dir)->index_cnt--; 6222 goto out; 6223 } 6224 6225 /* 6226 * We could have gotten an inode number from somebody who was fsynced 6227 * and then removed in this same transaction, so let's just set full 6228 * sync since it will be a full sync anyway and this will blow away the 6229 * old info in the log. 6230 */ 6231 btrfs_set_inode_full_sync(BTRFS_I(inode)); 6232 6233 key[0].objectid = objectid; 6234 key[0].type = BTRFS_INODE_ITEM_KEY; 6235 key[0].offset = 0; 6236 6237 sizes[0] = sizeof(struct btrfs_inode_item); 6238 6239 if (!args->orphan) { 6240 /* 6241 * Start new inodes with an inode_ref. This is slightly more 6242 * efficient for small numbers of hard links since they will 6243 * be packed into one item. Extended refs will kick in if we 6244 * add more hard links than can fit in the ref item. 6245 */ 6246 key[1].objectid = objectid; 6247 key[1].type = BTRFS_INODE_REF_KEY; 6248 if (args->subvol) { 6249 key[1].offset = objectid; 6250 sizes[1] = 2 + sizeof(*ref); 6251 } else { 6252 key[1].offset = btrfs_ino(BTRFS_I(dir)); 6253 sizes[1] = name->len + sizeof(*ref); 6254 } 6255 } 6256 6257 batch.keys = &key[0]; 6258 batch.data_sizes = &sizes[0]; 6259 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); 6260 batch.nr = args->orphan ? 1 : 2; 6261 ret = btrfs_insert_empty_items(trans, root, path, &batch); 6262 if (ret != 0) { 6263 btrfs_abort_transaction(trans, ret); 6264 goto discard; 6265 } 6266 6267 inode->i_mtime = inode_set_ctime_current(inode); 6268 inode->i_atime = inode->i_mtime; 6269 BTRFS_I(inode)->i_otime = inode->i_mtime; 6270 6271 /* 6272 * We're going to fill the inode item now, so at this point the inode 6273 * must be fully initialized. 6274 */ 6275 6276 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6277 struct btrfs_inode_item); 6278 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6279 sizeof(*inode_item)); 6280 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6281 6282 if (!args->orphan) { 6283 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6284 struct btrfs_inode_ref); 6285 ptr = (unsigned long)(ref + 1); 6286 if (args->subvol) { 6287 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); 6288 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); 6289 write_extent_buffer(path->nodes[0], "..", ptr, 2); 6290 } else { 6291 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 6292 name->len); 6293 btrfs_set_inode_ref_index(path->nodes[0], ref, 6294 BTRFS_I(inode)->dir_index); 6295 write_extent_buffer(path->nodes[0], name->name, ptr, 6296 name->len); 6297 } 6298 } 6299 6300 btrfs_mark_buffer_dirty(path->nodes[0]); 6301 /* 6302 * We don't need the path anymore, plus inheriting properties, adding 6303 * ACLs, security xattrs, orphan item or adding the link, will result in 6304 * allocating yet another path. So just free our path. 6305 */ 6306 btrfs_free_path(path); 6307 path = NULL; 6308 6309 if (args->subvol) { 6310 struct inode *parent; 6311 6312 /* 6313 * Subvolumes inherit properties from their parent subvolume, 6314 * not the directory they were created in. 6315 */ 6316 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, 6317 BTRFS_I(dir)->root); 6318 if (IS_ERR(parent)) { 6319 ret = PTR_ERR(parent); 6320 } else { 6321 ret = btrfs_inode_inherit_props(trans, inode, parent); 6322 iput(parent); 6323 } 6324 } else { 6325 ret = btrfs_inode_inherit_props(trans, inode, dir); 6326 } 6327 if (ret) { 6328 btrfs_err(fs_info, 6329 "error inheriting props for ino %llu (root %llu): %d", 6330 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, 6331 ret); 6332 } 6333 6334 /* 6335 * Subvolumes don't inherit ACLs or get passed to the LSM. This is 6336 * probably a bug. 6337 */ 6338 if (!args->subvol) { 6339 ret = btrfs_init_inode_security(trans, args); 6340 if (ret) { 6341 btrfs_abort_transaction(trans, ret); 6342 goto discard; 6343 } 6344 } 6345 6346 inode_tree_add(BTRFS_I(inode)); 6347 6348 trace_btrfs_inode_new(inode); 6349 btrfs_set_inode_last_trans(trans, BTRFS_I(inode)); 6350 6351 btrfs_update_root_times(trans, root); 6352 6353 if (args->orphan) { 6354 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 6355 } else { 6356 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 6357 0, BTRFS_I(inode)->dir_index); 6358 } 6359 if (ret) { 6360 btrfs_abort_transaction(trans, ret); 6361 goto discard; 6362 } 6363 6364 return 0; 6365 6366 discard: 6367 /* 6368 * discard_new_inode() calls iput(), but the caller owns the reference 6369 * to the inode. 6370 */ 6371 ihold(inode); 6372 discard_new_inode(inode); 6373 out: 6374 btrfs_free_path(path); 6375 return ret; 6376 } 6377 6378 /* 6379 * utility function to add 'inode' into 'parent_inode' with 6380 * a give name and a given sequence number. 6381 * if 'add_backref' is true, also insert a backref from the 6382 * inode to the parent directory. 6383 */ 6384 int btrfs_add_link(struct btrfs_trans_handle *trans, 6385 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6386 const struct fscrypt_str *name, int add_backref, u64 index) 6387 { 6388 int ret = 0; 6389 struct btrfs_key key; 6390 struct btrfs_root *root = parent_inode->root; 6391 u64 ino = btrfs_ino(inode); 6392 u64 parent_ino = btrfs_ino(parent_inode); 6393 6394 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6395 memcpy(&key, &inode->root->root_key, sizeof(key)); 6396 } else { 6397 key.objectid = ino; 6398 key.type = BTRFS_INODE_ITEM_KEY; 6399 key.offset = 0; 6400 } 6401 6402 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6403 ret = btrfs_add_root_ref(trans, key.objectid, 6404 root->root_key.objectid, parent_ino, 6405 index, name); 6406 } else if (add_backref) { 6407 ret = btrfs_insert_inode_ref(trans, root, name, 6408 ino, parent_ino, index); 6409 } 6410 6411 /* Nothing to clean up yet */ 6412 if (ret) 6413 return ret; 6414 6415 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key, 6416 btrfs_inode_type(&inode->vfs_inode), index); 6417 if (ret == -EEXIST || ret == -EOVERFLOW) 6418 goto fail_dir_item; 6419 else if (ret) { 6420 btrfs_abort_transaction(trans, ret); 6421 return ret; 6422 } 6423 6424 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6425 name->len * 2); 6426 inode_inc_iversion(&parent_inode->vfs_inode); 6427 /* 6428 * If we are replaying a log tree, we do not want to update the mtime 6429 * and ctime of the parent directory with the current time, since the 6430 * log replay procedure is responsible for setting them to their correct 6431 * values (the ones it had when the fsync was done). 6432 */ 6433 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) 6434 parent_inode->vfs_inode.i_mtime = 6435 inode_set_ctime_current(&parent_inode->vfs_inode); 6436 6437 ret = btrfs_update_inode(trans, root, parent_inode); 6438 if (ret) 6439 btrfs_abort_transaction(trans, ret); 6440 return ret; 6441 6442 fail_dir_item: 6443 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6444 u64 local_index; 6445 int err; 6446 err = btrfs_del_root_ref(trans, key.objectid, 6447 root->root_key.objectid, parent_ino, 6448 &local_index, name); 6449 if (err) 6450 btrfs_abort_transaction(trans, err); 6451 } else if (add_backref) { 6452 u64 local_index; 6453 int err; 6454 6455 err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, 6456 &local_index); 6457 if (err) 6458 btrfs_abort_transaction(trans, err); 6459 } 6460 6461 /* Return the original error code */ 6462 return ret; 6463 } 6464 6465 static int btrfs_create_common(struct inode *dir, struct dentry *dentry, 6466 struct inode *inode) 6467 { 6468 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6469 struct btrfs_root *root = BTRFS_I(dir)->root; 6470 struct btrfs_new_inode_args new_inode_args = { 6471 .dir = dir, 6472 .dentry = dentry, 6473 .inode = inode, 6474 }; 6475 unsigned int trans_num_items; 6476 struct btrfs_trans_handle *trans; 6477 int err; 6478 6479 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 6480 if (err) 6481 goto out_inode; 6482 6483 trans = btrfs_start_transaction(root, trans_num_items); 6484 if (IS_ERR(trans)) { 6485 err = PTR_ERR(trans); 6486 goto out_new_inode_args; 6487 } 6488 6489 err = btrfs_create_new_inode(trans, &new_inode_args); 6490 if (!err) 6491 d_instantiate_new(dentry, inode); 6492 6493 btrfs_end_transaction(trans); 6494 btrfs_btree_balance_dirty(fs_info); 6495 out_new_inode_args: 6496 btrfs_new_inode_args_destroy(&new_inode_args); 6497 out_inode: 6498 if (err) 6499 iput(inode); 6500 return err; 6501 } 6502 6503 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir, 6504 struct dentry *dentry, umode_t mode, dev_t rdev) 6505 { 6506 struct inode *inode; 6507 6508 inode = new_inode(dir->i_sb); 6509 if (!inode) 6510 return -ENOMEM; 6511 inode_init_owner(idmap, inode, dir, mode); 6512 inode->i_op = &btrfs_special_inode_operations; 6513 init_special_inode(inode, inode->i_mode, rdev); 6514 return btrfs_create_common(dir, dentry, inode); 6515 } 6516 6517 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir, 6518 struct dentry *dentry, umode_t mode, bool excl) 6519 { 6520 struct inode *inode; 6521 6522 inode = new_inode(dir->i_sb); 6523 if (!inode) 6524 return -ENOMEM; 6525 inode_init_owner(idmap, inode, dir, mode); 6526 inode->i_fop = &btrfs_file_operations; 6527 inode->i_op = &btrfs_file_inode_operations; 6528 inode->i_mapping->a_ops = &btrfs_aops; 6529 return btrfs_create_common(dir, dentry, inode); 6530 } 6531 6532 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6533 struct dentry *dentry) 6534 { 6535 struct btrfs_trans_handle *trans = NULL; 6536 struct btrfs_root *root = BTRFS_I(dir)->root; 6537 struct inode *inode = d_inode(old_dentry); 6538 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6539 struct fscrypt_name fname; 6540 u64 index; 6541 int err; 6542 int drop_inode = 0; 6543 6544 /* do not allow sys_link's with other subvols of the same device */ 6545 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6546 return -EXDEV; 6547 6548 if (inode->i_nlink >= BTRFS_LINK_MAX) 6549 return -EMLINK; 6550 6551 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); 6552 if (err) 6553 goto fail; 6554 6555 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6556 if (err) 6557 goto fail; 6558 6559 /* 6560 * 2 items for inode and inode ref 6561 * 2 items for dir items 6562 * 1 item for parent inode 6563 * 1 item for orphan item deletion if O_TMPFILE 6564 */ 6565 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6566 if (IS_ERR(trans)) { 6567 err = PTR_ERR(trans); 6568 trans = NULL; 6569 goto fail; 6570 } 6571 6572 /* There are several dir indexes for this inode, clear the cache. */ 6573 BTRFS_I(inode)->dir_index = 0ULL; 6574 inc_nlink(inode); 6575 inode_inc_iversion(inode); 6576 inode_set_ctime_current(inode); 6577 ihold(inode); 6578 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6579 6580 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6581 &fname.disk_name, 1, index); 6582 6583 if (err) { 6584 drop_inode = 1; 6585 } else { 6586 struct dentry *parent = dentry->d_parent; 6587 6588 err = btrfs_update_inode(trans, root, BTRFS_I(inode)); 6589 if (err) 6590 goto fail; 6591 if (inode->i_nlink == 1) { 6592 /* 6593 * If new hard link count is 1, it's a file created 6594 * with open(2) O_TMPFILE flag. 6595 */ 6596 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6597 if (err) 6598 goto fail; 6599 } 6600 d_instantiate(dentry, inode); 6601 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent); 6602 } 6603 6604 fail: 6605 fscrypt_free_filename(&fname); 6606 if (trans) 6607 btrfs_end_transaction(trans); 6608 if (drop_inode) { 6609 inode_dec_link_count(inode); 6610 iput(inode); 6611 } 6612 btrfs_btree_balance_dirty(fs_info); 6613 return err; 6614 } 6615 6616 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, 6617 struct dentry *dentry, umode_t mode) 6618 { 6619 struct inode *inode; 6620 6621 inode = new_inode(dir->i_sb); 6622 if (!inode) 6623 return -ENOMEM; 6624 inode_init_owner(idmap, inode, dir, S_IFDIR | mode); 6625 inode->i_op = &btrfs_dir_inode_operations; 6626 inode->i_fop = &btrfs_dir_file_operations; 6627 return btrfs_create_common(dir, dentry, inode); 6628 } 6629 6630 static noinline int uncompress_inline(struct btrfs_path *path, 6631 struct page *page, 6632 struct btrfs_file_extent_item *item) 6633 { 6634 int ret; 6635 struct extent_buffer *leaf = path->nodes[0]; 6636 char *tmp; 6637 size_t max_size; 6638 unsigned long inline_size; 6639 unsigned long ptr; 6640 int compress_type; 6641 6642 compress_type = btrfs_file_extent_compression(leaf, item); 6643 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6644 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); 6645 tmp = kmalloc(inline_size, GFP_NOFS); 6646 if (!tmp) 6647 return -ENOMEM; 6648 ptr = btrfs_file_extent_inline_start(item); 6649 6650 read_extent_buffer(leaf, tmp, ptr, inline_size); 6651 6652 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6653 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size); 6654 6655 /* 6656 * decompression code contains a memset to fill in any space between the end 6657 * of the uncompressed data and the end of max_size in case the decompressed 6658 * data ends up shorter than ram_bytes. That doesn't cover the hole between 6659 * the end of an inline extent and the beginning of the next block, so we 6660 * cover that region here. 6661 */ 6662 6663 if (max_size < PAGE_SIZE) 6664 memzero_page(page, max_size, PAGE_SIZE - max_size); 6665 kfree(tmp); 6666 return ret; 6667 } 6668 6669 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path, 6670 struct page *page) 6671 { 6672 struct btrfs_file_extent_item *fi; 6673 void *kaddr; 6674 size_t copy_size; 6675 6676 if (!page || PageUptodate(page)) 6677 return 0; 6678 6679 ASSERT(page_offset(page) == 0); 6680 6681 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 6682 struct btrfs_file_extent_item); 6683 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) 6684 return uncompress_inline(path, page, fi); 6685 6686 copy_size = min_t(u64, PAGE_SIZE, 6687 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); 6688 kaddr = kmap_local_page(page); 6689 read_extent_buffer(path->nodes[0], kaddr, 6690 btrfs_file_extent_inline_start(fi), copy_size); 6691 kunmap_local(kaddr); 6692 if (copy_size < PAGE_SIZE) 6693 memzero_page(page, copy_size, PAGE_SIZE - copy_size); 6694 return 0; 6695 } 6696 6697 /* 6698 * Lookup the first extent overlapping a range in a file. 6699 * 6700 * @inode: file to search in 6701 * @page: page to read extent data into if the extent is inline 6702 * @pg_offset: offset into @page to copy to 6703 * @start: file offset 6704 * @len: length of range starting at @start 6705 * 6706 * Return the first &struct extent_map which overlaps the given range, reading 6707 * it from the B-tree and caching it if necessary. Note that there may be more 6708 * extents which overlap the given range after the returned extent_map. 6709 * 6710 * If @page is not NULL and the extent is inline, this also reads the extent 6711 * data directly into the page and marks the extent up to date in the io_tree. 6712 * 6713 * Return: ERR_PTR on error, non-NULL extent_map on success. 6714 */ 6715 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6716 struct page *page, size_t pg_offset, 6717 u64 start, u64 len) 6718 { 6719 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6720 int ret = 0; 6721 u64 extent_start = 0; 6722 u64 extent_end = 0; 6723 u64 objectid = btrfs_ino(inode); 6724 int extent_type = -1; 6725 struct btrfs_path *path = NULL; 6726 struct btrfs_root *root = inode->root; 6727 struct btrfs_file_extent_item *item; 6728 struct extent_buffer *leaf; 6729 struct btrfs_key found_key; 6730 struct extent_map *em = NULL; 6731 struct extent_map_tree *em_tree = &inode->extent_tree; 6732 6733 read_lock(&em_tree->lock); 6734 em = lookup_extent_mapping(em_tree, start, len); 6735 read_unlock(&em_tree->lock); 6736 6737 if (em) { 6738 if (em->start > start || em->start + em->len <= start) 6739 free_extent_map(em); 6740 else if (em->block_start == EXTENT_MAP_INLINE && page) 6741 free_extent_map(em); 6742 else 6743 goto out; 6744 } 6745 em = alloc_extent_map(); 6746 if (!em) { 6747 ret = -ENOMEM; 6748 goto out; 6749 } 6750 em->start = EXTENT_MAP_HOLE; 6751 em->orig_start = EXTENT_MAP_HOLE; 6752 em->len = (u64)-1; 6753 em->block_len = (u64)-1; 6754 6755 path = btrfs_alloc_path(); 6756 if (!path) { 6757 ret = -ENOMEM; 6758 goto out; 6759 } 6760 6761 /* Chances are we'll be called again, so go ahead and do readahead */ 6762 path->reada = READA_FORWARD; 6763 6764 /* 6765 * The same explanation in load_free_space_cache applies here as well, 6766 * we only read when we're loading the free space cache, and at that 6767 * point the commit_root has everything we need. 6768 */ 6769 if (btrfs_is_free_space_inode(inode)) { 6770 path->search_commit_root = 1; 6771 path->skip_locking = 1; 6772 } 6773 6774 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6775 if (ret < 0) { 6776 goto out; 6777 } else if (ret > 0) { 6778 if (path->slots[0] == 0) 6779 goto not_found; 6780 path->slots[0]--; 6781 ret = 0; 6782 } 6783 6784 leaf = path->nodes[0]; 6785 item = btrfs_item_ptr(leaf, path->slots[0], 6786 struct btrfs_file_extent_item); 6787 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6788 if (found_key.objectid != objectid || 6789 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6790 /* 6791 * If we backup past the first extent we want to move forward 6792 * and see if there is an extent in front of us, otherwise we'll 6793 * say there is a hole for our whole search range which can 6794 * cause problems. 6795 */ 6796 extent_end = start; 6797 goto next; 6798 } 6799 6800 extent_type = btrfs_file_extent_type(leaf, item); 6801 extent_start = found_key.offset; 6802 extent_end = btrfs_file_extent_end(path); 6803 if (extent_type == BTRFS_FILE_EXTENT_REG || 6804 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6805 /* Only regular file could have regular/prealloc extent */ 6806 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6807 ret = -EUCLEAN; 6808 btrfs_crit(fs_info, 6809 "regular/prealloc extent found for non-regular inode %llu", 6810 btrfs_ino(inode)); 6811 goto out; 6812 } 6813 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6814 extent_start); 6815 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6816 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 6817 path->slots[0], 6818 extent_start); 6819 } 6820 next: 6821 if (start >= extent_end) { 6822 path->slots[0]++; 6823 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6824 ret = btrfs_next_leaf(root, path); 6825 if (ret < 0) 6826 goto out; 6827 else if (ret > 0) 6828 goto not_found; 6829 6830 leaf = path->nodes[0]; 6831 } 6832 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6833 if (found_key.objectid != objectid || 6834 found_key.type != BTRFS_EXTENT_DATA_KEY) 6835 goto not_found; 6836 if (start + len <= found_key.offset) 6837 goto not_found; 6838 if (start > found_key.offset) 6839 goto next; 6840 6841 /* New extent overlaps with existing one */ 6842 em->start = start; 6843 em->orig_start = start; 6844 em->len = found_key.offset - start; 6845 em->block_start = EXTENT_MAP_HOLE; 6846 goto insert; 6847 } 6848 6849 btrfs_extent_item_to_extent_map(inode, path, item, em); 6850 6851 if (extent_type == BTRFS_FILE_EXTENT_REG || 6852 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6853 goto insert; 6854 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 6855 /* 6856 * Inline extent can only exist at file offset 0. This is 6857 * ensured by tree-checker and inline extent creation path. 6858 * Thus all members representing file offsets should be zero. 6859 */ 6860 ASSERT(pg_offset == 0); 6861 ASSERT(extent_start == 0); 6862 ASSERT(em->start == 0); 6863 6864 /* 6865 * btrfs_extent_item_to_extent_map() should have properly 6866 * initialized em members already. 6867 * 6868 * Other members are not utilized for inline extents. 6869 */ 6870 ASSERT(em->block_start == EXTENT_MAP_INLINE); 6871 ASSERT(em->len == fs_info->sectorsize); 6872 6873 ret = read_inline_extent(inode, path, page); 6874 if (ret < 0) 6875 goto out; 6876 goto insert; 6877 } 6878 not_found: 6879 em->start = start; 6880 em->orig_start = start; 6881 em->len = len; 6882 em->block_start = EXTENT_MAP_HOLE; 6883 insert: 6884 ret = 0; 6885 btrfs_release_path(path); 6886 if (em->start > start || extent_map_end(em) <= start) { 6887 btrfs_err(fs_info, 6888 "bad extent! em: [%llu %llu] passed [%llu %llu]", 6889 em->start, em->len, start, len); 6890 ret = -EIO; 6891 goto out; 6892 } 6893 6894 write_lock(&em_tree->lock); 6895 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 6896 write_unlock(&em_tree->lock); 6897 out: 6898 btrfs_free_path(path); 6899 6900 trace_btrfs_get_extent(root, inode, em); 6901 6902 if (ret) { 6903 free_extent_map(em); 6904 return ERR_PTR(ret); 6905 } 6906 return em; 6907 } 6908 6909 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, 6910 struct btrfs_dio_data *dio_data, 6911 const u64 start, 6912 const u64 len, 6913 const u64 orig_start, 6914 const u64 block_start, 6915 const u64 block_len, 6916 const u64 orig_block_len, 6917 const u64 ram_bytes, 6918 const int type) 6919 { 6920 struct extent_map *em = NULL; 6921 struct btrfs_ordered_extent *ordered; 6922 6923 if (type != BTRFS_ORDERED_NOCOW) { 6924 em = create_io_em(inode, start, len, orig_start, block_start, 6925 block_len, orig_block_len, ram_bytes, 6926 BTRFS_COMPRESS_NONE, /* compress_type */ 6927 type); 6928 if (IS_ERR(em)) 6929 goto out; 6930 } 6931 ordered = btrfs_alloc_ordered_extent(inode, start, len, len, 6932 block_start, block_len, 0, 6933 (1 << type) | 6934 (1 << BTRFS_ORDERED_DIRECT), 6935 BTRFS_COMPRESS_NONE); 6936 if (IS_ERR(ordered)) { 6937 if (em) { 6938 free_extent_map(em); 6939 btrfs_drop_extent_map_range(inode, start, 6940 start + len - 1, false); 6941 } 6942 em = ERR_CAST(ordered); 6943 } else { 6944 ASSERT(!dio_data->ordered); 6945 dio_data->ordered = ordered; 6946 } 6947 out: 6948 6949 return em; 6950 } 6951 6952 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, 6953 struct btrfs_dio_data *dio_data, 6954 u64 start, u64 len) 6955 { 6956 struct btrfs_root *root = inode->root; 6957 struct btrfs_fs_info *fs_info = root->fs_info; 6958 struct extent_map *em; 6959 struct btrfs_key ins; 6960 u64 alloc_hint; 6961 int ret; 6962 6963 alloc_hint = get_extent_allocation_hint(inode, start, len); 6964 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 6965 0, alloc_hint, &ins, 1, 1); 6966 if (ret) 6967 return ERR_PTR(ret); 6968 6969 em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start, 6970 ins.objectid, ins.offset, ins.offset, 6971 ins.offset, BTRFS_ORDERED_REGULAR); 6972 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 6973 if (IS_ERR(em)) 6974 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 6975 1); 6976 6977 return em; 6978 } 6979 6980 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) 6981 { 6982 struct btrfs_block_group *block_group; 6983 bool readonly = false; 6984 6985 block_group = btrfs_lookup_block_group(fs_info, bytenr); 6986 if (!block_group || block_group->ro) 6987 readonly = true; 6988 if (block_group) 6989 btrfs_put_block_group(block_group); 6990 return readonly; 6991 } 6992 6993 /* 6994 * Check if we can do nocow write into the range [@offset, @offset + @len) 6995 * 6996 * @offset: File offset 6997 * @len: The length to write, will be updated to the nocow writeable 6998 * range 6999 * @orig_start: (optional) Return the original file offset of the file extent 7000 * @orig_len: (optional) Return the original on-disk length of the file extent 7001 * @ram_bytes: (optional) Return the ram_bytes of the file extent 7002 * @strict: if true, omit optimizations that might force us into unnecessary 7003 * cow. e.g., don't trust generation number. 7004 * 7005 * Return: 7006 * >0 and update @len if we can do nocow write 7007 * 0 if we can't do nocow write 7008 * <0 if error happened 7009 * 7010 * NOTE: This only checks the file extents, caller is responsible to wait for 7011 * any ordered extents. 7012 */ 7013 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7014 u64 *orig_start, u64 *orig_block_len, 7015 u64 *ram_bytes, bool nowait, bool strict) 7016 { 7017 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7018 struct can_nocow_file_extent_args nocow_args = { 0 }; 7019 struct btrfs_path *path; 7020 int ret; 7021 struct extent_buffer *leaf; 7022 struct btrfs_root *root = BTRFS_I(inode)->root; 7023 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7024 struct btrfs_file_extent_item *fi; 7025 struct btrfs_key key; 7026 int found_type; 7027 7028 path = btrfs_alloc_path(); 7029 if (!path) 7030 return -ENOMEM; 7031 path->nowait = nowait; 7032 7033 ret = btrfs_lookup_file_extent(NULL, root, path, 7034 btrfs_ino(BTRFS_I(inode)), offset, 0); 7035 if (ret < 0) 7036 goto out; 7037 7038 if (ret == 1) { 7039 if (path->slots[0] == 0) { 7040 /* can't find the item, must cow */ 7041 ret = 0; 7042 goto out; 7043 } 7044 path->slots[0]--; 7045 } 7046 ret = 0; 7047 leaf = path->nodes[0]; 7048 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7049 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7050 key.type != BTRFS_EXTENT_DATA_KEY) { 7051 /* not our file or wrong item type, must cow */ 7052 goto out; 7053 } 7054 7055 if (key.offset > offset) { 7056 /* Wrong offset, must cow */ 7057 goto out; 7058 } 7059 7060 if (btrfs_file_extent_end(path) <= offset) 7061 goto out; 7062 7063 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 7064 found_type = btrfs_file_extent_type(leaf, fi); 7065 if (ram_bytes) 7066 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7067 7068 nocow_args.start = offset; 7069 nocow_args.end = offset + *len - 1; 7070 nocow_args.strict = strict; 7071 nocow_args.free_path = true; 7072 7073 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args); 7074 /* can_nocow_file_extent() has freed the path. */ 7075 path = NULL; 7076 7077 if (ret != 1) { 7078 /* Treat errors as not being able to NOCOW. */ 7079 ret = 0; 7080 goto out; 7081 } 7082 7083 ret = 0; 7084 if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr)) 7085 goto out; 7086 7087 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7088 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7089 u64 range_end; 7090 7091 range_end = round_up(offset + nocow_args.num_bytes, 7092 root->fs_info->sectorsize) - 1; 7093 ret = test_range_bit(io_tree, offset, range_end, 7094 EXTENT_DELALLOC, 0, NULL); 7095 if (ret) { 7096 ret = -EAGAIN; 7097 goto out; 7098 } 7099 } 7100 7101 if (orig_start) 7102 *orig_start = key.offset - nocow_args.extent_offset; 7103 if (orig_block_len) 7104 *orig_block_len = nocow_args.disk_num_bytes; 7105 7106 *len = nocow_args.num_bytes; 7107 ret = 1; 7108 out: 7109 btrfs_free_path(path); 7110 return ret; 7111 } 7112 7113 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7114 struct extent_state **cached_state, 7115 unsigned int iomap_flags) 7116 { 7117 const bool writing = (iomap_flags & IOMAP_WRITE); 7118 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7119 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7120 struct btrfs_ordered_extent *ordered; 7121 int ret = 0; 7122 7123 while (1) { 7124 if (nowait) { 7125 if (!try_lock_extent(io_tree, lockstart, lockend, 7126 cached_state)) 7127 return -EAGAIN; 7128 } else { 7129 lock_extent(io_tree, lockstart, lockend, cached_state); 7130 } 7131 /* 7132 * We're concerned with the entire range that we're going to be 7133 * doing DIO to, so we need to make sure there's no ordered 7134 * extents in this range. 7135 */ 7136 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7137 lockend - lockstart + 1); 7138 7139 /* 7140 * We need to make sure there are no buffered pages in this 7141 * range either, we could have raced between the invalidate in 7142 * generic_file_direct_write and locking the extent. The 7143 * invalidate needs to happen so that reads after a write do not 7144 * get stale data. 7145 */ 7146 if (!ordered && 7147 (!writing || !filemap_range_has_page(inode->i_mapping, 7148 lockstart, lockend))) 7149 break; 7150 7151 unlock_extent(io_tree, lockstart, lockend, cached_state); 7152 7153 if (ordered) { 7154 if (nowait) { 7155 btrfs_put_ordered_extent(ordered); 7156 ret = -EAGAIN; 7157 break; 7158 } 7159 /* 7160 * If we are doing a DIO read and the ordered extent we 7161 * found is for a buffered write, we can not wait for it 7162 * to complete and retry, because if we do so we can 7163 * deadlock with concurrent buffered writes on page 7164 * locks. This happens only if our DIO read covers more 7165 * than one extent map, if at this point has already 7166 * created an ordered extent for a previous extent map 7167 * and locked its range in the inode's io tree, and a 7168 * concurrent write against that previous extent map's 7169 * range and this range started (we unlock the ranges 7170 * in the io tree only when the bios complete and 7171 * buffered writes always lock pages before attempting 7172 * to lock range in the io tree). 7173 */ 7174 if (writing || 7175 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7176 btrfs_start_ordered_extent(ordered); 7177 else 7178 ret = nowait ? -EAGAIN : -ENOTBLK; 7179 btrfs_put_ordered_extent(ordered); 7180 } else { 7181 /* 7182 * We could trigger writeback for this range (and wait 7183 * for it to complete) and then invalidate the pages for 7184 * this range (through invalidate_inode_pages2_range()), 7185 * but that can lead us to a deadlock with a concurrent 7186 * call to readahead (a buffered read or a defrag call 7187 * triggered a readahead) on a page lock due to an 7188 * ordered dio extent we created before but did not have 7189 * yet a corresponding bio submitted (whence it can not 7190 * complete), which makes readahead wait for that 7191 * ordered extent to complete while holding a lock on 7192 * that page. 7193 */ 7194 ret = nowait ? -EAGAIN : -ENOTBLK; 7195 } 7196 7197 if (ret) 7198 break; 7199 7200 cond_resched(); 7201 } 7202 7203 return ret; 7204 } 7205 7206 /* The callers of this must take lock_extent() */ 7207 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start, 7208 u64 len, u64 orig_start, u64 block_start, 7209 u64 block_len, u64 orig_block_len, 7210 u64 ram_bytes, int compress_type, 7211 int type) 7212 { 7213 struct extent_map *em; 7214 int ret; 7215 7216 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7217 type == BTRFS_ORDERED_COMPRESSED || 7218 type == BTRFS_ORDERED_NOCOW || 7219 type == BTRFS_ORDERED_REGULAR); 7220 7221 em = alloc_extent_map(); 7222 if (!em) 7223 return ERR_PTR(-ENOMEM); 7224 7225 em->start = start; 7226 em->orig_start = orig_start; 7227 em->len = len; 7228 em->block_len = block_len; 7229 em->block_start = block_start; 7230 em->orig_block_len = orig_block_len; 7231 em->ram_bytes = ram_bytes; 7232 em->generation = -1; 7233 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7234 if (type == BTRFS_ORDERED_PREALLOC) { 7235 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7236 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7237 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7238 em->compress_type = compress_type; 7239 } 7240 7241 ret = btrfs_replace_extent_map_range(inode, em, true); 7242 if (ret) { 7243 free_extent_map(em); 7244 return ERR_PTR(ret); 7245 } 7246 7247 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7248 return em; 7249 } 7250 7251 7252 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7253 struct inode *inode, 7254 struct btrfs_dio_data *dio_data, 7255 u64 start, u64 *lenp, 7256 unsigned int iomap_flags) 7257 { 7258 const bool nowait = (iomap_flags & IOMAP_NOWAIT); 7259 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7260 struct extent_map *em = *map; 7261 int type; 7262 u64 block_start, orig_start, orig_block_len, ram_bytes; 7263 struct btrfs_block_group *bg; 7264 bool can_nocow = false; 7265 bool space_reserved = false; 7266 u64 len = *lenp; 7267 u64 prev_len; 7268 int ret = 0; 7269 7270 /* 7271 * We don't allocate a new extent in the following cases 7272 * 7273 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7274 * existing extent. 7275 * 2) The extent is marked as PREALLOC. We're good to go here and can 7276 * just use the extent. 7277 * 7278 */ 7279 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7280 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7281 em->block_start != EXTENT_MAP_HOLE)) { 7282 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7283 type = BTRFS_ORDERED_PREALLOC; 7284 else 7285 type = BTRFS_ORDERED_NOCOW; 7286 len = min(len, em->len - (start - em->start)); 7287 block_start = em->block_start + (start - em->start); 7288 7289 if (can_nocow_extent(inode, start, &len, &orig_start, 7290 &orig_block_len, &ram_bytes, false, false) == 1) { 7291 bg = btrfs_inc_nocow_writers(fs_info, block_start); 7292 if (bg) 7293 can_nocow = true; 7294 } 7295 } 7296 7297 prev_len = len; 7298 if (can_nocow) { 7299 struct extent_map *em2; 7300 7301 /* We can NOCOW, so only need to reserve metadata space. */ 7302 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7303 nowait); 7304 if (ret < 0) { 7305 /* Our caller expects us to free the input extent map. */ 7306 free_extent_map(em); 7307 *map = NULL; 7308 btrfs_dec_nocow_writers(bg); 7309 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) 7310 ret = -EAGAIN; 7311 goto out; 7312 } 7313 space_reserved = true; 7314 7315 em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len, 7316 orig_start, block_start, 7317 len, orig_block_len, 7318 ram_bytes, type); 7319 btrfs_dec_nocow_writers(bg); 7320 if (type == BTRFS_ORDERED_PREALLOC) { 7321 free_extent_map(em); 7322 *map = em2; 7323 em = em2; 7324 } 7325 7326 if (IS_ERR(em2)) { 7327 ret = PTR_ERR(em2); 7328 goto out; 7329 } 7330 7331 dio_data->nocow_done = true; 7332 } else { 7333 /* Our caller expects us to free the input extent map. */ 7334 free_extent_map(em); 7335 *map = NULL; 7336 7337 if (nowait) { 7338 ret = -EAGAIN; 7339 goto out; 7340 } 7341 7342 /* 7343 * If we could not allocate data space before locking the file 7344 * range and we can't do a NOCOW write, then we have to fail. 7345 */ 7346 if (!dio_data->data_space_reserved) { 7347 ret = -ENOSPC; 7348 goto out; 7349 } 7350 7351 /* 7352 * We have to COW and we have already reserved data space before, 7353 * so now we reserve only metadata. 7354 */ 7355 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, 7356 false); 7357 if (ret < 0) 7358 goto out; 7359 space_reserved = true; 7360 7361 em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len); 7362 if (IS_ERR(em)) { 7363 ret = PTR_ERR(em); 7364 goto out; 7365 } 7366 *map = em; 7367 len = min(len, em->len - (start - em->start)); 7368 if (len < prev_len) 7369 btrfs_delalloc_release_metadata(BTRFS_I(inode), 7370 prev_len - len, true); 7371 } 7372 7373 /* 7374 * We have created our ordered extent, so we can now release our reservation 7375 * for an outstanding extent. 7376 */ 7377 btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); 7378 7379 /* 7380 * Need to update the i_size under the extent lock so buffered 7381 * readers will get the updated i_size when we unlock. 7382 */ 7383 if (start + len > i_size_read(inode)) 7384 i_size_write(inode, start + len); 7385 out: 7386 if (ret && space_reserved) { 7387 btrfs_delalloc_release_extents(BTRFS_I(inode), len); 7388 btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true); 7389 } 7390 *lenp = len; 7391 return ret; 7392 } 7393 7394 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, 7395 loff_t length, unsigned int flags, struct iomap *iomap, 7396 struct iomap *srcmap) 7397 { 7398 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7399 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7400 struct extent_map *em; 7401 struct extent_state *cached_state = NULL; 7402 struct btrfs_dio_data *dio_data = iter->private; 7403 u64 lockstart, lockend; 7404 const bool write = !!(flags & IOMAP_WRITE); 7405 int ret = 0; 7406 u64 len = length; 7407 const u64 data_alloc_len = length; 7408 bool unlock_extents = false; 7409 7410 /* 7411 * We could potentially fault if we have a buffer > PAGE_SIZE, and if 7412 * we're NOWAIT we may submit a bio for a partial range and return 7413 * EIOCBQUEUED, which would result in an errant short read. 7414 * 7415 * The best way to handle this would be to allow for partial completions 7416 * of iocb's, so we could submit the partial bio, return and fault in 7417 * the rest of the pages, and then submit the io for the rest of the 7418 * range. However we don't have that currently, so simply return 7419 * -EAGAIN at this point so that the normal path is used. 7420 */ 7421 if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE) 7422 return -EAGAIN; 7423 7424 /* 7425 * Cap the size of reads to that usually seen in buffered I/O as we need 7426 * to allocate a contiguous array for the checksums. 7427 */ 7428 if (!write) 7429 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS); 7430 7431 lockstart = start; 7432 lockend = start + len - 1; 7433 7434 /* 7435 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't 7436 * enough if we've written compressed pages to this area, so we need to 7437 * flush the dirty pages again to make absolutely sure that any 7438 * outstanding dirty pages are on disk - the first flush only starts 7439 * compression on the data, while keeping the pages locked, so by the 7440 * time the second flush returns we know bios for the compressed pages 7441 * were submitted and finished, and the pages no longer under writeback. 7442 * 7443 * If we have a NOWAIT request and we have any pages in the range that 7444 * are locked, likely due to compression still in progress, we don't want 7445 * to block on page locks. We also don't want to block on pages marked as 7446 * dirty or under writeback (same as for the non-compression case). 7447 * iomap_dio_rw() did the same check, but after that and before we got 7448 * here, mmap'ed writes may have happened or buffered reads started 7449 * (readpage() and readahead(), which lock pages), as we haven't locked 7450 * the file range yet. 7451 */ 7452 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7453 &BTRFS_I(inode)->runtime_flags)) { 7454 if (flags & IOMAP_NOWAIT) { 7455 if (filemap_range_needs_writeback(inode->i_mapping, 7456 lockstart, lockend)) 7457 return -EAGAIN; 7458 } else { 7459 ret = filemap_fdatawrite_range(inode->i_mapping, start, 7460 start + length - 1); 7461 if (ret) 7462 return ret; 7463 } 7464 } 7465 7466 memset(dio_data, 0, sizeof(*dio_data)); 7467 7468 /* 7469 * We always try to allocate data space and must do it before locking 7470 * the file range, to avoid deadlocks with concurrent writes to the same 7471 * range if the range has several extents and the writes don't expand the 7472 * current i_size (the inode lock is taken in shared mode). If we fail to 7473 * allocate data space here we continue and later, after locking the 7474 * file range, we fail with ENOSPC only if we figure out we can not do a 7475 * NOCOW write. 7476 */ 7477 if (write && !(flags & IOMAP_NOWAIT)) { 7478 ret = btrfs_check_data_free_space(BTRFS_I(inode), 7479 &dio_data->data_reserved, 7480 start, data_alloc_len, false); 7481 if (!ret) 7482 dio_data->data_space_reserved = true; 7483 else if (ret && !(BTRFS_I(inode)->flags & 7484 (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) 7485 goto err; 7486 } 7487 7488 /* 7489 * If this errors out it's because we couldn't invalidate pagecache for 7490 * this range and we need to fallback to buffered IO, or we are doing a 7491 * NOWAIT read/write and we need to block. 7492 */ 7493 ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); 7494 if (ret < 0) 7495 goto err; 7496 7497 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 7498 if (IS_ERR(em)) { 7499 ret = PTR_ERR(em); 7500 goto unlock_err; 7501 } 7502 7503 /* 7504 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 7505 * io. INLINE is special, and we could probably kludge it in here, but 7506 * it's still buffered so for safety lets just fall back to the generic 7507 * buffered path. 7508 * 7509 * For COMPRESSED we _have_ to read the entire extent in so we can 7510 * decompress it, so there will be buffering required no matter what we 7511 * do, so go ahead and fallback to buffered. 7512 * 7513 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7514 * to buffered IO. Don't blame me, this is the price we pay for using 7515 * the generic code. 7516 */ 7517 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7518 em->block_start == EXTENT_MAP_INLINE) { 7519 free_extent_map(em); 7520 /* 7521 * If we are in a NOWAIT context, return -EAGAIN in order to 7522 * fallback to buffered IO. This is not only because we can 7523 * block with buffered IO (no support for NOWAIT semantics at 7524 * the moment) but also to avoid returning short reads to user 7525 * space - this happens if we were able to read some data from 7526 * previous non-compressed extents and then when we fallback to 7527 * buffered IO, at btrfs_file_read_iter() by calling 7528 * filemap_read(), we fail to fault in pages for the read buffer, 7529 * in which case filemap_read() returns a short read (the number 7530 * of bytes previously read is > 0, so it does not return -EFAULT). 7531 */ 7532 ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; 7533 goto unlock_err; 7534 } 7535 7536 len = min(len, em->len - (start - em->start)); 7537 7538 /* 7539 * If we have a NOWAIT request and the range contains multiple extents 7540 * (or a mix of extents and holes), then we return -EAGAIN to make the 7541 * caller fallback to a context where it can do a blocking (without 7542 * NOWAIT) request. This way we avoid doing partial IO and returning 7543 * success to the caller, which is not optimal for writes and for reads 7544 * it can result in unexpected behaviour for an application. 7545 * 7546 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling 7547 * iomap_dio_rw(), we can end up returning less data then what the caller 7548 * asked for, resulting in an unexpected, and incorrect, short read. 7549 * That is, the caller asked to read N bytes and we return less than that, 7550 * which is wrong unless we are crossing EOF. This happens if we get a 7551 * page fault error when trying to fault in pages for the buffer that is 7552 * associated to the struct iov_iter passed to iomap_dio_rw(), and we 7553 * have previously submitted bios for other extents in the range, in 7554 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of 7555 * those bios have completed by the time we get the page fault error, 7556 * which we return back to our caller - we should only return EIOCBQUEUED 7557 * after we have submitted bios for all the extents in the range. 7558 */ 7559 if ((flags & IOMAP_NOWAIT) && len < length) { 7560 free_extent_map(em); 7561 ret = -EAGAIN; 7562 goto unlock_err; 7563 } 7564 7565 if (write) { 7566 ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, 7567 start, &len, flags); 7568 if (ret < 0) 7569 goto unlock_err; 7570 unlock_extents = true; 7571 /* Recalc len in case the new em is smaller than requested */ 7572 len = min(len, em->len - (start - em->start)); 7573 if (dio_data->data_space_reserved) { 7574 u64 release_offset; 7575 u64 release_len = 0; 7576 7577 if (dio_data->nocow_done) { 7578 release_offset = start; 7579 release_len = data_alloc_len; 7580 } else if (len < data_alloc_len) { 7581 release_offset = start + len; 7582 release_len = data_alloc_len - len; 7583 } 7584 7585 if (release_len > 0) 7586 btrfs_free_reserved_data_space(BTRFS_I(inode), 7587 dio_data->data_reserved, 7588 release_offset, 7589 release_len); 7590 } 7591 } else { 7592 /* 7593 * We need to unlock only the end area that we aren't using. 7594 * The rest is going to be unlocked by the endio routine. 7595 */ 7596 lockstart = start + len; 7597 if (lockstart < lockend) 7598 unlock_extents = true; 7599 } 7600 7601 if (unlock_extents) 7602 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7603 &cached_state); 7604 else 7605 free_extent_state(cached_state); 7606 7607 /* 7608 * Translate extent map information to iomap. 7609 * We trim the extents (and move the addr) even though iomap code does 7610 * that, since we have locked only the parts we are performing I/O in. 7611 */ 7612 if ((em->block_start == EXTENT_MAP_HOLE) || 7613 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7614 iomap->addr = IOMAP_NULL_ADDR; 7615 iomap->type = IOMAP_HOLE; 7616 } else { 7617 iomap->addr = em->block_start + (start - em->start); 7618 iomap->type = IOMAP_MAPPED; 7619 } 7620 iomap->offset = start; 7621 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; 7622 iomap->length = len; 7623 free_extent_map(em); 7624 7625 return 0; 7626 7627 unlock_err: 7628 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7629 &cached_state); 7630 err: 7631 if (dio_data->data_space_reserved) { 7632 btrfs_free_reserved_data_space(BTRFS_I(inode), 7633 dio_data->data_reserved, 7634 start, data_alloc_len); 7635 extent_changeset_free(dio_data->data_reserved); 7636 } 7637 7638 return ret; 7639 } 7640 7641 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7642 ssize_t written, unsigned int flags, struct iomap *iomap) 7643 { 7644 struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); 7645 struct btrfs_dio_data *dio_data = iter->private; 7646 size_t submitted = dio_data->submitted; 7647 const bool write = !!(flags & IOMAP_WRITE); 7648 int ret = 0; 7649 7650 if (!write && (iomap->type == IOMAP_HOLE)) { 7651 /* If reading from a hole, unlock and return */ 7652 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, 7653 NULL); 7654 return 0; 7655 } 7656 7657 if (submitted < length) { 7658 pos += submitted; 7659 length -= submitted; 7660 if (write) 7661 btrfs_finish_ordered_extent(dio_data->ordered, NULL, 7662 pos, length, false); 7663 else 7664 unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7665 pos + length - 1, NULL); 7666 ret = -ENOTBLK; 7667 } 7668 if (write) { 7669 btrfs_put_ordered_extent(dio_data->ordered); 7670 dio_data->ordered = NULL; 7671 } 7672 7673 if (write) 7674 extent_changeset_free(dio_data->data_reserved); 7675 return ret; 7676 } 7677 7678 static void btrfs_dio_end_io(struct btrfs_bio *bbio) 7679 { 7680 struct btrfs_dio_private *dip = 7681 container_of(bbio, struct btrfs_dio_private, bbio); 7682 struct btrfs_inode *inode = bbio->inode; 7683 struct bio *bio = &bbio->bio; 7684 7685 if (bio->bi_status) { 7686 btrfs_warn(inode->root->fs_info, 7687 "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d", 7688 btrfs_ino(inode), bio->bi_opf, 7689 dip->file_offset, dip->bytes, bio->bi_status); 7690 } 7691 7692 if (btrfs_op(bio) == BTRFS_MAP_WRITE) { 7693 btrfs_finish_ordered_extent(bbio->ordered, NULL, 7694 dip->file_offset, dip->bytes, 7695 !bio->bi_status); 7696 } else { 7697 unlock_extent(&inode->io_tree, dip->file_offset, 7698 dip->file_offset + dip->bytes - 1, NULL); 7699 } 7700 7701 bbio->bio.bi_private = bbio->private; 7702 iomap_dio_bio_end_io(bio); 7703 } 7704 7705 static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, 7706 loff_t file_offset) 7707 { 7708 struct btrfs_bio *bbio = btrfs_bio(bio); 7709 struct btrfs_dio_private *dip = 7710 container_of(bbio, struct btrfs_dio_private, bbio); 7711 struct btrfs_dio_data *dio_data = iter->private; 7712 7713 btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, 7714 btrfs_dio_end_io, bio->bi_private); 7715 bbio->inode = BTRFS_I(iter->inode); 7716 bbio->file_offset = file_offset; 7717 7718 dip->file_offset = file_offset; 7719 dip->bytes = bio->bi_iter.bi_size; 7720 7721 dio_data->submitted += bio->bi_iter.bi_size; 7722 7723 /* 7724 * Check if we are doing a partial write. If we are, we need to split 7725 * the ordered extent to match the submitted bio. Hang on to the 7726 * remaining unfinishable ordered_extent in dio_data so that it can be 7727 * cancelled in iomap_end to avoid a deadlock wherein faulting the 7728 * remaining pages is blocked on the outstanding ordered extent. 7729 */ 7730 if (iter->flags & IOMAP_WRITE) { 7731 int ret; 7732 7733 ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered); 7734 if (ret) { 7735 btrfs_finish_ordered_extent(dio_data->ordered, NULL, 7736 file_offset, dip->bytes, 7737 !ret); 7738 bio->bi_status = errno_to_blk_status(ret); 7739 iomap_dio_bio_end_io(bio); 7740 return; 7741 } 7742 } 7743 7744 btrfs_submit_bio(bbio, 0); 7745 } 7746 7747 static const struct iomap_ops btrfs_dio_iomap_ops = { 7748 .iomap_begin = btrfs_dio_iomap_begin, 7749 .iomap_end = btrfs_dio_iomap_end, 7750 }; 7751 7752 static const struct iomap_dio_ops btrfs_dio_ops = { 7753 .submit_io = btrfs_dio_submit_io, 7754 .bio_set = &btrfs_dio_bioset, 7755 }; 7756 7757 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 7758 { 7759 struct btrfs_dio_data data = { 0 }; 7760 7761 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7762 IOMAP_DIO_PARTIAL, &data, done_before); 7763 } 7764 7765 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, 7766 size_t done_before) 7767 { 7768 struct btrfs_dio_data data = { 0 }; 7769 7770 return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 7771 IOMAP_DIO_PARTIAL, &data, done_before); 7772 } 7773 7774 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 7775 u64 start, u64 len) 7776 { 7777 int ret; 7778 7779 ret = fiemap_prep(inode, fieinfo, start, &len, 0); 7780 if (ret) 7781 return ret; 7782 7783 /* 7784 * fiemap_prep() called filemap_write_and_wait() for the whole possible 7785 * file range (0 to LLONG_MAX), but that is not enough if we have 7786 * compression enabled. The first filemap_fdatawrite_range() only kicks 7787 * in the compression of data (in an async thread) and will return 7788 * before the compression is done and writeback is started. A second 7789 * filemap_fdatawrite_range() is needed to wait for the compression to 7790 * complete and writeback to start. We also need to wait for ordered 7791 * extents to complete, because our fiemap implementation uses mainly 7792 * file extent items to list the extents, searching for extent maps 7793 * only for file ranges with holes or prealloc extents to figure out 7794 * if we have delalloc in those ranges. 7795 */ 7796 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { 7797 ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX); 7798 if (ret) 7799 return ret; 7800 } 7801 7802 return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); 7803 } 7804 7805 static int btrfs_writepages(struct address_space *mapping, 7806 struct writeback_control *wbc) 7807 { 7808 return extent_writepages(mapping, wbc); 7809 } 7810 7811 static void btrfs_readahead(struct readahead_control *rac) 7812 { 7813 extent_readahead(rac); 7814 } 7815 7816 /* 7817 * For release_folio() and invalidate_folio() we have a race window where 7818 * folio_end_writeback() is called but the subpage spinlock is not yet released. 7819 * If we continue to release/invalidate the page, we could cause use-after-free 7820 * for subpage spinlock. So this function is to spin and wait for subpage 7821 * spinlock. 7822 */ 7823 static void wait_subpage_spinlock(struct page *page) 7824 { 7825 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); 7826 struct btrfs_subpage *subpage; 7827 7828 if (!btrfs_is_subpage(fs_info, page)) 7829 return; 7830 7831 ASSERT(PagePrivate(page) && page->private); 7832 subpage = (struct btrfs_subpage *)page->private; 7833 7834 /* 7835 * This may look insane as we just acquire the spinlock and release it, 7836 * without doing anything. But we just want to make sure no one is 7837 * still holding the subpage spinlock. 7838 * And since the page is not dirty nor writeback, and we have page 7839 * locked, the only possible way to hold a spinlock is from the endio 7840 * function to clear page writeback. 7841 * 7842 * Here we just acquire the spinlock so that all existing callers 7843 * should exit and we're safe to release/invalidate the page. 7844 */ 7845 spin_lock_irq(&subpage->lock); 7846 spin_unlock_irq(&subpage->lock); 7847 } 7848 7849 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7850 { 7851 int ret = try_release_extent_mapping(&folio->page, gfp_flags); 7852 7853 if (ret == 1) { 7854 wait_subpage_spinlock(&folio->page); 7855 clear_page_extent_mapped(&folio->page); 7856 } 7857 return ret; 7858 } 7859 7860 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags) 7861 { 7862 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 7863 return false; 7864 return __btrfs_release_folio(folio, gfp_flags); 7865 } 7866 7867 #ifdef CONFIG_MIGRATION 7868 static int btrfs_migrate_folio(struct address_space *mapping, 7869 struct folio *dst, struct folio *src, 7870 enum migrate_mode mode) 7871 { 7872 int ret = filemap_migrate_folio(mapping, dst, src, mode); 7873 7874 if (ret != MIGRATEPAGE_SUCCESS) 7875 return ret; 7876 7877 if (folio_test_ordered(src)) { 7878 folio_clear_ordered(src); 7879 folio_set_ordered(dst); 7880 } 7881 7882 return MIGRATEPAGE_SUCCESS; 7883 } 7884 #else 7885 #define btrfs_migrate_folio NULL 7886 #endif 7887 7888 static void btrfs_invalidate_folio(struct folio *folio, size_t offset, 7889 size_t length) 7890 { 7891 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); 7892 struct btrfs_fs_info *fs_info = inode->root->fs_info; 7893 struct extent_io_tree *tree = &inode->io_tree; 7894 struct extent_state *cached_state = NULL; 7895 u64 page_start = folio_pos(folio); 7896 u64 page_end = page_start + folio_size(folio) - 1; 7897 u64 cur; 7898 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; 7899 7900 /* 7901 * We have folio locked so no new ordered extent can be created on this 7902 * page, nor bio can be submitted for this folio. 7903 * 7904 * But already submitted bio can still be finished on this folio. 7905 * Furthermore, endio function won't skip folio which has Ordered 7906 * (Private2) already cleared, so it's possible for endio and 7907 * invalidate_folio to do the same ordered extent accounting twice 7908 * on one folio. 7909 * 7910 * So here we wait for any submitted bios to finish, so that we won't 7911 * do double ordered extent accounting on the same folio. 7912 */ 7913 folio_wait_writeback(folio); 7914 wait_subpage_spinlock(&folio->page); 7915 7916 /* 7917 * For subpage case, we have call sites like 7918 * btrfs_punch_hole_lock_range() which passes range not aligned to 7919 * sectorsize. 7920 * If the range doesn't cover the full folio, we don't need to and 7921 * shouldn't clear page extent mapped, as folio->private can still 7922 * record subpage dirty bits for other part of the range. 7923 * 7924 * For cases that invalidate the full folio even the range doesn't 7925 * cover the full folio, like invalidating the last folio, we're 7926 * still safe to wait for ordered extent to finish. 7927 */ 7928 if (!(offset == 0 && length == folio_size(folio))) { 7929 btrfs_release_folio(folio, GFP_NOFS); 7930 return; 7931 } 7932 7933 if (!inode_evicting) 7934 lock_extent(tree, page_start, page_end, &cached_state); 7935 7936 cur = page_start; 7937 while (cur < page_end) { 7938 struct btrfs_ordered_extent *ordered; 7939 u64 range_end; 7940 u32 range_len; 7941 u32 extra_flags = 0; 7942 7943 ordered = btrfs_lookup_first_ordered_range(inode, cur, 7944 page_end + 1 - cur); 7945 if (!ordered) { 7946 range_end = page_end; 7947 /* 7948 * No ordered extent covering this range, we are safe 7949 * to delete all extent states in the range. 7950 */ 7951 extra_flags = EXTENT_CLEAR_ALL_BITS; 7952 goto next; 7953 } 7954 if (ordered->file_offset > cur) { 7955 /* 7956 * There is a range between [cur, oe->file_offset) not 7957 * covered by any ordered extent. 7958 * We are safe to delete all extent states, and handle 7959 * the ordered extent in the next iteration. 7960 */ 7961 range_end = ordered->file_offset - 1; 7962 extra_flags = EXTENT_CLEAR_ALL_BITS; 7963 goto next; 7964 } 7965 7966 range_end = min(ordered->file_offset + ordered->num_bytes - 1, 7967 page_end); 7968 ASSERT(range_end + 1 - cur < U32_MAX); 7969 range_len = range_end + 1 - cur; 7970 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { 7971 /* 7972 * If Ordered (Private2) is cleared, it means endio has 7973 * already been executed for the range. 7974 * We can't delete the extent states as 7975 * btrfs_finish_ordered_io() may still use some of them. 7976 */ 7977 goto next; 7978 } 7979 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); 7980 7981 /* 7982 * IO on this page will never be started, so we need to account 7983 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW 7984 * here, must leave that up for the ordered extent completion. 7985 * 7986 * This will also unlock the range for incoming 7987 * btrfs_finish_ordered_io(). 7988 */ 7989 if (!inode_evicting) 7990 clear_extent_bit(tree, cur, range_end, 7991 EXTENT_DELALLOC | 7992 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 7993 EXTENT_DEFRAG, &cached_state); 7994 7995 spin_lock_irq(&inode->ordered_tree.lock); 7996 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 7997 ordered->truncated_len = min(ordered->truncated_len, 7998 cur - ordered->file_offset); 7999 spin_unlock_irq(&inode->ordered_tree.lock); 8000 8001 /* 8002 * If the ordered extent has finished, we're safe to delete all 8003 * the extent states of the range, otherwise 8004 * btrfs_finish_ordered_io() will get executed by endio for 8005 * other pages, so we can't delete extent states. 8006 */ 8007 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8008 cur, range_end + 1 - cur)) { 8009 btrfs_finish_ordered_io(ordered); 8010 /* 8011 * The ordered extent has finished, now we're again 8012 * safe to delete all extent states of the range. 8013 */ 8014 extra_flags = EXTENT_CLEAR_ALL_BITS; 8015 } 8016 next: 8017 if (ordered) 8018 btrfs_put_ordered_extent(ordered); 8019 /* 8020 * Qgroup reserved space handler 8021 * Sector(s) here will be either: 8022 * 8023 * 1) Already written to disk or bio already finished 8024 * Then its QGROUP_RESERVED bit in io_tree is already cleared. 8025 * Qgroup will be handled by its qgroup_record then. 8026 * btrfs_qgroup_free_data() call will do nothing here. 8027 * 8028 * 2) Not written to disk yet 8029 * Then btrfs_qgroup_free_data() call will clear the 8030 * QGROUP_RESERVED bit of its io_tree, and free the qgroup 8031 * reserved data space. 8032 * Since the IO will never happen for this page. 8033 */ 8034 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur); 8035 if (!inode_evicting) { 8036 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | 8037 EXTENT_DELALLOC | EXTENT_UPTODATE | 8038 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | 8039 extra_flags, &cached_state); 8040 } 8041 cur = range_end + 1; 8042 } 8043 /* 8044 * We have iterated through all ordered extents of the page, the page 8045 * should not have Ordered (Private2) anymore, or the above iteration 8046 * did something wrong. 8047 */ 8048 ASSERT(!folio_test_ordered(folio)); 8049 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); 8050 if (!inode_evicting) 8051 __btrfs_release_folio(folio, GFP_NOFS); 8052 clear_page_extent_mapped(&folio->page); 8053 } 8054 8055 /* 8056 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8057 * called from a page fault handler when a page is first dirtied. Hence we must 8058 * be careful to check for EOF conditions here. We set the page up correctly 8059 * for a written page which means we get ENOSPC checking when writing into 8060 * holes and correct delalloc and unwritten extent mapping on filesystems that 8061 * support these features. 8062 * 8063 * We are not allowed to take the i_mutex here so we have to play games to 8064 * protect against truncate races as the page could now be beyond EOF. Because 8065 * truncate_setsize() writes the inode size before removing pages, once we have 8066 * the page lock we can determine safely if the page is beyond EOF. If it is not 8067 * beyond EOF, then the page is guaranteed safe against truncation until we 8068 * unlock the page. 8069 */ 8070 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8071 { 8072 struct page *page = vmf->page; 8073 struct inode *inode = file_inode(vmf->vma->vm_file); 8074 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8075 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8076 struct btrfs_ordered_extent *ordered; 8077 struct extent_state *cached_state = NULL; 8078 struct extent_changeset *data_reserved = NULL; 8079 unsigned long zero_start; 8080 loff_t size; 8081 vm_fault_t ret; 8082 int ret2; 8083 int reserved = 0; 8084 u64 reserved_space; 8085 u64 page_start; 8086 u64 page_end; 8087 u64 end; 8088 8089 reserved_space = PAGE_SIZE; 8090 8091 sb_start_pagefault(inode->i_sb); 8092 page_start = page_offset(page); 8093 page_end = page_start + PAGE_SIZE - 1; 8094 end = page_end; 8095 8096 /* 8097 * Reserving delalloc space after obtaining the page lock can lead to 8098 * deadlock. For example, if a dirty page is locked by this function 8099 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8100 * dirty page write out, then the btrfs_writepages() function could 8101 * end up waiting indefinitely to get a lock on the page currently 8102 * being processed by btrfs_page_mkwrite() function. 8103 */ 8104 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 8105 page_start, reserved_space); 8106 if (!ret2) { 8107 ret2 = file_update_time(vmf->vma->vm_file); 8108 reserved = 1; 8109 } 8110 if (ret2) { 8111 ret = vmf_error(ret2); 8112 if (reserved) 8113 goto out; 8114 goto out_noreserve; 8115 } 8116 8117 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8118 again: 8119 down_read(&BTRFS_I(inode)->i_mmap_lock); 8120 lock_page(page); 8121 size = i_size_read(inode); 8122 8123 if ((page->mapping != inode->i_mapping) || 8124 (page_start >= size)) { 8125 /* page got truncated out from underneath us */ 8126 goto out_unlock; 8127 } 8128 wait_on_page_writeback(page); 8129 8130 lock_extent(io_tree, page_start, page_end, &cached_state); 8131 ret2 = set_page_extent_mapped(page); 8132 if (ret2 < 0) { 8133 ret = vmf_error(ret2); 8134 unlock_extent(io_tree, page_start, page_end, &cached_state); 8135 goto out_unlock; 8136 } 8137 8138 /* 8139 * we can't set the delalloc bits if there are pending ordered 8140 * extents. Drop our locks and wait for them to finish 8141 */ 8142 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8143 PAGE_SIZE); 8144 if (ordered) { 8145 unlock_extent(io_tree, page_start, page_end, &cached_state); 8146 unlock_page(page); 8147 up_read(&BTRFS_I(inode)->i_mmap_lock); 8148 btrfs_start_ordered_extent(ordered); 8149 btrfs_put_ordered_extent(ordered); 8150 goto again; 8151 } 8152 8153 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 8154 reserved_space = round_up(size - page_start, 8155 fs_info->sectorsize); 8156 if (reserved_space < PAGE_SIZE) { 8157 end = page_start + reserved_space - 1; 8158 btrfs_delalloc_release_space(BTRFS_I(inode), 8159 data_reserved, page_start, 8160 PAGE_SIZE - reserved_space, true); 8161 } 8162 } 8163 8164 /* 8165 * page_mkwrite gets called when the page is firstly dirtied after it's 8166 * faulted in, but write(2) could also dirty a page and set delalloc 8167 * bits, thus in this case for space account reason, we still need to 8168 * clear any delalloc bits within this page range since we have to 8169 * reserve data&meta space before lock_page() (see above comments). 8170 */ 8171 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 8172 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8173 EXTENT_DEFRAG, &cached_state); 8174 8175 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, 8176 &cached_state); 8177 if (ret2) { 8178 unlock_extent(io_tree, page_start, page_end, &cached_state); 8179 ret = VM_FAULT_SIGBUS; 8180 goto out_unlock; 8181 } 8182 8183 /* page is wholly or partially inside EOF */ 8184 if (page_start + PAGE_SIZE > size) 8185 zero_start = offset_in_page(size); 8186 else 8187 zero_start = PAGE_SIZE; 8188 8189 if (zero_start != PAGE_SIZE) 8190 memzero_page(page, zero_start, PAGE_SIZE - zero_start); 8191 8192 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE); 8193 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start); 8194 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start); 8195 8196 btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); 8197 8198 unlock_extent(io_tree, page_start, page_end, &cached_state); 8199 up_read(&BTRFS_I(inode)->i_mmap_lock); 8200 8201 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8202 sb_end_pagefault(inode->i_sb); 8203 extent_changeset_free(data_reserved); 8204 return VM_FAULT_LOCKED; 8205 8206 out_unlock: 8207 unlock_page(page); 8208 up_read(&BTRFS_I(inode)->i_mmap_lock); 8209 out: 8210 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 8211 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, 8212 reserved_space, (ret != 0)); 8213 out_noreserve: 8214 sb_end_pagefault(inode->i_sb); 8215 extent_changeset_free(data_reserved); 8216 return ret; 8217 } 8218 8219 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) 8220 { 8221 struct btrfs_truncate_control control = { 8222 .inode = inode, 8223 .ino = btrfs_ino(inode), 8224 .min_type = BTRFS_EXTENT_DATA_KEY, 8225 .clear_extent_range = true, 8226 }; 8227 struct btrfs_root *root = inode->root; 8228 struct btrfs_fs_info *fs_info = root->fs_info; 8229 struct btrfs_block_rsv *rsv; 8230 int ret; 8231 struct btrfs_trans_handle *trans; 8232 u64 mask = fs_info->sectorsize - 1; 8233 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 8234 8235 if (!skip_writeback) { 8236 ret = btrfs_wait_ordered_range(&inode->vfs_inode, 8237 inode->vfs_inode.i_size & (~mask), 8238 (u64)-1); 8239 if (ret) 8240 return ret; 8241 } 8242 8243 /* 8244 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 8245 * things going on here: 8246 * 8247 * 1) We need to reserve space to update our inode. 8248 * 8249 * 2) We need to have something to cache all the space that is going to 8250 * be free'd up by the truncate operation, but also have some slack 8251 * space reserved in case it uses space during the truncate (thank you 8252 * very much snapshotting). 8253 * 8254 * And we need these to be separate. The fact is we can use a lot of 8255 * space doing the truncate, and we have no earthly idea how much space 8256 * we will use, so we need the truncate reservation to be separate so it 8257 * doesn't end up using space reserved for updating the inode. We also 8258 * need to be able to stop the transaction and start a new one, which 8259 * means we need to be able to update the inode several times, and we 8260 * have no idea of knowing how many times that will be, so we can't just 8261 * reserve 1 item for the entirety of the operation, so that has to be 8262 * done separately as well. 8263 * 8264 * So that leaves us with 8265 * 8266 * 1) rsv - for the truncate reservation, which we will steal from the 8267 * transaction reservation. 8268 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 8269 * updating the inode. 8270 */ 8271 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 8272 if (!rsv) 8273 return -ENOMEM; 8274 rsv->size = min_size; 8275 rsv->failfast = true; 8276 8277 /* 8278 * 1 for the truncate slack space 8279 * 1 for updating the inode. 8280 */ 8281 trans = btrfs_start_transaction(root, 2); 8282 if (IS_ERR(trans)) { 8283 ret = PTR_ERR(trans); 8284 goto out; 8285 } 8286 8287 /* Migrate the slack space for the truncate to our reserve */ 8288 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 8289 min_size, false); 8290 /* 8291 * We have reserved 2 metadata units when we started the transaction and 8292 * min_size matches 1 unit, so this should never fail, but if it does, 8293 * it's not critical we just fail truncation. 8294 */ 8295 if (WARN_ON(ret)) { 8296 btrfs_end_transaction(trans); 8297 goto out; 8298 } 8299 8300 trans->block_rsv = rsv; 8301 8302 while (1) { 8303 struct extent_state *cached_state = NULL; 8304 const u64 new_size = inode->vfs_inode.i_size; 8305 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); 8306 8307 control.new_size = new_size; 8308 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8309 /* 8310 * We want to drop from the next block forward in case this new 8311 * size is not block aligned since we will be keeping the last 8312 * block of the extent just the way it is. 8313 */ 8314 btrfs_drop_extent_map_range(inode, 8315 ALIGN(new_size, fs_info->sectorsize), 8316 (u64)-1, false); 8317 8318 ret = btrfs_truncate_inode_items(trans, root, &control); 8319 8320 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 8321 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 8322 8323 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); 8324 8325 trans->block_rsv = &fs_info->trans_block_rsv; 8326 if (ret != -ENOSPC && ret != -EAGAIN) 8327 break; 8328 8329 ret = btrfs_update_inode(trans, root, inode); 8330 if (ret) 8331 break; 8332 8333 btrfs_end_transaction(trans); 8334 btrfs_btree_balance_dirty(fs_info); 8335 8336 trans = btrfs_start_transaction(root, 2); 8337 if (IS_ERR(trans)) { 8338 ret = PTR_ERR(trans); 8339 trans = NULL; 8340 break; 8341 } 8342 8343 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); 8344 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 8345 rsv, min_size, false); 8346 /* 8347 * We have reserved 2 metadata units when we started the 8348 * transaction and min_size matches 1 unit, so this should never 8349 * fail, but if it does, it's not critical we just fail truncation. 8350 */ 8351 if (WARN_ON(ret)) 8352 break; 8353 8354 trans->block_rsv = rsv; 8355 } 8356 8357 /* 8358 * We can't call btrfs_truncate_block inside a trans handle as we could 8359 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we 8360 * know we've truncated everything except the last little bit, and can 8361 * do btrfs_truncate_block and then update the disk_i_size. 8362 */ 8363 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) { 8364 btrfs_end_transaction(trans); 8365 btrfs_btree_balance_dirty(fs_info); 8366 8367 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); 8368 if (ret) 8369 goto out; 8370 trans = btrfs_start_transaction(root, 1); 8371 if (IS_ERR(trans)) { 8372 ret = PTR_ERR(trans); 8373 goto out; 8374 } 8375 btrfs_inode_safe_disk_i_size_write(inode, 0); 8376 } 8377 8378 if (trans) { 8379 int ret2; 8380 8381 trans->block_rsv = &fs_info->trans_block_rsv; 8382 ret2 = btrfs_update_inode(trans, root, inode); 8383 if (ret2 && !ret) 8384 ret = ret2; 8385 8386 ret2 = btrfs_end_transaction(trans); 8387 if (ret2 && !ret) 8388 ret = ret2; 8389 btrfs_btree_balance_dirty(fs_info); 8390 } 8391 out: 8392 btrfs_free_block_rsv(fs_info, rsv); 8393 /* 8394 * So if we truncate and then write and fsync we normally would just 8395 * write the extents that changed, which is a problem if we need to 8396 * first truncate that entire inode. So set this flag so we write out 8397 * all of the extents in the inode to the sync log so we're completely 8398 * safe. 8399 * 8400 * If no extents were dropped or trimmed we don't need to force the next 8401 * fsync to truncate all the inode's items from the log and re-log them 8402 * all. This means the truncate operation did not change the file size, 8403 * or changed it to a smaller size but there was only an implicit hole 8404 * between the old i_size and the new i_size, and there were no prealloc 8405 * extents beyond i_size to drop. 8406 */ 8407 if (control.extents_found > 0) 8408 btrfs_set_inode_full_sync(inode); 8409 8410 return ret; 8411 } 8412 8413 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap, 8414 struct inode *dir) 8415 { 8416 struct inode *inode; 8417 8418 inode = new_inode(dir->i_sb); 8419 if (inode) { 8420 /* 8421 * Subvolumes don't inherit the sgid bit or the parent's gid if 8422 * the parent's sgid bit is set. This is probably a bug. 8423 */ 8424 inode_init_owner(idmap, inode, NULL, 8425 S_IFDIR | (~current_umask() & S_IRWXUGO)); 8426 inode->i_op = &btrfs_dir_inode_operations; 8427 inode->i_fop = &btrfs_dir_file_operations; 8428 } 8429 return inode; 8430 } 8431 8432 struct inode *btrfs_alloc_inode(struct super_block *sb) 8433 { 8434 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 8435 struct btrfs_inode *ei; 8436 struct inode *inode; 8437 8438 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL); 8439 if (!ei) 8440 return NULL; 8441 8442 ei->root = NULL; 8443 ei->generation = 0; 8444 ei->last_trans = 0; 8445 ei->last_sub_trans = 0; 8446 ei->logged_trans = 0; 8447 ei->delalloc_bytes = 0; 8448 ei->new_delalloc_bytes = 0; 8449 ei->defrag_bytes = 0; 8450 ei->disk_i_size = 0; 8451 ei->flags = 0; 8452 ei->ro_flags = 0; 8453 ei->csum_bytes = 0; 8454 ei->index_cnt = (u64)-1; 8455 ei->dir_index = 0; 8456 ei->last_unlink_trans = 0; 8457 ei->last_reflink_trans = 0; 8458 ei->last_log_commit = 0; 8459 8460 spin_lock_init(&ei->lock); 8461 ei->outstanding_extents = 0; 8462 if (sb->s_magic != BTRFS_TEST_MAGIC) 8463 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 8464 BTRFS_BLOCK_RSV_DELALLOC); 8465 ei->runtime_flags = 0; 8466 ei->prop_compress = BTRFS_COMPRESS_NONE; 8467 ei->defrag_compress = BTRFS_COMPRESS_NONE; 8468 8469 ei->delayed_node = NULL; 8470 8471 ei->i_otime.tv_sec = 0; 8472 ei->i_otime.tv_nsec = 0; 8473 8474 inode = &ei->vfs_inode; 8475 extent_map_tree_init(&ei->extent_tree); 8476 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); 8477 ei->io_tree.inode = ei; 8478 extent_io_tree_init(fs_info, &ei->file_extent_tree, 8479 IO_TREE_INODE_FILE_EXTENT); 8480 mutex_init(&ei->log_mutex); 8481 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8482 INIT_LIST_HEAD(&ei->delalloc_inodes); 8483 INIT_LIST_HEAD(&ei->delayed_iput); 8484 RB_CLEAR_NODE(&ei->rb_node); 8485 init_rwsem(&ei->i_mmap_lock); 8486 8487 return inode; 8488 } 8489 8490 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8491 void btrfs_test_destroy_inode(struct inode *inode) 8492 { 8493 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); 8494 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8495 } 8496 #endif 8497 8498 void btrfs_free_inode(struct inode *inode) 8499 { 8500 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8501 } 8502 8503 void btrfs_destroy_inode(struct inode *vfs_inode) 8504 { 8505 struct btrfs_ordered_extent *ordered; 8506 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 8507 struct btrfs_root *root = inode->root; 8508 bool freespace_inode; 8509 8510 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); 8511 WARN_ON(vfs_inode->i_data.nrpages); 8512 WARN_ON(inode->block_rsv.reserved); 8513 WARN_ON(inode->block_rsv.size); 8514 WARN_ON(inode->outstanding_extents); 8515 if (!S_ISDIR(vfs_inode->i_mode)) { 8516 WARN_ON(inode->delalloc_bytes); 8517 WARN_ON(inode->new_delalloc_bytes); 8518 } 8519 WARN_ON(inode->csum_bytes); 8520 WARN_ON(inode->defrag_bytes); 8521 8522 /* 8523 * This can happen where we create an inode, but somebody else also 8524 * created the same inode and we need to destroy the one we already 8525 * created. 8526 */ 8527 if (!root) 8528 return; 8529 8530 /* 8531 * If this is a free space inode do not take the ordered extents lockdep 8532 * map. 8533 */ 8534 freespace_inode = btrfs_is_free_space_inode(inode); 8535 8536 while (1) { 8537 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8538 if (!ordered) 8539 break; 8540 else { 8541 btrfs_err(root->fs_info, 8542 "found ordered extent %llu %llu on inode cleanup", 8543 ordered->file_offset, ordered->num_bytes); 8544 8545 if (!freespace_inode) 8546 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); 8547 8548 btrfs_remove_ordered_extent(inode, ordered); 8549 btrfs_put_ordered_extent(ordered); 8550 btrfs_put_ordered_extent(ordered); 8551 } 8552 } 8553 btrfs_qgroup_check_reserved_leak(inode); 8554 inode_tree_del(inode); 8555 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 8556 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); 8557 btrfs_put_root(inode->root); 8558 } 8559 8560 int btrfs_drop_inode(struct inode *inode) 8561 { 8562 struct btrfs_root *root = BTRFS_I(inode)->root; 8563 8564 if (root == NULL) 8565 return 1; 8566 8567 /* the snap/subvol tree is on deleting */ 8568 if (btrfs_root_refs(&root->root_item) == 0) 8569 return 1; 8570 else 8571 return generic_drop_inode(inode); 8572 } 8573 8574 static void init_once(void *foo) 8575 { 8576 struct btrfs_inode *ei = foo; 8577 8578 inode_init_once(&ei->vfs_inode); 8579 } 8580 8581 void __cold btrfs_destroy_cachep(void) 8582 { 8583 /* 8584 * Make sure all delayed rcu free inodes are flushed before we 8585 * destroy cache. 8586 */ 8587 rcu_barrier(); 8588 bioset_exit(&btrfs_dio_bioset); 8589 kmem_cache_destroy(btrfs_inode_cachep); 8590 } 8591 8592 int __init btrfs_init_cachep(void) 8593 { 8594 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 8595 sizeof(struct btrfs_inode), 0, 8596 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, 8597 init_once); 8598 if (!btrfs_inode_cachep) 8599 goto fail; 8600 8601 if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE, 8602 offsetof(struct btrfs_dio_private, bbio.bio), 8603 BIOSET_NEED_BVECS)) 8604 goto fail; 8605 8606 return 0; 8607 fail: 8608 btrfs_destroy_cachep(); 8609 return -ENOMEM; 8610 } 8611 8612 static int btrfs_getattr(struct mnt_idmap *idmap, 8613 const struct path *path, struct kstat *stat, 8614 u32 request_mask, unsigned int flags) 8615 { 8616 u64 delalloc_bytes; 8617 u64 inode_bytes; 8618 struct inode *inode = d_inode(path->dentry); 8619 u32 blocksize = inode->i_sb->s_blocksize; 8620 u32 bi_flags = BTRFS_I(inode)->flags; 8621 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; 8622 8623 stat->result_mask |= STATX_BTIME; 8624 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; 8625 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; 8626 if (bi_flags & BTRFS_INODE_APPEND) 8627 stat->attributes |= STATX_ATTR_APPEND; 8628 if (bi_flags & BTRFS_INODE_COMPRESS) 8629 stat->attributes |= STATX_ATTR_COMPRESSED; 8630 if (bi_flags & BTRFS_INODE_IMMUTABLE) 8631 stat->attributes |= STATX_ATTR_IMMUTABLE; 8632 if (bi_flags & BTRFS_INODE_NODUMP) 8633 stat->attributes |= STATX_ATTR_NODUMP; 8634 if (bi_ro_flags & BTRFS_INODE_RO_VERITY) 8635 stat->attributes |= STATX_ATTR_VERITY; 8636 8637 stat->attributes_mask |= (STATX_ATTR_APPEND | 8638 STATX_ATTR_COMPRESSED | 8639 STATX_ATTR_IMMUTABLE | 8640 STATX_ATTR_NODUMP); 8641 8642 generic_fillattr(idmap, request_mask, inode, stat); 8643 stat->dev = BTRFS_I(inode)->root->anon_dev; 8644 8645 spin_lock(&BTRFS_I(inode)->lock); 8646 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 8647 inode_bytes = inode_get_bytes(inode); 8648 spin_unlock(&BTRFS_I(inode)->lock); 8649 stat->blocks = (ALIGN(inode_bytes, blocksize) + 8650 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT; 8651 return 0; 8652 } 8653 8654 static int btrfs_rename_exchange(struct inode *old_dir, 8655 struct dentry *old_dentry, 8656 struct inode *new_dir, 8657 struct dentry *new_dentry) 8658 { 8659 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8660 struct btrfs_trans_handle *trans; 8661 unsigned int trans_num_items; 8662 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8663 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8664 struct inode *new_inode = new_dentry->d_inode; 8665 struct inode *old_inode = old_dentry->d_inode; 8666 struct btrfs_rename_ctx old_rename_ctx; 8667 struct btrfs_rename_ctx new_rename_ctx; 8668 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8669 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 8670 u64 old_idx = 0; 8671 u64 new_idx = 0; 8672 int ret; 8673 int ret2; 8674 bool need_abort = false; 8675 struct fscrypt_name old_fname, new_fname; 8676 struct fscrypt_str *old_name, *new_name; 8677 8678 /* 8679 * For non-subvolumes allow exchange only within one subvolume, in the 8680 * same inode namespace. Two subvolumes (represented as directory) can 8681 * be exchanged as they're a logical link and have a fixed inode number. 8682 */ 8683 if (root != dest && 8684 (old_ino != BTRFS_FIRST_FREE_OBJECTID || 8685 new_ino != BTRFS_FIRST_FREE_OBJECTID)) 8686 return -EXDEV; 8687 8688 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8689 if (ret) 8690 return ret; 8691 8692 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8693 if (ret) { 8694 fscrypt_free_filename(&old_fname); 8695 return ret; 8696 } 8697 8698 old_name = &old_fname.disk_name; 8699 new_name = &new_fname.disk_name; 8700 8701 /* close the race window with snapshot create/destroy ioctl */ 8702 if (old_ino == BTRFS_FIRST_FREE_OBJECTID || 8703 new_ino == BTRFS_FIRST_FREE_OBJECTID) 8704 down_read(&fs_info->subvol_sem); 8705 8706 /* 8707 * For each inode: 8708 * 1 to remove old dir item 8709 * 1 to remove old dir index 8710 * 1 to add new dir item 8711 * 1 to add new dir index 8712 * 1 to update parent inode 8713 * 8714 * If the parents are the same, we only need to account for one 8715 */ 8716 trans_num_items = (old_dir == new_dir ? 9 : 10); 8717 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8718 /* 8719 * 1 to remove old root ref 8720 * 1 to remove old root backref 8721 * 1 to add new root ref 8722 * 1 to add new root backref 8723 */ 8724 trans_num_items += 4; 8725 } else { 8726 /* 8727 * 1 to update inode item 8728 * 1 to remove old inode ref 8729 * 1 to add new inode ref 8730 */ 8731 trans_num_items += 3; 8732 } 8733 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 8734 trans_num_items += 4; 8735 else 8736 trans_num_items += 3; 8737 trans = btrfs_start_transaction(root, trans_num_items); 8738 if (IS_ERR(trans)) { 8739 ret = PTR_ERR(trans); 8740 goto out_notrans; 8741 } 8742 8743 if (dest != root) { 8744 ret = btrfs_record_root_in_trans(trans, dest); 8745 if (ret) 8746 goto out_fail; 8747 } 8748 8749 /* 8750 * We need to find a free sequence number both in the source and 8751 * in the destination directory for the exchange. 8752 */ 8753 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 8754 if (ret) 8755 goto out_fail; 8756 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 8757 if (ret) 8758 goto out_fail; 8759 8760 BTRFS_I(old_inode)->dir_index = 0ULL; 8761 BTRFS_I(new_inode)->dir_index = 0ULL; 8762 8763 /* Reference for the source. */ 8764 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8765 /* force full log commit if subvolume involved. */ 8766 btrfs_set_log_full_commit(trans); 8767 } else { 8768 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino, 8769 btrfs_ino(BTRFS_I(new_dir)), 8770 old_idx); 8771 if (ret) 8772 goto out_fail; 8773 need_abort = true; 8774 } 8775 8776 /* And now for the dest. */ 8777 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8778 /* force full log commit if subvolume involved. */ 8779 btrfs_set_log_full_commit(trans); 8780 } else { 8781 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino, 8782 btrfs_ino(BTRFS_I(old_dir)), 8783 new_idx); 8784 if (ret) { 8785 if (need_abort) 8786 btrfs_abort_transaction(trans, ret); 8787 goto out_fail; 8788 } 8789 } 8790 8791 /* Update inode version and ctime/mtime. */ 8792 inode_inc_iversion(old_dir); 8793 inode_inc_iversion(new_dir); 8794 inode_inc_iversion(old_inode); 8795 inode_inc_iversion(new_inode); 8796 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 8797 8798 if (old_dentry->d_parent != new_dentry->d_parent) { 8799 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 8800 BTRFS_I(old_inode), true); 8801 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 8802 BTRFS_I(new_inode), true); 8803 } 8804 8805 /* src is a subvolume */ 8806 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8807 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 8808 } else { /* src is an inode */ 8809 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 8810 BTRFS_I(old_dentry->d_inode), 8811 old_name, &old_rename_ctx); 8812 if (!ret) 8813 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 8814 } 8815 if (ret) { 8816 btrfs_abort_transaction(trans, ret); 8817 goto out_fail; 8818 } 8819 8820 /* dest is a subvolume */ 8821 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 8822 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 8823 } else { /* dest is an inode */ 8824 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir), 8825 BTRFS_I(new_dentry->d_inode), 8826 new_name, &new_rename_ctx); 8827 if (!ret) 8828 ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode)); 8829 } 8830 if (ret) { 8831 btrfs_abort_transaction(trans, ret); 8832 goto out_fail; 8833 } 8834 8835 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 8836 new_name, 0, old_idx); 8837 if (ret) { 8838 btrfs_abort_transaction(trans, ret); 8839 goto out_fail; 8840 } 8841 8842 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 8843 old_name, 0, new_idx); 8844 if (ret) { 8845 btrfs_abort_transaction(trans, ret); 8846 goto out_fail; 8847 } 8848 8849 if (old_inode->i_nlink == 1) 8850 BTRFS_I(old_inode)->dir_index = old_idx; 8851 if (new_inode->i_nlink == 1) 8852 BTRFS_I(new_inode)->dir_index = new_idx; 8853 8854 /* 8855 * Now pin the logs of the roots. We do it to ensure that no other task 8856 * can sync the logs while we are in progress with the rename, because 8857 * that could result in an inconsistency in case any of the inodes that 8858 * are part of this rename operation were logged before. 8859 */ 8860 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8861 btrfs_pin_log_trans(root); 8862 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8863 btrfs_pin_log_trans(dest); 8864 8865 /* Do the log updates for all inodes. */ 8866 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8867 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 8868 old_rename_ctx.index, new_dentry->d_parent); 8869 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8870 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), 8871 new_rename_ctx.index, old_dentry->d_parent); 8872 8873 /* Now unpin the logs. */ 8874 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 8875 btrfs_end_log_trans(root); 8876 if (new_ino != BTRFS_FIRST_FREE_OBJECTID) 8877 btrfs_end_log_trans(dest); 8878 out_fail: 8879 ret2 = btrfs_end_transaction(trans); 8880 ret = ret ? ret : ret2; 8881 out_notrans: 8882 if (new_ino == BTRFS_FIRST_FREE_OBJECTID || 8883 old_ino == BTRFS_FIRST_FREE_OBJECTID) 8884 up_read(&fs_info->subvol_sem); 8885 8886 fscrypt_free_filename(&new_fname); 8887 fscrypt_free_filename(&old_fname); 8888 return ret; 8889 } 8890 8891 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap, 8892 struct inode *dir) 8893 { 8894 struct inode *inode; 8895 8896 inode = new_inode(dir->i_sb); 8897 if (inode) { 8898 inode_init_owner(idmap, inode, dir, 8899 S_IFCHR | WHITEOUT_MODE); 8900 inode->i_op = &btrfs_special_inode_operations; 8901 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 8902 } 8903 return inode; 8904 } 8905 8906 static int btrfs_rename(struct mnt_idmap *idmap, 8907 struct inode *old_dir, struct dentry *old_dentry, 8908 struct inode *new_dir, struct dentry *new_dentry, 8909 unsigned int flags) 8910 { 8911 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 8912 struct btrfs_new_inode_args whiteout_args = { 8913 .dir = old_dir, 8914 .dentry = old_dentry, 8915 }; 8916 struct btrfs_trans_handle *trans; 8917 unsigned int trans_num_items; 8918 struct btrfs_root *root = BTRFS_I(old_dir)->root; 8919 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 8920 struct inode *new_inode = d_inode(new_dentry); 8921 struct inode *old_inode = d_inode(old_dentry); 8922 struct btrfs_rename_ctx rename_ctx; 8923 u64 index = 0; 8924 int ret; 8925 int ret2; 8926 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 8927 struct fscrypt_name old_fname, new_fname; 8928 8929 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 8930 return -EPERM; 8931 8932 /* we only allow rename subvolume link between subvolumes */ 8933 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 8934 return -EXDEV; 8935 8936 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 8937 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 8938 return -ENOTEMPTY; 8939 8940 if (S_ISDIR(old_inode->i_mode) && new_inode && 8941 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 8942 return -ENOTEMPTY; 8943 8944 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); 8945 if (ret) 8946 return ret; 8947 8948 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); 8949 if (ret) { 8950 fscrypt_free_filename(&old_fname); 8951 return ret; 8952 } 8953 8954 /* check for collisions, even if the name isn't there */ 8955 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); 8956 if (ret) { 8957 if (ret == -EEXIST) { 8958 /* we shouldn't get 8959 * eexist without a new_inode */ 8960 if (WARN_ON(!new_inode)) { 8961 goto out_fscrypt_names; 8962 } 8963 } else { 8964 /* maybe -EOVERFLOW */ 8965 goto out_fscrypt_names; 8966 } 8967 } 8968 ret = 0; 8969 8970 /* 8971 * we're using rename to replace one file with another. Start IO on it 8972 * now so we don't add too much work to the end of the transaction 8973 */ 8974 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 8975 filemap_flush(old_inode->i_mapping); 8976 8977 if (flags & RENAME_WHITEOUT) { 8978 whiteout_args.inode = new_whiteout_inode(idmap, old_dir); 8979 if (!whiteout_args.inode) { 8980 ret = -ENOMEM; 8981 goto out_fscrypt_names; 8982 } 8983 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); 8984 if (ret) 8985 goto out_whiteout_inode; 8986 } else { 8987 /* 1 to update the old parent inode. */ 8988 trans_num_items = 1; 8989 } 8990 8991 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 8992 /* Close the race window with snapshot create/destroy ioctl */ 8993 down_read(&fs_info->subvol_sem); 8994 /* 8995 * 1 to remove old root ref 8996 * 1 to remove old root backref 8997 * 1 to add new root ref 8998 * 1 to add new root backref 8999 */ 9000 trans_num_items += 4; 9001 } else { 9002 /* 9003 * 1 to update inode 9004 * 1 to remove old inode ref 9005 * 1 to add new inode ref 9006 */ 9007 trans_num_items += 3; 9008 } 9009 /* 9010 * 1 to remove old dir item 9011 * 1 to remove old dir index 9012 * 1 to add new dir item 9013 * 1 to add new dir index 9014 */ 9015 trans_num_items += 4; 9016 /* 1 to update new parent inode if it's not the same as the old parent */ 9017 if (new_dir != old_dir) 9018 trans_num_items++; 9019 if (new_inode) { 9020 /* 9021 * 1 to update inode 9022 * 1 to remove inode ref 9023 * 1 to remove dir item 9024 * 1 to remove dir index 9025 * 1 to possibly add orphan item 9026 */ 9027 trans_num_items += 5; 9028 } 9029 trans = btrfs_start_transaction(root, trans_num_items); 9030 if (IS_ERR(trans)) { 9031 ret = PTR_ERR(trans); 9032 goto out_notrans; 9033 } 9034 9035 if (dest != root) { 9036 ret = btrfs_record_root_in_trans(trans, dest); 9037 if (ret) 9038 goto out_fail; 9039 } 9040 9041 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9042 if (ret) 9043 goto out_fail; 9044 9045 BTRFS_I(old_inode)->dir_index = 0ULL; 9046 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9047 /* force full log commit if subvolume involved. */ 9048 btrfs_set_log_full_commit(trans); 9049 } else { 9050 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name, 9051 old_ino, btrfs_ino(BTRFS_I(new_dir)), 9052 index); 9053 if (ret) 9054 goto out_fail; 9055 } 9056 9057 inode_inc_iversion(old_dir); 9058 inode_inc_iversion(new_dir); 9059 inode_inc_iversion(old_inode); 9060 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 9061 9062 if (old_dentry->d_parent != new_dentry->d_parent) 9063 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9064 BTRFS_I(old_inode), true); 9065 9066 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9067 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry); 9068 } else { 9069 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir), 9070 BTRFS_I(d_inode(old_dentry)), 9071 &old_fname.disk_name, &rename_ctx); 9072 if (!ret) 9073 ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode)); 9074 } 9075 if (ret) { 9076 btrfs_abort_transaction(trans, ret); 9077 goto out_fail; 9078 } 9079 9080 if (new_inode) { 9081 inode_inc_iversion(new_inode); 9082 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9083 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9084 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry); 9085 BUG_ON(new_inode->i_nlink == 0); 9086 } else { 9087 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir), 9088 BTRFS_I(d_inode(new_dentry)), 9089 &new_fname.disk_name); 9090 } 9091 if (!ret && new_inode->i_nlink == 0) 9092 ret = btrfs_orphan_add(trans, 9093 BTRFS_I(d_inode(new_dentry))); 9094 if (ret) { 9095 btrfs_abort_transaction(trans, ret); 9096 goto out_fail; 9097 } 9098 } 9099 9100 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9101 &new_fname.disk_name, 0, index); 9102 if (ret) { 9103 btrfs_abort_transaction(trans, ret); 9104 goto out_fail; 9105 } 9106 9107 if (old_inode->i_nlink == 1) 9108 BTRFS_I(old_inode)->dir_index = index; 9109 9110 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) 9111 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), 9112 rename_ctx.index, new_dentry->d_parent); 9113 9114 if (flags & RENAME_WHITEOUT) { 9115 ret = btrfs_create_new_inode(trans, &whiteout_args); 9116 if (ret) { 9117 btrfs_abort_transaction(trans, ret); 9118 goto out_fail; 9119 } else { 9120 unlock_new_inode(whiteout_args.inode); 9121 iput(whiteout_args.inode); 9122 whiteout_args.inode = NULL; 9123 } 9124 } 9125 out_fail: 9126 ret2 = btrfs_end_transaction(trans); 9127 ret = ret ? ret : ret2; 9128 out_notrans: 9129 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9130 up_read(&fs_info->subvol_sem); 9131 if (flags & RENAME_WHITEOUT) 9132 btrfs_new_inode_args_destroy(&whiteout_args); 9133 out_whiteout_inode: 9134 if (flags & RENAME_WHITEOUT) 9135 iput(whiteout_args.inode); 9136 out_fscrypt_names: 9137 fscrypt_free_filename(&old_fname); 9138 fscrypt_free_filename(&new_fname); 9139 return ret; 9140 } 9141 9142 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, 9143 struct dentry *old_dentry, struct inode *new_dir, 9144 struct dentry *new_dentry, unsigned int flags) 9145 { 9146 int ret; 9147 9148 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 9149 return -EINVAL; 9150 9151 if (flags & RENAME_EXCHANGE) 9152 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir, 9153 new_dentry); 9154 else 9155 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir, 9156 new_dentry, flags); 9157 9158 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); 9159 9160 return ret; 9161 } 9162 9163 struct btrfs_delalloc_work { 9164 struct inode *inode; 9165 struct completion completion; 9166 struct list_head list; 9167 struct btrfs_work work; 9168 }; 9169 9170 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9171 { 9172 struct btrfs_delalloc_work *delalloc_work; 9173 struct inode *inode; 9174 9175 delalloc_work = container_of(work, struct btrfs_delalloc_work, 9176 work); 9177 inode = delalloc_work->inode; 9178 filemap_flush(inode->i_mapping); 9179 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9180 &BTRFS_I(inode)->runtime_flags)) 9181 filemap_flush(inode->i_mapping); 9182 9183 iput(inode); 9184 complete(&delalloc_work->completion); 9185 } 9186 9187 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 9188 { 9189 struct btrfs_delalloc_work *work; 9190 9191 work = kmalloc(sizeof(*work), GFP_NOFS); 9192 if (!work) 9193 return NULL; 9194 9195 init_completion(&work->completion); 9196 INIT_LIST_HEAD(&work->list); 9197 work->inode = inode; 9198 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); 9199 9200 return work; 9201 } 9202 9203 /* 9204 * some fairly slow code that needs optimization. This walks the list 9205 * of all the inodes with pending delalloc and forces them to disk. 9206 */ 9207 static int start_delalloc_inodes(struct btrfs_root *root, 9208 struct writeback_control *wbc, bool snapshot, 9209 bool in_reclaim_context) 9210 { 9211 struct btrfs_inode *binode; 9212 struct inode *inode; 9213 struct btrfs_delalloc_work *work, *next; 9214 LIST_HEAD(works); 9215 LIST_HEAD(splice); 9216 int ret = 0; 9217 bool full_flush = wbc->nr_to_write == LONG_MAX; 9218 9219 mutex_lock(&root->delalloc_mutex); 9220 spin_lock(&root->delalloc_lock); 9221 list_splice_init(&root->delalloc_inodes, &splice); 9222 while (!list_empty(&splice)) { 9223 binode = list_entry(splice.next, struct btrfs_inode, 9224 delalloc_inodes); 9225 9226 list_move_tail(&binode->delalloc_inodes, 9227 &root->delalloc_inodes); 9228 9229 if (in_reclaim_context && 9230 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9231 continue; 9232 9233 inode = igrab(&binode->vfs_inode); 9234 if (!inode) { 9235 cond_resched_lock(&root->delalloc_lock); 9236 continue; 9237 } 9238 spin_unlock(&root->delalloc_lock); 9239 9240 if (snapshot) 9241 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 9242 &binode->runtime_flags); 9243 if (full_flush) { 9244 work = btrfs_alloc_delalloc_work(inode); 9245 if (!work) { 9246 iput(inode); 9247 ret = -ENOMEM; 9248 goto out; 9249 } 9250 list_add_tail(&work->list, &works); 9251 btrfs_queue_work(root->fs_info->flush_workers, 9252 &work->work); 9253 } else { 9254 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); 9255 btrfs_add_delayed_iput(BTRFS_I(inode)); 9256 if (ret || wbc->nr_to_write <= 0) 9257 goto out; 9258 } 9259 cond_resched(); 9260 spin_lock(&root->delalloc_lock); 9261 } 9262 spin_unlock(&root->delalloc_lock); 9263 9264 out: 9265 list_for_each_entry_safe(work, next, &works, list) { 9266 list_del_init(&work->list); 9267 wait_for_completion(&work->completion); 9268 kfree(work); 9269 } 9270 9271 if (!list_empty(&splice)) { 9272 spin_lock(&root->delalloc_lock); 9273 list_splice_tail(&splice, &root->delalloc_inodes); 9274 spin_unlock(&root->delalloc_lock); 9275 } 9276 mutex_unlock(&root->delalloc_mutex); 9277 return ret; 9278 } 9279 9280 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context) 9281 { 9282 struct writeback_control wbc = { 9283 .nr_to_write = LONG_MAX, 9284 .sync_mode = WB_SYNC_NONE, 9285 .range_start = 0, 9286 .range_end = LLONG_MAX, 9287 }; 9288 struct btrfs_fs_info *fs_info = root->fs_info; 9289 9290 if (BTRFS_FS_ERROR(fs_info)) 9291 return -EROFS; 9292 9293 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context); 9294 } 9295 9296 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, 9297 bool in_reclaim_context) 9298 { 9299 struct writeback_control wbc = { 9300 .nr_to_write = nr, 9301 .sync_mode = WB_SYNC_NONE, 9302 .range_start = 0, 9303 .range_end = LLONG_MAX, 9304 }; 9305 struct btrfs_root *root; 9306 LIST_HEAD(splice); 9307 int ret; 9308 9309 if (BTRFS_FS_ERROR(fs_info)) 9310 return -EROFS; 9311 9312 mutex_lock(&fs_info->delalloc_root_mutex); 9313 spin_lock(&fs_info->delalloc_root_lock); 9314 list_splice_init(&fs_info->delalloc_roots, &splice); 9315 while (!list_empty(&splice)) { 9316 /* 9317 * Reset nr_to_write here so we know that we're doing a full 9318 * flush. 9319 */ 9320 if (nr == LONG_MAX) 9321 wbc.nr_to_write = LONG_MAX; 9322 9323 root = list_first_entry(&splice, struct btrfs_root, 9324 delalloc_root); 9325 root = btrfs_grab_root(root); 9326 BUG_ON(!root); 9327 list_move_tail(&root->delalloc_root, 9328 &fs_info->delalloc_roots); 9329 spin_unlock(&fs_info->delalloc_root_lock); 9330 9331 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context); 9332 btrfs_put_root(root); 9333 if (ret < 0 || wbc.nr_to_write <= 0) 9334 goto out; 9335 spin_lock(&fs_info->delalloc_root_lock); 9336 } 9337 spin_unlock(&fs_info->delalloc_root_lock); 9338 9339 ret = 0; 9340 out: 9341 if (!list_empty(&splice)) { 9342 spin_lock(&fs_info->delalloc_root_lock); 9343 list_splice_tail(&splice, &fs_info->delalloc_roots); 9344 spin_unlock(&fs_info->delalloc_root_lock); 9345 } 9346 mutex_unlock(&fs_info->delalloc_root_mutex); 9347 return ret; 9348 } 9349 9350 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir, 9351 struct dentry *dentry, const char *symname) 9352 { 9353 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9354 struct btrfs_trans_handle *trans; 9355 struct btrfs_root *root = BTRFS_I(dir)->root; 9356 struct btrfs_path *path; 9357 struct btrfs_key key; 9358 struct inode *inode; 9359 struct btrfs_new_inode_args new_inode_args = { 9360 .dir = dir, 9361 .dentry = dentry, 9362 }; 9363 unsigned int trans_num_items; 9364 int err; 9365 int name_len; 9366 int datasize; 9367 unsigned long ptr; 9368 struct btrfs_file_extent_item *ei; 9369 struct extent_buffer *leaf; 9370 9371 name_len = strlen(symname); 9372 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 9373 return -ENAMETOOLONG; 9374 9375 inode = new_inode(dir->i_sb); 9376 if (!inode) 9377 return -ENOMEM; 9378 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO); 9379 inode->i_op = &btrfs_symlink_inode_operations; 9380 inode_nohighmem(inode); 9381 inode->i_mapping->a_ops = &btrfs_aops; 9382 btrfs_i_size_write(BTRFS_I(inode), name_len); 9383 inode_set_bytes(inode, name_len); 9384 9385 new_inode_args.inode = inode; 9386 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9387 if (err) 9388 goto out_inode; 9389 /* 1 additional item for the inline extent */ 9390 trans_num_items++; 9391 9392 trans = btrfs_start_transaction(root, trans_num_items); 9393 if (IS_ERR(trans)) { 9394 err = PTR_ERR(trans); 9395 goto out_new_inode_args; 9396 } 9397 9398 err = btrfs_create_new_inode(trans, &new_inode_args); 9399 if (err) 9400 goto out; 9401 9402 path = btrfs_alloc_path(); 9403 if (!path) { 9404 err = -ENOMEM; 9405 btrfs_abort_transaction(trans, err); 9406 discard_new_inode(inode); 9407 inode = NULL; 9408 goto out; 9409 } 9410 key.objectid = btrfs_ino(BTRFS_I(inode)); 9411 key.offset = 0; 9412 key.type = BTRFS_EXTENT_DATA_KEY; 9413 datasize = btrfs_file_extent_calc_inline_size(name_len); 9414 err = btrfs_insert_empty_item(trans, root, path, &key, 9415 datasize); 9416 if (err) { 9417 btrfs_abort_transaction(trans, err); 9418 btrfs_free_path(path); 9419 discard_new_inode(inode); 9420 inode = NULL; 9421 goto out; 9422 } 9423 leaf = path->nodes[0]; 9424 ei = btrfs_item_ptr(leaf, path->slots[0], 9425 struct btrfs_file_extent_item); 9426 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9427 btrfs_set_file_extent_type(leaf, ei, 9428 BTRFS_FILE_EXTENT_INLINE); 9429 btrfs_set_file_extent_encryption(leaf, ei, 0); 9430 btrfs_set_file_extent_compression(leaf, ei, 0); 9431 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9432 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9433 9434 ptr = btrfs_file_extent_inline_start(ei); 9435 write_extent_buffer(leaf, symname, ptr, name_len); 9436 btrfs_mark_buffer_dirty(leaf); 9437 btrfs_free_path(path); 9438 9439 d_instantiate_new(dentry, inode); 9440 err = 0; 9441 out: 9442 btrfs_end_transaction(trans); 9443 btrfs_btree_balance_dirty(fs_info); 9444 out_new_inode_args: 9445 btrfs_new_inode_args_destroy(&new_inode_args); 9446 out_inode: 9447 if (err) 9448 iput(inode); 9449 return err; 9450 } 9451 9452 static struct btrfs_trans_handle *insert_prealloc_file_extent( 9453 struct btrfs_trans_handle *trans_in, 9454 struct btrfs_inode *inode, 9455 struct btrfs_key *ins, 9456 u64 file_offset) 9457 { 9458 struct btrfs_file_extent_item stack_fi; 9459 struct btrfs_replace_extent_info extent_info; 9460 struct btrfs_trans_handle *trans = trans_in; 9461 struct btrfs_path *path; 9462 u64 start = ins->objectid; 9463 u64 len = ins->offset; 9464 int qgroup_released; 9465 int ret; 9466 9467 memset(&stack_fi, 0, sizeof(stack_fi)); 9468 9469 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC); 9470 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start); 9471 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len); 9472 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len); 9473 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len); 9474 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE); 9475 /* Encryption and other encoding is reserved and all 0 */ 9476 9477 qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len); 9478 if (qgroup_released < 0) 9479 return ERR_PTR(qgroup_released); 9480 9481 if (trans) { 9482 ret = insert_reserved_file_extent(trans, inode, 9483 file_offset, &stack_fi, 9484 true, qgroup_released); 9485 if (ret) 9486 goto free_qgroup; 9487 return trans; 9488 } 9489 9490 extent_info.disk_offset = start; 9491 extent_info.disk_len = len; 9492 extent_info.data_offset = 0; 9493 extent_info.data_len = len; 9494 extent_info.file_offset = file_offset; 9495 extent_info.extent_buf = (char *)&stack_fi; 9496 extent_info.is_new_extent = true; 9497 extent_info.update_times = true; 9498 extent_info.qgroup_reserved = qgroup_released; 9499 extent_info.insertions = 0; 9500 9501 path = btrfs_alloc_path(); 9502 if (!path) { 9503 ret = -ENOMEM; 9504 goto free_qgroup; 9505 } 9506 9507 ret = btrfs_replace_file_extents(inode, path, file_offset, 9508 file_offset + len - 1, &extent_info, 9509 &trans); 9510 btrfs_free_path(path); 9511 if (ret) 9512 goto free_qgroup; 9513 return trans; 9514 9515 free_qgroup: 9516 /* 9517 * We have released qgroup data range at the beginning of the function, 9518 * and normally qgroup_released bytes will be freed when committing 9519 * transaction. 9520 * But if we error out early, we have to free what we have released 9521 * or we leak qgroup data reservation. 9522 */ 9523 btrfs_qgroup_free_refroot(inode->root->fs_info, 9524 inode->root->root_key.objectid, qgroup_released, 9525 BTRFS_QGROUP_RSV_DATA); 9526 return ERR_PTR(ret); 9527 } 9528 9529 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9530 u64 start, u64 num_bytes, u64 min_size, 9531 loff_t actual_len, u64 *alloc_hint, 9532 struct btrfs_trans_handle *trans) 9533 { 9534 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9535 struct extent_map *em; 9536 struct btrfs_root *root = BTRFS_I(inode)->root; 9537 struct btrfs_key ins; 9538 u64 cur_offset = start; 9539 u64 clear_offset = start; 9540 u64 i_size; 9541 u64 cur_bytes; 9542 u64 last_alloc = (u64)-1; 9543 int ret = 0; 9544 bool own_trans = true; 9545 u64 end = start + num_bytes - 1; 9546 9547 if (trans) 9548 own_trans = false; 9549 while (num_bytes > 0) { 9550 cur_bytes = min_t(u64, num_bytes, SZ_256M); 9551 cur_bytes = max(cur_bytes, min_size); 9552 /* 9553 * If we are severely fragmented we could end up with really 9554 * small allocations, so if the allocator is returning small 9555 * chunks lets make its job easier by only searching for those 9556 * sized chunks. 9557 */ 9558 cur_bytes = min(cur_bytes, last_alloc); 9559 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 9560 min_size, 0, *alloc_hint, &ins, 1, 0); 9561 if (ret) 9562 break; 9563 9564 /* 9565 * We've reserved this space, and thus converted it from 9566 * ->bytes_may_use to ->bytes_reserved. Any error that happens 9567 * from here on out we will only need to clear our reservation 9568 * for the remaining unreserved area, so advance our 9569 * clear_offset by our extent size. 9570 */ 9571 clear_offset += ins.offset; 9572 9573 last_alloc = ins.offset; 9574 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode), 9575 &ins, cur_offset); 9576 /* 9577 * Now that we inserted the prealloc extent we can finally 9578 * decrement the number of reservations in the block group. 9579 * If we did it before, we could race with relocation and have 9580 * relocation miss the reserved extent, making it fail later. 9581 */ 9582 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 9583 if (IS_ERR(trans)) { 9584 ret = PTR_ERR(trans); 9585 btrfs_free_reserved_extent(fs_info, ins.objectid, 9586 ins.offset, 0); 9587 break; 9588 } 9589 9590 em = alloc_extent_map(); 9591 if (!em) { 9592 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, 9593 cur_offset + ins.offset - 1, false); 9594 btrfs_set_inode_full_sync(BTRFS_I(inode)); 9595 goto next; 9596 } 9597 9598 em->start = cur_offset; 9599 em->orig_start = cur_offset; 9600 em->len = ins.offset; 9601 em->block_start = ins.objectid; 9602 em->block_len = ins.offset; 9603 em->orig_block_len = ins.offset; 9604 em->ram_bytes = ins.offset; 9605 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9606 em->generation = trans->transid; 9607 9608 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); 9609 free_extent_map(em); 9610 next: 9611 num_bytes -= ins.offset; 9612 cur_offset += ins.offset; 9613 *alloc_hint = ins.objectid + ins.offset; 9614 9615 inode_inc_iversion(inode); 9616 inode_set_ctime_current(inode); 9617 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9618 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9619 (actual_len > inode->i_size) && 9620 (cur_offset > inode->i_size)) { 9621 if (cur_offset > actual_len) 9622 i_size = actual_len; 9623 else 9624 i_size = cur_offset; 9625 i_size_write(inode, i_size); 9626 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 9627 } 9628 9629 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 9630 9631 if (ret) { 9632 btrfs_abort_transaction(trans, ret); 9633 if (own_trans) 9634 btrfs_end_transaction(trans); 9635 break; 9636 } 9637 9638 if (own_trans) { 9639 btrfs_end_transaction(trans); 9640 trans = NULL; 9641 } 9642 } 9643 if (clear_offset < end) 9644 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset, 9645 end - clear_offset + 1); 9646 return ret; 9647 } 9648 9649 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9650 u64 start, u64 num_bytes, u64 min_size, 9651 loff_t actual_len, u64 *alloc_hint) 9652 { 9653 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9654 min_size, actual_len, alloc_hint, 9655 NULL); 9656 } 9657 9658 int btrfs_prealloc_file_range_trans(struct inode *inode, 9659 struct btrfs_trans_handle *trans, int mode, 9660 u64 start, u64 num_bytes, u64 min_size, 9661 loff_t actual_len, u64 *alloc_hint) 9662 { 9663 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9664 min_size, actual_len, alloc_hint, trans); 9665 } 9666 9667 static int btrfs_permission(struct mnt_idmap *idmap, 9668 struct inode *inode, int mask) 9669 { 9670 struct btrfs_root *root = BTRFS_I(inode)->root; 9671 umode_t mode = inode->i_mode; 9672 9673 if (mask & MAY_WRITE && 9674 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9675 if (btrfs_root_readonly(root)) 9676 return -EROFS; 9677 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9678 return -EACCES; 9679 } 9680 return generic_permission(idmap, inode, mask); 9681 } 9682 9683 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 9684 struct file *file, umode_t mode) 9685 { 9686 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 9687 struct btrfs_trans_handle *trans; 9688 struct btrfs_root *root = BTRFS_I(dir)->root; 9689 struct inode *inode; 9690 struct btrfs_new_inode_args new_inode_args = { 9691 .dir = dir, 9692 .dentry = file->f_path.dentry, 9693 .orphan = true, 9694 }; 9695 unsigned int trans_num_items; 9696 int ret; 9697 9698 inode = new_inode(dir->i_sb); 9699 if (!inode) 9700 return -ENOMEM; 9701 inode_init_owner(idmap, inode, dir, mode); 9702 inode->i_fop = &btrfs_file_operations; 9703 inode->i_op = &btrfs_file_inode_operations; 9704 inode->i_mapping->a_ops = &btrfs_aops; 9705 9706 new_inode_args.inode = inode; 9707 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); 9708 if (ret) 9709 goto out_inode; 9710 9711 trans = btrfs_start_transaction(root, trans_num_items); 9712 if (IS_ERR(trans)) { 9713 ret = PTR_ERR(trans); 9714 goto out_new_inode_args; 9715 } 9716 9717 ret = btrfs_create_new_inode(trans, &new_inode_args); 9718 9719 /* 9720 * We set number of links to 0 in btrfs_create_new_inode(), and here we 9721 * set it to 1 because d_tmpfile() will issue a warning if the count is 9722 * 0, through: 9723 * 9724 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9725 */ 9726 set_nlink(inode, 1); 9727 9728 if (!ret) { 9729 d_tmpfile(file, inode); 9730 unlock_new_inode(inode); 9731 mark_inode_dirty(inode); 9732 } 9733 9734 btrfs_end_transaction(trans); 9735 btrfs_btree_balance_dirty(fs_info); 9736 out_new_inode_args: 9737 btrfs_new_inode_args_destroy(&new_inode_args); 9738 out_inode: 9739 if (ret) 9740 iput(inode); 9741 return finish_open_simple(file, ret); 9742 } 9743 9744 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) 9745 { 9746 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9747 unsigned long index = start >> PAGE_SHIFT; 9748 unsigned long end_index = end >> PAGE_SHIFT; 9749 struct page *page; 9750 u32 len; 9751 9752 ASSERT(end + 1 - start <= U32_MAX); 9753 len = end + 1 - start; 9754 while (index <= end_index) { 9755 page = find_get_page(inode->vfs_inode.i_mapping, index); 9756 ASSERT(page); /* Pages should be in the extent_io_tree */ 9757 9758 btrfs_page_set_writeback(fs_info, page, start, len); 9759 put_page(page); 9760 index++; 9761 } 9762 } 9763 9764 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info, 9765 int compress_type) 9766 { 9767 switch (compress_type) { 9768 case BTRFS_COMPRESS_NONE: 9769 return BTRFS_ENCODED_IO_COMPRESSION_NONE; 9770 case BTRFS_COMPRESS_ZLIB: 9771 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB; 9772 case BTRFS_COMPRESS_LZO: 9773 /* 9774 * The LZO format depends on the sector size. 64K is the maximum 9775 * sector size that we support. 9776 */ 9777 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) 9778 return -EINVAL; 9779 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 9780 (fs_info->sectorsize_bits - 12); 9781 case BTRFS_COMPRESS_ZSTD: 9782 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD; 9783 default: 9784 return -EUCLEAN; 9785 } 9786 } 9787 9788 static ssize_t btrfs_encoded_read_inline( 9789 struct kiocb *iocb, 9790 struct iov_iter *iter, u64 start, 9791 u64 lockend, 9792 struct extent_state **cached_state, 9793 u64 extent_start, size_t count, 9794 struct btrfs_ioctl_encoded_io_args *encoded, 9795 bool *unlocked) 9796 { 9797 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9798 struct btrfs_root *root = inode->root; 9799 struct btrfs_fs_info *fs_info = root->fs_info; 9800 struct extent_io_tree *io_tree = &inode->io_tree; 9801 struct btrfs_path *path; 9802 struct extent_buffer *leaf; 9803 struct btrfs_file_extent_item *item; 9804 u64 ram_bytes; 9805 unsigned long ptr; 9806 void *tmp; 9807 ssize_t ret; 9808 9809 path = btrfs_alloc_path(); 9810 if (!path) { 9811 ret = -ENOMEM; 9812 goto out; 9813 } 9814 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 9815 extent_start, 0); 9816 if (ret) { 9817 if (ret > 0) { 9818 /* The extent item disappeared? */ 9819 ret = -EIO; 9820 } 9821 goto out; 9822 } 9823 leaf = path->nodes[0]; 9824 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); 9825 9826 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); 9827 ptr = btrfs_file_extent_inline_start(item); 9828 9829 encoded->len = min_t(u64, extent_start + ram_bytes, 9830 inode->vfs_inode.i_size) - iocb->ki_pos; 9831 ret = btrfs_encoded_io_compression_from_extent(fs_info, 9832 btrfs_file_extent_compression(leaf, item)); 9833 if (ret < 0) 9834 goto out; 9835 encoded->compression = ret; 9836 if (encoded->compression) { 9837 size_t inline_size; 9838 9839 inline_size = btrfs_file_extent_inline_item_len(leaf, 9840 path->slots[0]); 9841 if (inline_size > count) { 9842 ret = -ENOBUFS; 9843 goto out; 9844 } 9845 count = inline_size; 9846 encoded->unencoded_len = ram_bytes; 9847 encoded->unencoded_offset = iocb->ki_pos - extent_start; 9848 } else { 9849 count = min_t(u64, count, encoded->len); 9850 encoded->len = count; 9851 encoded->unencoded_len = count; 9852 ptr += iocb->ki_pos - extent_start; 9853 } 9854 9855 tmp = kmalloc(count, GFP_NOFS); 9856 if (!tmp) { 9857 ret = -ENOMEM; 9858 goto out; 9859 } 9860 read_extent_buffer(leaf, tmp, ptr, count); 9861 btrfs_release_path(path); 9862 unlock_extent(io_tree, start, lockend, cached_state); 9863 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9864 *unlocked = true; 9865 9866 ret = copy_to_iter(tmp, count, iter); 9867 if (ret != count) 9868 ret = -EFAULT; 9869 kfree(tmp); 9870 out: 9871 btrfs_free_path(path); 9872 return ret; 9873 } 9874 9875 struct btrfs_encoded_read_private { 9876 wait_queue_head_t wait; 9877 atomic_t pending; 9878 blk_status_t status; 9879 }; 9880 9881 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) 9882 { 9883 struct btrfs_encoded_read_private *priv = bbio->private; 9884 9885 if (bbio->bio.bi_status) { 9886 /* 9887 * The memory barrier implied by the atomic_dec_return() here 9888 * pairs with the memory barrier implied by the 9889 * atomic_dec_return() or io_wait_event() in 9890 * btrfs_encoded_read_regular_fill_pages() to ensure that this 9891 * write is observed before the load of status in 9892 * btrfs_encoded_read_regular_fill_pages(). 9893 */ 9894 WRITE_ONCE(priv->status, bbio->bio.bi_status); 9895 } 9896 if (!atomic_dec_return(&priv->pending)) 9897 wake_up(&priv->wait); 9898 bio_put(&bbio->bio); 9899 } 9900 9901 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, 9902 u64 file_offset, u64 disk_bytenr, 9903 u64 disk_io_size, struct page **pages) 9904 { 9905 struct btrfs_fs_info *fs_info = inode->root->fs_info; 9906 struct btrfs_encoded_read_private priv = { 9907 .pending = ATOMIC_INIT(1), 9908 }; 9909 unsigned long i = 0; 9910 struct btrfs_bio *bbio; 9911 9912 init_waitqueue_head(&priv.wait); 9913 9914 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9915 btrfs_encoded_read_endio, &priv); 9916 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9917 bbio->inode = inode; 9918 9919 do { 9920 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); 9921 9922 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 9923 atomic_inc(&priv.pending); 9924 btrfs_submit_bio(bbio, 0); 9925 9926 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, 9927 btrfs_encoded_read_endio, &priv); 9928 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; 9929 bbio->inode = inode; 9930 continue; 9931 } 9932 9933 i++; 9934 disk_bytenr += bytes; 9935 disk_io_size -= bytes; 9936 } while (disk_io_size); 9937 9938 atomic_inc(&priv.pending); 9939 btrfs_submit_bio(bbio, 0); 9940 9941 if (atomic_dec_return(&priv.pending)) 9942 io_wait_event(priv.wait, !atomic_read(&priv.pending)); 9943 /* See btrfs_encoded_read_endio() for ordering. */ 9944 return blk_status_to_errno(READ_ONCE(priv.status)); 9945 } 9946 9947 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, 9948 struct iov_iter *iter, 9949 u64 start, u64 lockend, 9950 struct extent_state **cached_state, 9951 u64 disk_bytenr, u64 disk_io_size, 9952 size_t count, bool compressed, 9953 bool *unlocked) 9954 { 9955 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 9956 struct extent_io_tree *io_tree = &inode->io_tree; 9957 struct page **pages; 9958 unsigned long nr_pages, i; 9959 u64 cur; 9960 size_t page_offset; 9961 ssize_t ret; 9962 9963 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); 9964 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 9965 if (!pages) 9966 return -ENOMEM; 9967 ret = btrfs_alloc_page_array(nr_pages, pages); 9968 if (ret) { 9969 ret = -ENOMEM; 9970 goto out; 9971 } 9972 9973 ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr, 9974 disk_io_size, pages); 9975 if (ret) 9976 goto out; 9977 9978 unlock_extent(io_tree, start, lockend, cached_state); 9979 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 9980 *unlocked = true; 9981 9982 if (compressed) { 9983 i = 0; 9984 page_offset = 0; 9985 } else { 9986 i = (iocb->ki_pos - start) >> PAGE_SHIFT; 9987 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); 9988 } 9989 cur = 0; 9990 while (cur < count) { 9991 size_t bytes = min_t(size_t, count - cur, 9992 PAGE_SIZE - page_offset); 9993 9994 if (copy_page_to_iter(pages[i], page_offset, bytes, 9995 iter) != bytes) { 9996 ret = -EFAULT; 9997 goto out; 9998 } 9999 i++; 10000 cur += bytes; 10001 page_offset = 0; 10002 } 10003 ret = count; 10004 out: 10005 for (i = 0; i < nr_pages; i++) { 10006 if (pages[i]) 10007 __free_page(pages[i]); 10008 } 10009 kfree(pages); 10010 return ret; 10011 } 10012 10013 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, 10014 struct btrfs_ioctl_encoded_io_args *encoded) 10015 { 10016 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10017 struct btrfs_fs_info *fs_info = inode->root->fs_info; 10018 struct extent_io_tree *io_tree = &inode->io_tree; 10019 ssize_t ret; 10020 size_t count = iov_iter_count(iter); 10021 u64 start, lockend, disk_bytenr, disk_io_size; 10022 struct extent_state *cached_state = NULL; 10023 struct extent_map *em; 10024 bool unlocked = false; 10025 10026 file_accessed(iocb->ki_filp); 10027 10028 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED); 10029 10030 if (iocb->ki_pos >= inode->vfs_inode.i_size) { 10031 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10032 return 0; 10033 } 10034 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); 10035 /* 10036 * We don't know how long the extent containing iocb->ki_pos is, but if 10037 * it's compressed we know that it won't be longer than this. 10038 */ 10039 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; 10040 10041 for (;;) { 10042 struct btrfs_ordered_extent *ordered; 10043 10044 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, 10045 lockend - start + 1); 10046 if (ret) 10047 goto out_unlock_inode; 10048 lock_extent(io_tree, start, lockend, &cached_state); 10049 ordered = btrfs_lookup_ordered_range(inode, start, 10050 lockend - start + 1); 10051 if (!ordered) 10052 break; 10053 btrfs_put_ordered_extent(ordered); 10054 unlock_extent(io_tree, start, lockend, &cached_state); 10055 cond_resched(); 10056 } 10057 10058 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); 10059 if (IS_ERR(em)) { 10060 ret = PTR_ERR(em); 10061 goto out_unlock_extent; 10062 } 10063 10064 if (em->block_start == EXTENT_MAP_INLINE) { 10065 u64 extent_start = em->start; 10066 10067 /* 10068 * For inline extents we get everything we need out of the 10069 * extent item. 10070 */ 10071 free_extent_map(em); 10072 em = NULL; 10073 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, 10074 &cached_state, extent_start, 10075 count, encoded, &unlocked); 10076 goto out; 10077 } 10078 10079 /* 10080 * We only want to return up to EOF even if the extent extends beyond 10081 * that. 10082 */ 10083 encoded->len = min_t(u64, extent_map_end(em), 10084 inode->vfs_inode.i_size) - iocb->ki_pos; 10085 if (em->block_start == EXTENT_MAP_HOLE || 10086 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 10087 disk_bytenr = EXTENT_MAP_HOLE; 10088 count = min_t(u64, count, encoded->len); 10089 encoded->len = count; 10090 encoded->unencoded_len = count; 10091 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10092 disk_bytenr = em->block_start; 10093 /* 10094 * Bail if the buffer isn't large enough to return the whole 10095 * compressed extent. 10096 */ 10097 if (em->block_len > count) { 10098 ret = -ENOBUFS; 10099 goto out_em; 10100 } 10101 disk_io_size = em->block_len; 10102 count = em->block_len; 10103 encoded->unencoded_len = em->ram_bytes; 10104 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; 10105 ret = btrfs_encoded_io_compression_from_extent(fs_info, 10106 em->compress_type); 10107 if (ret < 0) 10108 goto out_em; 10109 encoded->compression = ret; 10110 } else { 10111 disk_bytenr = em->block_start + (start - em->start); 10112 if (encoded->len > count) 10113 encoded->len = count; 10114 /* 10115 * Don't read beyond what we locked. This also limits the page 10116 * allocations that we'll do. 10117 */ 10118 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; 10119 count = start + disk_io_size - iocb->ki_pos; 10120 encoded->len = count; 10121 encoded->unencoded_len = count; 10122 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); 10123 } 10124 free_extent_map(em); 10125 em = NULL; 10126 10127 if (disk_bytenr == EXTENT_MAP_HOLE) { 10128 unlock_extent(io_tree, start, lockend, &cached_state); 10129 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10130 unlocked = true; 10131 ret = iov_iter_zero(count, iter); 10132 if (ret != count) 10133 ret = -EFAULT; 10134 } else { 10135 ret = btrfs_encoded_read_regular(iocb, iter, start, lockend, 10136 &cached_state, disk_bytenr, 10137 disk_io_size, count, 10138 encoded->compression, 10139 &unlocked); 10140 } 10141 10142 out: 10143 if (ret >= 0) 10144 iocb->ki_pos += encoded->len; 10145 out_em: 10146 free_extent_map(em); 10147 out_unlock_extent: 10148 if (!unlocked) 10149 unlock_extent(io_tree, start, lockend, &cached_state); 10150 out_unlock_inode: 10151 if (!unlocked) 10152 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); 10153 return ret; 10154 } 10155 10156 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 10157 const struct btrfs_ioctl_encoded_io_args *encoded) 10158 { 10159 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); 10160 struct btrfs_root *root = inode->root; 10161 struct btrfs_fs_info *fs_info = root->fs_info; 10162 struct extent_io_tree *io_tree = &inode->io_tree; 10163 struct extent_changeset *data_reserved = NULL; 10164 struct extent_state *cached_state = NULL; 10165 struct btrfs_ordered_extent *ordered; 10166 int compression; 10167 size_t orig_count; 10168 u64 start, end; 10169 u64 num_bytes, ram_bytes, disk_num_bytes; 10170 unsigned long nr_pages, i; 10171 struct page **pages; 10172 struct btrfs_key ins; 10173 bool extent_reserved = false; 10174 struct extent_map *em; 10175 ssize_t ret; 10176 10177 switch (encoded->compression) { 10178 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB: 10179 compression = BTRFS_COMPRESS_ZLIB; 10180 break; 10181 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD: 10182 compression = BTRFS_COMPRESS_ZSTD; 10183 break; 10184 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K: 10185 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K: 10186 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K: 10187 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K: 10188 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K: 10189 /* The sector size must match for LZO. */ 10190 if (encoded->compression - 10191 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 != 10192 fs_info->sectorsize_bits) 10193 return -EINVAL; 10194 compression = BTRFS_COMPRESS_LZO; 10195 break; 10196 default: 10197 return -EINVAL; 10198 } 10199 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) 10200 return -EINVAL; 10201 10202 orig_count = iov_iter_count(from); 10203 10204 /* The extent size must be sane. */ 10205 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || 10206 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0) 10207 return -EINVAL; 10208 10209 /* 10210 * The compressed data must be smaller than the decompressed data. 10211 * 10212 * It's of course possible for data to compress to larger or the same 10213 * size, but the buffered I/O path falls back to no compression for such 10214 * data, and we don't want to break any assumptions by creating these 10215 * extents. 10216 * 10217 * Note that this is less strict than the current check we have that the 10218 * compressed data must be at least one sector smaller than the 10219 * decompressed data. We only want to enforce the weaker requirement 10220 * from old kernels that it is at least one byte smaller. 10221 */ 10222 if (orig_count >= encoded->unencoded_len) 10223 return -EINVAL; 10224 10225 /* The extent must start on a sector boundary. */ 10226 start = iocb->ki_pos; 10227 if (!IS_ALIGNED(start, fs_info->sectorsize)) 10228 return -EINVAL; 10229 10230 /* 10231 * The extent must end on a sector boundary. However, we allow a write 10232 * which ends at or extends i_size to have an unaligned length; we round 10233 * up the extent size and set i_size to the unaligned end. 10234 */ 10235 if (start + encoded->len < inode->vfs_inode.i_size && 10236 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) 10237 return -EINVAL; 10238 10239 /* Finally, the offset in the unencoded data must be sector-aligned. */ 10240 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) 10241 return -EINVAL; 10242 10243 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); 10244 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); 10245 end = start + num_bytes - 1; 10246 10247 /* 10248 * If the extent cannot be inline, the compressed data on disk must be 10249 * sector-aligned. For convenience, we extend it with zeroes if it 10250 * isn't. 10251 */ 10252 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); 10253 nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE); 10254 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT); 10255 if (!pages) 10256 return -ENOMEM; 10257 for (i = 0; i < nr_pages; i++) { 10258 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from)); 10259 char *kaddr; 10260 10261 pages[i] = alloc_page(GFP_KERNEL_ACCOUNT); 10262 if (!pages[i]) { 10263 ret = -ENOMEM; 10264 goto out_pages; 10265 } 10266 kaddr = kmap_local_page(pages[i]); 10267 if (copy_from_iter(kaddr, bytes, from) != bytes) { 10268 kunmap_local(kaddr); 10269 ret = -EFAULT; 10270 goto out_pages; 10271 } 10272 if (bytes < PAGE_SIZE) 10273 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); 10274 kunmap_local(kaddr); 10275 } 10276 10277 for (;;) { 10278 struct btrfs_ordered_extent *ordered; 10279 10280 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); 10281 if (ret) 10282 goto out_pages; 10283 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, 10284 start >> PAGE_SHIFT, 10285 end >> PAGE_SHIFT); 10286 if (ret) 10287 goto out_pages; 10288 lock_extent(io_tree, start, end, &cached_state); 10289 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); 10290 if (!ordered && 10291 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) 10292 break; 10293 if (ordered) 10294 btrfs_put_ordered_extent(ordered); 10295 unlock_extent(io_tree, start, end, &cached_state); 10296 cond_resched(); 10297 } 10298 10299 /* 10300 * We don't use the higher-level delalloc space functions because our 10301 * num_bytes and disk_num_bytes are different. 10302 */ 10303 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes); 10304 if (ret) 10305 goto out_unlock; 10306 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); 10307 if (ret) 10308 goto out_free_data_space; 10309 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, 10310 false); 10311 if (ret) 10312 goto out_qgroup_free_data; 10313 10314 /* Try an inline extent first. */ 10315 if (start == 0 && encoded->unencoded_len == encoded->len && 10316 encoded->unencoded_offset == 0) { 10317 ret = cow_file_range_inline(inode, encoded->len, orig_count, 10318 compression, pages, true); 10319 if (ret <= 0) { 10320 if (ret == 0) 10321 ret = orig_count; 10322 goto out_delalloc_release; 10323 } 10324 } 10325 10326 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes, 10327 disk_num_bytes, 0, 0, &ins, 1, 1); 10328 if (ret) 10329 goto out_delalloc_release; 10330 extent_reserved = true; 10331 10332 em = create_io_em(inode, start, num_bytes, 10333 start - encoded->unencoded_offset, ins.objectid, 10334 ins.offset, ins.offset, ram_bytes, compression, 10335 BTRFS_ORDERED_COMPRESSED); 10336 if (IS_ERR(em)) { 10337 ret = PTR_ERR(em); 10338 goto out_free_reserved; 10339 } 10340 free_extent_map(em); 10341 10342 ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes, 10343 ins.objectid, ins.offset, 10344 encoded->unencoded_offset, 10345 (1 << BTRFS_ORDERED_ENCODED) | 10346 (1 << BTRFS_ORDERED_COMPRESSED), 10347 compression); 10348 if (IS_ERR(ordered)) { 10349 btrfs_drop_extent_map_range(inode, start, end, false); 10350 ret = PTR_ERR(ordered); 10351 goto out_free_reserved; 10352 } 10353 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10354 10355 if (start + encoded->len > inode->vfs_inode.i_size) 10356 i_size_write(&inode->vfs_inode, start + encoded->len); 10357 10358 unlock_extent(io_tree, start, end, &cached_state); 10359 10360 btrfs_delalloc_release_extents(inode, num_bytes); 10361 10362 btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false); 10363 ret = orig_count; 10364 goto out; 10365 10366 out_free_reserved: 10367 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10368 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 10369 out_delalloc_release: 10370 btrfs_delalloc_release_extents(inode, num_bytes); 10371 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); 10372 out_qgroup_free_data: 10373 if (ret < 0) 10374 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes); 10375 out_free_data_space: 10376 /* 10377 * If btrfs_reserve_extent() succeeded, then we already decremented 10378 * bytes_may_use. 10379 */ 10380 if (!extent_reserved) 10381 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); 10382 out_unlock: 10383 unlock_extent(io_tree, start, end, &cached_state); 10384 out_pages: 10385 for (i = 0; i < nr_pages; i++) { 10386 if (pages[i]) 10387 __free_page(pages[i]); 10388 } 10389 kvfree(pages); 10390 out: 10391 if (ret >= 0) 10392 iocb->ki_pos += encoded->len; 10393 return ret; 10394 } 10395 10396 #ifdef CONFIG_SWAP 10397 /* 10398 * Add an entry indicating a block group or device which is pinned by a 10399 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10400 * negative errno on failure. 10401 */ 10402 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10403 bool is_block_group) 10404 { 10405 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10406 struct btrfs_swapfile_pin *sp, *entry; 10407 struct rb_node **p; 10408 struct rb_node *parent = NULL; 10409 10410 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10411 if (!sp) 10412 return -ENOMEM; 10413 sp->ptr = ptr; 10414 sp->inode = inode; 10415 sp->is_block_group = is_block_group; 10416 sp->bg_extent_count = 1; 10417 10418 spin_lock(&fs_info->swapfile_pins_lock); 10419 p = &fs_info->swapfile_pins.rb_node; 10420 while (*p) { 10421 parent = *p; 10422 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10423 if (sp->ptr < entry->ptr || 10424 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10425 p = &(*p)->rb_left; 10426 } else if (sp->ptr > entry->ptr || 10427 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10428 p = &(*p)->rb_right; 10429 } else { 10430 if (is_block_group) 10431 entry->bg_extent_count++; 10432 spin_unlock(&fs_info->swapfile_pins_lock); 10433 kfree(sp); 10434 return 1; 10435 } 10436 } 10437 rb_link_node(&sp->node, parent, p); 10438 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10439 spin_unlock(&fs_info->swapfile_pins_lock); 10440 return 0; 10441 } 10442 10443 /* Free all of the entries pinned by this swapfile. */ 10444 static void btrfs_free_swapfile_pins(struct inode *inode) 10445 { 10446 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10447 struct btrfs_swapfile_pin *sp; 10448 struct rb_node *node, *next; 10449 10450 spin_lock(&fs_info->swapfile_pins_lock); 10451 node = rb_first(&fs_info->swapfile_pins); 10452 while (node) { 10453 next = rb_next(node); 10454 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10455 if (sp->inode == inode) { 10456 rb_erase(&sp->node, &fs_info->swapfile_pins); 10457 if (sp->is_block_group) { 10458 btrfs_dec_block_group_swap_extents(sp->ptr, 10459 sp->bg_extent_count); 10460 btrfs_put_block_group(sp->ptr); 10461 } 10462 kfree(sp); 10463 } 10464 node = next; 10465 } 10466 spin_unlock(&fs_info->swapfile_pins_lock); 10467 } 10468 10469 struct btrfs_swap_info { 10470 u64 start; 10471 u64 block_start; 10472 u64 block_len; 10473 u64 lowest_ppage; 10474 u64 highest_ppage; 10475 unsigned long nr_pages; 10476 int nr_extents; 10477 }; 10478 10479 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10480 struct btrfs_swap_info *bsi) 10481 { 10482 unsigned long nr_pages; 10483 unsigned long max_pages; 10484 u64 first_ppage, first_ppage_reported, next_ppage; 10485 int ret; 10486 10487 /* 10488 * Our swapfile may have had its size extended after the swap header was 10489 * written. In that case activating the swapfile should not go beyond 10490 * the max size set in the swap header. 10491 */ 10492 if (bsi->nr_pages >= sis->max) 10493 return 0; 10494 10495 max_pages = sis->max - bsi->nr_pages; 10496 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; 10497 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; 10498 10499 if (first_ppage >= next_ppage) 10500 return 0; 10501 nr_pages = next_ppage - first_ppage; 10502 nr_pages = min(nr_pages, max_pages); 10503 10504 first_ppage_reported = first_ppage; 10505 if (bsi->start == 0) 10506 first_ppage_reported++; 10507 if (bsi->lowest_ppage > first_ppage_reported) 10508 bsi->lowest_ppage = first_ppage_reported; 10509 if (bsi->highest_ppage < (next_ppage - 1)) 10510 bsi->highest_ppage = next_ppage - 1; 10511 10512 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10513 if (ret < 0) 10514 return ret; 10515 bsi->nr_extents += ret; 10516 bsi->nr_pages += nr_pages; 10517 return 0; 10518 } 10519 10520 static void btrfs_swap_deactivate(struct file *file) 10521 { 10522 struct inode *inode = file_inode(file); 10523 10524 btrfs_free_swapfile_pins(inode); 10525 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10526 } 10527 10528 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10529 sector_t *span) 10530 { 10531 struct inode *inode = file_inode(file); 10532 struct btrfs_root *root = BTRFS_I(inode)->root; 10533 struct btrfs_fs_info *fs_info = root->fs_info; 10534 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10535 struct extent_state *cached_state = NULL; 10536 struct extent_map *em = NULL; 10537 struct btrfs_device *device = NULL; 10538 struct btrfs_swap_info bsi = { 10539 .lowest_ppage = (sector_t)-1ULL, 10540 }; 10541 int ret = 0; 10542 u64 isize; 10543 u64 start; 10544 10545 /* 10546 * If the swap file was just created, make sure delalloc is done. If the 10547 * file changes again after this, the user is doing something stupid and 10548 * we don't really care. 10549 */ 10550 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10551 if (ret) 10552 return ret; 10553 10554 /* 10555 * The inode is locked, so these flags won't change after we check them. 10556 */ 10557 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10558 btrfs_warn(fs_info, "swapfile must not be compressed"); 10559 return -EINVAL; 10560 } 10561 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10562 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10563 return -EINVAL; 10564 } 10565 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10566 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10567 return -EINVAL; 10568 } 10569 10570 /* 10571 * Balance or device remove/replace/resize can move stuff around from 10572 * under us. The exclop protection makes sure they aren't running/won't 10573 * run concurrently while we are mapping the swap extents, and 10574 * fs_info->swapfile_pins prevents them from running while the swap 10575 * file is active and moving the extents. Note that this also prevents 10576 * a concurrent device add which isn't actually necessary, but it's not 10577 * really worth the trouble to allow it. 10578 */ 10579 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) { 10580 btrfs_warn(fs_info, 10581 "cannot activate swapfile while exclusive operation is running"); 10582 return -EBUSY; 10583 } 10584 10585 /* 10586 * Prevent snapshot creation while we are activating the swap file. 10587 * We do not want to race with snapshot creation. If snapshot creation 10588 * already started before we bumped nr_swapfiles from 0 to 1 and 10589 * completes before the first write into the swap file after it is 10590 * activated, than that write would fallback to COW. 10591 */ 10592 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { 10593 btrfs_exclop_finish(fs_info); 10594 btrfs_warn(fs_info, 10595 "cannot activate swapfile because snapshot creation is in progress"); 10596 return -EINVAL; 10597 } 10598 /* 10599 * Snapshots can create extents which require COW even if NODATACOW is 10600 * set. We use this counter to prevent snapshots. We must increment it 10601 * before walking the extents because we don't want a concurrent 10602 * snapshot to run after we've already checked the extents. 10603 * 10604 * It is possible that subvolume is marked for deletion but still not 10605 * removed yet. To prevent this race, we check the root status before 10606 * activating the swapfile. 10607 */ 10608 spin_lock(&root->root_item_lock); 10609 if (btrfs_root_dead(root)) { 10610 spin_unlock(&root->root_item_lock); 10611 10612 btrfs_exclop_finish(fs_info); 10613 btrfs_warn(fs_info, 10614 "cannot activate swapfile because subvolume %llu is being deleted", 10615 root->root_key.objectid); 10616 return -EPERM; 10617 } 10618 atomic_inc(&root->nr_swapfiles); 10619 spin_unlock(&root->root_item_lock); 10620 10621 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); 10622 10623 lock_extent(io_tree, 0, isize - 1, &cached_state); 10624 start = 0; 10625 while (start < isize) { 10626 u64 logical_block_start, physical_block_start; 10627 struct btrfs_block_group *bg; 10628 u64 len = isize - start; 10629 10630 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len); 10631 if (IS_ERR(em)) { 10632 ret = PTR_ERR(em); 10633 goto out; 10634 } 10635 10636 if (em->block_start == EXTENT_MAP_HOLE) { 10637 btrfs_warn(fs_info, "swapfile must not have holes"); 10638 ret = -EINVAL; 10639 goto out; 10640 } 10641 if (em->block_start == EXTENT_MAP_INLINE) { 10642 /* 10643 * It's unlikely we'll ever actually find ourselves 10644 * here, as a file small enough to fit inline won't be 10645 * big enough to store more than the swap header, but in 10646 * case something changes in the future, let's catch it 10647 * here rather than later. 10648 */ 10649 btrfs_warn(fs_info, "swapfile must not be inline"); 10650 ret = -EINVAL; 10651 goto out; 10652 } 10653 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10654 btrfs_warn(fs_info, "swapfile must not be compressed"); 10655 ret = -EINVAL; 10656 goto out; 10657 } 10658 10659 logical_block_start = em->block_start + (start - em->start); 10660 len = min(len, em->len - (start - em->start)); 10661 free_extent_map(em); 10662 em = NULL; 10663 10664 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true); 10665 if (ret < 0) { 10666 goto out; 10667 } else if (ret) { 10668 ret = 0; 10669 } else { 10670 btrfs_warn(fs_info, 10671 "swapfile must not be copy-on-write"); 10672 ret = -EINVAL; 10673 goto out; 10674 } 10675 10676 em = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10677 if (IS_ERR(em)) { 10678 ret = PTR_ERR(em); 10679 goto out; 10680 } 10681 10682 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10683 btrfs_warn(fs_info, 10684 "swapfile must have single data profile"); 10685 ret = -EINVAL; 10686 goto out; 10687 } 10688 10689 if (device == NULL) { 10690 device = em->map_lookup->stripes[0].dev; 10691 ret = btrfs_add_swapfile_pin(inode, device, false); 10692 if (ret == 1) 10693 ret = 0; 10694 else if (ret) 10695 goto out; 10696 } else if (device != em->map_lookup->stripes[0].dev) { 10697 btrfs_warn(fs_info, "swapfile must be on one device"); 10698 ret = -EINVAL; 10699 goto out; 10700 } 10701 10702 physical_block_start = (em->map_lookup->stripes[0].physical + 10703 (logical_block_start - em->start)); 10704 len = min(len, em->len - (logical_block_start - em->start)); 10705 free_extent_map(em); 10706 em = NULL; 10707 10708 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10709 if (!bg) { 10710 btrfs_warn(fs_info, 10711 "could not find block group containing swapfile"); 10712 ret = -EINVAL; 10713 goto out; 10714 } 10715 10716 if (!btrfs_inc_block_group_swap_extents(bg)) { 10717 btrfs_warn(fs_info, 10718 "block group for swapfile at %llu is read-only%s", 10719 bg->start, 10720 atomic_read(&fs_info->scrubs_running) ? 10721 " (scrub running)" : ""); 10722 btrfs_put_block_group(bg); 10723 ret = -EINVAL; 10724 goto out; 10725 } 10726 10727 ret = btrfs_add_swapfile_pin(inode, bg, true); 10728 if (ret) { 10729 btrfs_put_block_group(bg); 10730 if (ret == 1) 10731 ret = 0; 10732 else 10733 goto out; 10734 } 10735 10736 if (bsi.block_len && 10737 bsi.block_start + bsi.block_len == physical_block_start) { 10738 bsi.block_len += len; 10739 } else { 10740 if (bsi.block_len) { 10741 ret = btrfs_add_swap_extent(sis, &bsi); 10742 if (ret) 10743 goto out; 10744 } 10745 bsi.start = start; 10746 bsi.block_start = physical_block_start; 10747 bsi.block_len = len; 10748 } 10749 10750 start += len; 10751 } 10752 10753 if (bsi.block_len) 10754 ret = btrfs_add_swap_extent(sis, &bsi); 10755 10756 out: 10757 if (!IS_ERR_OR_NULL(em)) 10758 free_extent_map(em); 10759 10760 unlock_extent(io_tree, 0, isize - 1, &cached_state); 10761 10762 if (ret) 10763 btrfs_swap_deactivate(file); 10764 10765 btrfs_drew_write_unlock(&root->snapshot_lock); 10766 10767 btrfs_exclop_finish(fs_info); 10768 10769 if (ret) 10770 return ret; 10771 10772 if (device) 10773 sis->bdev = device->bdev; 10774 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10775 sis->max = bsi.nr_pages; 10776 sis->pages = bsi.nr_pages - 1; 10777 sis->highest_bit = bsi.nr_pages - 1; 10778 return bsi.nr_extents; 10779 } 10780 #else 10781 static void btrfs_swap_deactivate(struct file *file) 10782 { 10783 } 10784 10785 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10786 sector_t *span) 10787 { 10788 return -EOPNOTSUPP; 10789 } 10790 #endif 10791 10792 /* 10793 * Update the number of bytes used in the VFS' inode. When we replace extents in 10794 * a range (clone, dedupe, fallocate's zero range), we must update the number of 10795 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls 10796 * always get a correct value. 10797 */ 10798 void btrfs_update_inode_bytes(struct btrfs_inode *inode, 10799 const u64 add_bytes, 10800 const u64 del_bytes) 10801 { 10802 if (add_bytes == del_bytes) 10803 return; 10804 10805 spin_lock(&inode->lock); 10806 if (del_bytes > 0) 10807 inode_sub_bytes(&inode->vfs_inode, del_bytes); 10808 if (add_bytes > 0) 10809 inode_add_bytes(&inode->vfs_inode, add_bytes); 10810 spin_unlock(&inode->lock); 10811 } 10812 10813 /* 10814 * Verify that there are no ordered extents for a given file range. 10815 * 10816 * @inode: The target inode. 10817 * @start: Start offset of the file range, should be sector size aligned. 10818 * @end: End offset (inclusive) of the file range, its value +1 should be 10819 * sector size aligned. 10820 * 10821 * This should typically be used for cases where we locked an inode's VFS lock in 10822 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, 10823 * we have flushed all delalloc in the range, we have waited for all ordered 10824 * extents in the range to complete and finally we have locked the file range in 10825 * the inode's io_tree. 10826 */ 10827 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) 10828 { 10829 struct btrfs_root *root = inode->root; 10830 struct btrfs_ordered_extent *ordered; 10831 10832 if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) 10833 return; 10834 10835 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); 10836 if (ordered) { 10837 btrfs_err(root->fs_info, 10838 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", 10839 start, end, btrfs_ino(inode), root->root_key.objectid, 10840 ordered->file_offset, 10841 ordered->file_offset + ordered->num_bytes - 1); 10842 btrfs_put_ordered_extent(ordered); 10843 } 10844 10845 ASSERT(ordered == NULL); 10846 } 10847 10848 static const struct inode_operations btrfs_dir_inode_operations = { 10849 .getattr = btrfs_getattr, 10850 .lookup = btrfs_lookup, 10851 .create = btrfs_create, 10852 .unlink = btrfs_unlink, 10853 .link = btrfs_link, 10854 .mkdir = btrfs_mkdir, 10855 .rmdir = btrfs_rmdir, 10856 .rename = btrfs_rename2, 10857 .symlink = btrfs_symlink, 10858 .setattr = btrfs_setattr, 10859 .mknod = btrfs_mknod, 10860 .listxattr = btrfs_listxattr, 10861 .permission = btrfs_permission, 10862 .get_inode_acl = btrfs_get_acl, 10863 .set_acl = btrfs_set_acl, 10864 .update_time = btrfs_update_time, 10865 .tmpfile = btrfs_tmpfile, 10866 .fileattr_get = btrfs_fileattr_get, 10867 .fileattr_set = btrfs_fileattr_set, 10868 }; 10869 10870 static const struct file_operations btrfs_dir_file_operations = { 10871 .llseek = generic_file_llseek, 10872 .read = generic_read_dir, 10873 .iterate_shared = btrfs_real_readdir, 10874 .open = btrfs_opendir, 10875 .unlocked_ioctl = btrfs_ioctl, 10876 #ifdef CONFIG_COMPAT 10877 .compat_ioctl = btrfs_compat_ioctl, 10878 #endif 10879 .release = btrfs_release_file, 10880 .fsync = btrfs_sync_file, 10881 }; 10882 10883 /* 10884 * btrfs doesn't support the bmap operation because swapfiles 10885 * use bmap to make a mapping of extents in the file. They assume 10886 * these extents won't change over the life of the file and they 10887 * use the bmap result to do IO directly to the drive. 10888 * 10889 * the btrfs bmap call would return logical addresses that aren't 10890 * suitable for IO and they also will change frequently as COW 10891 * operations happen. So, swapfile + btrfs == corruption. 10892 * 10893 * For now we're avoiding this by dropping bmap. 10894 */ 10895 static const struct address_space_operations btrfs_aops = { 10896 .read_folio = btrfs_read_folio, 10897 .writepages = btrfs_writepages, 10898 .readahead = btrfs_readahead, 10899 .invalidate_folio = btrfs_invalidate_folio, 10900 .release_folio = btrfs_release_folio, 10901 .migrate_folio = btrfs_migrate_folio, 10902 .dirty_folio = filemap_dirty_folio, 10903 .error_remove_page = generic_error_remove_page, 10904 .swap_activate = btrfs_swap_activate, 10905 .swap_deactivate = btrfs_swap_deactivate, 10906 }; 10907 10908 static const struct inode_operations btrfs_file_inode_operations = { 10909 .getattr = btrfs_getattr, 10910 .setattr = btrfs_setattr, 10911 .listxattr = btrfs_listxattr, 10912 .permission = btrfs_permission, 10913 .fiemap = btrfs_fiemap, 10914 .get_inode_acl = btrfs_get_acl, 10915 .set_acl = btrfs_set_acl, 10916 .update_time = btrfs_update_time, 10917 .fileattr_get = btrfs_fileattr_get, 10918 .fileattr_set = btrfs_fileattr_set, 10919 }; 10920 static const struct inode_operations btrfs_special_inode_operations = { 10921 .getattr = btrfs_getattr, 10922 .setattr = btrfs_setattr, 10923 .permission = btrfs_permission, 10924 .listxattr = btrfs_listxattr, 10925 .get_inode_acl = btrfs_get_acl, 10926 .set_acl = btrfs_set_acl, 10927 .update_time = btrfs_update_time, 10928 }; 10929 static const struct inode_operations btrfs_symlink_inode_operations = { 10930 .get_link = page_get_link, 10931 .getattr = btrfs_getattr, 10932 .setattr = btrfs_setattr, 10933 .permission = btrfs_permission, 10934 .listxattr = btrfs_listxattr, 10935 .update_time = btrfs_update_time, 10936 }; 10937 10938 const struct dentry_operations btrfs_dentry_operations = { 10939 .d_delete = btrfs_dentry_delete, 10940 }; 10941