// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/iomap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "tree-log.h"
#include "locking.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"
#include "ioctl.h"
#include "file.h"
#include "super.h"

/*
 * Simple helper to fault in pages and copy. This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * uptodate pages. These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (unlikely(copied < count)) {
			if (!PageUptodate(page)) {
				iov_iter_revert(i, copied);
				copied = 0;
			}
			if (!copied)
				break;
		}

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
			     struct page **pages, size_t num_pages,
			     u64 pos, u64 copied)
{
	size_t i;
	u64 block_start = round_down(pos, fs_info->sectorsize);
	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

	ASSERT(block_len <= U32_MAX);
	for (i = 0; i < num_pages; i++) {
		/*
		 * The "checked" page flag is some magic around finding pages
		 * that have been modified without going through
		 * btrfs_set_page_dirty; clear it here. There should be no
		 * need to mark the pages accessed, as prepare_pages() should
		 * have marked them accessed via find_or_create_page().
		 */
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
						block_start, block_len);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}
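/*
 * Illustrative note (a hedged sketch, not a call sequence that appears
 * verbatim in this file): the buffered write loop further down pairs the
 * helpers in this file roughly as
 *
 *	prepare_pages(inode, pages, num_pages, pos, write_bytes, ...);
 *	copied = btrfs_copy_from_user(pos, write_bytes, pages, iter);
 *	btrfs_dirty_pages(BTRFS_I(inode), pages, dirty_pages, pos, copied, ...);
 *	btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
 *
 * A short copy (copied < write_bytes) makes that loop fall back to page at
 * a time copies instead of leaving partially uptodate pages around.
 */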
/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);
	ASSERT(num_bytes <= U32_MAX);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached);

	ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (ret)
		return ret;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
					       start_pos, num_bytes);
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
						start_pos, num_bytes);
		btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
					    start_pos, num_bytes);
	}

	/*
	 * We've only changed i_size in RAM, and we haven't updated the disk
	 * i_size. There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}
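/*
 * Worked example of the rounding above (assuming a 4K sectorsize, which is
 * not guaranteed): a write of 100 bytes at pos 4000 gives start_pos = 0 and
 * num_bytes = 8192, so both blocks touched by the write are marked delalloc
 * even though only part of each block was written.
 */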
/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end. hint_block is filled in with a block number that
 * would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split. Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that. We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (as a bug) when punching holes hit an
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to btrfs_setup_item_for_insert()
		 * later in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(trans, leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset,
						0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(trans, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset, 0,
						false);
				ret = btrfs_free_extent(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items are moved off from our leaf to its
		 * immediate left or right neighbor leaves, we end up with a
		 * correct and adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		btrfs_setup_item_for_insert(trans, root, path, &key,
					    args->extent_item_size);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
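/*
 * Usage note for extent_mergeable(): *start and *end act as optional
 * constraints, 0 means "don't care" and a non-zero value must match the
 * neighbor's boundary exactly; on success both are filled in with the
 * neighbor's [start, end) file range. For example, the callers below pass
 * other_start = 0 and other_end = start to ask "does the previous item end
 * exactly where our range begins?".
 */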
/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(trans, leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0, root->root_key.objectid);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset, 0, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0, root->root_key.objectid);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
			    0, false);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(trans, leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(trans, leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
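/*
 * Rough picture (a sketch, ignoring the mergeable-neighbor fast paths) of
 * what btrfs_mark_extent_written() does when only the middle of a
 * preallocated extent is written:
 *
 *	before:	[############# prealloc #############]
 *	after:	[ prealloc ][ regular ][ prealloc ]
 *
 * Each btrfs_duplicate_item() in the split loop above adds one item and
 * bumps the reference count of the shared disk extent once.
 */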
/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, folio);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}

		/*
		 * Since btrfs_read_folio() will unlock the folio before it
		 * returns, there is a window where btrfs_release_folio() can be
		 * called to release the page. Here we check both inode
		 * mapping and PagePrivate() to make sure the page was not
		 * released.
		 *
		 * The private flag check is essential for subpage as we need
		 * to store extra bitmap using folio private.
		 */
		if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

static fgf_t get_prepare_fgp_flags(bool nowait)
{
	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

	if (nowait)
		fgp_flags |= FGP_NOWAIT;

	return fgp_flags;
}

static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
{
	gfp_t gfp;

	gfp = btrfs_alloc_write_mask(inode->i_mapping);
	if (nowait) {
		gfp &= ~__GFP_DIRECT_RECLAIM;
		gfp |= GFP_NOWAIT;
	}

	return gfp;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate,
				  bool nowait)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
	int ret = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
					      fgp_flags, mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			if (nowait)
				ret = -EAGAIN;
			else
				ret = -ENOMEM;
			goto fail;
		}

		ret = set_page_extent_mapped(pages[i]);
		if (ret < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			ret = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!ret && i == num_pages - 1)
			ret = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (ret) {
			put_page(pages[i]);
			if (!nowait && ret == -EAGAIN) {
				ret = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return ret;

}
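/*
 * Note on the error path above: 'faili' is the index of the last page that
 * is still locked and referenced. A page that failed
 * set_page_extent_mapped() is included in the unwind, while a page that
 * failed prepare_uptodate_page() has already been unlocked and released,
 * so the unwind starts one index earlier.
 */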
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other value < 0 - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend, bool nowait,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
					pages[i] = NULL;
				}

				return -EAGAIN;
			}
		} else {
			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
		}

		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent(&inode->io_tree, start_pos, last_pos,
				      cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}
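/*
 * Illustrative caller pattern for the helper above (a condensed sketch of
 * what btrfs_buffered_write() does further down):
 *
 *	extents_locked = lock_and_cleanup_extent_if_need(...);
 *	if (extents_locked < 0)
 *		goto handle_error;	// -EAGAIN: re-prepare the pages
 *	... copy data and dirty the pages ...
 *	if (extents_locked)
 *		unlock_extent(&inode->io_tree, lockstart, lockend, &cached);
 */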
/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:	 File offset.
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *		 range.
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0		If we can nocow, and updates @write_bytes.
 * 0		If we can't do a nocow write.
 * -EAGAIN	If we can't do a nocow write because snapshotting of the
 *		inode's root is in progress.
 * < 0		If an error happened.
 *
 * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
						  &cached_state)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return -EAGAIN;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
						   &cached_state);
	}
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, nowait, false);
	if (ret <= 0)
		btrfs_drew_write_unlock(&root->snapshot_lock);
	else
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	return ret;
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now, ts;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	ts = inode_get_mtime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_mtime_to_ts(inode, now);

	ts = inode_get_ctime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_ctime_to_ts(inode, now);

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
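/*
 * update_time_for_write() only updates mtime/ctime/i_version in memory; the
 * inode item itself is persisted later together with the data, using space
 * reserved for the extent (see the comment in btrfs_write_check() below).
 */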
static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
			     size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	/*
	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really COW into the target range (using
	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return -EAGAIN;

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will get ENOSPC there. We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret)
			return ret;
	}

	return 0;
}
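/*
 * Worked example of the hole expansion above (assuming a 4K sectorsize):
 * with i_size = 1000 and a 2000 byte write at pos = 10000, start_pos = 8192
 * is beyond the old size, so btrfs_cont_expand() fills [1000, 12288) first,
 * making sure the gap is covered by a hole extent item (or a zeroed block
 * tail) instead of stale data.
 */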
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);

	if (nowait)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid a recursive lock.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes, nowait);
		if (ret < 0) {
			int can_nocow;

			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only. write_bytes may get smaller than
			 * requested here.
			 */
			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
							   &write_bytes, nowait);
			if (can_nocow < 0)
				ret = can_nocow;
			if (can_nocow > 0)
				ret = 0;
			if (ret)
				break;
			only_release_metadata = true;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes,
						      reserve_bytes, nowait);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
							       data_reserved, pos,
							       write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));

			if (nowait && ret == -ENOSPC)
				ret = -EAGAIN;
			break;
		}

		release_bytes = reserve_bytes;
again:
		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
			break;
		}

		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes, force_page_uptodate, false);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		if (extents_locked < 0) {
			if (!nowait && extents_locked == -EAGAIN)
				goto again;

			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
								release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
							     data_reserved, __pos,
							     release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);
		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
				      lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

		cond_resched();

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	return num_written ? num_written : ret;
}

static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       const struct iov_iter *iter, loff_t offset)
{
	const u32 blocksize_mask = fs_info->sectorsize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
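/*
 * Example for check_direct_IO() above (assuming a 4K sectorsize): an
 * O_DIRECT write at offset 4096 whose iovec addresses and lengths are all
 * 4K aligned passes; an offset of 512, or a misaligned iovec, returns
 * -EINVAL and btrfs_direct_write() below falls back to buffered IO.
 */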
static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	loff_t pos;
	ssize_t written = 0;
	ssize_t written_buffered;
	size_t prev_left = 0;
	loff_t endbyte;
	ssize_t ret;
	unsigned int ilock_flags = 0;
	struct iomap_dio *dio;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	/*
	 * If the write DIO is within EOF, use a shared lock, and also only if
	 * security bits will likely not be dropped by file_remove_privs()
	 * called from btrfs_write_check(). Both conditions are rechecked
	 * after the lock is acquired.
	 */
	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
		ilock_flags |= BTRFS_ILOCK_SHARED;

relock:
	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	/* Shared lock cannot be used with security bits set. */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		return ret;
	}

	ret = btrfs_write_check(iocb, from, ret);
	if (ret < 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto out;
	}

	pos = iocb->ki_pos;
	/*
	 * Re-check since the file size may have changed just before taking
	 * the lock, or pos may have changed because of O_APPEND in
	 * generic_write_checks().
	 */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
	    pos + iov_iter_count(from) > i_size_read(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	if (check_direct_IO(fs_info, from, pos)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto buffered;
	}

	/*
	 * The iov_iter can be mapped to the same file range we are writing to.
	 * If that's the case, then we will deadlock in the iomap code, because
	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
	 * an ordered extent, and after that it will fault in the pages that the
	 * iov_iter refers to. During the fault in we end up in the readahead
	 * pages code (starting at btrfs_readahead()), which will lock the range,
	 * find that ordered extent and then wait for it to complete (at
	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
	 * obviously the ordered extent can never complete as we didn't submit
	 * yet the respective bio(s). This always happens when the buffer is
	 * memory mapped to the same file range, since the iomap DIO code always
	 * invalidates pages in the target file range (after starting and waiting
	 * for any writeback).
	 *
	 * So here we disable page faults in the iov_iter and then retry if we
	 * got -EFAULT, faulting in the pages before the retry.
	 */
	from->nofault = true;
	dio = btrfs_dio_write(iocb, from, written);
	from->nofault = false;

	/*
	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
	 * iocb, and that needs to lock the inode. So unlock it before calling
	 * iomap_dio_complete() to avoid a deadlock.
	 */
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);

	if (IS_ERR_OR_NULL(dio))
		ret = PTR_ERR_OR_ZERO(dio);
	else
		ret = iomap_dio_complete(dio);

	/* No increment (+=) because iomap returns a cumulative value. */
	if (ret > 0)
		written = ret;

	if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) {
		const size_t left = iov_iter_count(from);
		/*
		 * We have more data left to write. Try to fault in as many as
		 * possible of the remainder pages and retry. We do this without
		 * releasing and locking again the inode, to prevent races with
		 * truncate.
		 *
		 * Also, in case the iov refers to pages in the file range of the
		 * file we want to write to (due to a mmap), we could enter an
		 * infinite loop if we retry after faulting the pages in, since
		 * iomap will invalidate any pages in the range early on, before
		 * it tries to fault in the pages of the iov. So we keep track of
		 * how much was left of the iov in the previous EFAULT and fall
		 * back to buffered IO in case we haven't made any progress.
		 */
		if (left == prev_left) {
			ret = -ENOTBLK;
		} else {
			fault_in_iov_iter_readable(from, left);
			prev_left = left;
			goto relock;
		}
	}

	/*
	 * If 'ret' is -ENOTBLK or we have not written all data, then it means
	 * we must fall back to buffered IO.
	 */
	if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from))
		goto out;

buffered:
	/*
	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
	 * it must retry the operation in a context where blocking is acceptable,
	 * because even if we end up not blocking during the buffered IO attempt
	 * below, we will block when flushing and waiting for the IO.
	 */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		ret = -EAGAIN;
		goto out;
	}

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		ret = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	ret = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (ret)
		goto out;
	ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (ret)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return ret < 0 ? ret : written;
}

static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
				   const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t count;
	ssize_t ret;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	count = encoded->len;
	ret = generic_write_checks_count(iocb, &count);
	if (ret == 0 && count != encoded->len) {
		/*
		 * The write got truncated by generic_write_checks_count(). We
		 * can't do a partial encoded write.
		 */
		ret = -EFBIG;
	}
	if (ret || encoded->len == 0)
		goto out;

	ret = btrfs_write_check(iocb, from, encoded->len);
	if (ret < 0)
		goto out;

	ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
	btrfs_inode_unlock(BTRFS_I(inode), 0);
	return ret;
}
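/*
 * btrfs_do_write_iter() below is the common write entry point: encoded
 * writes (from the BTRFS_IOC_ENCODED_WRITE ioctl) go through
 * btrfs_encoded_write(), O_DIRECT goes through btrfs_direct_write() and
 * everything else through btrfs_buffered_write(); generic_write_sync()
 * then applies O_SYNC/O_DSYNC semantics to whatever was written.
 */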
ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
	ssize_t num_written, num_sync;

	/*
	 * If the fs flips readonly due to some impossible error, although we
	 * have opened a file as writable, we have to stop this write operation
	 * to ensure consistency.
	 */
	if (BTRFS_FS_ERROR(inode->root->fs_info))
		return -EROFS;

	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (encoded) {
		num_written = btrfs_encoded_write(iocb, from, encoded);
		num_sync = encoded->len;
	} else if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = btrfs_direct_write(iocb, from);
		num_sync = num_written;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		num_sync = num_written;
	}

	btrfs_set_inode_last_sub_trans(inode);

	if (num_sync > 0) {
		num_sync = generic_write_sync(iocb, num_sync);
		if (num_sync < 0)
			num_written = num_sync;
	}

	return num_written;
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return btrfs_do_write_iter(iocb, from, NULL);
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private) {
		kfree(private->filldir_buf);
		free_extent_state(private->llseek_cached_state);
		kfree(private);
		filp->private_data = NULL;
	}

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size. This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible. Especially in
	 * case of multiple disks using a RAID profile, a large IO can be
	 * split into several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	ret = btrfs_fdatawrite_range(inode, start, end);
	blk_finish_plug(&plug);

	return ret;
}

static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
	    list_empty(&ctx->ordered_extents))
		return true;

	/*
	 * If we are doing a fast fsync we can not bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
	     list_empty(&ctx->ordered_extents)))
		return true;

	return false;
}
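/*
 * In short, skip_inode_logging() returns true when the log or the last
 * committed transaction already contains everything this fsync would
 * write. For example, the second of two back-to-back fsync calls with no
 * modification in between takes this fast path and only checks the mapping
 * for writeback errors.
 */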
/*
 * fsync call for both files and directories. This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit. This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
	 * pages concurrently and performance improves. See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);

	atomic_inc(&root->log_batch);

	/*
	 * Before we acquired the inode's lock and the mmap lock, someone may
	 * have dirtied more pages in the target range. We need to make sure
	 * that writeback for any such pages does not start while we are logging
	 * the inode, because if it does, any of the following might happen when
	 * we are not doing a full inode sync:
	 *
	 * 1) We log an extent after its writeback finishes but before its
	 *    checksums are added to the csum tree, leading to -EIO errors
	 *    when attempting to read the extent after a log replay.
	 *
	 * 2) We can end up logging an extent before its writeback finishes.
	 *    Therefore after the log replay we will have a file extent item
	 *    pointing to an unwritten extent (and no data checksums as well).
	 *
	 * So trigger writeback for any eventual new dirty pages and then we
	 * wait for all ordered extents to complete below.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret) {
		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
		goto out;
	}

	/*
	 * Always check for the full sync flag while holding the inode's lock,
	 * to avoid races with other tasks. The flag must be either set all the
	 * time during logging or always off all the time while logging.
	 * We check the flag here after starting delalloc above, because when
	 * running delalloc the full sync flag may be set if we need to drop
	 * extra extent map ranges due to temporary memory allocation failures.
	 */
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);

	/*
	 * We have to do this here to avoid the priority inversion of waiting on
	 * IO of a lower priority task while holding a transaction open.
	 *
	 * For a full fsync we wait for the ordered extents to complete while
	 * for a fast fsync we wait just for writeback to complete, and then
	 * attach the ordered extents to the transaction so that a transaction
	 * commit waits for their completion, to avoid data loss if we fsync,
	 * the current transaction commits before the ordered extents complete
	 * and a power failure happens right after that.
	 *
	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
	 * logical address recorded in the ordered extent may change. We need
	 * to wait for the IO to stabilize the logical address.
	 */
	if (full_sync || btrfs_is_zoned(fs_info)) {
		ret = btrfs_wait_ordered_range(inode, start, len);
	} else {
		/*
		 * Get our ordered extents as soon as possible to avoid doing
		 * checksum lookups in the csum tree, and use instead the
		 * checksums attached to the ordered extents.
		 */
		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
						      &ctx.ordered_extents);
		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
	}

	if (ret)
		goto out_release_extents;

	atomic_inc(&root->log_batch);

	if (skip_inode_logging(&ctx)) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		/*
		 * An ordered extent might have started before and completed
		 * already with io errors, in which case the inode was not
		 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since the last
		 * time we checked for fsync errors.
		 */
		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
		goto out_release_extents;
	}

	btrfs_init_log_ctx_scratch_eb(&ctx);

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path). If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit. With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking joiners. This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release_extents;
	}
	trans->in_fsync = true;

	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
	/*
	 * Scratch eb no longer needed, release before syncing log or commit
	 * transaction, to avoid holding unnecessary memory during such long
	 * operations.
	 */
	if (ctx.scratch_eb) {
		free_extent_buffer(ctx.scratch_eb);
		ctx.scratch_eb = NULL;
	}
	btrfs_release_log_ctx_extents(&ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = BTRFS_LOG_FORCE_COMMIT;
	}

	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log. It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
It is possible that 1951 * someone will come in and modify the file, but that's 1952 * fine because the log is consistent on disk, and we 1953 * have references to all of the file's extents 1954 * 1955 * It is possible that someone will come in and log the 1956 * file again, but that will end up using the synchronization 1957 * inside btrfs_sync_log to keep things safe. 1958 */ 1959 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 1960 1961 if (ret == BTRFS_NO_LOG_SYNC) { 1962 ret = btrfs_end_transaction(trans); 1963 goto out; 1964 } 1965 1966 /* We successfully logged the inode, attempt to sync the log. */ 1967 if (!ret) { 1968 ret = btrfs_sync_log(trans, root, &ctx); 1969 if (!ret) { 1970 ret = btrfs_end_transaction(trans); 1971 goto out; 1972 } 1973 } 1974 1975 /* 1976 * At this point we need to commit the transaction because we had 1977 * btrfs_need_log_full_commit() or some other error. 1978 * 1979 * If we didn't do a full sync we have to stop the trans handle, wait on 1980 * the ordered extents, start it again and commit the transaction. If 1981 * we attempt to wait on the ordered extents here we could deadlock with 1982 * something like fallocate() that is holding the extent lock trying to 1983 * start a transaction while some other thread is trying to commit the 1984 * transaction while we (fsync) are currently holding the transaction 1985 * open. 1986 */ 1987 if (!full_sync) { 1988 ret = btrfs_end_transaction(trans); 1989 if (ret) 1990 goto out; 1991 ret = btrfs_wait_ordered_range(inode, start, len); 1992 if (ret) 1993 goto out; 1994 1995 /* 1996 * This is safe to use here because we're only interested in 1997 * making sure the transaction that had the ordered extents is 1998 * committed. We aren't waiting on anything past this point, 1999 * we're purely getting the transaction and committing it. 2000 */ 2001 trans = btrfs_attach_transaction_barrier(root); 2002 if (IS_ERR(trans)) { 2003 ret = PTR_ERR(trans); 2004 2005 /* 2006 * We committed the transaction and there's no currently 2007 * running transaction, this means everything we care 2008 * about made it to disk and we are done. 2009 */ 2010 if (ret == -ENOENT) 2011 ret = 0; 2012 goto out; 2013 } 2014 } 2015 2016 ret = btrfs_commit_transaction(trans); 2017 out: 2018 free_extent_buffer(ctx.scratch_eb); 2019 ASSERT(list_empty(&ctx.list)); 2020 ASSERT(list_empty(&ctx.conflict_inodes)); 2021 err = file_check_and_advance_wb_err(file); 2022 if (!ret) 2023 ret = err; 2024 return ret > 0 ? -EIO : ret; 2025 2026 out_release_extents: 2027 btrfs_release_log_ctx_extents(&ctx); 2028 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 2029 goto out; 2030 } 2031 2032 /* 2033 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 2034 * called from a page fault handler when a page is first dirtied. Hence we must 2035 * be careful to check for EOF conditions here. We set the page up correctly 2036 * for a written page which means we get ENOSPC checking when writing into 2037 * holes and correct delalloc and unwritten extent mapping on filesystems that 2038 * support these features. 2039 * 2040 * We are not allowed to take the i_mutex here so we have to play games to 2041 * protect against truncate races as the page could now be beyond EOF. Because 2042 * truncate_setsize() writes the inode size before removing pages, once we have 2043 * the page lock we can determine safely if the page is beyond EOF. If it is not 2044 * beyond EOF, then the page is guaranteed safe against truncation until we 2045 * unlock the page. 
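 *
 * As a rough illustration, this handler typically runs when userspace
 * first stores through a shared writable mapping; a minimal sketch
 * (error handling omitted):
 *
 *   int fd = open("somefile", O_RDWR);
 *   char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   p[0] = 1;  // first write to a clean page faults into this handler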
2046 */ 2047 static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 2048 { 2049 struct page *page = vmf->page; 2050 struct folio *folio = page_folio(page); 2051 struct inode *inode = file_inode(vmf->vma->vm_file); 2052 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 2053 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2054 struct btrfs_ordered_extent *ordered; 2055 struct extent_state *cached_state = NULL; 2056 struct extent_changeset *data_reserved = NULL; 2057 unsigned long zero_start; 2058 loff_t size; 2059 vm_fault_t ret; 2060 int ret2; 2061 int reserved = 0; 2062 u64 reserved_space; 2063 u64 page_start; 2064 u64 page_end; 2065 u64 end; 2066 2067 ASSERT(folio_order(folio) == 0); 2068 2069 reserved_space = PAGE_SIZE; 2070 2071 sb_start_pagefault(inode->i_sb); 2072 page_start = page_offset(page); 2073 page_end = page_start + PAGE_SIZE - 1; 2074 end = page_end; 2075 2076 /* 2077 * Reserving delalloc space after obtaining the page lock can lead to 2078 * deadlock. For example, if a dirty page is locked by this function 2079 * and the call to btrfs_delalloc_reserve_space() ends up triggering 2080 * dirty page write out, then the btrfs_writepages() function could 2081 * end up waiting indefinitely to get a lock on the page currently 2082 * being processed by btrfs_page_mkwrite() function. 2083 */ 2084 ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 2085 page_start, reserved_space); 2086 if (!ret2) { 2087 ret2 = file_update_time(vmf->vma->vm_file); 2088 reserved = 1; 2089 } 2090 if (ret2) { 2091 ret = vmf_error(ret2); 2092 if (reserved) 2093 goto out; 2094 goto out_noreserve; 2095 } 2096 2097 /* Make the VM retry the fault. */ 2098 ret = VM_FAULT_NOPAGE; 2099 again: 2100 down_read(&BTRFS_I(inode)->i_mmap_lock); 2101 lock_page(page); 2102 size = i_size_read(inode); 2103 2104 if ((page->mapping != inode->i_mapping) || 2105 (page_start >= size)) { 2106 /* Page got truncated out from underneath us. */ 2107 goto out_unlock; 2108 } 2109 wait_on_page_writeback(page); 2110 2111 lock_extent(io_tree, page_start, page_end, &cached_state); 2112 ret2 = set_page_extent_mapped(page); 2113 if (ret2 < 0) { 2114 ret = vmf_error(ret2); 2115 unlock_extent(io_tree, page_start, page_end, &cached_state); 2116 goto out_unlock; 2117 } 2118 2119 /* 2120 * We can't set the delalloc bits if there are pending ordered 2121 * extents. Drop our locks and wait for them to finish. 2122 */ 2123 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); 2124 if (ordered) { 2125 unlock_extent(io_tree, page_start, page_end, &cached_state); 2126 unlock_page(page); 2127 up_read(&BTRFS_I(inode)->i_mmap_lock); 2128 btrfs_start_ordered_extent(ordered); 2129 btrfs_put_ordered_extent(ordered); 2130 goto again; 2131 } 2132 2133 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 2134 reserved_space = round_up(size - page_start, fs_info->sectorsize); 2135 if (reserved_space < PAGE_SIZE) { 2136 end = page_start + reserved_space - 1; 2137 btrfs_delalloc_release_space(BTRFS_I(inode), 2138 data_reserved, page_start, 2139 PAGE_SIZE - reserved_space, true); 2140 } 2141 } 2142 2143 /* 2144 * page_mkwrite gets called when the page is firstly dirtied after it's 2145 * faulted in, but write(2) could also dirty a page and set delalloc 2146 * bits, thus in this case for space account reason, we still need to 2147 * clear any delalloc bits within this page range since we have to 2148 * reserve data&meta space before lock_page() (see above comments). 
2149 */ 2150 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 2151 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 2152 EXTENT_DEFRAG, &cached_state); 2153 2154 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, 2155 &cached_state); 2156 if (ret2) { 2157 unlock_extent(io_tree, page_start, page_end, &cached_state); 2158 ret = VM_FAULT_SIGBUS; 2159 goto out_unlock; 2160 } 2161 2162 /* Page is wholly or partially inside EOF. */ 2163 if (page_start + PAGE_SIZE > size) 2164 zero_start = offset_in_page(size); 2165 else 2166 zero_start = PAGE_SIZE; 2167 2168 if (zero_start != PAGE_SIZE) 2169 memzero_page(page, zero_start, PAGE_SIZE - zero_start); 2170 2171 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE); 2172 btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start); 2173 btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start); 2174 2175 btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); 2176 2177 unlock_extent(io_tree, page_start, page_end, &cached_state); 2178 up_read(&BTRFS_I(inode)->i_mmap_lock); 2179 2180 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 2181 sb_end_pagefault(inode->i_sb); 2182 extent_changeset_free(data_reserved); 2183 return VM_FAULT_LOCKED; 2184 2185 out_unlock: 2186 unlock_page(page); 2187 up_read(&BTRFS_I(inode)->i_mmap_lock); 2188 out: 2189 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); 2190 btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, 2191 reserved_space, (ret != 0)); 2192 out_noreserve: 2193 sb_end_pagefault(inode->i_sb); 2194 extent_changeset_free(data_reserved); 2195 return ret; 2196 } 2197 2198 static const struct vm_operations_struct btrfs_file_vm_ops = { 2199 .fault = filemap_fault, 2200 .map_pages = filemap_map_pages, 2201 .page_mkwrite = btrfs_page_mkwrite, 2202 }; 2203 2204 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) 2205 { 2206 struct address_space *mapping = filp->f_mapping; 2207 2208 if (!mapping->a_ops->read_folio) 2209 return -ENOEXEC; 2210 2211 file_accessed(filp); 2212 vma->vm_ops = &btrfs_file_vm_ops; 2213 2214 return 0; 2215 } 2216 2217 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, 2218 int slot, u64 start, u64 end) 2219 { 2220 struct btrfs_file_extent_item *fi; 2221 struct btrfs_key key; 2222 2223 if (slot < 0 || slot >= btrfs_header_nritems(leaf)) 2224 return 0; 2225 2226 btrfs_item_key_to_cpu(leaf, &key, slot); 2227 if (key.objectid != btrfs_ino(inode) || 2228 key.type != BTRFS_EXTENT_DATA_KEY) 2229 return 0; 2230 2231 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2232 2233 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2234 return 0; 2235 2236 if (btrfs_file_extent_disk_bytenr(leaf, fi)) 2237 return 0; 2238 2239 if (key.offset == end) 2240 return 1; 2241 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) 2242 return 1; 2243 return 0; 2244 } 2245 2246 static int fill_holes(struct btrfs_trans_handle *trans, 2247 struct btrfs_inode *inode, 2248 struct btrfs_path *path, u64 offset, u64 end) 2249 { 2250 struct btrfs_fs_info *fs_info = trans->fs_info; 2251 struct btrfs_root *root = inode->root; 2252 struct extent_buffer *leaf; 2253 struct btrfs_file_extent_item *fi; 2254 struct extent_map *hole_em; 2255 struct btrfs_key key; 2256 int ret; 2257 2258 if (btrfs_fs_incompat(fs_info, NO_HOLES)) 2259 goto out; 2260 2261 key.objectid = btrfs_ino(inode); 2262 key.type = BTRFS_EXTENT_DATA_KEY; 2263 key.offset = offset; 2264 2265 ret = 
btrfs_search_slot(trans, root, &key, path, 0, 1); 2266 if (ret <= 0) { 2267 /* 2268 * We should have dropped this offset, so if we find it then 2269 * something has gone horribly wrong. 2270 */ 2271 if (ret == 0) 2272 ret = -EINVAL; 2273 return ret; 2274 } 2275 2276 leaf = path->nodes[0]; 2277 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) { 2278 u64 num_bytes; 2279 2280 path->slots[0]--; 2281 fi = btrfs_item_ptr(leaf, path->slots[0], 2282 struct btrfs_file_extent_item); 2283 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + 2284 end - offset; 2285 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2286 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); 2287 btrfs_set_file_extent_offset(leaf, fi, 0); 2288 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2289 btrfs_mark_buffer_dirty(trans, leaf); 2290 goto out; 2291 } 2292 2293 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { 2294 u64 num_bytes; 2295 2296 key.offset = offset; 2297 btrfs_set_item_key_safe(trans, path, &key); 2298 fi = btrfs_item_ptr(leaf, path->slots[0], 2299 struct btrfs_file_extent_item); 2300 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end - 2301 offset; 2302 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2303 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); 2304 btrfs_set_file_extent_offset(leaf, fi, 0); 2305 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2306 btrfs_mark_buffer_dirty(trans, leaf); 2307 goto out; 2308 } 2309 btrfs_release_path(path); 2310 2311 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, 2312 end - offset); 2313 if (ret) 2314 return ret; 2315 2316 out: 2317 btrfs_release_path(path); 2318 2319 hole_em = alloc_extent_map(); 2320 if (!hole_em) { 2321 btrfs_drop_extent_map_range(inode, offset, end - 1, false); 2322 btrfs_set_inode_full_sync(inode); 2323 } else { 2324 hole_em->start = offset; 2325 hole_em->len = end - offset; 2326 hole_em->ram_bytes = hole_em->len; 2327 hole_em->orig_start = offset; 2328 2329 hole_em->block_start = EXTENT_MAP_HOLE; 2330 hole_em->block_len = 0; 2331 hole_em->orig_block_len = 0; 2332 hole_em->generation = trans->transid; 2333 2334 ret = btrfs_replace_extent_map_range(inode, hole_em, true); 2335 free_extent_map(hole_em); 2336 if (ret) 2337 btrfs_set_inode_full_sync(inode); 2338 } 2339 2340 return 0; 2341 } 2342 2343 /* 2344 * Find a hole extent on the given inode and change start/len to the end of 2345 * the hole extent (a hole/vacuum extent is one whose em->start <= start && 2346 * em->start + em->len > start). 2347 * When a hole extent is found, return 1 and modify start/len. 2348 */ 2349 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len) 2350 { 2351 struct btrfs_fs_info *fs_info = inode->root->fs_info; 2352 struct extent_map *em; 2353 int ret = 0; 2354 2355 em = btrfs_get_extent(inode, NULL, 2356 round_down(*start, fs_info->sectorsize), 2357 round_up(*len, fs_info->sectorsize)); 2358 if (IS_ERR(em)) 2359 return PTR_ERR(em); 2360 2361 /* Hole or vacuum extent (the latter only exists in no-holes mode) */ 2362 if (em->block_start == EXTENT_MAP_HOLE) { 2363 ret = 1; 2364 *len = em->start + em->len > *start + *len ?
0 : *start + *len - em->start - em->len; 2366 *start = em->start + em->len; 2367 } 2368 free_extent_map(em); 2369 return ret; 2370 } 2371 2372 static void btrfs_punch_hole_lock_range(struct inode *inode, 2373 const u64 lockstart, 2374 const u64 lockend, 2375 struct extent_state **cached_state) 2376 { 2377 /* 2378 * For the subpage case, if the range is not at a page boundary, we could 2379 * have pages at the leading/trailing part of the range. 2380 * This could lead to an endless loop since filemap_range_has_page() 2381 * will always return true. 2382 * So here we need to do extra page alignment for 2383 * filemap_range_has_page(). 2384 */ 2385 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE); 2386 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1; 2387 2388 while (1) { 2389 truncate_pagecache_range(inode, lockstart, lockend); 2390 2391 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2392 cached_state); 2393 /* 2394 * We can't have ordered extents in the range, nor dirty/writeback 2395 * pages, because we have locked the inode's VFS lock in exclusive 2396 * mode, we have locked the inode's i_mmap_lock in exclusive mode, 2397 * we have flushed all delalloc in the range and we have waited 2398 * for any ordered extents in the range to complete. 2399 * We can race with anyone reading pages from this range, so after 2400 * locking the range, check if we have pages in the range, and if 2401 * we do, unlock the range and retry. 2402 */ 2403 if (!filemap_range_has_page(inode->i_mapping, page_lockstart, 2404 page_lockend)) 2405 break; 2406 2407 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2408 cached_state); 2409 } 2410 2411 btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend); 2412 } 2413 2414 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans, 2415 struct btrfs_inode *inode, 2416 struct btrfs_path *path, 2417 struct btrfs_replace_extent_info *extent_info, 2418 const u64 replace_len, 2419 const u64 bytes_to_drop) 2420 { 2421 struct btrfs_fs_info *fs_info = trans->fs_info; 2422 struct btrfs_root *root = inode->root; 2423 struct btrfs_file_extent_item *extent; 2424 struct extent_buffer *leaf; 2425 struct btrfs_key key; 2426 int slot; 2427 struct btrfs_ref ref = { 0 }; 2428 int ret; 2429 2430 if (replace_len == 0) 2431 return 0; 2432 2433 if (extent_info->disk_offset == 0 && 2434 btrfs_fs_incompat(fs_info, NO_HOLES)) { 2435 btrfs_update_inode_bytes(inode, 0, bytes_to_drop); 2436 return 0; 2437 } 2438 2439 key.objectid = btrfs_ino(inode); 2440 key.type = BTRFS_EXTENT_DATA_KEY; 2441 key.offset = extent_info->file_offset; 2442 ret = btrfs_insert_empty_item(trans, root, path, &key, 2443 sizeof(struct btrfs_file_extent_item)); 2444 if (ret) 2445 return ret; 2446 leaf = path->nodes[0]; 2447 slot = path->slots[0]; 2448 write_extent_buffer(leaf, extent_info->extent_buf, 2449 btrfs_item_ptr_offset(leaf, slot), 2450 sizeof(struct btrfs_file_extent_item)); 2451 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 2452 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE); 2453 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset); 2454 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len); 2455 if (extent_info->is_new_extent) 2456 btrfs_set_file_extent_generation(leaf, extent, trans->transid); 2457 btrfs_mark_buffer_dirty(trans, leaf); 2458 btrfs_release_path(path); 2459 2460 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset, 2461 replace_len); 2462 if (ret)
2463 return ret; 2464 2465 /* If it's a hole, nothing more needs to be done. */ 2466 if (extent_info->disk_offset == 0) { 2467 btrfs_update_inode_bytes(inode, 0, bytes_to_drop); 2468 return 0; 2469 } 2470 2471 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop); 2472 2473 if (extent_info->is_new_extent && extent_info->insertions == 0) { 2474 key.objectid = extent_info->disk_offset; 2475 key.type = BTRFS_EXTENT_ITEM_KEY; 2476 key.offset = extent_info->disk_len; 2477 ret = btrfs_alloc_reserved_file_extent(trans, root, 2478 btrfs_ino(inode), 2479 extent_info->file_offset, 2480 extent_info->qgroup_reserved, 2481 &key); 2482 } else { 2483 u64 ref_offset; 2484 2485 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, 2486 extent_info->disk_offset, 2487 extent_info->disk_len, 0, 2488 root->root_key.objectid); 2489 ref_offset = extent_info->file_offset - extent_info->data_offset; 2490 btrfs_init_data_ref(&ref, root->root_key.objectid, 2491 btrfs_ino(inode), ref_offset, 0, false); 2492 ret = btrfs_inc_extent_ref(trans, &ref); 2493 } 2494 2495 extent_info->insertions++; 2496 2497 return ret; 2498 } 2499 2500 /* 2501 * The respective range must have been previously locked, as well as the inode. 2502 * The end offset is inclusive (last byte of the range). 2503 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing 2504 * the file range with an extent. 2505 * When not punching a hole, we don't want to end up in a state where we dropped 2506 * extents without inserting a new one, so we must abort the transaction to avoid 2507 * a corruption. 2508 */ 2509 int btrfs_replace_file_extents(struct btrfs_inode *inode, 2510 struct btrfs_path *path, const u64 start, 2511 const u64 end, 2512 struct btrfs_replace_extent_info *extent_info, 2513 struct btrfs_trans_handle **trans_out) 2514 { 2515 struct btrfs_drop_extents_args drop_args = { 0 }; 2516 struct btrfs_root *root = inode->root; 2517 struct btrfs_fs_info *fs_info = root->fs_info; 2518 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1); 2519 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize); 2520 struct btrfs_trans_handle *trans = NULL; 2521 struct btrfs_block_rsv *rsv; 2522 unsigned int rsv_count; 2523 u64 cur_offset; 2524 u64 len = end - start; 2525 int ret = 0; 2526 2527 if (end <= start) 2528 return -EINVAL; 2529 2530 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 2531 if (!rsv) { 2532 ret = -ENOMEM; 2533 goto out; 2534 } 2535 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1); 2536 rsv->failfast = true; 2537 2538 /* 2539 * 1 - update the inode 2540 * 1 - removing the extents in the range 2541 * 1 - adding the hole extent if no_holes isn't set or if we are 2542 * replacing the range with a new extent 2543 */ 2544 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info) 2545 rsv_count = 3; 2546 else 2547 rsv_count = 2; 2548 2549 trans = btrfs_start_transaction(root, rsv_count); 2550 if (IS_ERR(trans)) { 2551 ret = PTR_ERR(trans); 2552 trans = NULL; 2553 goto out_free; 2554 } 2555 2556 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 2557 min_size, false); 2558 if (WARN_ON(ret)) 2559 goto out_trans; 2560 trans->block_rsv = rsv; 2561 2562 cur_offset = start; 2563 drop_args.path = path; 2564 drop_args.end = end + 1; 2565 drop_args.drop_cache = true; 2566 while (cur_offset < end) { 2567 drop_args.start = cur_offset; 2568 ret = btrfs_drop_extents(trans, root, inode, &drop_args); 2569 /* If we are punching a hole decrement the inode's byte count */ 2570 if 
(!extent_info) 2571 btrfs_update_inode_bytes(inode, 0, 2572 drop_args.bytes_found); 2573 if (ret != -ENOSPC) { 2574 /* 2575 * The only time we don't want to abort is if we are 2576 * attempting to clone a partial inline extent, in which 2577 * case we'll get EOPNOTSUPP. However if we aren't 2578 * cloning we need to abort no matter what, because if we 2579 * got EOPNOTSUPP via prealloc then we messed up and 2580 * need to abort. 2581 */ 2582 if (ret && 2583 (ret != -EOPNOTSUPP || 2584 (extent_info && extent_info->is_new_extent))) 2585 btrfs_abort_transaction(trans, ret); 2586 break; 2587 } 2588 2589 trans->block_rsv = &fs_info->trans_block_rsv; 2590 2591 if (!extent_info && cur_offset < drop_args.drop_end && 2592 cur_offset < ino_size) { 2593 ret = fill_holes(trans, inode, path, cur_offset, 2594 drop_args.drop_end); 2595 if (ret) { 2596 /* 2597 * If we failed then we didn't insert our hole 2598 * entries for the area we dropped, so now the 2599 * fs is corrupted and we must abort the 2600 * transaction. 2601 */ 2602 btrfs_abort_transaction(trans, ret); 2603 break; 2604 } 2605 } else if (!extent_info && cur_offset < drop_args.drop_end) { 2606 /* 2607 * We are past the i_size here, but since we didn't 2608 * insert holes we need to clear the mapped area so we 2609 * know to not set disk_i_size in this area until a new 2610 * file extent is inserted here. 2611 */ 2612 ret = btrfs_inode_clear_file_extent_range(inode, 2613 cur_offset, 2614 drop_args.drop_end - cur_offset); 2615 if (ret) { 2616 /* 2617 * We couldn't clear our area, so disk_i_size could 2618 * presumably be adjusted up over it and corrupt the 2619 * fs, so we need to abort. 2620 */ 2621 btrfs_abort_transaction(trans, ret); 2622 break; 2623 } 2624 } 2625 2626 if (extent_info && 2627 drop_args.drop_end > extent_info->file_offset) { 2628 u64 replace_len = drop_args.drop_end - 2629 extent_info->file_offset; 2630 2631 ret = btrfs_insert_replace_extent(trans, inode, path, 2632 extent_info, replace_len, 2633 drop_args.bytes_found); 2634 if (ret) { 2635 btrfs_abort_transaction(trans, ret); 2636 break; 2637 } 2638 extent_info->data_len -= replace_len; 2639 extent_info->data_offset += replace_len; 2640 extent_info->file_offset += replace_len; 2641 } 2642 2643 /* 2644 * We are releasing our handle on the transaction, balance the 2645 * dirty pages of the btree inode and flush delayed items, and 2646 * then get a new transaction handle, which may now point to a 2647 * new transaction in case someone else has committed the 2648 * transaction we used to replace/drop file extent items. So 2649 * bump the inode's iversion and update mtime and ctime except 2650 * if we are called from a dedupe context. This is because a 2651 * power failure/crash may happen after the transaction is 2652 * committed and before we finish replacing/dropping all the 2653 * file extent items we need.
2654 */ 2655 inode_inc_iversion(&inode->vfs_inode); 2656 2657 if (!extent_info || extent_info->update_times) 2658 inode_set_mtime_to_ts(&inode->vfs_inode, 2659 inode_set_ctime_current(&inode->vfs_inode)); 2660 2661 ret = btrfs_update_inode(trans, inode); 2662 if (ret) 2663 break; 2664 2665 btrfs_end_transaction(trans); 2666 btrfs_btree_balance_dirty(fs_info); 2667 2668 trans = btrfs_start_transaction(root, rsv_count); 2669 if (IS_ERR(trans)) { 2670 ret = PTR_ERR(trans); 2671 trans = NULL; 2672 break; 2673 } 2674 2675 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 2676 rsv, min_size, false); 2677 if (WARN_ON(ret)) 2678 break; 2679 trans->block_rsv = rsv; 2680 2681 cur_offset = drop_args.drop_end; 2682 len = end - cur_offset; 2683 if (!extent_info && len) { 2684 ret = find_first_non_hole(inode, &cur_offset, &len); 2685 if (unlikely(ret < 0)) 2686 break; 2687 if (ret && !len) { 2688 ret = 0; 2689 break; 2690 } 2691 } 2692 } 2693 2694 /* 2695 * If we were cloning, force the next fsync to be a full one since we 2696 * we replaced (or just dropped in the case of cloning holes when 2697 * NO_HOLES is enabled) file extent items and did not setup new extent 2698 * maps for the replacement extents (or holes). 2699 */ 2700 if (extent_info && !extent_info->is_new_extent) 2701 btrfs_set_inode_full_sync(inode); 2702 2703 if (ret) 2704 goto out_trans; 2705 2706 trans->block_rsv = &fs_info->trans_block_rsv; 2707 /* 2708 * If we are using the NO_HOLES feature we might have had already an 2709 * hole that overlaps a part of the region [lockstart, lockend] and 2710 * ends at (or beyond) lockend. Since we have no file extent items to 2711 * represent holes, drop_end can be less than lockend and so we must 2712 * make sure we have an extent map representing the existing hole (the 2713 * call to __btrfs_drop_extents() might have dropped the existing extent 2714 * map representing the existing hole), otherwise the fast fsync path 2715 * will not record the existence of the hole region 2716 * [existing_hole_start, lockend]. 2717 */ 2718 if (drop_args.drop_end <= end) 2719 drop_args.drop_end = end + 1; 2720 /* 2721 * Don't insert file hole extent item if it's for a range beyond eof 2722 * (because it's useless) or if it represents a 0 bytes range (when 2723 * cur_offset == drop_end). 2724 */ 2725 if (!extent_info && cur_offset < ino_size && 2726 cur_offset < drop_args.drop_end) { 2727 ret = fill_holes(trans, inode, path, cur_offset, 2728 drop_args.drop_end); 2729 if (ret) { 2730 /* Same comment as above. */ 2731 btrfs_abort_transaction(trans, ret); 2732 goto out_trans; 2733 } 2734 } else if (!extent_info && cur_offset < drop_args.drop_end) { 2735 /* See the comment in the loop above for the reasoning here. 
*/ 2736 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset, 2737 drop_args.drop_end - cur_offset); 2738 if (ret) { 2739 btrfs_abort_transaction(trans, ret); 2740 goto out_trans; 2741 } 2742 2743 } 2744 if (extent_info) { 2745 ret = btrfs_insert_replace_extent(trans, inode, path, 2746 extent_info, extent_info->data_len, 2747 drop_args.bytes_found); 2748 if (ret) { 2749 btrfs_abort_transaction(trans, ret); 2750 goto out_trans; 2751 } 2752 } 2753 2754 out_trans: 2755 if (!trans) 2756 goto out_free; 2757 2758 trans->block_rsv = &fs_info->trans_block_rsv; 2759 if (ret) 2760 btrfs_end_transaction(trans); 2761 else 2762 *trans_out = trans; 2763 out_free: 2764 btrfs_free_block_rsv(fs_info, rsv); 2765 out: 2766 return ret; 2767 } 2768 2769 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) 2770 { 2771 struct inode *inode = file_inode(file); 2772 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); 2773 struct btrfs_root *root = BTRFS_I(inode)->root; 2774 struct extent_state *cached_state = NULL; 2775 struct btrfs_path *path; 2776 struct btrfs_trans_handle *trans = NULL; 2777 u64 lockstart; 2778 u64 lockend; 2779 u64 tail_start; 2780 u64 tail_len; 2781 u64 orig_start = offset; 2782 int ret = 0; 2783 bool same_block; 2784 u64 ino_size; 2785 bool truncated_block = false; 2786 bool updated_inode = false; 2787 2788 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 2789 2790 ret = btrfs_wait_ordered_range(inode, offset, len); 2791 if (ret) 2792 goto out_only_mutex; 2793 2794 ino_size = round_up(inode->i_size, fs_info->sectorsize); 2795 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len); 2796 if (ret < 0) 2797 goto out_only_mutex; 2798 if (ret && !len) { 2799 /* Already in a large hole */ 2800 ret = 0; 2801 goto out_only_mutex; 2802 } 2803 2804 ret = file_modified(file); 2805 if (ret) 2806 goto out_only_mutex; 2807 2808 lockstart = round_up(offset, fs_info->sectorsize); 2809 lockend = round_down(offset + len, fs_info->sectorsize) - 1; 2810 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset)) 2811 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)); 2812 /* 2813 * We needn't truncate any block which is beyond the end of the file 2814 * because we are sure there is no data there. 2815 */ 2816 /* 2817 * Only do this if we are in the same block and we aren't doing the 2818 * entire block. 
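 * For example (illustrative numbers): with a 4K sectorsize, punching
 * 100 bytes at offset 10 lies entirely within the first block, so,
 * provided the range is below i_size, we only zero those bytes with
 * btrfs_truncate_block() and return without dropping any extents.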
2819 */ 2820 if (same_block && len < fs_info->sectorsize) { 2821 if (offset < ino_size) { 2822 truncated_block = true; 2823 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, 2824 0); 2825 } else { 2826 ret = 0; 2827 } 2828 goto out_only_mutex; 2829 } 2830 2831 /* zero back part of the first block */ 2832 if (offset < ino_size) { 2833 truncated_block = true; 2834 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); 2835 if (ret) { 2836 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 2837 return ret; 2838 } 2839 } 2840 2841 /* Check the aligned pages after the first unaligned page, 2842 * if offset != orig_start, which means the first unaligned page 2843 * including several following pages are already in holes, 2844 * the extra check can be skipped */ 2845 if (offset == orig_start) { 2846 /* after truncate page, check hole again */ 2847 len = offset + len - lockstart; 2848 offset = lockstart; 2849 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len); 2850 if (ret < 0) 2851 goto out_only_mutex; 2852 if (ret && !len) { 2853 ret = 0; 2854 goto out_only_mutex; 2855 } 2856 lockstart = offset; 2857 } 2858 2859 /* Check the tail unaligned part is in a hole */ 2860 tail_start = lockend + 1; 2861 tail_len = offset + len - tail_start; 2862 if (tail_len) { 2863 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len); 2864 if (unlikely(ret < 0)) 2865 goto out_only_mutex; 2866 if (!ret) { 2867 /* zero the front end of the last page */ 2868 if (tail_start + tail_len < ino_size) { 2869 truncated_block = true; 2870 ret = btrfs_truncate_block(BTRFS_I(inode), 2871 tail_start + tail_len, 2872 0, 1); 2873 if (ret) 2874 goto out_only_mutex; 2875 } 2876 } 2877 } 2878 2879 if (lockend < lockstart) { 2880 ret = 0; 2881 goto out_only_mutex; 2882 } 2883 2884 btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); 2885 2886 path = btrfs_alloc_path(); 2887 if (!path) { 2888 ret = -ENOMEM; 2889 goto out; 2890 } 2891 2892 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart, 2893 lockend, NULL, &trans); 2894 btrfs_free_path(path); 2895 if (ret) 2896 goto out; 2897 2898 ASSERT(trans != NULL); 2899 inode_inc_iversion(inode); 2900 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 2901 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 2902 updated_inode = true; 2903 btrfs_end_transaction(trans); 2904 btrfs_btree_balance_dirty(fs_info); 2905 out: 2906 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2907 &cached_state); 2908 out_only_mutex: 2909 if (!updated_inode && truncated_block && !ret) { 2910 /* 2911 * If we only end up zeroing part of a page, we still need to 2912 * update the inode item, so that all the time fields are 2913 * updated as well as the necessary btrfs inode in memory fields 2914 * for detecting, at fsync time, if the inode isn't yet in the 2915 * log tree or it's there but not up to date. 
2916 */ 2917 struct timespec64 now = inode_set_ctime_current(inode); 2918 2919 inode_inc_iversion(inode); 2920 inode_set_mtime_to_ts(inode, now); 2921 trans = btrfs_start_transaction(root, 1); 2922 if (IS_ERR(trans)) { 2923 ret = PTR_ERR(trans); 2924 } else { 2925 int ret2; 2926 2927 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 2928 ret2 = btrfs_end_transaction(trans); 2929 if (!ret) 2930 ret = ret2; 2931 } 2932 } 2933 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 2934 return ret; 2935 } 2936 2937 /* Helper structure to record which range is already reserved */ 2938 struct falloc_range { 2939 struct list_head list; 2940 u64 start; 2941 u64 len; 2942 }; 2943 2944 /* 2945 * Helper function to add falloc range 2946 * 2947 * Caller should have locked the larger range of extent containing 2948 * [start, len) 2949 */ 2950 static int add_falloc_range(struct list_head *head, u64 start, u64 len) 2951 { 2952 struct falloc_range *range = NULL; 2953 2954 if (!list_empty(head)) { 2955 /* 2956 * As fallocate iterates by bytenr order, we only need to check 2957 * the last range. 2958 */ 2959 range = list_last_entry(head, struct falloc_range, list); 2960 if (range->start + range->len == start) { 2961 range->len += len; 2962 return 0; 2963 } 2964 } 2965 2966 range = kmalloc(sizeof(*range), GFP_KERNEL); 2967 if (!range) 2968 return -ENOMEM; 2969 range->start = start; 2970 range->len = len; 2971 list_add_tail(&range->list, head); 2972 return 0; 2973 } 2974 2975 static int btrfs_fallocate_update_isize(struct inode *inode, 2976 const u64 end, 2977 const int mode) 2978 { 2979 struct btrfs_trans_handle *trans; 2980 struct btrfs_root *root = BTRFS_I(inode)->root; 2981 int ret; 2982 int ret2; 2983 2984 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode)) 2985 return 0; 2986 2987 trans = btrfs_start_transaction(root, 1); 2988 if (IS_ERR(trans)) 2989 return PTR_ERR(trans); 2990 2991 inode_set_ctime_current(inode); 2992 i_size_write(inode, end); 2993 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 2994 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 2995 ret2 = btrfs_end_transaction(trans); 2996 2997 return ret ? 
ret : ret2; 2998 } 2999 3000 enum { 3001 RANGE_BOUNDARY_WRITTEN_EXTENT, 3002 RANGE_BOUNDARY_PREALLOC_EXTENT, 3003 RANGE_BOUNDARY_HOLE, 3004 }; 3005 3006 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode, 3007 u64 offset) 3008 { 3009 const u64 sectorsize = inode->root->fs_info->sectorsize; 3010 struct extent_map *em; 3011 int ret; 3012 3013 offset = round_down(offset, sectorsize); 3014 em = btrfs_get_extent(inode, NULL, offset, sectorsize); 3015 if (IS_ERR(em)) 3016 return PTR_ERR(em); 3017 3018 if (em->block_start == EXTENT_MAP_HOLE) 3019 ret = RANGE_BOUNDARY_HOLE; 3020 else if (em->flags & EXTENT_FLAG_PREALLOC) 3021 ret = RANGE_BOUNDARY_PREALLOC_EXTENT; 3022 else 3023 ret = RANGE_BOUNDARY_WRITTEN_EXTENT; 3024 3025 free_extent_map(em); 3026 return ret; 3027 } 3028 3029 static int btrfs_zero_range(struct inode *inode, 3030 loff_t offset, 3031 loff_t len, 3032 const int mode) 3033 { 3034 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 3035 struct extent_map *em; 3036 struct extent_changeset *data_reserved = NULL; 3037 int ret; 3038 u64 alloc_hint = 0; 3039 const u64 sectorsize = fs_info->sectorsize; 3040 u64 alloc_start = round_down(offset, sectorsize); 3041 u64 alloc_end = round_up(offset + len, sectorsize); 3042 u64 bytes_to_reserve = 0; 3043 bool space_reserved = false; 3044 3045 em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, 3046 alloc_end - alloc_start); 3047 if (IS_ERR(em)) { 3048 ret = PTR_ERR(em); 3049 goto out; 3050 } 3051 3052 /* 3053 * Avoid hole punching and extent allocation for some cases. More cases 3054 * could be considered, but they are unlikely to be common, so we keep 3055 * things as simple as possible for now. Also, intentionally, if the target 3056 * range contains one or more prealloc extents together with regular 3057 * extents and holes, we drop all the existing extents and allocate a 3058 * new prealloc extent, so that we get a larger contiguous disk extent. 3059 */ 3060 if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) { 3061 const u64 em_end = em->start + em->len; 3062 3063 if (em_end >= offset + len) { 3064 /* 3065 * The whole range is already a prealloc extent, so 3066 * do nothing except update the inode's i_size if 3067 * needed. 3068 */ 3069 free_extent_map(em); 3070 ret = btrfs_fallocate_update_isize(inode, offset + len, 3071 mode); 3072 goto out; 3073 } 3074 /* 3075 * Part of the range is already a prealloc extent, so operate 3076 * only on the remaining part of the range.
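 * For example (illustrative numbers): with a prealloc extent covering
 * [0, 64K) and a request to zero [60K, 100K), only [64K, 100K) still
 * needs work, so alloc_start is bumped to 64K and alloc_hint is set
 * just past the existing extent's disk location, letting any new
 * allocation land next to it.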
3077 */ 3078 alloc_start = em_end; 3079 ASSERT(IS_ALIGNED(alloc_start, sectorsize)); 3080 len = offset + len - alloc_start; 3081 offset = alloc_start; 3082 alloc_hint = em->block_start + em->len; 3083 } 3084 free_extent_map(em); 3085 3086 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) == 3087 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) { 3088 em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, sectorsize); 3089 if (IS_ERR(em)) { 3090 ret = PTR_ERR(em); 3091 goto out; 3092 } 3093 3094 if (em->flags & EXTENT_FLAG_PREALLOC) { 3095 free_extent_map(em); 3096 ret = btrfs_fallocate_update_isize(inode, offset + len, 3097 mode); 3098 goto out; 3099 } 3100 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) { 3101 free_extent_map(em); 3102 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, 3103 0); 3104 if (!ret) 3105 ret = btrfs_fallocate_update_isize(inode, 3106 offset + len, 3107 mode); 3108 return ret; 3109 } 3110 free_extent_map(em); 3111 alloc_start = round_down(offset, sectorsize); 3112 alloc_end = alloc_start + sectorsize; 3113 goto reserve_space; 3114 } 3115 3116 alloc_start = round_up(offset, sectorsize); 3117 alloc_end = round_down(offset + len, sectorsize); 3118 3119 /* 3120 * For unaligned ranges, check the pages at the boundaries, they might 3121 * map to an extent, in which case we need to partially zero them, or 3122 * they might map to a hole, in which case we need our allocation range 3123 * to cover them. 3124 */ 3125 if (!IS_ALIGNED(offset, sectorsize)) { 3126 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3127 offset); 3128 if (ret < 0) 3129 goto out; 3130 if (ret == RANGE_BOUNDARY_HOLE) { 3131 alloc_start = round_down(offset, sectorsize); 3132 ret = 0; 3133 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3134 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); 3135 if (ret) 3136 goto out; 3137 } else { 3138 ret = 0; 3139 } 3140 } 3141 3142 if (!IS_ALIGNED(offset + len, sectorsize)) { 3143 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode), 3144 offset + len); 3145 if (ret < 0) 3146 goto out; 3147 if (ret == RANGE_BOUNDARY_HOLE) { 3148 alloc_end = round_up(offset + len, sectorsize); 3149 ret = 0; 3150 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { 3151 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len, 3152 0, 1); 3153 if (ret) 3154 goto out; 3155 } else { 3156 ret = 0; 3157 } 3158 } 3159 3160 reserve_space: 3161 if (alloc_start < alloc_end) { 3162 struct extent_state *cached_state = NULL; 3163 const u64 lockstart = alloc_start; 3164 const u64 lockend = alloc_end - 1; 3165 3166 bytes_to_reserve = alloc_end - alloc_start; 3167 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), 3168 bytes_to_reserve); 3169 if (ret < 0) 3170 goto out; 3171 space_reserved = true; 3172 btrfs_punch_hole_lock_range(inode, lockstart, lockend, 3173 &cached_state); 3174 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, 3175 alloc_start, bytes_to_reserve); 3176 if (ret) { 3177 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, 3178 lockend, &cached_state); 3179 goto out; 3180 } 3181 ret = btrfs_prealloc_file_range(inode, mode, alloc_start, 3182 alloc_end - alloc_start, 3183 fs_info->sectorsize, 3184 offset + len, &alloc_hint); 3185 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, 3186 &cached_state); 3187 /* btrfs_prealloc_file_range releases reserved space on error */ 3188 if (ret) { 3189 space_reserved = false; 3190 goto out; 3191 } 3192 } 3193 ret = btrfs_fallocate_update_isize(inode, offset + len, mode); 3194 out: 3195 
if (ret && space_reserved) 3196 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved, 3197 alloc_start, bytes_to_reserve); 3198 extent_changeset_free(data_reserved); 3199 3200 return ret; 3201 } 3202 3203 static long btrfs_fallocate(struct file *file, int mode, 3204 loff_t offset, loff_t len) 3205 { 3206 struct inode *inode = file_inode(file); 3207 struct extent_state *cached_state = NULL; 3208 struct extent_changeset *data_reserved = NULL; 3209 struct falloc_range *range; 3210 struct falloc_range *tmp; 3211 LIST_HEAD(reserve_list); 3212 u64 cur_offset; 3213 u64 last_byte; 3214 u64 alloc_start; 3215 u64 alloc_end; 3216 u64 alloc_hint = 0; 3217 u64 locked_end; 3218 u64 actual_end = 0; 3219 u64 data_space_needed = 0; 3220 u64 data_space_reserved = 0; 3221 u64 qgroup_reserved = 0; 3222 struct extent_map *em; 3223 int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize; 3224 int ret; 3225 3226 /* Do not allow fallocate in ZONED mode */ 3227 if (btrfs_is_zoned(inode_to_fs_info(inode))) 3228 return -EOPNOTSUPP; 3229 3230 alloc_start = round_down(offset, blocksize); 3231 alloc_end = round_up(offset + len, blocksize); 3232 cur_offset = alloc_start; 3233 3234 /* Make sure we aren't being given some crap mode. */ 3235 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 3236 FALLOC_FL_ZERO_RANGE)) 3237 return -EOPNOTSUPP; 3238 3239 if (mode & FALLOC_FL_PUNCH_HOLE) 3240 return btrfs_punch_hole(file, offset, len); 3241 3242 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 3243 3244 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) { 3245 ret = inode_newsize_ok(inode, offset + len); 3246 if (ret) 3247 goto out; 3248 } 3249 3250 ret = file_modified(file); 3251 if (ret) 3252 goto out; 3253 3254 /* 3255 * TODO: Move these two operations after we have checked for 3256 * accurate reserved space, or fallocate can still fail but 3257 * with the page truncated or size expanded. 3258 * 3259 * But that's a minor problem and won't do much harm BTW. 3260 */ 3261 if (alloc_start > inode->i_size) { 3262 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode), 3263 alloc_start); 3264 if (ret) 3265 goto out; 3266 } else if (offset + len > inode->i_size) { 3267 /* 3268 * If we are fallocating from the end of the file onward we 3269 * need to zero out the end of the block if i_size lands in the 3270 * middle of a block. 3271 */ 3272 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0); 3273 if (ret) 3274 goto out; 3275 } 3276 3277 /* 3278 * We have locked the inode at the VFS level (in exclusive mode) and we 3279 * have locked the i_mmap_lock (in exclusive mode). Now before 3280 * locking the file range, flush all delalloc in the range and wait for 3281 * all ordered extents in the range to complete. After this we can lock 3282 * the file range and, due to the previous locking we did, we know there 3283 * can't be more delalloc or ordered extents in the range.
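 *
 * For reference, a minimal userspace sketch that exercises this path
 * (illustrative only, error handling omitted):
 *
 *   int fd = open("somefile", O_RDWR | O_CREAT, 0644);
 *   fallocate(fd, 0, 0, 1 << 20);                      // plain preallocation
 *   fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 16);   // zeroing variant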
3284 */ 3285 ret = btrfs_wait_ordered_range(inode, alloc_start, 3286 alloc_end - alloc_start); 3287 if (ret) 3288 goto out; 3289 3290 if (mode & FALLOC_FL_ZERO_RANGE) { 3291 ret = btrfs_zero_range(inode, offset, len, mode); 3292 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 3293 return ret; 3294 } 3295 3296 locked_end = alloc_end - 1; 3297 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 3298 &cached_state); 3299 3300 btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end); 3301 3302 /* First, check if we exceed the qgroup limit */ 3303 while (cur_offset < alloc_end) { 3304 em = btrfs_get_extent(BTRFS_I(inode), NULL, cur_offset, 3305 alloc_end - cur_offset); 3306 if (IS_ERR(em)) { 3307 ret = PTR_ERR(em); 3308 break; 3309 } 3310 last_byte = min(extent_map_end(em), alloc_end); 3311 actual_end = min_t(u64, extent_map_end(em), offset + len); 3312 last_byte = ALIGN(last_byte, blocksize); 3313 if (em->block_start == EXTENT_MAP_HOLE || 3314 (cur_offset >= inode->i_size && 3315 !(em->flags & EXTENT_FLAG_PREALLOC))) { 3316 const u64 range_len = last_byte - cur_offset; 3317 3318 ret = add_falloc_range(&reserve_list, cur_offset, range_len); 3319 if (ret < 0) { 3320 free_extent_map(em); 3321 break; 3322 } 3323 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), 3324 &data_reserved, cur_offset, range_len); 3325 if (ret < 0) { 3326 free_extent_map(em); 3327 break; 3328 } 3329 qgroup_reserved += range_len; 3330 data_space_needed += range_len; 3331 } 3332 free_extent_map(em); 3333 cur_offset = last_byte; 3334 } 3335 3336 if (!ret && data_space_needed > 0) { 3337 /* 3338 * We are safe to reserve space here as we can't have delalloc 3339 * in the range, see above. 3340 */ 3341 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), 3342 data_space_needed); 3343 if (!ret) 3344 data_space_reserved = data_space_needed; 3345 } 3346 3347 /* 3348 * If ret is still 0, means we're OK to fallocate. 3349 * Or just cleanup the list and exit. 3350 */ 3351 list_for_each_entry_safe(range, tmp, &reserve_list, list) { 3352 if (!ret) { 3353 ret = btrfs_prealloc_file_range(inode, mode, 3354 range->start, 3355 range->len, blocksize, 3356 offset + len, &alloc_hint); 3357 /* 3358 * btrfs_prealloc_file_range() releases space even 3359 * if it returns an error. 3360 */ 3361 data_space_reserved -= range->len; 3362 qgroup_reserved -= range->len; 3363 } else if (data_space_reserved > 0) { 3364 btrfs_free_reserved_data_space(BTRFS_I(inode), 3365 data_reserved, range->start, 3366 range->len); 3367 data_space_reserved -= range->len; 3368 qgroup_reserved -= range->len; 3369 } else if (qgroup_reserved > 0) { 3370 btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved, 3371 range->start, range->len, NULL); 3372 qgroup_reserved -= range->len; 3373 } 3374 list_del(&range->list); 3375 kfree(range); 3376 } 3377 if (ret < 0) 3378 goto out_unlock; 3379 3380 /* 3381 * We didn't need to allocate any more space, but we still extended the 3382 * size of the file so we need to update i_size and the inode item. 3383 */ 3384 ret = btrfs_fallocate_update_isize(inode, actual_end, mode); 3385 out_unlock: 3386 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 3387 &cached_state); 3388 out: 3389 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); 3390 extent_changeset_free(data_reserved); 3391 return ret; 3392 } 3393 3394 /* 3395 * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range 3396 * that has unflushed and/or flushing delalloc. 
There might be other adjacent 3397 * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps 3398 * looping while it gets adjacent subranges, merging them together. 3399 */ 3400 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end, 3401 struct extent_state **cached_state, 3402 bool *search_io_tree, 3403 u64 *delalloc_start_ret, u64 *delalloc_end_ret) 3404 { 3405 u64 len = end + 1 - start; 3406 u64 delalloc_len = 0; 3407 struct btrfs_ordered_extent *oe; 3408 u64 oe_start; 3409 u64 oe_end; 3410 3411 /* 3412 * Search the io tree first for EXTENT_DELALLOC. If we find any, it 3413 * means we have delalloc (dirty pages) for which writeback has not 3414 * started yet. 3415 */ 3416 if (*search_io_tree) { 3417 spin_lock(&inode->lock); 3418 if (inode->delalloc_bytes > 0) { 3419 spin_unlock(&inode->lock); 3420 *delalloc_start_ret = start; 3421 delalloc_len = count_range_bits(&inode->io_tree, 3422 delalloc_start_ret, end, 3423 len, EXTENT_DELALLOC, 1, 3424 cached_state); 3425 } else { 3426 spin_unlock(&inode->lock); 3427 } 3428 } 3429 3430 if (delalloc_len > 0) { 3431 /* 3432 * If delalloc was found then *delalloc_start_ret has a sector size 3433 * aligned value (rounded down). 3434 */ 3435 *delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1; 3436 3437 if (*delalloc_start_ret == start) { 3438 /* Delalloc for the whole range, nothing more to do. */ 3439 if (*delalloc_end_ret == end) 3440 return true; 3441 /* Else trim our search range for ordered extents. */ 3442 start = *delalloc_end_ret + 1; 3443 len = end + 1 - start; 3444 } 3445 } else { 3446 /* No delalloc, future calls don't need to search again. */ 3447 *search_io_tree = false; 3448 } 3449 3450 /* 3451 * Now also check if there's any ordered extent in the range. 3452 * We do this because: 3453 * 3454 * 1) When delalloc is flushed, the file range is locked, we clear the 3455 * EXTENT_DELALLOC bit from the io tree and create an extent map and 3456 * an ordered extent for the write. So we might just have been called 3457 * after delalloc is flushed and before the ordered extent completes 3458 * and inserts the new file extent item in the subvolume's btree; 3459 * 3460 * 2) We may have an ordered extent created by flushing delalloc for a 3461 * subrange that starts before the subrange we found marked with 3462 * EXTENT_DELALLOC in the io tree. 3463 * 3464 * We could also use the extent map tree to find such delalloc that is 3465 * being flushed, but using the ordered extents tree is more efficient 3466 * because it's usually much smaller as ordered extents are removed from 3467 * the tree once they complete. With the extent maps, we may have them 3468 * in the extent map tree for a very long time, and they were either 3469 * created by previous writes or loaded by read operations. 3470 */ 3471 oe = btrfs_lookup_first_ordered_range(inode, start, len); 3472 if (!oe) 3473 return (delalloc_len > 0); 3474 3475 /* The ordered extent may span beyond our search range. */ 3476 oe_start = max(oe->file_offset, start); 3477 oe_end = min(oe->file_offset + oe->num_bytes - 1, end); 3478 3479 btrfs_put_ordered_extent(oe); 3480 3481 /* We don't have unflushed delalloc, return the ordered extent range. */ 3482 if (delalloc_len == 0) { 3483 *delalloc_start_ret = oe_start; 3484 *delalloc_end_ret = oe_end; 3485 return true; 3486 } 3487 3488 /* 3489 * We have both unflushed delalloc (io_tree) and an ordered extent. 3490 * If the ranges are adjacent, return a combined range; otherwise 3491 * return the leftmost range.
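 *
 * For example (illustrative offsets): with unflushed delalloc at
 * [64K, 128K) and an ordered extent at [128K, 192K), the two are adjacent
 * (*delalloc_end_ret + 1 == oe_start), so the combined range [64K, 192K)
 * is returned; if the ordered extent were at [256K, 320K) instead, only
 * the leftmost range [64K, 128K) would be returned.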
3492 */ 3493 if (oe_start < *delalloc_start_ret) { 3494 if (oe_end < *delalloc_start_ret) 3495 *delalloc_end_ret = oe_end; 3496 *delalloc_start_ret = oe_start; 3497 } else if (*delalloc_end_ret + 1 == oe_start) { 3498 *delalloc_end_ret = oe_end; 3499 } 3500 3501 return true; 3502 } 3503 3504 /* 3505 * Check if there's delalloc in a given range. 3506 * 3507 * @inode: The inode. 3508 * @start: The start offset of the range. It does not need to be 3509 * sector size aligned. 3510 * @end: The end offset (inclusive value) of the search range. 3511 * It does not need to be sector size aligned. 3512 * @cached_state: Extent state record used for speeding up delalloc 3513 * searches in the inode's io_tree. Can be NULL. 3514 * @delalloc_start_ret: Output argument, set to the start offset of the 3515 * subrange found with delalloc (may not be sector size 3516 * aligned). 3517 * @delalloc_end_ret: Output argument, set to he end offset (inclusive value) 3518 * of the subrange found with delalloc. 3519 * 3520 * Returns true if a subrange with delalloc is found within the given range, and 3521 * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and 3522 * end offsets of the subrange. 3523 */ 3524 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end, 3525 struct extent_state **cached_state, 3526 u64 *delalloc_start_ret, u64 *delalloc_end_ret) 3527 { 3528 u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize); 3529 u64 prev_delalloc_end = 0; 3530 bool search_io_tree = true; 3531 bool ret = false; 3532 3533 while (cur_offset <= end) { 3534 u64 delalloc_start; 3535 u64 delalloc_end; 3536 bool delalloc; 3537 3538 delalloc = find_delalloc_subrange(inode, cur_offset, end, 3539 cached_state, &search_io_tree, 3540 &delalloc_start, 3541 &delalloc_end); 3542 if (!delalloc) 3543 break; 3544 3545 if (prev_delalloc_end == 0) { 3546 /* First subrange found. */ 3547 *delalloc_start_ret = max(delalloc_start, start); 3548 *delalloc_end_ret = delalloc_end; 3549 ret = true; 3550 } else if (delalloc_start == prev_delalloc_end + 1) { 3551 /* Subrange adjacent to the previous one, merge them. */ 3552 *delalloc_end_ret = delalloc_end; 3553 } else { 3554 /* Subrange not adjacent to the previous one, exit. */ 3555 break; 3556 } 3557 3558 prev_delalloc_end = delalloc_end; 3559 cur_offset = delalloc_end + 1; 3560 cond_resched(); 3561 } 3562 3563 return ret; 3564 } 3565 3566 /* 3567 * Check if there's a hole or delalloc range in a range representing a hole (or 3568 * prealloc extent) found in the inode's subvolume btree. 3569 * 3570 * @inode: The inode. 3571 * @whence: Seek mode (SEEK_DATA or SEEK_HOLE). 3572 * @start: Start offset of the hole region. It does not need to be sector 3573 * size aligned. 3574 * @end: End offset (inclusive value) of the hole region. It does not 3575 * need to be sector size aligned. 3576 * @start_ret: Return parameter, used to set the start of the subrange in the 3577 * hole that matches the search criteria (seek mode), if such 3578 * subrange is found (return value of the function is true). 3579 * The value returned here may not be sector size aligned. 3580 * 3581 * Returns true if a subrange matching the given seek mode is found, and if one 3582 * is found, it updates @start_ret with the start of the subrange. 
3583 */ 3584 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence, 3585 struct extent_state **cached_state, 3586 u64 start, u64 end, u64 *start_ret) 3587 { 3588 u64 delalloc_start; 3589 u64 delalloc_end; 3590 bool delalloc; 3591 3592 delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state, 3593 &delalloc_start, &delalloc_end); 3594 if (delalloc && whence == SEEK_DATA) { 3595 *start_ret = delalloc_start; 3596 return true; 3597 } 3598 3599 if (delalloc && whence == SEEK_HOLE) { 3600 /* 3601 * We found delalloc but it starts after out start offset. So we 3602 * have a hole between our start offset and the delalloc start. 3603 */ 3604 if (start < delalloc_start) { 3605 *start_ret = start; 3606 return true; 3607 } 3608 /* 3609 * Delalloc range starts at our start offset. 3610 * If the delalloc range's length is smaller than our range, 3611 * then it means we have a hole that starts where the delalloc 3612 * subrange ends. 3613 */ 3614 if (delalloc_end < end) { 3615 *start_ret = delalloc_end + 1; 3616 return true; 3617 } 3618 3619 /* There's delalloc for the whole range. */ 3620 return false; 3621 } 3622 3623 if (!delalloc && whence == SEEK_HOLE) { 3624 *start_ret = start; 3625 return true; 3626 } 3627 3628 /* 3629 * No delalloc in the range and we are seeking for data. The caller has 3630 * to iterate to the next extent item in the subvolume btree. 3631 */ 3632 return false; 3633 } 3634 3635 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) 3636 { 3637 struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host); 3638 struct btrfs_file_private *private = file->private_data; 3639 struct btrfs_fs_info *fs_info = inode->root->fs_info; 3640 struct extent_state *cached_state = NULL; 3641 struct extent_state **delalloc_cached_state; 3642 const loff_t i_size = i_size_read(&inode->vfs_inode); 3643 const u64 ino = btrfs_ino(inode); 3644 struct btrfs_root *root = inode->root; 3645 struct btrfs_path *path; 3646 struct btrfs_key key; 3647 u64 last_extent_end; 3648 u64 lockstart; 3649 u64 lockend; 3650 u64 start; 3651 int ret; 3652 bool found = false; 3653 3654 if (i_size == 0 || offset >= i_size) 3655 return -ENXIO; 3656 3657 /* 3658 * Quick path. If the inode has no prealloc extents and its number of 3659 * bytes used matches its i_size, then it can not have holes. 3660 */ 3661 if (whence == SEEK_HOLE && 3662 !(inode->flags & BTRFS_INODE_PREALLOC) && 3663 inode_get_bytes(&inode->vfs_inode) == i_size) 3664 return i_size; 3665 3666 if (!private) { 3667 private = kzalloc(sizeof(*private), GFP_KERNEL); 3668 /* 3669 * No worries if memory allocation failed. 3670 * The private structure is used only for speeding up multiple 3671 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc, 3672 * so everything will still be correct. 3673 */ 3674 file->private_data = private; 3675 } 3676 3677 if (private) 3678 delalloc_cached_state = &private->llseek_cached_state; 3679 else 3680 delalloc_cached_state = NULL; 3681 3682 /* 3683 * offset can be negative, in this case we start finding DATA/HOLE from 3684 * the very start of the file. 
3685 */ 3686 start = max_t(loff_t, 0, offset); 3687 3688 lockstart = round_down(start, fs_info->sectorsize); 3689 lockend = round_up(i_size, fs_info->sectorsize); 3690 if (lockend <= lockstart) 3691 lockend = lockstart + fs_info->sectorsize; 3692 lockend--; 3693 3694 path = btrfs_alloc_path(); 3695 if (!path) 3696 return -ENOMEM; 3697 path->reada = READA_FORWARD; 3698 3699 key.objectid = ino; 3700 key.type = BTRFS_EXTENT_DATA_KEY; 3701 key.offset = start; 3702 3703 last_extent_end = lockstart; 3704 3705 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state); 3706 3707 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3708 if (ret < 0) { 3709 goto out; 3710 } else if (ret > 0 && path->slots[0] > 0) { 3711 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); 3712 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY) 3713 path->slots[0]--; 3714 } 3715 3716 while (start < i_size) { 3717 struct extent_buffer *leaf = path->nodes[0]; 3718 struct btrfs_file_extent_item *extent; 3719 u64 extent_end; 3720 u8 type; 3721 3722 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3723 ret = btrfs_next_leaf(root, path); 3724 if (ret < 0) 3725 goto out; 3726 else if (ret > 0) 3727 break; 3728 3729 leaf = path->nodes[0]; 3730 } 3731 3732 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3733 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) 3734 break; 3735 3736 extent_end = btrfs_file_extent_end(path); 3737 3738 /* 3739 * In the first iteration we may have a slot that points to an 3740 * extent that ends before our start offset, so skip it. 3741 */ 3742 if (extent_end <= start) { 3743 path->slots[0]++; 3744 continue; 3745 } 3746 3747 /* We have an implicit hole, NO_HOLES feature is likely set. */ 3748 if (last_extent_end < key.offset) { 3749 u64 search_start = last_extent_end; 3750 u64 found_start; 3751 3752 /* 3753 * First iteration, @start matches @offset and it's 3754 * within the hole. 3755 */ 3756 if (start == offset) 3757 search_start = offset; 3758 3759 found = find_desired_extent_in_hole(inode, whence, 3760 delalloc_cached_state, 3761 search_start, 3762 key.offset - 1, 3763 &found_start); 3764 if (found) { 3765 start = found_start; 3766 break; 3767 } 3768 /* 3769 * Didn't find data or a hole (due to delalloc) in the 3770 * implicit hole range, so need to analyze the extent. 3771 */ 3772 } 3773 3774 extent = btrfs_item_ptr(leaf, path->slots[0], 3775 struct btrfs_file_extent_item); 3776 type = btrfs_file_extent_type(leaf, extent); 3777 3778 /* 3779 * Can't access the extent's disk_bytenr field if this is an 3780 * inline extent, since at that offset, it's where the extent 3781 * data starts. 3782 */ 3783 if (type == BTRFS_FILE_EXTENT_PREALLOC || 3784 (type == BTRFS_FILE_EXTENT_REG && 3785 btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) { 3786 /* 3787 * Explicit hole or prealloc extent, search for delalloc. 3788 * A prealloc extent is treated like a hole. 3789 */ 3790 u64 search_start = key.offset; 3791 u64 found_start; 3792 3793 /* 3794 * First iteration, @start matches @offset and it's 3795 * within the hole. 3796 */ 3797 if (start == offset) 3798 search_start = offset; 3799 3800 found = find_desired_extent_in_hole(inode, whence, 3801 delalloc_cached_state, 3802 search_start, 3803 extent_end - 1, 3804 &found_start); 3805 if (found) { 3806 start = found_start; 3807 break; 3808 } 3809 /* 3810 * Didn't find data or a hole (due to delalloc) in the 3811 * implicit hole range, so need to analyze the next 3812 * extent item. 

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		offset = find_desired_extent(file, offset, whence);
		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
			FMODE_CAN_ODIRECT;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;
	return generic_file_open(inode, filp);
}

static int check_direct_read(struct btrfs_fs_info *fs_info,
			     const struct iov_iter *iter, loff_t offset)
{
	int ret;
	int i, seg;

	ret = check_direct_IO(fs_info, iter, offset);
	if (ret < 0)
		return ret;

	if (!iter_is_iovec(iter))
		return 0;

	for (seg = 0; seg < iter->nr_segs; seg++) {
		for (i = seg + 1; i < iter->nr_segs; i++) {
			const struct iovec *iov1 = iter_iov(iter) + seg;
			const struct iovec *iov2 = iter_iov(iter) + i;

			if (iov1->iov_base == iov2->iov_base)
				return -EINVAL;
		}
	}
	return 0;
}
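
/*
 * Example of what the pairwise scan in check_direct_read() rejects (a sketch
 * with illustrative names, not from the original source): an iovec array in
 * which two segments share a base address. In that case check_direct_read()
 * returns -EINVAL, btrfs_direct_read() below then returns 0, and
 * btrfs_file_read_iter() falls back to a buffered read:
 *
 *	char buf[4096];
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf, .iov_len = 2048 },
 *		{ .iov_base = buf, .iov_len = 2048 },	// duplicate base
 *	};
 *	// readv(fd, iov, 2) on an O_DIRECT fd takes the buffered fallback.
 */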

static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t prev_left = 0;
	ssize_t read = 0;
	ssize_t ret;

	if (fsverity_active(inode))
		return 0;

	if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos))
		return 0;

	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
again:
	/*
	 * This is similar to what we do for direct IO writes, see the comment
	 * at btrfs_direct_write(), but we also disable page faults in addition
	 * to disabling them only at the iov_iter level. This is because when
	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
	 * which can still trigger page fault-ins despite our having set
	 * ->nofault to true on the 'to' iov_iter.
	 *
	 * The difference from direct IO writes is that we deadlock when trying
	 * to lock the extent range in the inode's tree during the page reads
	 * triggered by the fault-in (while for writes it is due to waiting for
	 * our own ordered extent). This is because for direct IO reads,
	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
	 */
	pagefault_disable();
	to->nofault = true;
	ret = btrfs_dio_read(iocb, to, read);
	to->nofault = false;
	pagefault_enable();

	/* No increment (+=) because iomap returns a cumulative value. */
	if (ret > 0)
		read = ret;

	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
		const size_t left = iov_iter_count(to);

		if (left == prev_left) {
			/*
			 * We didn't make any progress since the last attempt,
			 * so fall back to a buffered read for the remainder of
			 * the range, just to avoid any possibility of looping
			 * for too long.
			 */
			ret = read;
		} else {
			/*
			 * We made some progress since the last retry or this is
			 * the first time we are retrying. Fault in as many pages
			 * as possible and retry.
			 */
			fault_in_iov_iter_writeable(to, left);
			prev_left = left;
			goto again;
		}
	}
	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
	return ret < 0 ? ret : read;
}

static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = btrfs_direct_read(iocb, to);
		if (ret < 0 || !iov_iter_count(to) ||
		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
			return ret;
	}

	return filemap_read(iocb, to, ret);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= btrfs_file_read_iter,
	.splice_read	= filemap_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
};
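
/*
 * A small userspace sketch (illustrative path and sizes, not from the
 * original source) of the read path wired up in btrfs_file_operations above:
 * an O_DIRECT read that btrfs_direct_read() rejects or serves only partially
 * is completed by the buffered fallback in btrfs_file_read_iter():
 *
 *	char *buf;
 *	int fd = open("/mnt/btrfs/file", O_RDONLY | O_DIRECT);
 *
 *	if (posix_memalign((void **)&buf, 4096, 4096) == 0)
 *		read(fd, buf, 4096);	// direct, or buffered on fallback
 */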

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * With compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked for
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work, since it will just kick off a thread to do the
	 * actual work. Instead we need to call filemap_fdatawrite_range
	 * _again_, since it will wait on the page lock, which won't be
	 * unlocked until after the pages have been marked as writeback, and
	 * from there we're good to go. We have to do this, otherwise we'll
	 * miss the ordered extents and that results in badness. Please Josef,
	 * do not think you know better and pull this out at some point in the
	 * future; it is right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}
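
/*
 * A minimal sketch of the calling pattern this helper supports (illustrative;
 * real btrfs callers typically wait on ordered extents rather than on plain
 * writeback):
 *
 *	ret = btrfs_fdatawrite_range(inode, start, end);
 *	if (!ret)
 *		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
 *
 * The second filemap_fdatawrite_range() call above is what makes this safe:
 * it blocks on the page locks held by the async compression workers, so by
 * the time it returns, the ordered extents for the whole range exist and can
 * actually be waited on.
 */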