/* xref: /linux/fs/btrfs/file.c (revision b92dd11725a7c57f55e148c7d3ce58a86f480575) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "btrfs_inode.h"
24 #include "print-tree.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "volumes.h"
28 #include "qgroup.h"
29 #include "compression.h"
30 #include "delalloc-space.h"
31 #include "reflink.h"
32 #include "subpage.h"
33 
34 static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
40 struct inode_defrag {
41 	struct rb_node rb_node;
42 	/* objectid */
43 	u64 ino;
	/*
	 * Transid at which the defrag was added; we search for extents newer
	 * than this.
	 */
48 	u64 transid;
49 
50 	/* root objectid */
51 	u64 root;
52 
53 	/*
54 	 * The extent size threshold for autodefrag.
55 	 *
	 * This value is different for compressed/non-compressed extents, thus
	 * it needs to be passed in from the higher layer
	 * (i.e. inode_should_defrag()).
59 	 */
60 	u32 extent_thresh;
61 };
62 
63 static int __compare_inode_defrag(struct inode_defrag *defrag1,
64 				  struct inode_defrag *defrag2)
65 {
66 	if (defrag1->root > defrag2->root)
67 		return 1;
68 	else if (defrag1->root < defrag2->root)
69 		return -1;
70 	else if (defrag1->ino > defrag2->ino)
71 		return 1;
72 	else if (defrag1->ino < defrag2->ino)
73 		return -1;
74 	else
75 		return 0;
76 }
77 
/*
 * Insert a record for an inode into the defrag tree.  The lock must be held
 * already (fs_info->defrag_inodes_lock).
 *
 * If you're inserting a record for an older transid than an existing record,
 * the transid already in the tree is lowered.
 *
 * If an existing record is found, -EEXIST is returned and the caller is
 * expected to free the defrag record it passed in.
 */
87 static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
88 				    struct inode_defrag *defrag)
89 {
90 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
91 	struct inode_defrag *entry;
92 	struct rb_node **p;
93 	struct rb_node *parent = NULL;
94 	int ret;
95 
96 	p = &fs_info->defrag_inodes.rb_node;
97 	while (*p) {
98 		parent = *p;
99 		entry = rb_entry(parent, struct inode_defrag, rb_node);
100 
101 		ret = __compare_inode_defrag(defrag, entry);
102 		if (ret < 0)
103 			p = &parent->rb_left;
104 		else if (ret > 0)
105 			p = &parent->rb_right;
106 		else {
			/*
			 * If we're reinserting an entry for an old defrag
			 * run, make sure to lower the transid of our existing
			 * record.
			 */
111 			if (defrag->transid < entry->transid)
112 				entry->transid = defrag->transid;
113 			entry->extent_thresh = min(defrag->extent_thresh,
114 						   entry->extent_thresh);
115 			return -EEXIST;
116 		}
117 	}
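	/*
	 * Mark the inode as queued so btrfs_add_inode_defrag() can bail out
	 * early next time without taking the lock.
	 */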
118 	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
119 	rb_link_node(&defrag->rb_node, parent, p);
120 	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
121 	return 0;
122 }
123 
124 static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
125 {
126 	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
127 		return 0;
128 
129 	if (btrfs_fs_closing(fs_info))
130 		return 0;
131 
132 	return 1;
133 }
134 
135 /*
 * Insert a defrag record for this inode if auto defrag is enabled.
138  */
139 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
140 			   struct btrfs_inode *inode, u32 extent_thresh)
141 {
142 	struct btrfs_root *root = inode->root;
143 	struct btrfs_fs_info *fs_info = root->fs_info;
144 	struct inode_defrag *defrag;
145 	u64 transid;
146 	int ret;
147 
148 	if (!__need_auto_defrag(fs_info))
149 		return 0;
150 
151 	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
152 		return 0;
153 
154 	if (trans)
155 		transid = trans->transid;
156 	else
157 		transid = inode->root->last_trans;
158 
159 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
160 	if (!defrag)
161 		return -ENOMEM;
162 
163 	defrag->ino = btrfs_ino(inode);
164 	defrag->transid = transid;
165 	defrag->root = root->root_key.objectid;
166 	defrag->extent_thresh = extent_thresh;
167 
168 	spin_lock(&fs_info->defrag_inodes_lock);
169 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
170 		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, the new in-memory copy
		 * won't have the IN_DEFRAG flag set.  In that case we may
		 * find an existing defrag record for it in the tree.
174 		 */
175 		ret = __btrfs_add_inode_defrag(inode, defrag);
176 		if (ret)
177 			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
178 	} else {
179 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
180 	}
181 	spin_unlock(&fs_info->defrag_inodes_lock);
182 	return 0;
183 }
184 
185 /*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one (in root/inode order).
188  */
189 static struct inode_defrag *
190 btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
191 {
192 	struct inode_defrag *entry = NULL;
193 	struct inode_defrag tmp;
194 	struct rb_node *p;
195 	struct rb_node *parent = NULL;
196 	int ret;
197 
198 	tmp.ino = ino;
199 	tmp.root = root;
200 
201 	spin_lock(&fs_info->defrag_inodes_lock);
202 	p = fs_info->defrag_inodes.rb_node;
203 	while (p) {
204 		parent = p;
205 		entry = rb_entry(parent, struct inode_defrag, rb_node);
206 
207 		ret = __compare_inode_defrag(&tmp, entry);
208 		if (ret < 0)
209 			p = parent->rb_left;
210 		else if (ret > 0)
211 			p = parent->rb_right;
212 		else
213 			goto out;
214 	}
215 
216 	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
217 		parent = rb_next(parent);
218 		if (parent)
219 			entry = rb_entry(parent, struct inode_defrag, rb_node);
220 		else
221 			entry = NULL;
222 	}
223 out:
224 	if (entry)
225 		rb_erase(parent, &fs_info->defrag_inodes);
226 	spin_unlock(&fs_info->defrag_inodes_lock);
227 	return entry;
228 }
229 
230 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
231 {
232 	struct inode_defrag *defrag;
233 	struct rb_node *node;
234 
235 	spin_lock(&fs_info->defrag_inodes_lock);
236 	node = rb_first(&fs_info->defrag_inodes);
237 	while (node) {
238 		rb_erase(node, &fs_info->defrag_inodes);
239 		defrag = rb_entry(node, struct inode_defrag, rb_node);
240 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
241 
242 		cond_resched_lock(&fs_info->defrag_inodes_lock);
243 
244 		node = rb_first(&fs_info->defrag_inodes);
245 	}
246 	spin_unlock(&fs_info->defrag_inodes_lock);
247 }
248 
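/*
 * Cap how much btrfs_defrag_file() processes per pass, so that one huge
 * inode cannot monopolize the cleaner thread running autodefrag.
 */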
249 #define BTRFS_DEFRAG_BATCH	1024
250 
251 static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
252 				    struct inode_defrag *defrag)
253 {
254 	struct btrfs_root *inode_root;
255 	struct inode *inode;
256 	struct btrfs_ioctl_defrag_range_args range;
257 	int ret = 0;
258 	u64 cur = 0;
259 
260 again:
261 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
262 		goto cleanup;
263 	if (!__need_auto_defrag(fs_info))
264 		goto cleanup;
265 
266 	/* get the inode */
267 	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
268 	if (IS_ERR(inode_root)) {
269 		ret = PTR_ERR(inode_root);
270 		goto cleanup;
271 	}
272 
273 	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
274 	btrfs_put_root(inode_root);
275 	if (IS_ERR(inode)) {
276 		ret = PTR_ERR(inode);
277 		goto cleanup;
278 	}
279 
280 	if (cur >= i_size_read(inode)) {
281 		iput(inode);
282 		goto cleanup;
283 	}
284 
285 	/* do a chunk of defrag */
286 	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
287 	memset(&range, 0, sizeof(range));
288 	range.len = (u64)-1;
289 	range.start = cur;
290 	range.extent_thresh = defrag->extent_thresh;
291 
292 	sb_start_write(fs_info->sb);
293 	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
294 				       BTRFS_DEFRAG_BATCH);
295 	sb_end_write(fs_info->sb);
296 	iput(inode);
297 
298 	if (ret < 0)
299 		goto cleanup;
300 
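	/*
	 * btrfs_defrag_file() updates range.start to where it stopped, and
	 * taking the max with cur + sectorsize guarantees forward progress
	 * even when nothing was defragged.
	 */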
301 	cur = max(cur + fs_info->sectorsize, range.start);
302 	goto again;
303 
304 cleanup:
305 	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
306 	return ret;
307 }
308 
309 /*
310  * run through the list of inodes in the FS that need
311  * defragging
312  */
313 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
314 {
315 	struct inode_defrag *defrag;
316 	u64 first_ino = 0;
317 	u64 root_objectid = 0;
318 
319 	atomic_inc(&fs_info->defrag_running);
320 	while (1) {
321 		/* Pause the auto defragger. */
322 		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
323 			     &fs_info->fs_state))
324 			break;
325 
326 		if (!__need_auto_defrag(fs_info))
327 			break;
328 
329 		/* find an inode to defrag */
330 		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
331 						 first_ino);
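		/*
		 * Nothing found at or after our cursor: if we were partway
		 * through the tree, wrap around to the start once, otherwise
		 * the tree is empty and we are done.
		 */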
332 		if (!defrag) {
333 			if (root_objectid || first_ino) {
334 				root_objectid = 0;
335 				first_ino = 0;
336 				continue;
337 			} else {
338 				break;
339 			}
340 		}
341 
342 		first_ino = defrag->ino + 1;
343 		root_objectid = defrag->root;
344 
345 		__btrfs_run_defrag_inode(fs_info, defrag);
346 	}
347 	atomic_dec(&fs_info->defrag_running);
348 
349 	/*
350 	 * during unmount, we use the transaction_wait queue to
351 	 * wait for the defragger to stop
352 	 */
353 	wake_up(&fs_info->transaction_wait);
354 	return 0;
355 }
356 
/*
 * Simple helper to fault in pages and copy.  This should go away and be
 * replaced with calls into generic code.
 */
360 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
361 					 struct page **prepared_pages,
362 					 struct iov_iter *i)
363 {
364 	size_t copied = 0;
365 	size_t total_copied = 0;
366 	int pg = 0;
367 	int offset = offset_in_page(pos);
368 
369 	while (write_bytes > 0) {
370 		size_t count = min_t(size_t,
371 				     PAGE_SIZE - offset, write_bytes);
372 		struct page *page = prepared_pages[pg];
373 		/*
374 		 * Copy data from userspace to the current page
375 		 */
376 		copied = copy_page_from_iter_atomic(page, offset, count, i);
377 
378 		/* Flush processor's dcache for this page */
379 		flush_dcache_page(page);
380 
		/*
		 * If we get a partial write, we can end up with partially
		 * up-to-date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page-at-a-time copies after we return 0.
		 */
390 		if (unlikely(copied < count)) {
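			/*
			 * If the page was not up to date before the short
			 * copy it is now partially filled, so revert the
			 * iterator and retry from the same position.
			 */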
391 			if (!PageUptodate(page)) {
392 				iov_iter_revert(i, copied);
393 				copied = 0;
394 			}
395 			if (!copied)
396 				break;
397 		}
398 
399 		write_bytes -= copied;
400 		total_copied += copied;
401 		offset += copied;
402 		if (offset == PAGE_SIZE) {
403 			pg++;
404 			offset = 0;
405 		}
406 	}
407 	return total_copied;
408 }
409 
410 /*
411  * unlocks pages after btrfs_file_write is done with them
412  */
413 static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
414 			     struct page **pages, size_t num_pages,
415 			     u64 pos, u64 copied)
416 {
417 	size_t i;
418 	u64 block_start = round_down(pos, fs_info->sectorsize);
419 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
420 
421 	ASSERT(block_len <= U32_MAX);
422 	for (i = 0; i < num_pages; i++) {
		/*
		 * "Page checked" is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty();
		 * clear it here.  There should be no need to mark the pages
		 * accessed, as prepare_pages() should have marked them
		 * accessed via find_or_create_page().
		 */
429 		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
430 					       block_len);
431 		unlock_page(pages[i]);
432 		put_page(pages[i]);
433 	}
434 }
435 
436 /*
437  * After btrfs_copy_from_user(), update the following things for delalloc:
438  * - Mark newly dirtied pages as DELALLOC in the io tree.
439  *   Used to advise which range is to be written back.
440  * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
441  * - Update inode size for past EOF write
442  */
443 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
444 		      size_t num_pages, loff_t pos, size_t write_bytes,
445 		      struct extent_state **cached, bool noreserve)
446 {
447 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
448 	int err = 0;
449 	int i;
450 	u64 num_bytes;
451 	u64 start_pos;
452 	u64 end_of_last_block;
453 	u64 end_pos = pos + write_bytes;
454 	loff_t isize = i_size_read(&inode->vfs_inode);
455 	unsigned int extra_bits = 0;
456 
457 	if (write_bytes == 0)
458 		return 0;
459 
460 	if (noreserve)
461 		extra_bits |= EXTENT_NORESERVE;
462 
463 	start_pos = round_down(pos, fs_info->sectorsize);
464 	num_bytes = round_up(write_bytes + pos - start_pos,
465 			     fs_info->sectorsize);
466 	ASSERT(num_bytes <= U32_MAX);
467 
468 	end_of_last_block = start_pos + num_bytes - 1;
469 
470 	/*
	 * The pages may have already been dirty; clear out the old accounting
	 * so we can set things up properly.
473 	 */
474 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
475 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
476 			 cached);
477 
478 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
479 					extra_bits, cached);
480 	if (err)
481 		return err;
482 
483 	for (i = 0; i < num_pages; i++) {
484 		struct page *p = pages[i];
485 
486 		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
487 		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
488 		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
489 	}
490 
491 	/*
	 * We've only changed i_size in memory, and we haven't updated the
	 * on-disk i_size.  There is no need to log the inode at this time.
495 	 */
496 	if (end_pos > isize)
497 		i_size_write(&inode->vfs_inode, end_pos);
498 	return 0;
499 }
500 
501 /*
 * This is very complex, but the basic idea is to drop all extents in the
 * range args->start - args->end.
505  *
506  * If an extent intersects the range but is not entirely inside the range
507  * it is either truncated or split.  Anything entirely inside the range
508  * is deleted from the tree.
509  *
510  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
511  * to deal with that. We set the field 'bytes_found' of the arguments structure
512  * with the number of allocated bytes found in the target range, so that the
513  * caller can update the inode's number of bytes in an atomic way when
514  * replacing extents in a range to avoid races with stat(2).
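 *
 * A minimal usage sketch (hypothetical caller; field names as consumed
 * below):
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *
 *	args.start = start;
 *	args.end = start + len;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 *
 * On success args.bytes_found and args.drop_end (the end of what was
 * actually dropped) are filled in.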
515  */
516 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
517 		       struct btrfs_root *root, struct btrfs_inode *inode,
518 		       struct btrfs_drop_extents_args *args)
519 {
520 	struct btrfs_fs_info *fs_info = root->fs_info;
521 	struct extent_buffer *leaf;
522 	struct btrfs_file_extent_item *fi;
523 	struct btrfs_ref ref = { 0 };
524 	struct btrfs_key key;
525 	struct btrfs_key new_key;
526 	u64 ino = btrfs_ino(inode);
527 	u64 search_start = args->start;
528 	u64 disk_bytenr = 0;
529 	u64 num_bytes = 0;
530 	u64 extent_offset = 0;
531 	u64 extent_end = 0;
532 	u64 last_end = args->start;
533 	int del_nr = 0;
534 	int del_slot = 0;
535 	int extent_type;
536 	int recow;
537 	int ret;
538 	int modify_tree = -1;
539 	int update_refs;
540 	int found = 0;
541 	struct btrfs_path *path = args->path;
542 
543 	args->bytes_found = 0;
544 	args->extent_inserted = false;
545 
546 	/* Must always have a path if ->replace_extent is true */
547 	ASSERT(!(args->replace_extent && !args->path));
548 
549 	if (!path) {
550 		path = btrfs_alloc_path();
551 		if (!path) {
552 			ret = -ENOMEM;
553 			goto out;
554 		}
555 	}
556 
557 	if (args->drop_cache)
558 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
559 
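	/*
	 * There should be nothing beyond the current on-disk i_size, so start
	 * with a read-only search; if we do find an extent there, the recow
	 * handling below retries with modify_tree = -1.
	 */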
560 	if (args->start >= inode->disk_i_size && !args->replace_extent)
561 		modify_tree = 0;
562 
563 	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
564 	while (1) {
565 		recow = 0;
566 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
567 					       search_start, modify_tree);
568 		if (ret < 0)
569 			break;
570 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
571 			leaf = path->nodes[0];
572 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
573 			if (key.objectid == ino &&
574 			    key.type == BTRFS_EXTENT_DATA_KEY)
575 				path->slots[0]--;
576 		}
577 		ret = 0;
578 next_slot:
579 		leaf = path->nodes[0];
580 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
581 			BUG_ON(del_nr > 0);
582 			ret = btrfs_next_leaf(root, path);
583 			if (ret < 0)
584 				break;
585 			if (ret > 0) {
586 				ret = 0;
587 				break;
588 			}
589 			leaf = path->nodes[0];
590 			recow = 1;
591 		}
592 
593 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
594 
595 		if (key.objectid > ino)
596 			break;
597 		if (WARN_ON_ONCE(key.objectid < ino) ||
598 		    key.type < BTRFS_EXTENT_DATA_KEY) {
599 			ASSERT(del_nr == 0);
600 			path->slots[0]++;
601 			goto next_slot;
602 		}
603 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
604 			break;
605 
606 		fi = btrfs_item_ptr(leaf, path->slots[0],
607 				    struct btrfs_file_extent_item);
608 		extent_type = btrfs_file_extent_type(leaf, fi);
609 
610 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
611 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
612 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
613 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
614 			extent_offset = btrfs_file_extent_offset(leaf, fi);
615 			extent_end = key.offset +
616 				btrfs_file_extent_num_bytes(leaf, fi);
617 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
618 			extent_end = key.offset +
619 				btrfs_file_extent_ram_bytes(leaf, fi);
620 		} else {
621 			/* can't happen */
622 			BUG();
623 		}
624 
		/*
		 * Don't skip extent items representing 0 byte lengths.  They
		 * used to be created (bug) when punching holes hit an -ENOSPC
		 * condition.  So if we find one here, just ensure we delete
		 * it, otherwise we would insert a new file extent item with
		 * the same key (offset) as that 0 length file extent item in
		 * the call to btrfs_setup_item_for_insert() later in this
		 * function.
		 */
634 		if (extent_end == key.offset && extent_end >= search_start) {
635 			last_end = extent_end;
636 			goto delete_extent_item;
637 		}
638 
639 		if (extent_end <= search_start) {
640 			path->slots[0]++;
641 			goto next_slot;
642 		}
643 
644 		found = 1;
645 		search_start = max(key.offset, args->start);
646 		if (recow || !modify_tree) {
647 			modify_tree = -1;
648 			btrfs_release_path(path);
649 			continue;
650 		}
651 
652 		/*
653 		 *     | - range to drop - |
654 		 *  | -------- extent -------- |
655 		 */
656 		if (args->start > key.offset && args->end < extent_end) {
657 			BUG_ON(del_nr > 0);
658 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
659 				ret = -EOPNOTSUPP;
660 				break;
661 			}
662 
663 			memcpy(&new_key, &key, sizeof(new_key));
664 			new_key.offset = args->start;
665 			ret = btrfs_duplicate_item(trans, root, path,
666 						   &new_key);
667 			if (ret == -EAGAIN) {
668 				btrfs_release_path(path);
669 				continue;
670 			}
671 			if (ret < 0)
672 				break;
673 
674 			leaf = path->nodes[0];
675 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
676 					    struct btrfs_file_extent_item);
677 			btrfs_set_file_extent_num_bytes(leaf, fi,
678 							args->start - key.offset);
679 
680 			fi = btrfs_item_ptr(leaf, path->slots[0],
681 					    struct btrfs_file_extent_item);
682 
683 			extent_offset += args->start - key.offset;
684 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
685 			btrfs_set_file_extent_num_bytes(leaf, fi,
686 							extent_end - args->start);
687 			btrfs_mark_buffer_dirty(leaf);
688 
689 			if (update_refs && disk_bytenr > 0) {
690 				btrfs_init_generic_ref(&ref,
691 						BTRFS_ADD_DELAYED_REF,
692 						disk_bytenr, num_bytes, 0);
693 				btrfs_init_data_ref(&ref,
694 						root->root_key.objectid,
695 						new_key.objectid,
696 						args->start - extent_offset,
697 						0, false);
698 				ret = btrfs_inc_extent_ref(trans, &ref);
699 				BUG_ON(ret); /* -ENOMEM */
700 			}
701 			key.offset = args->start;
702 		}
703 		/*
704 		 * From here on out we will have actually dropped something, so
705 		 * last_end can be updated.
706 		 */
707 		last_end = extent_end;
708 
709 		/*
710 		 *  | ---- range to drop ----- |
711 		 *      | -------- extent -------- |
712 		 */
713 		if (args->start <= key.offset && args->end < extent_end) {
714 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
715 				ret = -EOPNOTSUPP;
716 				break;
717 			}
718 
719 			memcpy(&new_key, &key, sizeof(new_key));
720 			new_key.offset = args->end;
721 			btrfs_set_item_key_safe(fs_info, path, &new_key);
722 
723 			extent_offset += args->end - key.offset;
724 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
725 			btrfs_set_file_extent_num_bytes(leaf, fi,
726 							extent_end - args->end);
727 			btrfs_mark_buffer_dirty(leaf);
728 			if (update_refs && disk_bytenr > 0)
729 				args->bytes_found += args->end - key.offset;
730 			break;
731 		}
732 
733 		search_start = extent_end;
734 		/*
735 		 *       | ---- range to drop ----- |
736 		 *  | -------- extent -------- |
737 		 */
738 		if (args->start > key.offset && args->end >= extent_end) {
739 			BUG_ON(del_nr > 0);
740 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
741 				ret = -EOPNOTSUPP;
742 				break;
743 			}
744 
745 			btrfs_set_file_extent_num_bytes(leaf, fi,
746 							args->start - key.offset);
747 			btrfs_mark_buffer_dirty(leaf);
748 			if (update_refs && disk_bytenr > 0)
749 				args->bytes_found += extent_end - args->start;
750 			if (args->end == extent_end)
751 				break;
752 
753 			path->slots[0]++;
754 			goto next_slot;
755 		}
756 
757 		/*
758 		 *  | ---- range to drop ----- |
759 		 *    | ------ extent ------ |
760 		 */
761 		if (args->start <= key.offset && args->end >= extent_end) {
762 delete_extent_item:
763 			if (del_nr == 0) {
764 				del_slot = path->slots[0];
765 				del_nr = 1;
766 			} else {
767 				BUG_ON(del_slot + del_nr != path->slots[0]);
768 				del_nr++;
769 			}
770 
771 			if (update_refs &&
772 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
773 				args->bytes_found += extent_end - key.offset;
774 				extent_end = ALIGN(extent_end,
775 						   fs_info->sectorsize);
776 			} else if (update_refs && disk_bytenr > 0) {
777 				btrfs_init_generic_ref(&ref,
778 						BTRFS_DROP_DELAYED_REF,
779 						disk_bytenr, num_bytes, 0);
780 				btrfs_init_data_ref(&ref,
781 						root->root_key.objectid,
782 						key.objectid,
783 						key.offset - extent_offset, 0,
784 						false);
785 				ret = btrfs_free_extent(trans, &ref);
786 				BUG_ON(ret); /* -ENOMEM */
787 				args->bytes_found += extent_end - key.offset;
788 			}
789 
790 			if (args->end == extent_end)
791 				break;
792 
793 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
794 				path->slots[0]++;
795 				goto next_slot;
796 			}
797 
798 			ret = btrfs_del_items(trans, root, path, del_slot,
799 					      del_nr);
800 			if (ret) {
801 				btrfs_abort_transaction(trans, ret);
802 				break;
803 			}
804 
805 			del_nr = 0;
806 			del_slot = 0;
807 
808 			btrfs_release_path(path);
809 			continue;
810 		}
811 
812 		BUG();
813 	}
814 
815 	if (!ret && del_nr > 0) {
816 		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items are moved off from our leaf to its
		 * immediate left or right neighbor leaves, we end up with a
		 * correct and adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
821 		 */
822 		path->slots[0] = del_slot;
823 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
824 		if (ret)
825 			btrfs_abort_transaction(trans, ret);
826 	}
827 
828 	leaf = path->nodes[0];
829 	/*
830 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
831 	 * which case it unlocked our path, so check path->locks[0] matches a
832 	 * write lock.
833 	 */
834 	if (!ret && args->replace_extent &&
835 	    path->locks[0] == BTRFS_WRITE_LOCK &&
836 	    btrfs_leaf_free_space(leaf) >=
837 	    sizeof(struct btrfs_item) + args->extent_item_size) {
838 
839 		key.objectid = ino;
840 		key.type = BTRFS_EXTENT_DATA_KEY;
841 		key.offset = args->start;
842 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
843 			struct btrfs_key slot_key;
844 
845 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
846 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
847 				path->slots[0]++;
848 		}
849 		btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
850 		args->extent_inserted = true;
851 	}
852 
853 	if (!args->path)
854 		btrfs_free_path(path);
855 	else if (!args->extent_inserted)
856 		btrfs_release_path(path);
857 out:
858 	args->drop_end = found ? min(args->end, last_end) : args->end;
859 
860 	return ret;
861 }
862 
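/*
 * Helper for btrfs_mark_extent_written(): check whether the file extent item
 * at @slot is an unsplit piece of the same physical extent (@bytenr, with
 * original file offset @orig_offset), so that a split part can be merged back
 * into its neighbor.  Returns 1 and fills in @start/@end with the item's
 * range if it is mergeable, 0 otherwise.
 */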
863 static int extent_mergeable(struct extent_buffer *leaf, int slot,
864 			    u64 objectid, u64 bytenr, u64 orig_offset,
865 			    u64 *start, u64 *end)
866 {
867 	struct btrfs_file_extent_item *fi;
868 	struct btrfs_key key;
869 	u64 extent_end;
870 
871 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
872 		return 0;
873 
874 	btrfs_item_key_to_cpu(leaf, &key, slot);
875 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
876 		return 0;
877 
878 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
879 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
880 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
881 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
882 	    btrfs_file_extent_compression(leaf, fi) ||
883 	    btrfs_file_extent_encryption(leaf, fi) ||
884 	    btrfs_file_extent_other_encoding(leaf, fi))
885 		return 0;
886 
887 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
888 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
889 		return 0;
890 
891 	*start = key.offset;
892 	*end = extent_end;
893 	return 1;
894 }
895 
896 /*
897  * Mark extent in the range start - end as written.
898  *
899  * This changes extent type from 'pre-allocated' to 'regular'. If only
900  * part of extent is marked as written, the extent will be split into
901  * two or three.
902  */
903 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
904 			      struct btrfs_inode *inode, u64 start, u64 end)
905 {
906 	struct btrfs_fs_info *fs_info = trans->fs_info;
907 	struct btrfs_root *root = inode->root;
908 	struct extent_buffer *leaf;
909 	struct btrfs_path *path;
910 	struct btrfs_file_extent_item *fi;
911 	struct btrfs_ref ref = { 0 };
912 	struct btrfs_key key;
913 	struct btrfs_key new_key;
914 	u64 bytenr;
915 	u64 num_bytes;
916 	u64 extent_end;
917 	u64 orig_offset;
918 	u64 other_start;
919 	u64 other_end;
920 	u64 split;
921 	int del_nr = 0;
922 	int del_slot = 0;
923 	int recow;
924 	int ret = 0;
925 	u64 ino = btrfs_ino(inode);
926 
927 	path = btrfs_alloc_path();
928 	if (!path)
929 		return -ENOMEM;
930 again:
931 	recow = 0;
932 	split = start;
933 	key.objectid = ino;
934 	key.type = BTRFS_EXTENT_DATA_KEY;
935 	key.offset = split;
936 
937 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
938 	if (ret < 0)
939 		goto out;
940 	if (ret > 0 && path->slots[0] > 0)
941 		path->slots[0]--;
942 
943 	leaf = path->nodes[0];
944 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
945 	if (key.objectid != ino ||
946 	    key.type != BTRFS_EXTENT_DATA_KEY) {
947 		ret = -EINVAL;
948 		btrfs_abort_transaction(trans, ret);
949 		goto out;
950 	}
951 	fi = btrfs_item_ptr(leaf, path->slots[0],
952 			    struct btrfs_file_extent_item);
953 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
954 		ret = -EINVAL;
955 		btrfs_abort_transaction(trans, ret);
956 		goto out;
957 	}
958 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
959 	if (key.offset > start || extent_end < end) {
960 		ret = -EINVAL;
961 		btrfs_abort_transaction(trans, ret);
962 		goto out;
963 	}
964 
965 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
966 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
967 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
968 	memcpy(&new_key, &key, sizeof(new_key));
969 
970 	if (start == key.offset && end < extent_end) {
971 		other_start = 0;
972 		other_end = start;
973 		if (extent_mergeable(leaf, path->slots[0] - 1,
974 				     ino, bytenr, orig_offset,
975 				     &other_start, &other_end)) {
976 			new_key.offset = end;
977 			btrfs_set_item_key_safe(fs_info, path, &new_key);
978 			fi = btrfs_item_ptr(leaf, path->slots[0],
979 					    struct btrfs_file_extent_item);
980 			btrfs_set_file_extent_generation(leaf, fi,
981 							 trans->transid);
982 			btrfs_set_file_extent_num_bytes(leaf, fi,
983 							extent_end - end);
984 			btrfs_set_file_extent_offset(leaf, fi,
985 						     end - orig_offset);
986 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
987 					    struct btrfs_file_extent_item);
988 			btrfs_set_file_extent_generation(leaf, fi,
989 							 trans->transid);
990 			btrfs_set_file_extent_num_bytes(leaf, fi,
991 							end - other_start);
992 			btrfs_mark_buffer_dirty(leaf);
993 			goto out;
994 		}
995 	}
996 
997 	if (start > key.offset && end == extent_end) {
998 		other_start = end;
999 		other_end = 0;
1000 		if (extent_mergeable(leaf, path->slots[0] + 1,
1001 				     ino, bytenr, orig_offset,
1002 				     &other_start, &other_end)) {
1003 			fi = btrfs_item_ptr(leaf, path->slots[0],
1004 					    struct btrfs_file_extent_item);
1005 			btrfs_set_file_extent_num_bytes(leaf, fi,
1006 							start - key.offset);
1007 			btrfs_set_file_extent_generation(leaf, fi,
1008 							 trans->transid);
1009 			path->slots[0]++;
1010 			new_key.offset = start;
1011 			btrfs_set_item_key_safe(fs_info, path, &new_key);
1012 
1013 			fi = btrfs_item_ptr(leaf, path->slots[0],
1014 					    struct btrfs_file_extent_item);
1015 			btrfs_set_file_extent_generation(leaf, fi,
1016 							 trans->transid);
1017 			btrfs_set_file_extent_num_bytes(leaf, fi,
1018 							other_end - start);
1019 			btrfs_set_file_extent_offset(leaf, fi,
1020 						     start - orig_offset);
1021 			btrfs_mark_buffer_dirty(leaf);
1022 			goto out;
1023 		}
1024 	}
1025 
1026 	while (start > key.offset || end < extent_end) {
1027 		if (key.offset == start)
1028 			split = end;
1029 
1030 		new_key.offset = split;
1031 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1032 		if (ret == -EAGAIN) {
1033 			btrfs_release_path(path);
1034 			goto again;
1035 		}
1036 		if (ret < 0) {
1037 			btrfs_abort_transaction(trans, ret);
1038 			goto out;
1039 		}
1040 
1041 		leaf = path->nodes[0];
1042 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1043 				    struct btrfs_file_extent_item);
1044 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1045 		btrfs_set_file_extent_num_bytes(leaf, fi,
1046 						split - key.offset);
1047 
1048 		fi = btrfs_item_ptr(leaf, path->slots[0],
1049 				    struct btrfs_file_extent_item);
1050 
1051 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1052 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1053 		btrfs_set_file_extent_num_bytes(leaf, fi,
1054 						extent_end - split);
1055 		btrfs_mark_buffer_dirty(leaf);
1056 
1057 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1058 				       num_bytes, 0);
1059 		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1060 				    orig_offset, 0, false);
1061 		ret = btrfs_inc_extent_ref(trans, &ref);
1062 		if (ret) {
1063 			btrfs_abort_transaction(trans, ret);
1064 			goto out;
1065 		}
1066 
1067 		if (split == start) {
1068 			key.offset = start;
1069 		} else {
1070 			if (start != key.offset) {
1071 				ret = -EINVAL;
1072 				btrfs_abort_transaction(trans, ret);
1073 				goto out;
1074 			}
1075 			path->slots[0]--;
1076 			extent_end = end;
1077 		}
1078 		recow = 1;
1079 	}
1080 
1081 	other_start = end;
1082 	other_end = 0;
1083 	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1084 			       num_bytes, 0);
1085 	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
1086 			    0, false);
1087 	if (extent_mergeable(leaf, path->slots[0] + 1,
1088 			     ino, bytenr, orig_offset,
1089 			     &other_start, &other_end)) {
1090 		if (recow) {
1091 			btrfs_release_path(path);
1092 			goto again;
1093 		}
1094 		extent_end = other_end;
1095 		del_slot = path->slots[0] + 1;
1096 		del_nr++;
1097 		ret = btrfs_free_extent(trans, &ref);
1098 		if (ret) {
1099 			btrfs_abort_transaction(trans, ret);
1100 			goto out;
1101 		}
1102 	}
1103 	other_start = 0;
1104 	other_end = start;
1105 	if (extent_mergeable(leaf, path->slots[0] - 1,
1106 			     ino, bytenr, orig_offset,
1107 			     &other_start, &other_end)) {
1108 		if (recow) {
1109 			btrfs_release_path(path);
1110 			goto again;
1111 		}
1112 		key.offset = other_start;
1113 		del_slot = path->slots[0];
1114 		del_nr++;
1115 		ret = btrfs_free_extent(trans, &ref);
1116 		if (ret) {
1117 			btrfs_abort_transaction(trans, ret);
1118 			goto out;
1119 		}
1120 	}
1121 	if (del_nr == 0) {
1122 		fi = btrfs_item_ptr(leaf, path->slots[0],
1123 			   struct btrfs_file_extent_item);
1124 		btrfs_set_file_extent_type(leaf, fi,
1125 					   BTRFS_FILE_EXTENT_REG);
1126 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1127 		btrfs_mark_buffer_dirty(leaf);
1128 	} else {
1129 		fi = btrfs_item_ptr(leaf, del_slot - 1,
1130 			   struct btrfs_file_extent_item);
1131 		btrfs_set_file_extent_type(leaf, fi,
1132 					   BTRFS_FILE_EXTENT_REG);
1133 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1134 		btrfs_set_file_extent_num_bytes(leaf, fi,
1135 						extent_end - key.offset);
1136 		btrfs_mark_buffer_dirty(leaf);
1137 
1138 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1139 		if (ret < 0) {
1140 			btrfs_abort_transaction(trans, ret);
1141 			goto out;
1142 		}
1143 	}
1144 out:
1145 	btrfs_free_path(path);
1146 	return ret;
1147 }
1148 
1149 /*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
1152  */
1153 static int prepare_uptodate_page(struct inode *inode,
1154 				 struct page *page, u64 pos,
1155 				 bool force_uptodate)
1156 {
1157 	struct folio *folio = page_folio(page);
1158 	int ret = 0;
1159 
1160 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1161 	    !PageUptodate(page)) {
1162 		ret = btrfs_read_folio(NULL, folio);
1163 		if (ret)
1164 			return ret;
1165 		lock_page(page);
1166 		if (!PageUptodate(page)) {
1167 			unlock_page(page);
1168 			return -EIO;
1169 		}
1170 
1171 		/*
1172 		 * Since btrfs_read_folio() will unlock the folio before it
1173 		 * returns, there is a window where btrfs_release_folio() can be
1174 		 * called to release the page.  Here we check both inode
1175 		 * mapping and PagePrivate() to make sure the page was not
1176 		 * released.
1177 		 *
1178 		 * The private flag check is essential for subpage as we need
1179 		 * to store extra bitmap using page->private.
1180 		 */
1181 		if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
1182 			unlock_page(page);
1183 			return -EAGAIN;
1184 		}
1185 	}
1186 	return 0;
1187 }
1188 
1189 static unsigned int get_prepare_fgp_flags(bool nowait)
1190 {
1191 	unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
1192 
1193 	if (nowait)
1194 		fgp_flags |= FGP_NOWAIT;
1195 
1196 	return fgp_flags;
1197 }
1198 
1199 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
1200 {
1201 	gfp_t gfp;
1202 
1203 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
1204 	if (nowait) {
1205 		gfp &= ~__GFP_DIRECT_RECLAIM;
1206 		gfp |= GFP_NOWAIT;
1207 	}
1208 
1209 	return gfp;
1210 }
1211 
1212 /*
 * This just gets pages into the page cache and locks them down.
1214  */
1215 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1216 				  size_t num_pages, loff_t pos,
1217 				  size_t write_bytes, bool force_uptodate,
1218 				  bool nowait)
1219 {
1220 	int i;
1221 	unsigned long index = pos >> PAGE_SHIFT;
1222 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
1223 	unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
1224 	int err = 0;
1225 	int faili;
1226 
1227 	for (i = 0; i < num_pages; i++) {
1228 again:
1229 		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
1230 					      fgp_flags, mask | __GFP_WRITE);
1231 		if (!pages[i]) {
1232 			faili = i - 1;
1233 			if (nowait)
1234 				err = -EAGAIN;
1235 			else
1236 				err = -ENOMEM;
1237 			goto fail;
1238 		}
1239 
1240 		err = set_page_extent_mapped(pages[i]);
1241 		if (err < 0) {
1242 			faili = i;
1243 			goto fail;
1244 		}
1245 
1246 		if (i == 0)
1247 			err = prepare_uptodate_page(inode, pages[i], pos,
1248 						    force_uptodate);
1249 		if (!err && i == num_pages - 1)
1250 			err = prepare_uptodate_page(inode, pages[i],
1251 						    pos + write_bytes, false);
1252 		if (err) {
1253 			put_page(pages[i]);
1254 			if (!nowait && err == -EAGAIN) {
1255 				err = 0;
1256 				goto again;
1257 			}
1258 			faili = i - 1;
1259 			goto fail;
1260 		}
1261 		wait_on_page_writeback(pages[i]);
1262 	}
1263 
1264 	return 0;
1265 fail:
1266 	while (faili >= 0) {
1267 		unlock_page(pages[faili]);
1268 		put_page(pages[faili]);
1269 		faili--;
1270 	}
1271 	return err;
1272 
1273 }
1274 
1275 /*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - something went wrong
1284  */
1285 static noinline int
1286 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1287 				size_t num_pages, loff_t pos,
1288 				size_t write_bytes,
1289 				u64 *lockstart, u64 *lockend, bool nowait,
1290 				struct extent_state **cached_state)
1291 {
1292 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1293 	u64 start_pos;
1294 	u64 last_pos;
1295 	int i;
1296 	int ret = 0;
1297 
1298 	start_pos = round_down(pos, fs_info->sectorsize);
1299 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1300 
1301 	if (start_pos < inode->vfs_inode.i_size) {
1302 		struct btrfs_ordered_extent *ordered;
1303 
1304 		if (nowait) {
1305 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
1306 				for (i = 0; i < num_pages; i++) {
1307 					unlock_page(pages[i]);
1308 					put_page(pages[i]);
1309 					pages[i] = NULL;
1310 				}
1311 
1312 				return -EAGAIN;
1313 			}
1314 		} else {
1315 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
1316 		}
1317 
1318 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1319 						     last_pos - start_pos + 1);
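		/*
		 * If a running ordered extent overlaps our range, unlock
		 * everything, wait for it to complete and retry, so that the
		 * copy cannot race with ordered I/O on the same pages.
		 */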
1320 		if (ordered &&
1321 		    ordered->file_offset + ordered->num_bytes > start_pos &&
1322 		    ordered->file_offset <= last_pos) {
1323 			unlock_extent(&inode->io_tree, start_pos, last_pos,
1324 				      cached_state);
1325 			for (i = 0; i < num_pages; i++) {
1326 				unlock_page(pages[i]);
1327 				put_page(pages[i]);
1328 			}
1329 			btrfs_start_ordered_extent(ordered, 1);
1330 			btrfs_put_ordered_extent(ordered);
1331 			return -EAGAIN;
1332 		}
1333 		if (ordered)
1334 			btrfs_put_ordered_extent(ordered);
1335 
1336 		*lockstart = start_pos;
1337 		*lockend = last_pos;
1338 		ret = 1;
1339 	}
1340 
1341 	/*
1342 	 * We should be called after prepare_pages() which should have locked
1343 	 * all pages in the range.
1344 	 */
1345 	for (i = 0; i < num_pages; i++)
1346 		WARN_ON(!PageLocked(pages[i]));
1347 
1348 	return ret;
1349 }
1350 
1351 /*
 * Check if we can do a nocow write into the range [@pos, @pos + @write_bytes)
1353  *
1354  * @pos:         File offset.
1355  * @write_bytes: The length to write, will be updated to the nocow writeable
1356  *               range.
1357  *
1358  * This function will flush ordered extents in the range to ensure proper
1359  * nocow checks.
1360  *
1361  * Return:
1362  * > 0          If we can nocow, and updates @write_bytes.
1363  *  0           If we can't do a nocow write.
 * -EAGAIN      If we can't do a nocow write because snapshotting of the inode's
1365  *              root is in progress.
1366  * < 0          If an error happened.
1367  *
1368  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
1369  */
1370 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1371 			   size_t *write_bytes, bool nowait)
1372 {
1373 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1374 	struct btrfs_root *root = inode->root;
1375 	u64 lockstart, lockend;
1376 	u64 num_bytes;
1377 	int ret;
1378 
1379 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1380 		return 0;
1381 
1382 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1383 		return -EAGAIN;
1384 
1385 	lockstart = round_down(pos, fs_info->sectorsize);
1386 	lockend = round_up(pos + *write_bytes,
1387 			   fs_info->sectorsize) - 1;
1388 	num_bytes = lockend - lockstart + 1;
1389 
1390 	if (nowait) {
1391 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend)) {
1392 			btrfs_drew_write_unlock(&root->snapshot_lock);
1393 			return -EAGAIN;
1394 		}
1395 	} else {
1396 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL);
1397 	}
1398 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1399 			NULL, NULL, NULL, nowait, false);
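	/*
	 * can_nocow_extent() may have trimmed num_bytes down to just the part
	 * of the extent starting at lockstart that can be written nocow.
	 */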
1400 	if (ret <= 0)
1401 		btrfs_drew_write_unlock(&root->snapshot_lock);
1402 	else
		*write_bytes = min_t(size_t, *write_bytes,
1404 				     num_bytes - pos + lockstart);
1405 	unlock_extent(&inode->io_tree, lockstart, lockend, NULL);
1406 
1407 	return ret;
1408 }
1409 
1410 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1411 {
1412 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1413 }
1414 
1415 static void update_time_for_write(struct inode *inode)
1416 {
1417 	struct timespec64 now;
1418 
1419 	if (IS_NOCMTIME(inode))
1420 		return;
1421 
1422 	now = current_time(inode);
1423 	if (!timespec64_equal(&inode->i_mtime, &now))
1424 		inode->i_mtime = now;
1425 
1426 	if (!timespec64_equal(&inode->i_ctime, &now))
1427 		inode->i_ctime = now;
1428 
1429 	if (IS_I_VERSION(inode))
1430 		inode_inc_iversion(inode);
1431 }
1432 
1433 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1434 			     size_t count)
1435 {
1436 	struct file *file = iocb->ki_filp;
1437 	struct inode *inode = file_inode(file);
1438 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1439 	loff_t pos = iocb->ki_pos;
1440 	int ret;
1441 	loff_t oldsize;
1442 	loff_t start_pos;
1443 
1444 	/*
1445 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1446 	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really NOCOW into the target range (using
1448 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1449 	 */
1450 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1451 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1452 		return -EAGAIN;
1453 
1454 	current->backing_dev_info = inode_to_bdi(inode);
1455 	ret = file_remove_privs(file);
1456 	if (ret)
1457 		return ret;
1458 
1459 	/*
1460 	 * We reserve space for updating the inode when we reserve space for the
1461 	 * extent we are going to write, so we will enospc out there.  We don't
1462 	 * need to start yet another transaction to update the inode as we will
1463 	 * update the inode when we finish writing whatever data we write.
1464 	 */
1465 	update_time_for_write(inode);
1466 
1467 	start_pos = round_down(pos, fs_info->sectorsize);
1468 	oldsize = i_size_read(inode);
1469 	if (start_pos > oldsize) {
1470 		/* Expand hole size to cover write data, preventing empty gap */
1471 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1472 
1473 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1474 		if (ret) {
1475 			current->backing_dev_info = NULL;
1476 			return ret;
1477 		}
1478 	}
1479 
1480 	return 0;
1481 }
1482 
1483 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1484 					       struct iov_iter *i)
1485 {
1486 	struct file *file = iocb->ki_filp;
1487 	loff_t pos;
1488 	struct inode *inode = file_inode(file);
1489 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1490 	struct page **pages = NULL;
1491 	struct extent_changeset *data_reserved = NULL;
1492 	u64 release_bytes = 0;
1493 	u64 lockstart;
1494 	u64 lockend;
1495 	size_t num_written = 0;
1496 	int nrptrs;
1497 	ssize_t ret;
1498 	bool only_release_metadata = false;
1499 	bool force_page_uptodate = false;
1500 	loff_t old_isize = i_size_read(inode);
1501 	unsigned int ilock_flags = 0;
1502 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1503 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1504 
1505 	if (nowait)
1506 		ilock_flags |= BTRFS_ILOCK_TRY;
1507 
1508 	ret = btrfs_inode_lock(inode, ilock_flags);
1509 	if (ret < 0)
1510 		return ret;
1511 
1512 	ret = generic_write_checks(iocb, i);
1513 	if (ret <= 0)
1514 		goto out;
1515 
1516 	ret = btrfs_write_check(iocb, i, ret);
1517 	if (ret < 0)
1518 		goto out;
1519 
1520 	pos = iocb->ki_pos;
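	/*
	 * Size the pages array: enough pointers to cover the whole iov,
	 * capped at one page worth of pointers and at the dirty page
	 * throttling limit, but always at least 8.
	 */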
1521 	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1522 			PAGE_SIZE / (sizeof(struct page *)));
1523 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1524 	nrptrs = max(nrptrs, 8);
1525 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1526 	if (!pages) {
1527 		ret = -ENOMEM;
1528 		goto out;
1529 	}
1530 
1531 	while (iov_iter_count(i) > 0) {
1532 		struct extent_state *cached_state = NULL;
1533 		size_t offset = offset_in_page(pos);
1534 		size_t sector_offset;
1535 		size_t write_bytes = min(iov_iter_count(i),
1536 					 nrptrs * (size_t)PAGE_SIZE -
1537 					 offset);
1538 		size_t num_pages;
1539 		size_t reserve_bytes;
1540 		size_t dirty_pages;
1541 		size_t copied;
1542 		size_t dirty_sectors;
1543 		size_t num_sectors;
1544 		int extents_locked;
1545 
1546 		/*
		 * Fault in pages before locking them in prepare_pages() to
		 * avoid a recursive lock.
1549 		 */
1550 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1551 			ret = -EFAULT;
1552 			break;
1553 		}
1554 
1555 		only_release_metadata = false;
1556 		sector_offset = pos & (fs_info->sectorsize - 1);
1557 
1558 		extent_changeset_release(data_reserved);
1559 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1560 						  &data_reserved, pos,
1561 						  write_bytes, nowait);
1562 		if (ret < 0) {
1563 			int can_nocow;
1564 
1565 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1566 				ret = -EAGAIN;
1567 				break;
1568 			}
1569 
1570 			/*
1571 			 * If we don't have to COW at the offset, reserve
1572 			 * metadata only. write_bytes may get smaller than
1573 			 * requested here.
1574 			 */
1575 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1576 							   &write_bytes, nowait);
1577 			if (can_nocow < 0)
1578 				ret = can_nocow;
1579 			if (can_nocow > 0)
1580 				ret = 0;
1581 			if (ret)
1582 				break;
1583 			only_release_metadata = true;
1584 		}
1585 
1586 		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1587 		WARN_ON(num_pages > nrptrs);
1588 		reserve_bytes = round_up(write_bytes + sector_offset,
1589 					 fs_info->sectorsize);
1590 		WARN_ON(reserve_bytes == 0);
1591 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1592 						      reserve_bytes,
1593 						      reserve_bytes, nowait);
1594 		if (ret) {
1595 			if (!only_release_metadata)
1596 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1597 						data_reserved, pos,
1598 						write_bytes);
1599 			else
1600 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1601 			break;
1602 		}
1603 
1604 		release_bytes = reserve_bytes;
1605 again:
1606 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1607 		if (ret)
1608 			break;
1609 
1610 		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
1614 		 */
1615 		ret = prepare_pages(inode, pages, num_pages,
1616 				    pos, write_bytes, force_page_uptodate, false);
1617 		if (ret) {
1618 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1619 						       reserve_bytes);
1620 			break;
1621 		}
1622 
1623 		extents_locked = lock_and_cleanup_extent_if_need(
1624 				BTRFS_I(inode), pages,
1625 				num_pages, pos, write_bytes, &lockstart,
1626 				&lockend, nowait, &cached_state);
1627 		if (extents_locked < 0) {
1628 			if (!nowait && extents_locked == -EAGAIN)
1629 				goto again;
1630 
1631 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1632 						       reserve_bytes);
1633 			ret = extents_locked;
1634 			break;
1635 		}
1636 
1637 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1638 
1639 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1640 		dirty_sectors = round_up(copied + sector_offset,
1641 					fs_info->sectorsize);
1642 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1643 
1644 		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
1647 		 */
1648 		if (copied < write_bytes)
1649 			nrptrs = 1;
1650 
1651 		if (copied == 0) {
1652 			force_page_uptodate = true;
1653 			dirty_sectors = 0;
1654 			dirty_pages = 0;
1655 		} else {
1656 			force_page_uptodate = false;
1657 			dirty_pages = DIV_ROUND_UP(copied + offset,
1658 						   PAGE_SIZE);
1659 		}
1660 
1661 		if (num_sectors > dirty_sectors) {
1662 			/* release everything except the sectors we dirtied */
1663 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1664 			if (only_release_metadata) {
1665 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1666 							release_bytes, true);
1667 			} else {
1668 				u64 __pos;
1669 
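				/*
				 * __pos is the file offset just past the
				 * dirtied pages; release the data reservation
				 * for the remainder of the range.
				 */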
1670 				__pos = round_down(pos,
1671 						   fs_info->sectorsize) +
1672 					(dirty_pages << PAGE_SHIFT);
1673 				btrfs_delalloc_release_space(BTRFS_I(inode),
1674 						data_reserved, __pos,
1675 						release_bytes, true);
1676 			}
1677 		}
1678 
1679 		release_bytes = round_up(copied + sector_offset,
1680 					fs_info->sectorsize);
1681 
1682 		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1683 					dirty_pages, pos, copied,
1684 					&cached_state, only_release_metadata);
1685 
1686 		/*
1687 		 * If we have not locked the extent range, because the range's
1688 		 * start offset is >= i_size, we might still have a non-NULL
1689 		 * cached extent state, acquired while marking the extent range
1690 		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1691 		 * possible cached extent state to avoid a memory leak.
1692 		 */
1693 		if (extents_locked)
1694 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1695 				      lockend, &cached_state);
1696 		else
1697 			free_extent_state(cached_state);
1698 
1699 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1700 		if (ret) {
1701 			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1702 			break;
1703 		}
1704 
1705 		release_bytes = 0;
1706 		if (only_release_metadata)
1707 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1708 
1709 		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1710 
1711 		cond_resched();
1712 
1713 		pos += copied;
1714 		num_written += copied;
1715 	}
1716 
1717 	kfree(pages);
1718 
1719 	if (release_bytes) {
1720 		if (only_release_metadata) {
1721 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1722 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1723 					release_bytes, true);
1724 		} else {
1725 			btrfs_delalloc_release_space(BTRFS_I(inode),
1726 					data_reserved,
1727 					round_down(pos, fs_info->sectorsize),
1728 					release_bytes, true);
1729 		}
1730 	}
1731 
1732 	extent_changeset_free(data_reserved);
1733 	if (num_written > 0) {
1734 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1735 		iocb->ki_pos += num_written;
1736 	}
1737 out:
1738 	btrfs_inode_unlock(inode, ilock_flags);
1739 	return num_written ? num_written : ret;
1740 }
1741 
1742 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1743 			       const struct iov_iter *iter, loff_t offset)
1744 {
1745 	const u32 blocksize_mask = fs_info->sectorsize - 1;
1746 
1747 	if (offset & blocksize_mask)
1748 		return -EINVAL;
1749 
1750 	if (iov_iter_alignment(iter) & blocksize_mask)
1751 		return -EINVAL;
1752 
1753 	return 0;
1754 }
1755 
1756 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1757 {
1758 	struct file *file = iocb->ki_filp;
1759 	struct inode *inode = file_inode(file);
1760 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1761 	loff_t pos;
1762 	ssize_t written = 0;
1763 	ssize_t written_buffered;
1764 	size_t prev_left = 0;
1765 	loff_t endbyte;
1766 	ssize_t err;
1767 	unsigned int ilock_flags = 0;
1768 
1769 	if (iocb->ki_flags & IOCB_NOWAIT)
1770 		ilock_flags |= BTRFS_ILOCK_TRY;
1771 
	/* If the DIO write is within EOF, use a shared lock */
1773 	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1774 		ilock_flags |= BTRFS_ILOCK_SHARED;
1775 
1776 relock:
1777 	err = btrfs_inode_lock(inode, ilock_flags);
1778 	if (err < 0)
1779 		return err;
1780 
1781 	err = generic_write_checks(iocb, from);
1782 	if (err <= 0) {
1783 		btrfs_inode_unlock(inode, ilock_flags);
1784 		return err;
1785 	}
1786 
1787 	err = btrfs_write_check(iocb, from, err);
1788 	if (err < 0) {
1789 		btrfs_inode_unlock(inode, ilock_flags);
1790 		goto out;
1791 	}
1792 
1793 	pos = iocb->ki_pos;
1794 	/*
	 * Re-check since the file size may have changed just before taking
	 * the lock, or pos may have changed because of O_APPEND in
	 * generic_write_checks().
1797 	 */
1798 	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1799 	    pos + iov_iter_count(from) > i_size_read(inode)) {
1800 		btrfs_inode_unlock(inode, ilock_flags);
1801 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1802 		goto relock;
1803 	}
1804 
1805 	if (check_direct_IO(fs_info, from, pos)) {
1806 		btrfs_inode_unlock(inode, ilock_flags);
1807 		goto buffered;
1808 	}
1809 
1810 	/*
1811 	 * The iov_iter can be mapped to the same file range we are writing to.
1812 	 * If that's the case, then we will deadlock in the iomap code, because
1813 	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
1814 	 * an ordered extent, and after that it will fault in the pages that the
1815 	 * iov_iter refers to. During the fault in we end up in the readahead
1816 	 * pages code (starting at btrfs_readahead()), which will lock the range,
1817 	 * find that ordered extent and then wait for it to complete (at
1818 	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1819 	 * obviously the ordered extent can never complete as we didn't submit
1820 	 * yet the respective bio(s). This always happens when the buffer is
1821 	 * memory mapped to the same file range, since the iomap DIO code always
1822 	 * invalidates pages in the target file range (after starting and waiting
1823 	 * for any writeback).
1824 	 *
1825 	 * So here we disable page faults in the iov_iter and then retry if we
1826 	 * got -EFAULT, faulting in the pages before the retry.
1827 	 */
1828 again:
1829 	from->nofault = true;
1830 	err = btrfs_dio_rw(iocb, from, written);
1831 	from->nofault = false;
1832 
1833 	/* No increment (+=) because iomap returns a cumulative value. */
1834 	if (err > 0)
1835 		written = err;
1836 
1837 	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
1838 		const size_t left = iov_iter_count(from);
1839 		/*
1840 		 * We have more data left to write. Try to fault in as many of the
1841 		 * remaining pages as possible and retry. We do this without
1842 		 * releasing and re-locking the inode, to prevent races with
1843 		 * truncate.
1844 		 *
1845 		 * Also, in case the iov refers to pages in the file range of the
1846 		 * file we want to write to (due to a mmap), we could enter an
1847 		 * infinite loop if we retry after faulting the pages in, since
1848 		 * iomap will invalidate any pages in the range early on, before
1849 		 * it tries to fault in the pages of the iov. So we keep track of
1850 		 * how much of the iov was left after the previous EFAULT and fall
1851 		 * back to buffered IO in case we haven't made any progress.
1852 		 */
1853 		if (left == prev_left) {
1854 			err = -ENOTBLK;
1855 		} else {
1856 			fault_in_iov_iter_readable(from, left);
1857 			prev_left = left;
1858 			goto again;
1859 		}
1860 	}
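	/*
	 * Worked example of the progress check above (hypothetical values):
	 * a first pass returns -EFAULT with 8192 bytes left and prev_left 0,
	 * so we fault the pages in and retry. If the retry leaves the same
	 * 8192 bytes (e.g. the iov is mmapped over the target range and iomap
	 * invalidated those pages), left == prev_left and we give up with
	 * -ENOTBLK, falling back to buffered IO below.
	 */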
1861 
1862 	btrfs_inode_unlock(inode, ilock_flags);
1863 
1864 	/*
1865 	 * If 'err' is -ENOTBLK or we have not written all data, then it means
1866 	 * we must fall back to buffered IO.
1867 	 */
1868 	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
1869 		goto out;
1870 
1871 buffered:
1872 	/*
1873 	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
1874 	 * it must retry the operation in a context where blocking is acceptable,
1875 	 * since we currently don't have NOWAIT semantics support for buffered IO
1876 	 * and may block there for many reasons (reserving space for example).
1877 	 */
1878 	if (iocb->ki_flags & IOCB_NOWAIT) {
1879 		err = -EAGAIN;
1880 		goto out;
1881 	}
1882 
1883 	pos = iocb->ki_pos;
1884 	written_buffered = btrfs_buffered_write(iocb, from);
1885 	if (written_buffered < 0) {
1886 		err = written_buffered;
1887 		goto out;
1888 	}
1889 	/*
1890 	 * Ensure all data is persisted. We want the next direct IO read to be
1891 	 * able to read what was just written.
1892 	 */
1893 	endbyte = pos + written_buffered - 1;
1894 	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1895 	if (err)
1896 		goto out;
1897 	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1898 	if (err)
1899 		goto out;
1900 	written += written_buffered;
1901 	iocb->ki_pos = pos + written_buffered;
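	/*
	 * Drop the pages the buffered fallback dirtied from the page cache,
	 * to preserve the expected O_DIRECT semantics: the data was written
	 * back above, so the cached copies are no longer needed (best effort,
	 * the return value of invalidate_mapping_pages() is ignored).
	 */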
1902 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1903 				 endbyte >> PAGE_SHIFT);
1904 out:
1905 	return err < 0 ? err : written;
1906 }
1907 
1908 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1909 			const struct btrfs_ioctl_encoded_io_args *encoded)
1910 {
1911 	struct file *file = iocb->ki_filp;
1912 	struct inode *inode = file_inode(file);
1913 	loff_t count;
1914 	ssize_t ret;
1915 
1916 	btrfs_inode_lock(inode, 0);
1917 	count = encoded->len;
1918 	ret = generic_write_checks_count(iocb, &count);
1919 	if (ret == 0 && count != encoded->len) {
1920 		/*
1921 		 * The write got truncated by generic_write_checks_count(). We
1922 		 * can't do a partial encoded write.
1923 		 */
1924 		ret = -EFBIG;
1925 	}
1926 	if (ret || encoded->len == 0)
1927 		goto out;
1928 
1929 	ret = btrfs_write_check(iocb, from, encoded->len);
1930 	if (ret < 0)
1931 		goto out;
1932 
1933 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1934 out:
1935 	btrfs_inode_unlock(inode, 0);
1936 	return ret;
1937 }
1938 
1939 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1940 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1941 {
1942 	struct file *file = iocb->ki_filp;
1943 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1944 	ssize_t num_written, num_sync;
1945 	const bool sync = iocb_is_dsync(iocb);
1946 
1947 	/*
1948 	 * If the fs flips readonly due to some impossible error, although we
1949 	 * have opened a file as writable, we have to stop this write operation
1950 	 * to ensure consistency.
1951 	 */
1952 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1953 		return -EROFS;
1954 
1955 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1956 		return -EOPNOTSUPP;
1957 
1958 	if (sync)
1959 		atomic_inc(&inode->sync_writers);
1960 
1961 	if (encoded) {
1962 		num_written = btrfs_encoded_write(iocb, from, encoded);
1963 		num_sync = encoded->len;
1964 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1965 		num_written = btrfs_direct_write(iocb, from);
1966 		num_sync = num_written;
1967 	} else {
1968 		num_written = btrfs_buffered_write(iocb, from);
1969 		num_sync = num_written;
1970 	}
1971 
1972 	btrfs_set_inode_last_sub_trans(inode);
1973 
1974 	if (num_sync > 0) {
1975 		num_sync = generic_write_sync(iocb, num_sync);
1976 		if (num_sync < 0)
1977 			num_written = num_sync;
1978 	}
1979 
1980 	if (sync)
1981 		atomic_dec(&inode->sync_writers);
1982 
1983 	current->backing_dev_info = NULL;
1984 	return num_written;
1985 }
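/*
 * Note on the accounting above: an encoded write is never partial (see
 * btrfs_encoded_write()), so the number of bytes handed to
 * generic_write_sync() is the full encoded->len, while for direct and
 * buffered writes it is whatever was actually written.
 */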
1986 
1987 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1988 {
1989 	return btrfs_do_write_iter(iocb, from, NULL);
1990 }
1991 
1992 int btrfs_release_file(struct inode *inode, struct file *filp)
1993 {
1994 	struct btrfs_file_private *private = filp->private_data;
1995 
1996 	if (private)
1997 		kfree(private->filldir_buf);
1998 	kfree(private);
1999 	filp->private_data = NULL;
2000 
2001 	/*
2002 	 * Set by setattr when we are about to truncate a file from a non-zero
2003 	 * size to a zero size.  This tries to flush down new bytes that may
2004 	 * have been written if the application were using truncate to replace
2005 	 * a file in place.
2006 	 */
2007 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2008 			       &BTRFS_I(inode)->runtime_flags))
2009 		filemap_flush(inode->i_mapping);
2010 	return 0;
2011 }
2012 
2013 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2014 {
2015 	int ret;
2016 	struct blk_plug plug;
2017 
2018 	/*
2019 	 * This is only called in fsync, which would do synchronous writes, so
2020 	 * a plug can merge adjacent IOs as much as possible.  Especially when
2021 	 * multiple disks use a RAID profile, a large IO can be split into
2022 	 * several segments of stripe length (currently 64K).
2023 	 */
2024 	blk_start_plug(&plug);
2025 	atomic_inc(&BTRFS_I(inode)->sync_writers);
2026 	ret = btrfs_fdatawrite_range(inode, start, end);
2027 	atomic_dec(&BTRFS_I(inode)->sync_writers);
2028 	blk_finish_plug(&plug);
2029 
2030 	return ret;
2031 }
2032 
2033 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2034 {
2035 	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2036 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2037 
2038 	if (btrfs_inode_in_log(inode, fs_info->generation) &&
2039 	    list_empty(&ctx->ordered_extents))
2040 		return true;
2041 
2042 	/*
2043 	 * If we are doing a fast fsync we cannot bail out if the inode's
2044 	 * last_trans is <= the last committed transaction, because we only
2045 	 * update the last_trans of the inode during ordered extent completion,
2046 	 * and for a fast fsync we don't wait for that, we only wait for the
2047 	 * writeback to complete.
2048 	 */
2049 	if (inode->last_trans <= fs_info->last_trans_committed &&
2050 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2051 	     list_empty(&ctx->ordered_extents)))
2052 		return true;
2053 
2054 	return false;
2055 }
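/*
 * Illustrative example (transaction numbers are hypothetical): if the
 * inode was last changed in transaction 100, last_trans_committed is 105
 * and the log context has no ordered extents, the second check above
 * succeeds and btrfs_sync_file() skips logging the inode entirely.
 */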
2056 
2057 /*
2058  * fsync call for both files and directories.  This logs the inode into
2059  * the tree log instead of forcing full commits whenever possible.
2060  *
2061  * It needs to call filemap_fdatawait so that all the ordered extent updates
2062  * in the metadata btree are up to date for copying to the log.
2063  *
2064  * It drops the inode mutex before doing the tree log commit.  This is an
2065  * important optimization for directories because holding the mutex prevents
2066  * new operations on the dir while we write to disk.
2067  */
2068 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2069 {
2070 	struct dentry *dentry = file_dentry(file);
2071 	struct inode *inode = d_inode(dentry);
2072 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2073 	struct btrfs_root *root = BTRFS_I(inode)->root;
2074 	struct btrfs_trans_handle *trans;
2075 	struct btrfs_log_ctx ctx;
2076 	int ret = 0, err;
2077 	u64 len;
2078 	bool full_sync;
2079 
2080 	trace_btrfs_sync_file(file, datasync);
2081 
2082 	btrfs_init_log_ctx(&ctx, inode);
2083 
2084 	/*
2085 	 * Always set the range to a full range, otherwise we can get into
2086 	 * several problems, from missing file extent items to represent holes
2087 	 * when not using the NO_HOLES feature, to log tree corruption due to
2088 	 * races between hole detection during logging and completion of ordered
2089 	 * extents outside the range, to missing checksums due to ordered extents
2090 	 * for which we flushed only a subset of their pages.
2091 	 */
2092 	start = 0;
2093 	end = LLONG_MAX;
2094 	len = (u64)LLONG_MAX + 1;
2095 
2096 	/*
2097 	 * Write the dirty pages in the range and wait for them to complete
2098 	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
2099 	 * pages concurrently and improve performance.  See
2100 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2101 	 */
2102 	ret = start_ordered_ops(inode, start, end);
2103 	if (ret)
2104 		goto out;
2105 
2106 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2107 
2108 	atomic_inc(&root->log_batch);
2109 
2110 	/*
2111 	 * Before we acquired the inode's lock and the mmap lock, someone may
2112 	 * have dirtied more pages in the target range. We need to make sure
2113 	 * that writeback for any such pages does not start while we are logging
2114 	 * the inode, because if it does, any of the following might happen when
2115 	 * we are not doing a full inode sync:
2116 	 *
2117 	 * 1) We log an extent after its writeback finishes but before its
2118 	 *    checksums are added to the csum tree, leading to -EIO errors
2119 	 *    when attempting to read the extent after a log replay.
2120 	 *
2121 	 * 2) We can end up logging an extent before its writeback finishes.
2122 	 *    Therefore after the log replay we will have a file extent item
2123 	 *    pointing to an unwritten extent (and no data checksums as well).
2124 	 *
2125 	 * So trigger writeback for any eventual new dirty pages and then we
2126 	 * wait for all ordered extents to complete below.
2127 	 */
2128 	ret = start_ordered_ops(inode, start, end);
2129 	if (ret) {
2130 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2131 		goto out;
2132 	}
2133 
2134 	/*
2135 	 * Always check for the full sync flag while holding the inode's lock,
2136 	 * to avoid races with other tasks. The flag must either stay set for
2137 	 * the whole time we are logging or stay off the whole time.
2138 	 * We check the flag here after starting delalloc above, because when
2139 	 * running delalloc the full sync flag may be set if we need to drop
2140 	 * extra extent map ranges due to temporary memory allocation failures.
2141 	 */
2142 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2143 			     &BTRFS_I(inode)->runtime_flags);
2144 
2145 	/*
2146 	 * We have to do this here to avoid the priority inversion of waiting on
2147 	 * IO of a lower priority task while holding a transaction open.
2148 	 *
2149 	 * For a full fsync we wait for the ordered extents to complete while
2150 	 * for a fast fsync we wait just for writeback to complete, and then
2151 	 * attach the ordered extents to the transaction so that a transaction
2152 	 * commit waits for their completion, to avoid data loss if we fsync,
2153 	 * the current transaction commits before the ordered extents complete
2154 	 * and a power failure happens right after that.
2155 	 *
2156 	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2157 	 * logical address recorded in the ordered extent may change. We need
2158 	 * to wait for the IO to stabilize the logical address.
2159 	 */
2160 	if (full_sync || btrfs_is_zoned(fs_info)) {
2161 		ret = btrfs_wait_ordered_range(inode, start, len);
2162 	} else {
2163 		/*
2164 		 * Get our ordered extents as soon as possible to avoid doing
2165 		 * checksum lookups in the csum tree, and use instead the
2166 		 * checksums attached to the ordered extents.
2167 		 */
2168 		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2169 						      &ctx.ordered_extents);
2170 		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2171 	}
2172 
2173 	if (ret)
2174 		goto out_release_extents;
2175 
2176 	atomic_inc(&root->log_batch);
2177 
2178 	smp_mb();
2179 	if (skip_inode_logging(&ctx)) {
2180 		/*
2181 		 * We've had everything committed since the last time we were
2182 		 * modified so clear this flag in case it was set for whatever
2183 		 * reason, it's no longer relevant.
2184 		 */
2185 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2186 			  &BTRFS_I(inode)->runtime_flags);
2187 		/*
2188 		 * An ordered extent might have started before and completed
2189 		 * already with io errors, in which case the inode was not
2190 		 * updated and we end up here. So check the inode's mapping
2191 	 * for any errors that might have happened since the last time we
2192 	 * checked, i.e. since the last call to fsync.
2193 		 */
2194 		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2195 		goto out_release_extents;
2196 	}
2197 
2198 	/*
2199 	 * We use start here because we will need to wait on the IO to complete
2200 	 * in btrfs_sync_log, which could require joining a transaction (for
2201 	 * example checking cross references in the nocow path).  If we use join
2202 	 * here we could get into a situation where we're waiting on IO to
2203 	 * happen that is blocked on a transaction trying to commit.  With start
2204 	 * we inc the extwriter counter, so we wait for all extwriters to exit
2205 	 * before we start blocking joiners.  This comment is to keep somebody
2206 	 * from thinking they are super smart and changing this to
2207 	 * btrfs_join_transaction *cough*Josef*cough*.
2208 	 */
2209 	trans = btrfs_start_transaction(root, 0);
2210 	if (IS_ERR(trans)) {
2211 		ret = PTR_ERR(trans);
2212 		goto out_release_extents;
2213 	}
2214 	trans->in_fsync = true;
2215 
2216 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2217 	btrfs_release_log_ctx_extents(&ctx);
2218 	if (ret < 0) {
2219 		/* Fallthrough and commit/free transaction. */
2220 		ret = BTRFS_LOG_FORCE_COMMIT;
2221 	}
2222 
2223 	/* we've logged all the items and now have a consistent
2224 	 * version of the file in the log.  It is possible that
2225 	 * someone will come in and modify the file, but that's
2226 	 * fine because the log is consistent on disk, and we
2227 	 * have references to all of the file's extents
2228 	 *
2229 	 * It is possible that someone will come in and log the
2230 	 * file again, but that will end up using the synchronization
2231 	 * inside btrfs_sync_log to keep things safe.
2232 	 */
2233 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2234 
2235 	if (ret == BTRFS_NO_LOG_SYNC) {
2236 		ret = btrfs_end_transaction(trans);
2237 		goto out;
2238 	}
2239 
2240 	/* We successfully logged the inode, attempt to sync the log. */
2241 	if (!ret) {
2242 		ret = btrfs_sync_log(trans, root, &ctx);
2243 		if (!ret) {
2244 			ret = btrfs_end_transaction(trans);
2245 			goto out;
2246 		}
2247 	}
2248 
2249 	/*
2250 	 * At this point we need to commit the transaction because we had
2251 	 * btrfs_need_log_full_commit() or some other error.
2252 	 *
2253 	 * If we didn't do a full sync we have to stop the trans handle, wait on
2254 	 * the ordered extents, start it again and commit the transaction.  If
2255 	 * we attempt to wait on the ordered extents here we could deadlock with
2256 	 * something like fallocate() that is holding the extent lock trying to
2257 	 * start a transaction while some other thread is trying to commit the
2258 	 * transaction while we (fsync) are currently holding the transaction
2259 	 * open.
2260 	 */
2261 	if (!full_sync) {
2262 		ret = btrfs_end_transaction(trans);
2263 		if (ret)
2264 			goto out;
2265 		ret = btrfs_wait_ordered_range(inode, start, len);
2266 		if (ret)
2267 			goto out;
2268 
2269 		/*
2270 		 * This is safe to use here because we're only interested in
2271 		 * making sure the transaction that had the ordered extents is
2272 		 * committed.  We aren't waiting on anything past this point,
2273 		 * we're purely getting the transaction and committing it.
2274 		 */
2275 		trans = btrfs_attach_transaction_barrier(root);
2276 		if (IS_ERR(trans)) {
2277 			ret = PTR_ERR(trans);
2278 
2279 			/*
2280 			 * We committed the transaction and there's no currently
2281 			 * running transaction, this means everything we care
2282 			 * about made it to disk and we are done.
2283 			 */
2284 			if (ret == -ENOENT)
2285 				ret = 0;
2286 			goto out;
2287 		}
2288 	}
2289 
2290 	ret = btrfs_commit_transaction(trans);
2291 out:
2292 	ASSERT(list_empty(&ctx.list));
2293 	ASSERT(list_empty(&ctx.conflict_inodes));
2294 	err = file_check_and_advance_wb_err(file);
2295 	if (!ret)
2296 		ret = err;
2297 	return ret > 0 ? -EIO : ret;
2298 
2299 out_release_extents:
2300 	btrfs_release_log_ctx_extents(&ctx);
2301 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2302 	goto out;
2303 }
2304 
2305 static const struct vm_operations_struct btrfs_file_vm_ops = {
2306 	.fault		= filemap_fault,
2307 	.map_pages	= filemap_map_pages,
2308 	.page_mkwrite	= btrfs_page_mkwrite,
2309 };
2310 
2311 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2312 {
2313 	struct address_space *mapping = filp->f_mapping;
2314 
2315 	if (!mapping->a_ops->read_folio)
2316 		return -ENOEXEC;
2317 
2318 	file_accessed(filp);
2319 	vma->vm_ops = &btrfs_file_vm_ops;
2320 
2321 	return 0;
2322 }
2323 
2324 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2325 			  int slot, u64 start, u64 end)
2326 {
2327 	struct btrfs_file_extent_item *fi;
2328 	struct btrfs_key key;
2329 
2330 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2331 		return 0;
2332 
2333 	btrfs_item_key_to_cpu(leaf, &key, slot);
2334 	if (key.objectid != btrfs_ino(inode) ||
2335 	    key.type != BTRFS_EXTENT_DATA_KEY)
2336 		return 0;
2337 
2338 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2339 
2340 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2341 		return 0;
2342 
2343 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2344 		return 0;
2345 
2346 	if (key.offset == end)
2347 		return 1;
2348 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2349 		return 1;
2350 	return 0;
2351 }
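/*
 * In short, hole_mergeable() returns 1 only for a regular file extent item
 * of this inode whose disk_bytenr is 0 (an existing hole) and which either
 * starts at 'end' or ends at 'start', so that fill_holes() below can grow
 * the existing item instead of inserting a new one.
 */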
2352 
2353 static int fill_holes(struct btrfs_trans_handle *trans,
2354 		struct btrfs_inode *inode,
2355 		struct btrfs_path *path, u64 offset, u64 end)
2356 {
2357 	struct btrfs_fs_info *fs_info = trans->fs_info;
2358 	struct btrfs_root *root = inode->root;
2359 	struct extent_buffer *leaf;
2360 	struct btrfs_file_extent_item *fi;
2361 	struct extent_map *hole_em;
2362 	struct btrfs_key key;
2363 	int ret;
2364 
2365 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2366 		goto out;
2367 
2368 	key.objectid = btrfs_ino(inode);
2369 	key.type = BTRFS_EXTENT_DATA_KEY;
2370 	key.offset = offset;
2371 
2372 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2373 	if (ret <= 0) {
2374 		/*
2375 		 * We should have dropped this offset, so if we find it then
2376 		 * something has gone horribly wrong.
2377 		 */
2378 		if (ret == 0)
2379 			ret = -EINVAL;
2380 		return ret;
2381 	}
2382 
2383 	leaf = path->nodes[0];
2384 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2385 		u64 num_bytes;
2386 
2387 		path->slots[0]--;
2388 		fi = btrfs_item_ptr(leaf, path->slots[0],
2389 				    struct btrfs_file_extent_item);
2390 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2391 			end - offset;
2392 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2393 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2394 		btrfs_set_file_extent_offset(leaf, fi, 0);
2395 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2396 		btrfs_mark_buffer_dirty(leaf);
2397 		goto out;
2398 	}
2399 
2400 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2401 		u64 num_bytes;
2402 
2403 		key.offset = offset;
2404 		btrfs_set_item_key_safe(fs_info, path, &key);
2405 		fi = btrfs_item_ptr(leaf, path->slots[0],
2406 				    struct btrfs_file_extent_item);
2407 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2408 			offset;
2409 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2410 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2411 		btrfs_set_file_extent_offset(leaf, fi, 0);
2412 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2413 		btrfs_mark_buffer_dirty(leaf);
2414 		goto out;
2415 	}
2416 	btrfs_release_path(path);
2417 
2418 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2419 				       end - offset);
2420 	if (ret)
2421 		return ret;
2422 
2423 out:
2424 	btrfs_release_path(path);
2425 
2426 	hole_em = alloc_extent_map();
2427 	if (!hole_em) {
2428 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2429 		btrfs_set_inode_full_sync(inode);
2430 	} else {
2431 		hole_em->start = offset;
2432 		hole_em->len = end - offset;
2433 		hole_em->ram_bytes = hole_em->len;
2434 		hole_em->orig_start = offset;
2435 
2436 		hole_em->block_start = EXTENT_MAP_HOLE;
2437 		hole_em->block_len = 0;
2438 		hole_em->orig_block_len = 0;
2439 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2440 		hole_em->generation = trans->transid;
2441 
2442 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2443 		free_extent_map(hole_em);
2444 		if (ret)
2445 			btrfs_set_inode_full_sync(inode);
2446 	}
2447 
2448 	return 0;
2449 }
2450 
2451 /*
2452  * Find a hole extent on the given inode and change start/len to the end of
2453  * the hole extent (a hole/vacuum extent whose em->start <= start &&
2454  *	   em->start + em->len > start).
2455  * When a hole extent is found, return 1 and modify start/len.
2456  */
2457 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2458 {
2459 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2460 	struct extent_map *em;
2461 	int ret = 0;
2462 
2463 	em = btrfs_get_extent(inode, NULL, 0,
2464 			      round_down(*start, fs_info->sectorsize),
2465 			      round_up(*len, fs_info->sectorsize));
2466 	if (IS_ERR(em))
2467 		return PTR_ERR(em);
2468 
2469 	/* Hole or vacuum extent (the latter only exists in NO_HOLES mode) */
2470 	if (em->block_start == EXTENT_MAP_HOLE) {
2471 		ret = 1;
2472 		*len = em->start + em->len > *start + *len ?
2473 		       0 : *start + *len - em->start - em->len;
2474 		*start = em->start + em->len;
2475 	}
2476 	free_extent_map(em);
2477 	return ret;
2478 }
2479 
2480 static void btrfs_punch_hole_lock_range(struct inode *inode,
2481 					const u64 lockstart,
2482 					const u64 lockend,
2483 					struct extent_state **cached_state)
2484 {
2485 	/*
2486 	 * For the subpage case, if the range is not at a page boundary, we
2487 	 * could have pages at the leading/trailing part of the range.
2488 	 * This could lead to an infinite loop since filemap_range_has_page()
2489 	 * would always return true.
2490 	 * So here we need to do extra page alignment for
2491 	 * filemap_range_has_page().
2492 	 */
2493 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2494 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
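	/*
	 * Example of the alignment above (hypothetical values, 4K pages):
	 * lockstart = 2048 and lockend = 10239 give page_lockstart = 4096
	 * and page_lockend = 8191, so filemap_range_has_page() below only
	 * considers pages fully inside the locked range.
	 */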
2495 
2496 	while (1) {
2497 		truncate_pagecache_range(inode, lockstart, lockend);
2498 
2499 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2500 			    cached_state);
2501 		/*
2502 		 * We can't have ordered extents in the range, nor dirty/writeback
2503 		 * pages, because we have locked the inode's VFS lock in exclusive
2504 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2505 		 * we have flushed all delalloc in the range and we have waited
2506 		 * for any ordered extents in the range to complete.
2507 		 * We can race with anyone reading pages from this range, so after
2508 		 * locking the range check if we have pages in the range, and if
2509 		 * we do, unlock the range and retry.
2510 		 */
2511 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2512 					    page_lockend))
2513 			break;
2514 
2515 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2516 			      cached_state);
2517 	}
2518 
2519 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2520 }
2521 
2522 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2523 				     struct btrfs_inode *inode,
2524 				     struct btrfs_path *path,
2525 				     struct btrfs_replace_extent_info *extent_info,
2526 				     const u64 replace_len,
2527 				     const u64 bytes_to_drop)
2528 {
2529 	struct btrfs_fs_info *fs_info = trans->fs_info;
2530 	struct btrfs_root *root = inode->root;
2531 	struct btrfs_file_extent_item *extent;
2532 	struct extent_buffer *leaf;
2533 	struct btrfs_key key;
2534 	int slot;
2535 	struct btrfs_ref ref = { 0 };
2536 	int ret;
2537 
2538 	if (replace_len == 0)
2539 		return 0;
2540 
2541 	if (extent_info->disk_offset == 0 &&
2542 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2543 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2544 		return 0;
2545 	}
2546 
2547 	key.objectid = btrfs_ino(inode);
2548 	key.type = BTRFS_EXTENT_DATA_KEY;
2549 	key.offset = extent_info->file_offset;
2550 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2551 				      sizeof(struct btrfs_file_extent_item));
2552 	if (ret)
2553 		return ret;
2554 	leaf = path->nodes[0];
2555 	slot = path->slots[0];
2556 	write_extent_buffer(leaf, extent_info->extent_buf,
2557 			    btrfs_item_ptr_offset(leaf, slot),
2558 			    sizeof(struct btrfs_file_extent_item));
2559 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2560 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2561 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2562 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2563 	if (extent_info->is_new_extent)
2564 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2565 	btrfs_mark_buffer_dirty(leaf);
2566 	btrfs_release_path(path);
2567 
2568 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2569 						replace_len);
2570 	if (ret)
2571 		return ret;
2572 
2573 	/* If it's a hole, nothing more needs to be done. */
2574 	if (extent_info->disk_offset == 0) {
2575 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2576 		return 0;
2577 	}
2578 
2579 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2580 
2581 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2582 		key.objectid = extent_info->disk_offset;
2583 		key.type = BTRFS_EXTENT_ITEM_KEY;
2584 		key.offset = extent_info->disk_len;
2585 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2586 						       btrfs_ino(inode),
2587 						       extent_info->file_offset,
2588 						       extent_info->qgroup_reserved,
2589 						       &key);
2590 	} else {
2591 		u64 ref_offset;
2592 
2593 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2594 				       extent_info->disk_offset,
2595 				       extent_info->disk_len, 0);
2596 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2597 		btrfs_init_data_ref(&ref, root->root_key.objectid,
2598 				    btrfs_ino(inode), ref_offset, 0, false);
2599 		ret = btrfs_inc_extent_ref(trans, &ref);
2600 	}
2601 
2602 	extent_info->insertions++;
2603 
2604 	return ret;
2605 }
2606 
2607 /*
2608  * The respective range must have been previously locked, as well as the inode.
2609  * The end offset is inclusive (last byte of the range).
2610  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2611  * the file range with an extent.
2612  * When not punching a hole, we don't want to end up in a state where we dropped
2613  * extents without inserting a new one, so we must abort the transaction to avoid
2614  * a corruption.
2615  */
2616 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2617 			       struct btrfs_path *path, const u64 start,
2618 			       const u64 end,
2619 			       struct btrfs_replace_extent_info *extent_info,
2620 			       struct btrfs_trans_handle **trans_out)
2621 {
2622 	struct btrfs_drop_extents_args drop_args = { 0 };
2623 	struct btrfs_root *root = inode->root;
2624 	struct btrfs_fs_info *fs_info = root->fs_info;
2625 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2626 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2627 	struct btrfs_trans_handle *trans = NULL;
2628 	struct btrfs_block_rsv *rsv;
2629 	unsigned int rsv_count;
2630 	u64 cur_offset;
2631 	u64 len = end - start;
2632 	int ret = 0;
2633 
2634 	if (end <= start)
2635 		return -EINVAL;
2636 
2637 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2638 	if (!rsv) {
2639 		ret = -ENOMEM;
2640 		goto out;
2641 	}
2642 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2643 	rsv->failfast = true;
2644 
2645 	/*
2646 	 * 1 - update the inode
2647 	 * 1 - removing the extents in the range
2648 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2649 	 *     replacing the range with a new extent
2650 	 */
2651 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2652 		rsv_count = 3;
2653 	else
2654 		rsv_count = 2;
2655 
2656 	trans = btrfs_start_transaction(root, rsv_count);
2657 	if (IS_ERR(trans)) {
2658 		ret = PTR_ERR(trans);
2659 		trans = NULL;
2660 		goto out_free;
2661 	}
2662 
2663 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2664 				      min_size, false);
2665 	if (WARN_ON(ret))
2666 		goto out_trans;
2667 	trans->block_rsv = rsv;
2668 
2669 	cur_offset = start;
2670 	drop_args.path = path;
2671 	drop_args.end = end + 1;
2672 	drop_args.drop_cache = true;
2673 	while (cur_offset < end) {
2674 		drop_args.start = cur_offset;
2675 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2676 		/* If we are punching a hole decrement the inode's byte count */
2677 		if (!extent_info)
2678 			btrfs_update_inode_bytes(inode, 0,
2679 						 drop_args.bytes_found);
2680 		if (ret != -ENOSPC) {
2681 			/*
2682 			 * The only time we don't want to abort is if we are
2683 			 * attempting to clone a partial inline extent, in which
2684 			 * case we'll get EOPNOTSUPP.  However if we aren't doing a
2685 			 * clone we need to abort no matter what, because if we
2686 			 * got EOPNOTSUPP via prealloc then we messed up and
2687 			 * need to abort.
2688 			 */
2689 			if (ret &&
2690 			    (ret != -EOPNOTSUPP ||
2691 			     (extent_info && extent_info->is_new_extent)))
2692 				btrfs_abort_transaction(trans, ret);
2693 			break;
2694 		}
2695 
2696 		trans->block_rsv = &fs_info->trans_block_rsv;
2697 
2698 		if (!extent_info && cur_offset < drop_args.drop_end &&
2699 		    cur_offset < ino_size) {
2700 			ret = fill_holes(trans, inode, path, cur_offset,
2701 					 drop_args.drop_end);
2702 			if (ret) {
2703 				/*
2704 				 * If we failed then we didn't insert our hole
2705 				 * entries for the area we dropped, so now the
2706 				 * fs is corrupted and we must abort the
2707 				 * transaction.
2708 				 */
2709 				btrfs_abort_transaction(trans, ret);
2710 				break;
2711 			}
2712 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2713 			/*
2714 			 * We are past the i_size here, but since we didn't
2715 			 * insert holes we need to clear the mapped area so we
2716 			 * know to not set disk_i_size in this area until a new
2717 			 * file extent is inserted here.
2718 			 */
2719 			ret = btrfs_inode_clear_file_extent_range(inode,
2720 					cur_offset,
2721 					drop_args.drop_end - cur_offset);
2722 			if (ret) {
2723 				/*
2724 				 * We couldn't clear our area, so we could
2725 				 * presumably adjust up and corrupt the fs, so
2726 				 * we need to abort.
2727 				 */
2728 				btrfs_abort_transaction(trans, ret);
2729 				break;
2730 			}
2731 		}
2732 
2733 		if (extent_info &&
2734 		    drop_args.drop_end > extent_info->file_offset) {
2735 			u64 replace_len = drop_args.drop_end -
2736 					  extent_info->file_offset;
2737 
2738 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2739 					extent_info, replace_len,
2740 					drop_args.bytes_found);
2741 			if (ret) {
2742 				btrfs_abort_transaction(trans, ret);
2743 				break;
2744 			}
2745 			extent_info->data_len -= replace_len;
2746 			extent_info->data_offset += replace_len;
2747 			extent_info->file_offset += replace_len;
2748 		}
2749 
2750 		/*
2751 		 * We are releasing our handle on the transaction, balance the
2752 		 * dirty pages of the btree inode and flush delayed items, and
2753 		 * then get a new transaction handle, which may now point to a
2754 		 * new transaction in case someone else may have committed the
2755 		 * transaction we used to replace/drop file extent items. So
2756 		 * bump the inode's iversion and update mtime and ctime except
2757 		 * if we are called from a dedupe context. This is because a
2758 		 * power failure/crash may happen after the transaction is
2759 		 * committed and before we finish replacing/dropping all the
2760 		 * file extent items we need.
2761 		 */
2762 		inode_inc_iversion(&inode->vfs_inode);
2763 
2764 		if (!extent_info || extent_info->update_times) {
2765 			inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode);
2766 			inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime;
2767 		}
2768 
2769 		ret = btrfs_update_inode(trans, root, inode);
2770 		if (ret)
2771 			break;
2772 
2773 		btrfs_end_transaction(trans);
2774 		btrfs_btree_balance_dirty(fs_info);
2775 
2776 		trans = btrfs_start_transaction(root, rsv_count);
2777 		if (IS_ERR(trans)) {
2778 			ret = PTR_ERR(trans);
2779 			trans = NULL;
2780 			break;
2781 		}
2782 
2783 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2784 					      rsv, min_size, false);
2785 		if (WARN_ON(ret))
2786 			break;
2787 		trans->block_rsv = rsv;
2788 
2789 		cur_offset = drop_args.drop_end;
2790 		len = end - cur_offset;
2791 		if (!extent_info && len) {
2792 			ret = find_first_non_hole(inode, &cur_offset, &len);
2793 			if (unlikely(ret < 0))
2794 				break;
2795 			if (ret && !len) {
2796 				ret = 0;
2797 				break;
2798 			}
2799 		}
2800 	}
2801 
2802 	/*
2803 	 * If we were cloning, force the next fsync to be a full one since we
2804 	 * replaced (or just dropped in the case of cloning holes when
2805 	 * NO_HOLES is enabled) file extent items and did not setup new extent
2806 	 * maps for the replacement extents (or holes).
2807 	 */
2808 	if (extent_info && !extent_info->is_new_extent)
2809 		btrfs_set_inode_full_sync(inode);
2810 
2811 	if (ret)
2812 		goto out_trans;
2813 
2814 	trans->block_rsv = &fs_info->trans_block_rsv;
2815 	/*
2816 	 * If we are using the NO_HOLES feature we might already have had a
2817 	 * hole that overlaps a part of the region [lockstart, lockend] and
2818 	 * ends at (or beyond) lockend. Since we have no file extent items to
2819 	 * represent holes, drop_end can be less than lockend and so we must
2820 	 * make sure we have an extent map representing the existing hole (the
2821 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2822 	 * map representing the existing hole), otherwise the fast fsync path
2823 	 * will not record the existence of the hole region
2824 	 * [existing_hole_start, lockend].
2825 	 */
2826 	if (drop_args.drop_end <= end)
2827 		drop_args.drop_end = end + 1;
2828 	/*
2829 	 * Don't insert file hole extent item if it's for a range beyond eof
2830 	 * (because it's useless) or if it represents a 0 bytes range (when
2831 	 * cur_offset == drop_args.drop_end).
2832 	 */
2833 	if (!extent_info && cur_offset < ino_size &&
2834 	    cur_offset < drop_args.drop_end) {
2835 		ret = fill_holes(trans, inode, path, cur_offset,
2836 				 drop_args.drop_end);
2837 		if (ret) {
2838 			/* Same comment as above. */
2839 			btrfs_abort_transaction(trans, ret);
2840 			goto out_trans;
2841 		}
2842 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2843 		/* See the comment in the loop above for the reasoning here. */
2844 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2845 					drop_args.drop_end - cur_offset);
2846 		if (ret) {
2847 			btrfs_abort_transaction(trans, ret);
2848 			goto out_trans;
2849 		}
2851 	}
2852 	if (extent_info) {
2853 		ret = btrfs_insert_replace_extent(trans, inode, path,
2854 				extent_info, extent_info->data_len,
2855 				drop_args.bytes_found);
2856 		if (ret) {
2857 			btrfs_abort_transaction(trans, ret);
2858 			goto out_trans;
2859 		}
2860 	}
2861 
2862 out_trans:
2863 	if (!trans)
2864 		goto out_free;
2865 
2866 	trans->block_rsv = &fs_info->trans_block_rsv;
2867 	if (ret)
2868 		btrfs_end_transaction(trans);
2869 	else
2870 		*trans_out = trans;
2871 out_free:
2872 	btrfs_free_block_rsv(fs_info, rsv);
2873 out:
2874 	return ret;
2875 }
2876 
2877 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2878 {
2879 	struct inode *inode = file_inode(file);
2880 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2881 	struct btrfs_root *root = BTRFS_I(inode)->root;
2882 	struct extent_state *cached_state = NULL;
2883 	struct btrfs_path *path;
2884 	struct btrfs_trans_handle *trans = NULL;
2885 	u64 lockstart;
2886 	u64 lockend;
2887 	u64 tail_start;
2888 	u64 tail_len;
2889 	u64 orig_start = offset;
2890 	int ret = 0;
2891 	bool same_block;
2892 	u64 ino_size;
2893 	bool truncated_block = false;
2894 	bool updated_inode = false;
2895 
2896 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2897 
2898 	ret = btrfs_wait_ordered_range(inode, offset, len);
2899 	if (ret)
2900 		goto out_only_mutex;
2901 
2902 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2903 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2904 	if (ret < 0)
2905 		goto out_only_mutex;
2906 	if (ret && !len) {
2907 		/* Already in a large hole */
2908 		ret = 0;
2909 		goto out_only_mutex;
2910 	}
2911 
2912 	ret = file_modified(file);
2913 	if (ret)
2914 		goto out_only_mutex;
2915 
2916 	lockstart = round_up(offset, fs_info->sectorsize);
2917 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
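	/*
	 * Example (hypothetical values, 4K sectorsize): punching at offset
	 * 1000 with len 10000 yields lockstart = 4096 and lockend = 8191,
	 * covering only whole blocks; the unaligned head and tail are zeroed
	 * with btrfs_truncate_block() below instead of being dropped.
	 */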
2918 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2919 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2920 	/*
2921 	 * We needn't truncate any block which is beyond the end of the file,
2922 	 * because we are sure there is no data there. So only truncate the
2923 	 * block in place if the range is within a single block and does not
2924 	 * cover that entire block.
2925 	 */
2928 	if (same_block && len < fs_info->sectorsize) {
2929 		if (offset < ino_size) {
2930 			truncated_block = true;
2931 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2932 						   0);
2933 		} else {
2934 			ret = 0;
2935 		}
2936 		goto out_only_mutex;
2937 	}
2938 
2939 	/* zero back part of the first block */
2940 	if (offset < ino_size) {
2941 		truncated_block = true;
2942 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2943 		if (ret) {
2944 			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2945 			return ret;
2946 		}
2947 	}
2948 
2949 	/* Check the aligned pages after the first unaligned page if
2950 	 * offset == orig_start. If offset != orig_start, the first unaligned
2951 	 * page and several following pages are already holes, so the extra
2952 	 * check can be skipped. */
2953 	if (offset == orig_start) {
2954 		/* after truncate page, check hole again */
2955 		len = offset + len - lockstart;
2956 		offset = lockstart;
2957 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2958 		if (ret < 0)
2959 			goto out_only_mutex;
2960 		if (ret && !len) {
2961 			ret = 0;
2962 			goto out_only_mutex;
2963 		}
2964 		lockstart = offset;
2965 	}
2966 
2967 	/* Check the tail unaligned part is in a hole */
2968 	tail_start = lockend + 1;
2969 	tail_len = offset + len - tail_start;
2970 	if (tail_len) {
2971 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2972 		if (unlikely(ret < 0))
2973 			goto out_only_mutex;
2974 		if (!ret) {
2975 			/* zero the front end of the last page */
2976 			if (tail_start + tail_len < ino_size) {
2977 				truncated_block = true;
2978 				ret = btrfs_truncate_block(BTRFS_I(inode),
2979 							tail_start + tail_len,
2980 							0, 1);
2981 				if (ret)
2982 					goto out_only_mutex;
2983 			}
2984 		}
2985 	}
2986 
2987 	if (lockend < lockstart) {
2988 		ret = 0;
2989 		goto out_only_mutex;
2990 	}
2991 
2992 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2993 
2994 	path = btrfs_alloc_path();
2995 	if (!path) {
2996 		ret = -ENOMEM;
2997 		goto out;
2998 	}
2999 
3000 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
3001 					 lockend, NULL, &trans);
3002 	btrfs_free_path(path);
3003 	if (ret)
3004 		goto out;
3005 
3006 	ASSERT(trans != NULL);
3007 	inode_inc_iversion(inode);
3008 	inode->i_mtime = current_time(inode);
3009 	inode->i_ctime = inode->i_mtime;
3010 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3011 	updated_inode = true;
3012 	btrfs_end_transaction(trans);
3013 	btrfs_btree_balance_dirty(fs_info);
3014 out:
3015 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3016 		      &cached_state);
3017 out_only_mutex:
3018 	if (!updated_inode && truncated_block && !ret) {
3019 		/*
3020 		 * If we only end up zeroing part of a page, we still need to
3021 		 * update the inode item, so that all the time fields are
3022 		 * updated as well as the necessary btrfs inode in memory fields
3023 		 * for detecting, at fsync time, if the inode isn't yet in the
3024 		 * log tree or it's there but not up to date.
3025 		 */
3026 		struct timespec64 now = current_time(inode);
3027 
3028 		inode_inc_iversion(inode);
3029 		inode->i_mtime = now;
3030 		inode->i_ctime = now;
3031 		trans = btrfs_start_transaction(root, 1);
3032 		if (IS_ERR(trans)) {
3033 			ret = PTR_ERR(trans);
3034 		} else {
3035 			int ret2;
3036 
3037 			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3038 			ret2 = btrfs_end_transaction(trans);
3039 			if (!ret)
3040 				ret = ret2;
3041 		}
3042 	}
3043 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3044 	return ret;
3045 }
3046 
3047 /* Helper structure to record which range is already reserved */
3048 struct falloc_range {
3049 	struct list_head list;
3050 	u64 start;
3051 	u64 len;
3052 };
3053 
3054 /*
3055  * Helper function to add falloc range
3056  *
3057  * Caller should have locked the larger range of extent containing
3058  * [start, len)
3059  * [start, start + len).
3060 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3061 {
3062 	struct falloc_range *range = NULL;
3063 
3064 	if (!list_empty(head)) {
3065 		/*
3066 		 * As fallocate iterates by bytenr order, we only need to check
3067 		 * the last range.
3068 		 */
3069 		range = list_last_entry(head, struct falloc_range, list);
3070 		if (range->start + range->len == start) {
3071 			range->len += len;
3072 			return 0;
3073 		}
3074 	}
3075 
3076 	range = kmalloc(sizeof(*range), GFP_KERNEL);
3077 	if (!range)
3078 		return -ENOMEM;
3079 	range->start = start;
3080 	range->len = len;
3081 	list_add_tail(&range->list, head);
3082 	return 0;
3083 }
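/*
 * Example of the merge above (hypothetical values): adding [0, 4K) and then
 * [4K, 8K) leaves a single list entry covering [0, 8K), since fallocate
 * walks the file forward and adjacent ranges share a boundary.
 */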
3084 
3085 static int btrfs_fallocate_update_isize(struct inode *inode,
3086 					const u64 end,
3087 					const int mode)
3088 {
3089 	struct btrfs_trans_handle *trans;
3090 	struct btrfs_root *root = BTRFS_I(inode)->root;
3091 	int ret;
3092 	int ret2;
3093 
3094 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3095 		return 0;
3096 
3097 	trans = btrfs_start_transaction(root, 1);
3098 	if (IS_ERR(trans))
3099 		return PTR_ERR(trans);
3100 
3101 	inode->i_ctime = current_time(inode);
3102 	i_size_write(inode, end);
3103 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3104 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3105 	ret2 = btrfs_end_transaction(trans);
3106 
3107 	return ret ? ret : ret2;
3108 }
3109 
3110 enum {
3111 	RANGE_BOUNDARY_WRITTEN_EXTENT,
3112 	RANGE_BOUNDARY_PREALLOC_EXTENT,
3113 	RANGE_BOUNDARY_HOLE,
3114 };
3115 
3116 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3117 						 u64 offset)
3118 {
3119 	const u64 sectorsize = inode->root->fs_info->sectorsize;
3120 	struct extent_map *em;
3121 	int ret;
3122 
3123 	offset = round_down(offset, sectorsize);
3124 	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3125 	if (IS_ERR(em))
3126 		return PTR_ERR(em);
3127 
3128 	if (em->block_start == EXTENT_MAP_HOLE)
3129 		ret = RANGE_BOUNDARY_HOLE;
3130 	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3131 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3132 	else
3133 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3134 
3135 	free_extent_map(em);
3136 	return ret;
3137 }
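/*
 * How btrfs_zero_range() below uses the three boundary states: a HOLE
 * boundary extends the allocation range to cover the partial block, a
 * WRITTEN boundary gets its partial block zeroed in place with
 * btrfs_truncate_block(), and a PREALLOC boundary needs no extra work.
 */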
3138 
3139 static int btrfs_zero_range(struct inode *inode,
3140 			    loff_t offset,
3141 			    loff_t len,
3142 			    const int mode)
3143 {
3144 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3145 	struct extent_map *em;
3146 	struct extent_changeset *data_reserved = NULL;
3147 	int ret;
3148 	u64 alloc_hint = 0;
3149 	const u64 sectorsize = fs_info->sectorsize;
3150 	u64 alloc_start = round_down(offset, sectorsize);
3151 	u64 alloc_end = round_up(offset + len, sectorsize);
3152 	u64 bytes_to_reserve = 0;
3153 	bool space_reserved = false;
3154 
3155 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3156 			      alloc_end - alloc_start);
3157 	if (IS_ERR(em)) {
3158 		ret = PTR_ERR(em);
3159 		goto out;
3160 	}
3161 
3162 	/*
3163 	 * Avoid hole punching and extent allocation for some cases. More cases
3164 	 * could be considered, but these are unlikely to be common and we keep things
3165 	 * as simple as possible for now. Also, intentionally, if the target
3166 	 * range contains one or more prealloc extents together with regular
3167 	 * extents and holes, we drop all the existing extents and allocate a
3168 	 * new prealloc extent, so that we get a larger contiguous disk extent.
3169 	 */
3170 	if (em->start <= alloc_start &&
3171 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3172 		const u64 em_end = em->start + em->len;
3173 
3174 		if (em_end >= offset + len) {
3175 			/*
3176 			 * The whole range is already a prealloc extent,
3177 			 * do nothing except updating the inode's i_size if
3178 			 * needed.
3179 			 */
3180 			free_extent_map(em);
3181 			ret = btrfs_fallocate_update_isize(inode, offset + len,
3182 							   mode);
3183 			goto out;
3184 		}
3185 		/*
3186 		 * Part of the range is already a prealloc extent, so operate
3187 		 * only on the remaining part of the range.
3188 		 */
3189 		alloc_start = em_end;
3190 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3191 		len = offset + len - alloc_start;
3192 		offset = alloc_start;
3193 		alloc_hint = em->block_start + em->len;
3194 	}
3195 	free_extent_map(em);
3196 
3197 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3198 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3199 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3200 				      sectorsize);
3201 		if (IS_ERR(em)) {
3202 			ret = PTR_ERR(em);
3203 			goto out;
3204 		}
3205 
3206 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3207 			free_extent_map(em);
3208 			ret = btrfs_fallocate_update_isize(inode, offset + len,
3209 							   mode);
3210 			goto out;
3211 		}
3212 		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3213 			free_extent_map(em);
3214 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3215 						   0);
3216 			if (!ret)
3217 				ret = btrfs_fallocate_update_isize(inode,
3218 								   offset + len,
3219 								   mode);
3220 			return ret;
3221 		}
3222 		free_extent_map(em);
3223 		alloc_start = round_down(offset, sectorsize);
3224 		alloc_end = alloc_start + sectorsize;
3225 		goto reserve_space;
3226 	}
3227 
3228 	alloc_start = round_up(offset, sectorsize);
3229 	alloc_end = round_down(offset + len, sectorsize);
3230 
3231 	/*
3232 	 * For unaligned ranges, check the pages at the boundaries, they might
3233 	 * map to an extent, in which case we need to partially zero them, or
3234 	 * they might map to a hole, in which case we need our allocation range
3235 	 * to cover them.
3236 	 */
3237 	if (!IS_ALIGNED(offset, sectorsize)) {
3238 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3239 							    offset);
3240 		if (ret < 0)
3241 			goto out;
3242 		if (ret == RANGE_BOUNDARY_HOLE) {
3243 			alloc_start = round_down(offset, sectorsize);
3244 			ret = 0;
3245 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3246 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3247 			if (ret)
3248 				goto out;
3249 		} else {
3250 			ret = 0;
3251 		}
3252 	}
3253 
3254 	if (!IS_ALIGNED(offset + len, sectorsize)) {
3255 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3256 							    offset + len);
3257 		if (ret < 0)
3258 			goto out;
3259 		if (ret == RANGE_BOUNDARY_HOLE) {
3260 			alloc_end = round_up(offset + len, sectorsize);
3261 			ret = 0;
3262 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3263 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3264 						   0, 1);
3265 			if (ret)
3266 				goto out;
3267 		} else {
3268 			ret = 0;
3269 		}
3270 	}
3271 
3272 reserve_space:
3273 	if (alloc_start < alloc_end) {
3274 		struct extent_state *cached_state = NULL;
3275 		const u64 lockstart = alloc_start;
3276 		const u64 lockend = alloc_end - 1;
3277 
3278 		bytes_to_reserve = alloc_end - alloc_start;
3279 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3280 						      bytes_to_reserve);
3281 		if (ret < 0)
3282 			goto out;
3283 		space_reserved = true;
3284 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3285 					    &cached_state);
3286 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3287 						alloc_start, bytes_to_reserve);
3288 		if (ret) {
3289 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3290 				      lockend, &cached_state);
3291 			goto out;
3292 		}
3293 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3294 						alloc_end - alloc_start,
3295 						i_blocksize(inode),
3296 						offset + len, &alloc_hint);
3297 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3298 			      &cached_state);
3299 		/* btrfs_prealloc_file_range releases reserved space on error */
3300 		if (ret) {
3301 			space_reserved = false;
3302 			goto out;
3303 		}
3304 	}
3305 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3306 out:
3307 	if (ret && space_reserved)
3308 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3309 					       alloc_start, bytes_to_reserve);
3310 	extent_changeset_free(data_reserved);
3311 
3312 	return ret;
3313 }
3314 
3315 static long btrfs_fallocate(struct file *file, int mode,
3316 			    loff_t offset, loff_t len)
3317 {
3318 	struct inode *inode = file_inode(file);
3319 	struct extent_state *cached_state = NULL;
3320 	struct extent_changeset *data_reserved = NULL;
3321 	struct falloc_range *range;
3322 	struct falloc_range *tmp;
3323 	struct list_head reserve_list;
3324 	u64 cur_offset;
3325 	u64 last_byte;
3326 	u64 alloc_start;
3327 	u64 alloc_end;
3328 	u64 alloc_hint = 0;
3329 	u64 locked_end;
3330 	u64 actual_end = 0;
3331 	u64 data_space_needed = 0;
3332 	u64 data_space_reserved = 0;
3333 	u64 qgroup_reserved = 0;
3334 	struct extent_map *em;
3335 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3336 	int ret;
3337 
3338 	/* Do not allow fallocate in ZONED mode */
3339 	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3340 		return -EOPNOTSUPP;
3341 
3342 	alloc_start = round_down(offset, blocksize);
3343 	alloc_end = round_up(offset + len, blocksize);
3344 	cur_offset = alloc_start;
3345 
3346 	/* Make sure we aren't being given some crap mode */
3347 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3348 		     FALLOC_FL_ZERO_RANGE))
3349 		return -EOPNOTSUPP;
3350 
3351 	if (mode & FALLOC_FL_PUNCH_HOLE)
3352 		return btrfs_punch_hole(file, offset, len);
3353 
3354 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3355 
3356 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3357 		ret = inode_newsize_ok(inode, offset + len);
3358 		if (ret)
3359 			goto out;
3360 	}
3361 
3362 	ret = file_modified(file);
3363 	if (ret)
3364 		goto out;
3365 
3366 	/*
3367 	 * TODO: Move these two operations after we have checked
3368 	 * accurate reserved space, or fallocate can still fail but
3369 	 * with page truncated or size expanded.
3370 	 *
3371 	 * But that's a minor problem and won't do much harm BTW.
3372 	 */
3373 	if (alloc_start > inode->i_size) {
3374 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3375 					alloc_start);
3376 		if (ret)
3377 			goto out;
3378 	} else if (offset + len > inode->i_size) {
3379 		/*
3380 		 * If we are fallocating from the end of the file onward we
3381 		 * need to zero out the end of the block if i_size lands in the
3382 		 * middle of a block.
3383 		 */
3384 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3385 		if (ret)
3386 			goto out;
3387 	}
3388 
3389 	/*
3390 	 * We have locked the inode at the VFS level (in exclusive mode) and we
3391 	 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3392 	 * locking the file range, flush all delalloc in the range and wait for
3393 	 * all ordered extents in the range to complete. After this we can lock
3394 	 * the file range and, due to the previous locking we did, we know there
3395 	 * can't be more delalloc or ordered extents in the range.
3396 	 */
3397 	ret = btrfs_wait_ordered_range(inode, alloc_start,
3398 				       alloc_end - alloc_start);
3399 	if (ret)
3400 		goto out;
3401 
3402 	if (mode & FALLOC_FL_ZERO_RANGE) {
3403 		ret = btrfs_zero_range(inode, offset, len, mode);
3404 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3405 		return ret;
3406 	}
3407 
3408 	locked_end = alloc_end - 1;
3409 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3410 		    &cached_state);
3411 
3412 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3413 
3414 	/* First, check if we exceed the qgroup limit */
3415 	INIT_LIST_HEAD(&reserve_list);
3416 	while (cur_offset < alloc_end) {
3417 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3418 				      alloc_end - cur_offset);
3419 		if (IS_ERR(em)) {
3420 			ret = PTR_ERR(em);
3421 			break;
3422 		}
3423 		last_byte = min(extent_map_end(em), alloc_end);
3424 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3425 		last_byte = ALIGN(last_byte, blocksize);
3426 		if (em->block_start == EXTENT_MAP_HOLE ||
3427 		    (cur_offset >= inode->i_size &&
3428 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3429 			const u64 range_len = last_byte - cur_offset;
3430 
3431 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3432 			if (ret < 0) {
3433 				free_extent_map(em);
3434 				break;
3435 			}
3436 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3437 					&data_reserved, cur_offset, range_len);
3438 			if (ret < 0) {
3439 				free_extent_map(em);
3440 				break;
3441 			}
3442 			qgroup_reserved += range_len;
3443 			data_space_needed += range_len;
3444 		}
3445 		free_extent_map(em);
3446 		cur_offset = last_byte;
3447 	}
3448 
3449 	if (!ret && data_space_needed > 0) {
3450 		/*
3451 		 * We are safe to reserve space here as we can't have delalloc
3452 		 * in the range, see above.
3453 		 */
3454 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3455 						      data_space_needed);
3456 		if (!ret)
3457 			data_space_reserved = data_space_needed;
3458 	}
3459 
3460 	/*
3461 	 * If ret is still 0, it means we're OK to fallocate.
3462 	 * Otherwise just clean up the list and exit.
3463 	 */
3464 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3465 		if (!ret) {
3466 			ret = btrfs_prealloc_file_range(inode, mode,
3467 					range->start,
3468 					range->len, i_blocksize(inode),
3469 					offset + len, &alloc_hint);
3470 			/*
3471 			 * btrfs_prealloc_file_range() releases space even
3472 			 * if it returns an error.
3473 			 */
3474 			data_space_reserved -= range->len;
3475 			qgroup_reserved -= range->len;
3476 		} else if (data_space_reserved > 0) {
3477 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3478 					       data_reserved, range->start,
3479 					       range->len);
3480 			data_space_reserved -= range->len;
3481 			qgroup_reserved -= range->len;
3482 		} else if (qgroup_reserved > 0) {
3483 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3484 					       range->start, range->len);
3485 			qgroup_reserved -= range->len;
3486 		}
3487 		list_del(&range->list);
3488 		kfree(range);
3489 	}
3490 	if (ret < 0)
3491 		goto out_unlock;
3492 
3493 	/*
3494 	 * We didn't need to allocate any more space, but we still extended the
3495 	 * size of the file so we need to update i_size and the inode item.
3496 	 */
3497 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3498 out_unlock:
3499 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3500 		      &cached_state);
3501 out:
3502 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3503 	extent_changeset_free(data_reserved);
3504 	return ret;
3505 }
3506 
3507 /*
3508  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3509  * that has unflushed and/or flushing delalloc. There might be other adjacent
3510  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
 * looping while it finds adjacent subranges, merging them together.
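 *
 * Example (illustrative, assuming 4K sectors): if [0, 8K) is still dirty in
 * the io tree and [8K, 16K) was just flushed (it already has an extent map,
 * but its ordered extent has not completed yet), a call with start == 0
 * merges the two and returns the subrange [0, 16K - 1] through
 * *delalloc_start_ret and *delalloc_end_ret.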
3512  */
3513 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3514 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3515 {
3516 	const u64 len = end + 1 - start;
3517 	struct extent_map_tree *em_tree = &inode->extent_tree;
3518 	struct extent_map *em;
3519 	u64 em_end;
3520 	u64 delalloc_len;
3521 
3522 	/*
3523 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3524 	 * means we have delalloc (dirty pages) for which writeback has not
3525 	 * started yet.
3526 	 */
3527 	*delalloc_start_ret = start;
3528 	delalloc_len = count_range_bits(&inode->io_tree, delalloc_start_ret, end,
3529 					len, EXTENT_DELALLOC, 1);
3530 	/*
3531 	 * If delalloc was found then *delalloc_start_ret has a sector size
3532 	 * aligned value (rounded down).
3533 	 */
3534 	if (delalloc_len > 0)
3535 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3536 
3537 	/*
3538 	 * Now also check if there's any extent map in the range that does not
3539 	 * map to a hole or prealloc extent. We do this because:
3540 	 *
3541 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3542 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map for
3543 	 *    an allocated extent. So we might just have been called after
3544 	 *    delalloc is flushed and before the ordered extent completes and
3545 	 *    inserts the new file extent item in the subvolume's btree;
3546 	 *
3547 	 * 2) We may have an extent map created by flushing delalloc for a
3548 	 *    subrange that starts before the subrange we found marked with
3549 	 *    EXTENT_DELALLOC in the io tree.
3550 	 */
3551 	read_lock(&em_tree->lock);
3552 	em = lookup_extent_mapping(em_tree, start, len);
3553 	read_unlock(&em_tree->lock);
3554 
3555 	/* extent_map_end() returns a non-inclusive end offset. */
3556 	em_end = em ? extent_map_end(em) : 0;
3557 
3558 	/*
3559 	 * If we have a hole/prealloc extent map, check the next one if this one
3560 	 * ends before our range's end.
3561 	 */
3562 	if (em && (em->block_start == EXTENT_MAP_HOLE ||
3563 		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
3564 		struct extent_map *next_em;
3565 
3566 		read_lock(&em_tree->lock);
		/* Search the remainder of the range: [em_end, end]. */
		next_em = lookup_extent_mapping(em_tree, em_end, end + 1 - em_end);
3568 		read_unlock(&em_tree->lock);
3569 
3570 		free_extent_map(em);
3571 		em_end = next_em ? extent_map_end(next_em) : 0;
3572 		em = next_em;
3573 	}
3574 
3575 	if (em && (em->block_start == EXTENT_MAP_HOLE ||
3576 		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3577 		free_extent_map(em);
3578 		em = NULL;
3579 	}
3580 
3581 	/*
3582 	 * No extent map or one for a hole or prealloc extent. Use the delalloc
3583 	 * range we found in the io tree if we have one.
3584 	 */
3585 	if (!em)
3586 		return (delalloc_len > 0);
3587 
3588 	/*
3589 	 * We don't have any range as EXTENT_DELALLOC in the io tree, so the
3590 	 * extent map is the only subrange representing delalloc.
3591 	 */
3592 	if (delalloc_len == 0) {
3593 		*delalloc_start_ret = em->start;
3594 		*delalloc_end_ret = min(end, em_end - 1);
3595 		free_extent_map(em);
3596 		return true;
3597 	}
3598 
3599 	/*
3600 	 * The extent map represents a delalloc range that starts before the
3601 	 * delalloc range we found in the io tree.
3602 	 */
3603 	if (em->start < *delalloc_start_ret) {
		/*
		 * If the ranges are adjacent, return a combined range,
		 * otherwise return only the extent map's range. Note that the
		 * adjacency check must compare against the io tree range's
		 * start before it is overwritten with the extent map's start.
		 */
		if (em_end < *delalloc_start_ret)
			*delalloc_end_ret = min(end, em_end - 1);
		*delalloc_start_ret = em->start;
3611 
3612 		free_extent_map(em);
3613 		return true;
3614 	}
3615 
3616 	/*
3617 	 * The extent map starts after the delalloc range we found in the io
3618 	 * tree. If it's adjacent, return a combined range, otherwise return
3619 	 * the range found in the io tree.
3620 	 */
3621 	if (*delalloc_end_ret + 1 == em->start)
3622 		*delalloc_end_ret = min(end, em_end - 1);
3623 
3624 	free_extent_map(em);
3625 	return true;
3626 }
3627 
3628 /*
3629  * Check if there's delalloc in a given range.
3630  *
3631  * @inode:               The inode.
3632  * @start:               The start offset of the range. It does not need to be
3633  *                       sector size aligned.
3634  * @end:                 The end offset (inclusive value) of the search range.
3635  *                       It does not need to be sector size aligned.
3636  * @delalloc_start_ret:  Output argument, set to the start offset of the
3637  *                       subrange found with delalloc (may not be sector size
3638  *                       aligned).
 * @delalloc_end_ret:    Output argument, set to the end offset (inclusive value)
3640  *                       of the subrange found with delalloc.
3641  *
3642  * Returns true if a subrange with delalloc is found within the given range, and
3643  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3644  * end offsets of the subrange.
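 *
 * Example (illustrative offsets): with delalloc subranges at [0, 64K) and
 * [64K, 128K) and a search range of [0, 1M - 1], the two adjacent subranges
 * are merged and true is returned with a range of [0, 128K - 1].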
3645  */
3646 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3647 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3648 {
3649 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3650 	u64 prev_delalloc_end = 0;
3651 	bool ret = false;
3652 
3653 	while (cur_offset < end) {
3654 		u64 delalloc_start;
3655 		u64 delalloc_end;
3656 		bool delalloc;
3657 
3658 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3659 						  &delalloc_start,
3660 						  &delalloc_end);
3661 		if (!delalloc)
3662 			break;
3663 
3664 		if (prev_delalloc_end == 0) {
3665 			/* First subrange found. */
3666 			*delalloc_start_ret = max(delalloc_start, start);
3667 			*delalloc_end_ret = delalloc_end;
3668 			ret = true;
3669 		} else if (delalloc_start == prev_delalloc_end + 1) {
3670 			/* Subrange adjacent to the previous one, merge them. */
3671 			*delalloc_end_ret = delalloc_end;
3672 		} else {
3673 			/* Subrange not adjacent to the previous one, exit. */
3674 			break;
3675 		}
3676 
3677 		prev_delalloc_end = delalloc_end;
3678 		cur_offset = delalloc_end + 1;
3679 		cond_resched();
3680 	}
3681 
3682 	return ret;
3683 }
3684 
3685 /*
3686  * Check if there's a hole or delalloc range in a range representing a hole (or
3687  * prealloc extent) found in the inode's subvolume btree.
3688  *
3689  * @inode:      The inode.
3690  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3691  * @start:      Start offset of the hole region. It does not need to be sector
3692  *              size aligned.
3693  * @end:        End offset (inclusive value) of the hole region. It does not
3694  *              need to be sector size aligned.
3695  * @start_ret:  Return parameter, used to set the start of the subrange in the
3696  *              hole that matches the search criteria (seek mode), if such
3697  *              subrange is found (return value of the function is true).
3698  *              The value returned here may not be sector size aligned.
3699  *
3700  * Returns true if a subrange matching the given seek mode is found, and if one
3701  * is found, it updates @start_ret with the start of the subrange.
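 *
 * For example (illustrative offsets): with SEEK_HOLE, @start == 0,
 * @end == 1M - 1 and delalloc at [0, 64K), the hole starts right after the
 * delalloc subrange, so @start_ret is set to 64K and true is returned.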
3702  */
3703 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3704 					u64 start, u64 end, u64 *start_ret)
3705 {
3706 	u64 delalloc_start;
3707 	u64 delalloc_end;
3708 	bool delalloc;
3709 
3710 	delalloc = btrfs_find_delalloc_in_range(inode, start, end,
3711 						&delalloc_start, &delalloc_end);
3712 	if (delalloc && whence == SEEK_DATA) {
3713 		*start_ret = delalloc_start;
3714 		return true;
3715 	}
3716 
3717 	if (delalloc && whence == SEEK_HOLE) {
3718 		/*
		 * We found delalloc but it starts after our start offset. So we
3720 		 * have a hole between our start offset and the delalloc start.
3721 		 */
3722 		if (start < delalloc_start) {
3723 			*start_ret = start;
3724 			return true;
3725 		}
3726 		/*
3727 		 * Delalloc range starts at our start offset.
3728 		 * If the delalloc range's length is smaller than our range,
3729 		 * then it means we have a hole that starts where the delalloc
3730 		 * subrange ends.
3731 		 */
3732 		if (delalloc_end < end) {
3733 			*start_ret = delalloc_end + 1;
3734 			return true;
3735 		}
3736 
3737 		/* There's delalloc for the whole range. */
3738 		return false;
3739 	}
3740 
3741 	if (!delalloc && whence == SEEK_HOLE) {
3742 		*start_ret = start;
3743 		return true;
3744 	}
3745 
3746 	/*
3747 	 * No delalloc in the range and we are seeking for data. The caller has
3748 	 * to iterate to the next extent item in the subvolume btree.
3749 	 */
3750 	return false;
3751 }
3752 
3753 static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3754 				  int whence)
3755 {
3756 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3757 	struct extent_state *cached_state = NULL;
3758 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3759 	const u64 ino = btrfs_ino(inode);
3760 	struct btrfs_root *root = inode->root;
3761 	struct btrfs_path *path;
3762 	struct btrfs_key key;
3763 	u64 last_extent_end;
3764 	u64 lockstart;
3765 	u64 lockend;
3766 	u64 start;
3767 	int ret;
3768 	bool found = false;
3769 
3770 	if (i_size == 0 || offset >= i_size)
3771 		return -ENXIO;
3772 
3773 	/*
3774 	 * Quick path. If the inode has no prealloc extents and its number of
	 * bytes used matches its i_size, then it cannot have holes.
3776 	 */
3777 	if (whence == SEEK_HOLE &&
3778 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3779 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3780 		return i_size;
3781 
3782 	/*
	 * The offset can be negative; in that case we start searching for
	 * DATA/HOLE from the very start of the file.
3785 	 */
3786 	start = max_t(loff_t, 0, offset);
3787 
3788 	lockstart = round_down(start, fs_info->sectorsize);
3789 	lockend = round_up(i_size, fs_info->sectorsize);
3790 	if (lockend <= lockstart)
3791 		lockend = lockstart + fs_info->sectorsize;
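	/* The io tree uses inclusive end offsets, so make lockend inclusive. */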
3792 	lockend--;
3793 
3794 	path = btrfs_alloc_path();
3795 	if (!path)
3796 		return -ENOMEM;
3797 	path->reada = READA_FORWARD;
3798 
3799 	key.objectid = ino;
3800 	key.type = BTRFS_EXTENT_DATA_KEY;
3801 	key.offset = start;
3802 
3803 	last_extent_end = lockstart;
3804 
3805 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3806 
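	/*
	 * On a non-exact match, btrfs_search_slot() leaves the path at the
	 * slot where the key would be inserted. Step back one slot if the
	 * previous item is an extent item of this inode, as it may still
	 * cover our start offset.
	 */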
3807 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3808 	if (ret < 0) {
3809 		goto out;
3810 	} else if (ret > 0 && path->slots[0] > 0) {
3811 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3812 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3813 			path->slots[0]--;
3814 	}
3815 
3816 	while (start < i_size) {
3817 		struct extent_buffer *leaf = path->nodes[0];
3818 		struct btrfs_file_extent_item *extent;
3819 		u64 extent_end;
3820 
3821 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3822 			ret = btrfs_next_leaf(root, path);
3823 			if (ret < 0)
3824 				goto out;
3825 			else if (ret > 0)
3826 				break;
3827 
3828 			leaf = path->nodes[0];
3829 		}
3830 
3831 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3832 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3833 			break;
3834 
3835 		extent_end = btrfs_file_extent_end(path);
3836 
3837 		/*
3838 		 * In the first iteration we may have a slot that points to an
3839 		 * extent that ends before our start offset, so skip it.
3840 		 */
3841 		if (extent_end <= start) {
3842 			path->slots[0]++;
3843 			continue;
3844 		}
3845 
3846 		/* We have an implicit hole, NO_HOLES feature is likely set. */
3847 		if (last_extent_end < key.offset) {
3848 			u64 search_start = last_extent_end;
3849 			u64 found_start;
3850 
3851 			/*
3852 			 * First iteration, @start matches @offset and it's
3853 			 * within the hole.
3854 			 */
3855 			if (start == offset)
3856 				search_start = offset;
3857 
3858 			found = find_desired_extent_in_hole(inode, whence,
3859 							    search_start,
3860 							    key.offset - 1,
3861 							    &found_start);
3862 			if (found) {
3863 				start = found_start;
3864 				break;
3865 			}
3866 			/*
3867 			 * Didn't find data or a hole (due to delalloc) in the
			 * implicit hole range, so we need to analyze the extent.
3869 			 */
3870 		}
3871 
3872 		extent = btrfs_item_ptr(leaf, path->slots[0],
3873 					struct btrfs_file_extent_item);
3874 
3875 		if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
3876 		    btrfs_file_extent_type(leaf, extent) ==
3877 		    BTRFS_FILE_EXTENT_PREALLOC) {
3878 			/*
3879 			 * Explicit hole or prealloc extent, search for delalloc.
3880 			 * A prealloc extent is treated like a hole.
3881 			 */
3882 			u64 search_start = key.offset;
3883 			u64 found_start;
3884 
3885 			/*
3886 			 * First iteration, @start matches @offset and it's
3887 			 * within the hole.
3888 			 */
3889 			if (start == offset)
3890 				search_start = offset;
3891 
3892 			found = find_desired_extent_in_hole(inode, whence,
3893 							    search_start,
3894 							    extent_end - 1,
3895 							    &found_start);
3896 			if (found) {
3897 				start = found_start;
3898 				break;
3899 			}
3900 			/*
			 * Didn't find data or a hole (due to delalloc) in the
			 * explicit hole or prealloc extent's range, so we need
			 * to analyze the next extent item.
3904 			 */
3905 		} else {
3906 			/*
3907 			 * Found a regular or inline extent.
3908 			 * If we are seeking for data, adjust the start offset
3909 			 * and stop, we're done.
3910 			 */
3911 			if (whence == SEEK_DATA) {
3912 				start = max_t(u64, key.offset, offset);
3913 				found = true;
3914 				break;
3915 			}
3916 			/*
3917 			 * Else, we are seeking for a hole, check the next file
3918 			 * extent item.
3919 			 */
3920 		}
3921 
3922 		start = extent_end;
3923 		last_extent_end = extent_end;
3924 		path->slots[0]++;
3925 		if (fatal_signal_pending(current)) {
3926 			ret = -EINTR;
3927 			goto out;
3928 		}
3929 		cond_resched();
3930 	}
3931 
3932 	/* We have an implicit hole from the last extent found up to i_size. */
3933 	if (!found && start < i_size) {
3934 		found = find_desired_extent_in_hole(inode, whence, start,
3935 						    i_size - 1, &start);
3936 		if (!found)
3937 			start = i_size;
3938 	}
3939 
3940 out:
3941 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3942 	btrfs_free_path(path);
3943 
3944 	if (ret < 0)
3945 		return ret;
3946 
3947 	if (whence == SEEK_DATA && start >= i_size)
3948 		return -ENXIO;
3949 
3950 	return min_t(loff_t, start, i_size);
3951 }
3952 
3953 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3954 {
3955 	struct inode *inode = file->f_mapping->host;
3956 
3957 	switch (whence) {
3958 	default:
3959 		return generic_file_llseek(file, offset, whence);
3960 	case SEEK_DATA:
3961 	case SEEK_HOLE:
3962 		btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3963 		offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3964 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3965 		break;
3966 	}
3967 
3968 	if (offset < 0)
3969 		return offset;
3970 
3971 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3972 }
3973 
3974 static int btrfs_file_open(struct inode *inode, struct file *filp)
3975 {
3976 	int ret;
3977 
3978 	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
3979 
3980 	ret = fsverity_file_open(inode, filp);
3981 	if (ret)
3982 		return ret;
3983 	return generic_file_open(inode, filp);
3984 }
3985 
3986 static int check_direct_read(struct btrfs_fs_info *fs_info,
3987 			     const struct iov_iter *iter, loff_t offset)
3988 {
3989 	int ret;
3990 	int i, seg;
3991 
3992 	ret = check_direct_IO(fs_info, iter, offset);
3993 	if (ret < 0)
3994 		return ret;
3995 
3996 	if (!iter_is_iovec(iter))
3997 		return 0;
3998 
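	/*
	 * Disallow iterators in which two segments share the same base
	 * address; a failed check results in a buffered read fallback.
	 */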
3999 	for (seg = 0; seg < iter->nr_segs; seg++)
4000 		for (i = seg + 1; i < iter->nr_segs; i++)
4001 			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
4002 				return -EINVAL;
4003 	return 0;
4004 }
4005 
4006 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
4007 {
4008 	struct inode *inode = file_inode(iocb->ki_filp);
4009 	size_t prev_left = 0;
4010 	ssize_t read = 0;
4011 	ssize_t ret;
4012 
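	/*
	 * For fs-verity files, and for iterators that fail the direct IO
	 * checks below, return 0 with the iterator untouched so that
	 * btrfs_file_read_iter() falls back to a buffered read.
	 */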
4013 	if (fsverity_active(inode))
4014 		return 0;
4015 
4016 	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
4017 		return 0;
4018 
4019 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
4020 again:
4021 	/*
4022 	 * This is similar to what we do for direct IO writes, see the comment
4023 	 * at btrfs_direct_write(), but we also disable page faults in addition
4024 	 * to disabling them only at the iov_iter level. This is because when
4025 	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
	 * which can still trigger page fault-ins despite having set ->nofault
	 * to true on our 'to' iov_iter.
4028 	 *
	 * The difference from direct IO writes is that we deadlock when trying
	 * to lock the extent range in the inode's tree during the page reads
4031 	 * triggered by the fault in (while for writes it is due to waiting for
4032 	 * our own ordered extent). This is because for direct IO reads,
4033 	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
4034 	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
4035 	 */
4036 	pagefault_disable();
4037 	to->nofault = true;
4038 	ret = btrfs_dio_rw(iocb, to, read);
4039 	to->nofault = false;
4040 	pagefault_enable();
4041 
4042 	/* No increment (+=) because iomap returns a cumulative value. */
4043 	if (ret > 0)
4044 		read = ret;
4045 
4046 	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
4047 		const size_t left = iov_iter_count(to);
4048 
4049 		if (left == prev_left) {
4050 			/*
4051 			 * We didn't make any progress since the last attempt,
			 * fall back to a buffered read for the remainder of the
4053 			 * range. This is just to avoid any possibility of looping
4054 			 * for too long.
4055 			 */
4056 			ret = read;
4057 		} else {
4058 			/*
4059 			 * We made some progress since the last retry or this is
4060 			 * the first time we are retrying. Fault in as many pages
4061 			 * as possible and retry.
4062 			 */
4063 			fault_in_iov_iter_writeable(to, left);
4064 			prev_left = left;
4065 			goto again;
4066 		}
4067 	}
4068 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
4069 	return ret < 0 ? ret : read;
4070 }
4071 
4072 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4073 {
4074 	ssize_t ret = 0;
4075 
4076 	if (iocb->ki_flags & IOCB_DIRECT) {
4077 		ret = btrfs_direct_read(iocb, to);
4078 		if (ret < 0 || !iov_iter_count(to) ||
4079 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
4080 			return ret;
4081 	}
4082 
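	/*
	 * Do a buffered read for the whole range, or for whatever a short
	 * direct IO read left over (e.g. when it stopped at an inline or
	 * compressed extent); 'ret' carries the bytes already read above.
	 */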
4083 	return filemap_read(iocb, to, ret);
4084 }
4085 
4086 const struct file_operations btrfs_file_operations = {
4087 	.llseek		= btrfs_file_llseek,
4088 	.read_iter      = btrfs_file_read_iter,
4089 	.splice_read	= generic_file_splice_read,
4090 	.write_iter	= btrfs_file_write_iter,
4091 	.splice_write	= iter_file_splice_write,
4092 	.mmap		= btrfs_file_mmap,
4093 	.open		= btrfs_file_open,
4094 	.release	= btrfs_release_file,
4095 	.get_unmapped_area = thp_get_unmapped_area,
4096 	.fsync		= btrfs_sync_file,
4097 	.fallocate	= btrfs_fallocate,
4098 	.unlocked_ioctl	= btrfs_ioctl,
4099 #ifdef CONFIG_COMPAT
4100 	.compat_ioctl	= btrfs_compat_ioctl,
4101 #endif
4102 	.remap_file_range = btrfs_remap_file_range,
4103 };
4104 
4105 void __cold btrfs_auto_defrag_exit(void)
4106 {
4107 	kmem_cache_destroy(btrfs_inode_defrag_cachep);
4108 }
4109 
4110 int __init btrfs_auto_defrag_init(void)
4111 {
4112 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
4113 					sizeof(struct inode_defrag), 0,
4114 					SLAB_MEM_SPREAD,
4115 					NULL);
4116 	if (!btrfs_inode_defrag_cachep)
4117 		return -ENOMEM;
4118 
4119 	return 0;
4120 }
4121 
4122 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
4123 {
4124 	int ret;
4125 
4126 	/*
4127 	 * So with compression we will find and lock a dirty page and clear the
4128 	 * first one as dirty, setup an async extent, and immediately return
4129 	 * with the entire range locked but with nobody actually marked with
4130 	 * writeback.  So we can't just filemap_write_and_wait_range() and
4131 	 * expect it to work since it will just kick off a thread to do the
4132 	 * actual work.  So we need to call filemap_fdatawrite_range _again_
4133 	 * since it will wait on the page lock, which won't be unlocked until
4134 	 * after the pages have been marked as writeback and so we're good to go
4135 	 * from there.  We have to do this otherwise we'll miss the ordered
4136 	 * extents and that results in badness.  Please Josef, do not think you
4137 	 * know better and pull this out at some point in the future, it is
4138 	 * right and you are wrong.
4139 	 */
4140 	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
4141 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
4142 			     &BTRFS_I(inode)->runtime_flags))
4143 		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
4144 
4145 	return ret;
4146 }
4147