xref: /linux/fs/btrfs/file.c (revision 7fc7f25419f5a6b09199ba4b5026b94ef184fa79)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "btrfs_inode.h"
24 #include "print-tree.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "volumes.h"
28 #include "qgroup.h"
29 #include "compression.h"
30 #include "delalloc-space.h"
31 #include "reflink.h"
32 #include "subpage.h"
33 
34 static struct kmem_cache *btrfs_inode_defrag_cachep;
35 /*
36  * When auto defrag is enabled, we queue up these defrag
37  * structs to remember which inodes need defragging
38  * passes.
39  */
40 struct inode_defrag {
41 	struct rb_node rb_node;
42 	/* objectid */
43 	u64 ino;
44 	/*
45 	 * transid where the defrag was added, we search for
46 	 * extents newer than this
47 	 */
48 	u64 transid;
49 
50 	/* root objectid */
51 	u64 root;
52 
53 	/*
54 	 * The extent size threshold for autodefrag.
55 	 *
56 	 * This value is different for compressed/non-compressed extents,
57 	 * thus needs to be passed from higher layer.
58 	 * (aka, inode_should_defrag())
59 	 */
60 	u32 extent_thresh;
61 };
62 
63 static int __compare_inode_defrag(struct inode_defrag *defrag1,
64 				  struct inode_defrag *defrag2)
65 {
66 	if (defrag1->root > defrag2->root)
67 		return 1;
68 	else if (defrag1->root < defrag2->root)
69 		return -1;
70 	else if (defrag1->ino > defrag2->ino)
71 		return 1;
72 	else if (defrag1->ino < defrag2->ino)
73 		return -1;
74 	else
75 		return 0;
76 }
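
/*
 * Illustrative sketch (not part of the kernel build): the comparator
 * above orders defrag records by (root, ino), root first.  With two
 * made-up records:
 *
 *	struct inode_defrag a = { .root = 5, .ino = 200 };
 *	struct inode_defrag b = { .root = 7, .ino = 100 };
 *
 * __compare_inode_defrag(&a, &b) returns -1 even though a.ino > b.ino,
 * because the roots differ and a.root < b.root decides first.
 */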
77 
78 /* Insert a record for an inode into the defrag tree.  The lock
79  * must be held already.
80  *
81  * If you're inserting a record for an older transid than an
82  * existing record, the transid already in the tree is lowered.
83  *
84  * If an existing record is found, -EEXIST is returned and the
85  * defrag item you pass in must be freed by the caller.
86  */
87 static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
88 				    struct inode_defrag *defrag)
89 {
90 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
91 	struct inode_defrag *entry;
92 	struct rb_node **p;
93 	struct rb_node *parent = NULL;
94 	int ret;
95 
96 	p = &fs_info->defrag_inodes.rb_node;
97 	while (*p) {
98 		parent = *p;
99 		entry = rb_entry(parent, struct inode_defrag, rb_node);
100 
101 		ret = __compare_inode_defrag(defrag, entry);
102 		if (ret < 0)
103 			p = &parent->rb_left;
104 		else if (ret > 0)
105 			p = &parent->rb_right;
106 		else {
107 			/* if we're reinserting an entry for
108 			 * an old defrag run, make sure to
109 			 * lower the transid of our existing record
110 			 */
111 			if (defrag->transid < entry->transid)
112 				entry->transid = defrag->transid;
113 			entry->extent_thresh = min(defrag->extent_thresh,
114 						   entry->extent_thresh);
115 			return -EEXIST;
116 		}
117 	}
118 	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
119 	rb_link_node(&defrag->rb_node, parent, p);
120 	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
121 	return 0;
122 }
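
/*
 * Illustrative sketch (not part of the kernel build), with made-up
 * numbers: when a record for the same (root, ino) already exists, the
 * insert above merges into it and returns -EEXIST, and the caller is
 * expected to free the record it passed in:
 *
 *	existing: { .transid = 10, .extent_thresh = 262144 }
 *	incoming: { .transid =  8, .extent_thresh =  65536 }
 *
 * After the call the existing record holds transid 8 (the lower one)
 * and extent_thresh 65536 (the smaller one).
 */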
123 
124 static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
125 {
126 	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
127 		return 0;
128 
129 	if (btrfs_fs_closing(fs_info))
130 		return 0;
131 
132 	return 1;
133 }
134 
135 /*
136  * insert a defrag record for this inode if auto defrag is
137  * enabled
138  */
139 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
140 			   struct btrfs_inode *inode, u32 extent_thresh)
141 {
142 	struct btrfs_root *root = inode->root;
143 	struct btrfs_fs_info *fs_info = root->fs_info;
144 	struct inode_defrag *defrag;
145 	u64 transid;
146 	int ret;
147 
148 	if (!__need_auto_defrag(fs_info))
149 		return 0;
150 
151 	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
152 		return 0;
153 
154 	if (trans)
155 		transid = trans->transid;
156 	else
157 		transid = inode->root->last_trans;
158 
159 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
160 	if (!defrag)
161 		return -ENOMEM;
162 
163 	defrag->ino = btrfs_ino(inode);
164 	defrag->transid = transid;
165 	defrag->root = root->root_key.objectid;
166 	defrag->extent_thresh = extent_thresh;
167 
168 	spin_lock(&fs_info->defrag_inodes_lock);
169 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
170 		/*
171 		 * If we set the IN_DEFRAG flag and the inode is evicted from
172 		 * memory and then re-read, the new in-memory inode won't have
173 		 * the flag set, so we may find an existing defrag record here.
174 		 */
175 		ret = __btrfs_add_inode_defrag(inode, defrag);
176 		if (ret)
177 			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
178 	} else {
179 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
180 	}
181 	spin_unlock(&fs_info->defrag_inodes_lock);
182 	return 0;
183 }
184 
185 /*
186  * Pick the defraggable inode that we want; if it doesn't exist, we will
187  * get the next one.
188  */
189 static struct inode_defrag *
190 btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
191 {
192 	struct inode_defrag *entry = NULL;
193 	struct inode_defrag tmp;
194 	struct rb_node *p;
195 	struct rb_node *parent = NULL;
196 	int ret;
197 
198 	tmp.ino = ino;
199 	tmp.root = root;
200 
201 	spin_lock(&fs_info->defrag_inodes_lock);
202 	p = fs_info->defrag_inodes.rb_node;
203 	while (p) {
204 		parent = p;
205 		entry = rb_entry(parent, struct inode_defrag, rb_node);
206 
207 		ret = __compare_inode_defrag(&tmp, entry);
208 		if (ret < 0)
209 			p = parent->rb_left;
210 		else if (ret > 0)
211 			p = parent->rb_right;
212 		else
213 			goto out;
214 	}
215 
216 	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
217 		parent = rb_next(parent);
218 		if (parent)
219 			entry = rb_entry(parent, struct inode_defrag, rb_node);
220 		else
221 			entry = NULL;
222 	}
223 out:
224 	if (entry)
225 		rb_erase(parent, &fs_info->defrag_inodes);
226 	spin_unlock(&fs_info->defrag_inodes_lock);
227 	return entry;
228 }
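
/*
 * Illustrative sketch (not part of the kernel build), with made-up keys:
 * if the tree holds records for (root 1, ino 5) and (root 1, ino 9),
 * then btrfs_pick_defrag_inode(fs_info, 1, 7) finds no exact match for
 * ino 7 and instead returns (and erases from the tree) the record for
 * (root 1, ino 9) -- the next record in (root, ino) order.
 */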
229 
230 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
231 {
232 	struct inode_defrag *defrag;
233 	struct rb_node *node;
234 
235 	spin_lock(&fs_info->defrag_inodes_lock);
236 	node = rb_first(&fs_info->defrag_inodes);
237 	while (node) {
238 		rb_erase(node, &fs_info->defrag_inodes);
239 		defrag = rb_entry(node, struct inode_defrag, rb_node);
240 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
241 
242 		cond_resched_lock(&fs_info->defrag_inodes_lock);
243 
244 		node = rb_first(&fs_info->defrag_inodes);
245 	}
246 	spin_unlock(&fs_info->defrag_inodes_lock);
247 }
248 
249 #define BTRFS_DEFRAG_BATCH	1024
250 
251 static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
252 				    struct inode_defrag *defrag)
253 {
254 	struct btrfs_root *inode_root;
255 	struct inode *inode;
256 	struct btrfs_ioctl_defrag_range_args range;
257 	int ret = 0;
258 	u64 cur = 0;
259 
260 again:
261 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
262 		goto cleanup;
263 	if (!__need_auto_defrag(fs_info))
264 		goto cleanup;
265 
266 	/* get the inode */
267 	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
268 	if (IS_ERR(inode_root)) {
269 		ret = PTR_ERR(inode_root);
270 		goto cleanup;
271 	}
272 
273 	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
274 	btrfs_put_root(inode_root);
275 	if (IS_ERR(inode)) {
276 		ret = PTR_ERR(inode);
277 		goto cleanup;
278 	}
279 
280 	if (cur >= i_size_read(inode)) {
281 		iput(inode);
282 		goto cleanup;
283 	}
284 
285 	/* do a chunk of defrag */
286 	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
287 	memset(&range, 0, sizeof(range));
288 	range.len = (u64)-1;
289 	range.start = cur;
290 	range.extent_thresh = defrag->extent_thresh;
291 
292 	sb_start_write(fs_info->sb);
293 	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
294 				       BTRFS_DEFRAG_BATCH);
295 	sb_end_write(fs_info->sb);
296 	iput(inode);
297 
298 	if (ret < 0)
299 		goto cleanup;
300 
301 	cur = max(cur + fs_info->sectorsize, range.start);
302 	goto again;
303 
304 cleanup:
305 	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
306 	return ret;
307 }
308 
309 /*
310  * run through the list of inodes in the FS that need
311  * defragging
312  */
313 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
314 {
315 	struct inode_defrag *defrag;
316 	u64 first_ino = 0;
317 	u64 root_objectid = 0;
318 
319 	atomic_inc(&fs_info->defrag_running);
320 	while (1) {
321 		/* Pause the auto defragger. */
322 		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
323 			     &fs_info->fs_state))
324 			break;
325 
326 		if (!__need_auto_defrag(fs_info))
327 			break;
328 
329 		/* find an inode to defrag */
330 		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
331 						 first_ino);
332 		if (!defrag) {
333 			if (root_objectid || first_ino) {
334 				root_objectid = 0;
335 				first_ino = 0;
336 				continue;
337 			} else {
338 				break;
339 			}
340 		}
341 
342 		first_ino = defrag->ino + 1;
343 		root_objectid = defrag->root;
344 
345 		__btrfs_run_defrag_inode(fs_info, defrag);
346 	}
347 	atomic_dec(&fs_info->defrag_running);
348 
349 	/*
350 	 * during unmount, we use the transaction_wait queue to
351 	 * wait for the defragger to stop
352 	 */
353 	wake_up(&fs_info->transaction_wait);
354 	return 0;
355 }
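
/*
 * Illustrative sketch (not part of the kernel build): the loop above keeps
 * a resume cursor (root_objectid, first_ino) so each record is visited at
 * most once per pass.  With made-up records (root 1, ino 5) and
 * (root 1, ino 9):
 *
 *	pick(0, 0)   -> (root 1, ino 5), defrag it, cursor = (1, 6)
 *	pick(1, 6)   -> (root 1, ino 9), defrag it, cursor = (1, 10)
 *	pick(1, 10)  -> NULL, cursor was non-zero, reset it to (0, 0)
 *	pick(0, 0)   -> NULL again, nothing left, break out of the loop
 */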
356 
357 /* simple helper to fault in pages and copy.  This should go away
358  * and be replaced with calls into generic code.
359  */
360 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
361 					 struct page **prepared_pages,
362 					 struct iov_iter *i)
363 {
364 	size_t copied = 0;
365 	size_t total_copied = 0;
366 	int pg = 0;
367 	int offset = offset_in_page(pos);
368 
369 	while (write_bytes > 0) {
370 		size_t count = min_t(size_t,
371 				     PAGE_SIZE - offset, write_bytes);
372 		struct page *page = prepared_pages[pg];
373 		/*
374 		 * Copy data from userspace to the current page
375 		 */
376 		copied = copy_page_from_iter_atomic(page, offset, count, i);
377 
378 		/* Flush processor's dcache for this page */
379 		flush_dcache_page(page);
380 
381 		/*
382 		 * If we get a partial write, we can end up with
383 		 * partially up-to-date pages.  These add
384 		 * a lot of complexity, so make sure they don't
385 		 * happen by forcing this copy to be retried.
386 		 *
387 		 * The rest of the btrfs_file_write code will fall
388 		 * back to page-at-a-time copies after we return 0.
389 		 */
390 		if (unlikely(copied < count)) {
391 			if (!PageUptodate(page)) {
392 				iov_iter_revert(i, copied);
393 				copied = 0;
394 			}
395 			if (!copied)
396 				break;
397 		}
398 
399 		write_bytes -= copied;
400 		total_copied += copied;
401 		offset += copied;
402 		if (offset == PAGE_SIZE) {
403 			pg++;
404 			offset = 0;
405 		}
406 	}
407 	return total_copied;
408 }
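
/*
 * Illustrative arithmetic for the copy loop above, with made-up values
 * and assuming PAGE_SIZE == 4096: for pos == 6000 and write_bytes == 5000,
 * offset_in_page(pos) == 1904, so the loop copies
 *
 *	page 0: min(4096 - 1904, 5000) == 2192 bytes at offset 1904
 *	page 1: min(4096 - 0, 2808)    == 2808 bytes at offset 0
 *
 * and returns total_copied == 5000 when nothing faults.
 */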
409 
410 /*
411  * Unlock pages after btrfs_file_write is done with them.
412  */
413 static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
414 			     struct page **pages, size_t num_pages,
415 			     u64 pos, u64 copied)
416 {
417 	size_t i;
418 	u64 block_start = round_down(pos, fs_info->sectorsize);
419 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
420 
421 	ASSERT(block_len <= U32_MAX);
422 	for (i = 0; i < num_pages; i++) {
423 		/* The "checked" page flag is some magic around finding pages
424 		 * that have been modified without going through
425 		 * btrfs_set_page_dirty(); clear it here. There should be no
426 		 * need to mark the pages accessed, as prepare_pages() should
427 		 * have already marked them accessed via find_or_create_page().
428 		 */
429 		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
430 					       block_len);
431 		unlock_page(pages[i]);
432 		put_page(pages[i]);
433 	}
434 }
435 
436 /*
437  * After btrfs_copy_from_user(), update the following things for delalloc:
438  * - Mark newly dirtied pages as DELALLOC in the io tree.
439  *   Used to advise which range is to be written back.
440  * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
441  * - Update the inode size for writes past EOF
442  */
443 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
444 		      size_t num_pages, loff_t pos, size_t write_bytes,
445 		      struct extent_state **cached, bool noreserve)
446 {
447 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
448 	int err = 0;
449 	int i;
450 	u64 num_bytes;
451 	u64 start_pos;
452 	u64 end_of_last_block;
453 	u64 end_pos = pos + write_bytes;
454 	loff_t isize = i_size_read(&inode->vfs_inode);
455 	unsigned int extra_bits = 0;
456 
457 	if (write_bytes == 0)
458 		return 0;
459 
460 	if (noreserve)
461 		extra_bits |= EXTENT_NORESERVE;
462 
463 	start_pos = round_down(pos, fs_info->sectorsize);
464 	num_bytes = round_up(write_bytes + pos - start_pos,
465 			     fs_info->sectorsize);
466 	ASSERT(num_bytes <= U32_MAX);
467 
468 	end_of_last_block = start_pos + num_bytes - 1;
469 
470 	/*
471 	 * The pages may have already been dirtied; clear out the old
472 	 * accounting so we can set things up properly.
473 	 */
474 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
475 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
476 			 cached);
477 
478 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
479 					extra_bits, cached);
480 	if (err)
481 		return err;
482 
483 	for (i = 0; i < num_pages; i++) {
484 		struct page *p = pages[i];
485 
486 		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
487 		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
488 		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
489 	}
490 
491 	/*
492 	 * We've only changed i_size in RAM, and we haven't updated
493 	 * the disk i_size.  There is no need to log the inode
494 	 * at this time.
495 	 */
496 	if (end_pos > isize)
497 		i_size_write(&inode->vfs_inode, end_pos);
498 	return 0;
499 }
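
/*
 * Illustrative arithmetic for the rounding above, with made-up values and
 * assuming a 4K sectorsize: for pos == 5000 and write_bytes == 2000,
 *
 *	start_pos         = round_down(5000, 4096)             = 4096
 *	num_bytes         = round_up(2000 + 5000 - 4096, 4096) = 4096
 *	end_of_last_block = 4096 + 4096 - 1                    = 8191
 *
 * i.e. the delalloc range covers exactly the one block the write touches.
 */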
500 
501 /*
502  * This is very complex, but the basic idea is to drop all extents
503  * in the range start - end. On return, args->drop_end is set to the
504  * end of the portion of the range that was actually processed.
505  *
506  * If an extent intersects the range but is not entirely inside the range
507  * it is either truncated or split.  Anything entirely inside the range
508  * is deleted from the tree.
509  *
510  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
511  * to deal with that. We set the field 'bytes_found' of the arguments structure
512  * with the number of allocated bytes found in the target range, so that the
513  * caller can update the inode's number of bytes in an atomic way when
514  * replacing extents in a range to avoid races with stat(2).
515  */
516 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
517 		       struct btrfs_root *root, struct btrfs_inode *inode,
518 		       struct btrfs_drop_extents_args *args)
519 {
520 	struct btrfs_fs_info *fs_info = root->fs_info;
521 	struct extent_buffer *leaf;
522 	struct btrfs_file_extent_item *fi;
523 	struct btrfs_ref ref = { 0 };
524 	struct btrfs_key key;
525 	struct btrfs_key new_key;
526 	u64 ino = btrfs_ino(inode);
527 	u64 search_start = args->start;
528 	u64 disk_bytenr = 0;
529 	u64 num_bytes = 0;
530 	u64 extent_offset = 0;
531 	u64 extent_end = 0;
532 	u64 last_end = args->start;
533 	int del_nr = 0;
534 	int del_slot = 0;
535 	int extent_type;
536 	int recow;
537 	int ret;
538 	int modify_tree = -1;
539 	int update_refs;
540 	int found = 0;
541 	struct btrfs_path *path = args->path;
542 
543 	args->bytes_found = 0;
544 	args->extent_inserted = false;
545 
546 	/* Must always have a path if ->replace_extent is true */
547 	ASSERT(!(args->replace_extent && !args->path));
548 
549 	if (!path) {
550 		path = btrfs_alloc_path();
551 		if (!path) {
552 			ret = -ENOMEM;
553 			goto out;
554 		}
555 	}
556 
557 	if (args->drop_cache)
558 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
559 
560 	if (args->start >= inode->disk_i_size && !args->replace_extent)
561 		modify_tree = 0;
562 
563 	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
564 	while (1) {
565 		recow = 0;
566 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
567 					       search_start, modify_tree);
568 		if (ret < 0)
569 			break;
570 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
571 			leaf = path->nodes[0];
572 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
573 			if (key.objectid == ino &&
574 			    key.type == BTRFS_EXTENT_DATA_KEY)
575 				path->slots[0]--;
576 		}
577 		ret = 0;
578 next_slot:
579 		leaf = path->nodes[0];
580 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
581 			BUG_ON(del_nr > 0);
582 			ret = btrfs_next_leaf(root, path);
583 			if (ret < 0)
584 				break;
585 			if (ret > 0) {
586 				ret = 0;
587 				break;
588 			}
589 			leaf = path->nodes[0];
590 			recow = 1;
591 		}
592 
593 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
594 
595 		if (key.objectid > ino)
596 			break;
597 		if (WARN_ON_ONCE(key.objectid < ino) ||
598 		    key.type < BTRFS_EXTENT_DATA_KEY) {
599 			ASSERT(del_nr == 0);
600 			path->slots[0]++;
601 			goto next_slot;
602 		}
603 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
604 			break;
605 
606 		fi = btrfs_item_ptr(leaf, path->slots[0],
607 				    struct btrfs_file_extent_item);
608 		extent_type = btrfs_file_extent_type(leaf, fi);
609 
610 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
611 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
612 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
613 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
614 			extent_offset = btrfs_file_extent_offset(leaf, fi);
615 			extent_end = key.offset +
616 				btrfs_file_extent_num_bytes(leaf, fi);
617 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
618 			extent_end = key.offset +
619 				btrfs_file_extent_ram_bytes(leaf, fi);
620 		} else {
621 			/* can't happen */
622 			BUG();
623 		}
624 
625 		/*
626 		 * Don't skip extent items representing 0 byte lengths. They
627 		 * used to be created (due to a bug) when we hit an -ENOSPC
628 		 * condition while punching holes. So if we find one here, just
629 		 * ensure we delete it, otherwise we would insert a new file
630 		 * extent item with the same key (offset) as that 0 byte length
631 		 * file extent item in the call to setup_items_for_insert()
632 		 * later in this function.
633 		 */
634 		if (extent_end == key.offset && extent_end >= search_start) {
635 			last_end = extent_end;
636 			goto delete_extent_item;
637 		}
638 
639 		if (extent_end <= search_start) {
640 			path->slots[0]++;
641 			goto next_slot;
642 		}
643 
644 		found = 1;
645 		search_start = max(key.offset, args->start);
646 		if (recow || !modify_tree) {
647 			modify_tree = -1;
648 			btrfs_release_path(path);
649 			continue;
650 		}
651 
652 		/*
653 		 *     | - range to drop - |
654 		 *  | -------- extent -------- |
655 		 */
656 		if (args->start > key.offset && args->end < extent_end) {
657 			BUG_ON(del_nr > 0);
658 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
659 				ret = -EOPNOTSUPP;
660 				break;
661 			}
662 
663 			memcpy(&new_key, &key, sizeof(new_key));
664 			new_key.offset = args->start;
665 			ret = btrfs_duplicate_item(trans, root, path,
666 						   &new_key);
667 			if (ret == -EAGAIN) {
668 				btrfs_release_path(path);
669 				continue;
670 			}
671 			if (ret < 0)
672 				break;
673 
674 			leaf = path->nodes[0];
675 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
676 					    struct btrfs_file_extent_item);
677 			btrfs_set_file_extent_num_bytes(leaf, fi,
678 							args->start - key.offset);
679 
680 			fi = btrfs_item_ptr(leaf, path->slots[0],
681 					    struct btrfs_file_extent_item);
682 
683 			extent_offset += args->start - key.offset;
684 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
685 			btrfs_set_file_extent_num_bytes(leaf, fi,
686 							extent_end - args->start);
687 			btrfs_mark_buffer_dirty(leaf);
688 
689 			if (update_refs && disk_bytenr > 0) {
690 				btrfs_init_generic_ref(&ref,
691 						BTRFS_ADD_DELAYED_REF,
692 						disk_bytenr, num_bytes, 0);
693 				btrfs_init_data_ref(&ref,
694 						root->root_key.objectid,
695 						new_key.objectid,
696 						args->start - extent_offset,
697 						0, false);
698 				ret = btrfs_inc_extent_ref(trans, &ref);
699 				BUG_ON(ret); /* -ENOMEM */
700 			}
701 			key.offset = args->start;
702 		}
703 		/*
704 		 * From here on out we will have actually dropped something, so
705 		 * last_end can be updated.
706 		 */
707 		last_end = extent_end;
708 
709 		/*
710 		 *  | ---- range to drop ----- |
711 		 *      | -------- extent -------- |
712 		 */
713 		if (args->start <= key.offset && args->end < extent_end) {
714 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
715 				ret = -EOPNOTSUPP;
716 				break;
717 			}
718 
719 			memcpy(&new_key, &key, sizeof(new_key));
720 			new_key.offset = args->end;
721 			btrfs_set_item_key_safe(fs_info, path, &new_key);
722 
723 			extent_offset += args->end - key.offset;
724 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
725 			btrfs_set_file_extent_num_bytes(leaf, fi,
726 							extent_end - args->end);
727 			btrfs_mark_buffer_dirty(leaf);
728 			if (update_refs && disk_bytenr > 0)
729 				args->bytes_found += args->end - key.offset;
730 			break;
731 		}
732 
733 		search_start = extent_end;
734 		/*
735 		 *       | ---- range to drop ----- |
736 		 *  | -------- extent -------- |
737 		 */
738 		if (args->start > key.offset && args->end >= extent_end) {
739 			BUG_ON(del_nr > 0);
740 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
741 				ret = -EOPNOTSUPP;
742 				break;
743 			}
744 
745 			btrfs_set_file_extent_num_bytes(leaf, fi,
746 							args->start - key.offset);
747 			btrfs_mark_buffer_dirty(leaf);
748 			if (update_refs && disk_bytenr > 0)
749 				args->bytes_found += extent_end - args->start;
750 			if (args->end == extent_end)
751 				break;
752 
753 			path->slots[0]++;
754 			goto next_slot;
755 		}
756 
757 		/*
758 		 *  | ---- range to drop ----- |
759 		 *    | ------ extent ------ |
760 		 */
761 		if (args->start <= key.offset && args->end >= extent_end) {
762 delete_extent_item:
763 			if (del_nr == 0) {
764 				del_slot = path->slots[0];
765 				del_nr = 1;
766 			} else {
767 				BUG_ON(del_slot + del_nr != path->slots[0]);
768 				del_nr++;
769 			}
770 
771 			if (update_refs &&
772 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
773 				args->bytes_found += extent_end - key.offset;
774 				extent_end = ALIGN(extent_end,
775 						   fs_info->sectorsize);
776 			} else if (update_refs && disk_bytenr > 0) {
777 				btrfs_init_generic_ref(&ref,
778 						BTRFS_DROP_DELAYED_REF,
779 						disk_bytenr, num_bytes, 0);
780 				btrfs_init_data_ref(&ref,
781 						root->root_key.objectid,
782 						key.objectid,
783 						key.offset - extent_offset, 0,
784 						false);
785 				ret = btrfs_free_extent(trans, &ref);
786 				BUG_ON(ret); /* -ENOMEM */
787 				args->bytes_found += extent_end - key.offset;
788 			}
789 
790 			if (args->end == extent_end)
791 				break;
792 
793 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
794 				path->slots[0]++;
795 				goto next_slot;
796 			}
797 
798 			ret = btrfs_del_items(trans, root, path, del_slot,
799 					      del_nr);
800 			if (ret) {
801 				btrfs_abort_transaction(trans, ret);
802 				break;
803 			}
804 
805 			del_nr = 0;
806 			del_slot = 0;
807 
808 			btrfs_release_path(path);
809 			continue;
810 		}
811 
812 		BUG();
813 	}
814 
815 	if (!ret && del_nr > 0) {
816 		/*
817 		 * Set path->slots[0] to the first slot, so that after the delete,
818 		 * if items are moved off from our leaf to its immediate left or
819 		 * right neighbor leaves, we end up with a correct and adjusted
820 		 * path->slots[0] for our insertion (if args->replace_extent).
821 		 */
822 		path->slots[0] = del_slot;
823 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
824 		if (ret)
825 			btrfs_abort_transaction(trans, ret);
826 	}
827 
828 	leaf = path->nodes[0];
829 	/*
830 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
831 	 * which case it unlocked our path, so check path->locks[0] matches a
832 	 * write lock.
833 	 */
834 	if (!ret && args->replace_extent &&
835 	    path->locks[0] == BTRFS_WRITE_LOCK &&
836 	    btrfs_leaf_free_space(leaf) >=
837 	    sizeof(struct btrfs_item) + args->extent_item_size) {
838 
839 		key.objectid = ino;
840 		key.type = BTRFS_EXTENT_DATA_KEY;
841 		key.offset = args->start;
842 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
843 			struct btrfs_key slot_key;
844 
845 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
846 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
847 				path->slots[0]++;
848 		}
849 		btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
850 		args->extent_inserted = true;
851 	}
852 
853 	if (!args->path)
854 		btrfs_free_path(path);
855 	else if (!args->extent_inserted)
856 		btrfs_release_path(path);
857 out:
858 	args->drop_end = found ? min(args->end, last_end) : args->end;
859 
860 	return ret;
861 }
862 
863 static int extent_mergeable(struct extent_buffer *leaf, int slot,
864 			    u64 objectid, u64 bytenr, u64 orig_offset,
865 			    u64 *start, u64 *end)
866 {
867 	struct btrfs_file_extent_item *fi;
868 	struct btrfs_key key;
869 	u64 extent_end;
870 
871 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
872 		return 0;
873 
874 	btrfs_item_key_to_cpu(leaf, &key, slot);
875 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
876 		return 0;
877 
878 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
879 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
880 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
881 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
882 	    btrfs_file_extent_compression(leaf, fi) ||
883 	    btrfs_file_extent_encryption(leaf, fi) ||
884 	    btrfs_file_extent_other_encoding(leaf, fi))
885 		return 0;
886 
887 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
888 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
889 		return 0;
890 
891 	*start = key.offset;
892 	*end = extent_end;
893 	return 1;
894 }
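
/*
 * Illustrative sketch (not part of the kernel build): *start and *end act
 * as optional in/out constraints for extent_mergeable().  Pass 0 to just
 * read back the neighbor's range, or pre-set a value to require an exact
 * match, as the callers below do:
 *
 *	u64 other_start = 0, other_end = start;
 *
 *	if (extent_mergeable(leaf, slot - 1, ino, bytenr, orig_offset,
 *			     &other_start, &other_end))
 *		the left neighbor ends exactly at 'start' and points into
 *		the same unsplit extent, so the two items can be merged;
 */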
895 
896 /*
897  * Mark the extent in the range start - end as written.
898  *
899  * This changes the extent type from 'pre-allocated' to 'regular'. If only
900  * part of the extent is marked as written, the extent will be split into
901  * two or three.
902  */
903 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
904 			      struct btrfs_inode *inode, u64 start, u64 end)
905 {
906 	struct btrfs_fs_info *fs_info = trans->fs_info;
907 	struct btrfs_root *root = inode->root;
908 	struct extent_buffer *leaf;
909 	struct btrfs_path *path;
910 	struct btrfs_file_extent_item *fi;
911 	struct btrfs_ref ref = { 0 };
912 	struct btrfs_key key;
913 	struct btrfs_key new_key;
914 	u64 bytenr;
915 	u64 num_bytes;
916 	u64 extent_end;
917 	u64 orig_offset;
918 	u64 other_start;
919 	u64 other_end;
920 	u64 split;
921 	int del_nr = 0;
922 	int del_slot = 0;
923 	int recow;
924 	int ret = 0;
925 	u64 ino = btrfs_ino(inode);
926 
927 	path = btrfs_alloc_path();
928 	if (!path)
929 		return -ENOMEM;
930 again:
931 	recow = 0;
932 	split = start;
933 	key.objectid = ino;
934 	key.type = BTRFS_EXTENT_DATA_KEY;
935 	key.offset = split;
936 
937 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
938 	if (ret < 0)
939 		goto out;
940 	if (ret > 0 && path->slots[0] > 0)
941 		path->slots[0]--;
942 
943 	leaf = path->nodes[0];
944 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
945 	if (key.objectid != ino ||
946 	    key.type != BTRFS_EXTENT_DATA_KEY) {
947 		ret = -EINVAL;
948 		btrfs_abort_transaction(trans, ret);
949 		goto out;
950 	}
951 	fi = btrfs_item_ptr(leaf, path->slots[0],
952 			    struct btrfs_file_extent_item);
953 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
954 		ret = -EINVAL;
955 		btrfs_abort_transaction(trans, ret);
956 		goto out;
957 	}
958 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
959 	if (key.offset > start || extent_end < end) {
960 		ret = -EINVAL;
961 		btrfs_abort_transaction(trans, ret);
962 		goto out;
963 	}
964 
965 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
966 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
967 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
968 	memcpy(&new_key, &key, sizeof(new_key));
969 
970 	if (start == key.offset && end < extent_end) {
971 		other_start = 0;
972 		other_end = start;
973 		if (extent_mergeable(leaf, path->slots[0] - 1,
974 				     ino, bytenr, orig_offset,
975 				     &other_start, &other_end)) {
976 			new_key.offset = end;
977 			btrfs_set_item_key_safe(fs_info, path, &new_key);
978 			fi = btrfs_item_ptr(leaf, path->slots[0],
979 					    struct btrfs_file_extent_item);
980 			btrfs_set_file_extent_generation(leaf, fi,
981 							 trans->transid);
982 			btrfs_set_file_extent_num_bytes(leaf, fi,
983 							extent_end - end);
984 			btrfs_set_file_extent_offset(leaf, fi,
985 						     end - orig_offset);
986 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
987 					    struct btrfs_file_extent_item);
988 			btrfs_set_file_extent_generation(leaf, fi,
989 							 trans->transid);
990 			btrfs_set_file_extent_num_bytes(leaf, fi,
991 							end - other_start);
992 			btrfs_mark_buffer_dirty(leaf);
993 			goto out;
994 		}
995 	}
996 
997 	if (start > key.offset && end == extent_end) {
998 		other_start = end;
999 		other_end = 0;
1000 		if (extent_mergeable(leaf, path->slots[0] + 1,
1001 				     ino, bytenr, orig_offset,
1002 				     &other_start, &other_end)) {
1003 			fi = btrfs_item_ptr(leaf, path->slots[0],
1004 					    struct btrfs_file_extent_item);
1005 			btrfs_set_file_extent_num_bytes(leaf, fi,
1006 							start - key.offset);
1007 			btrfs_set_file_extent_generation(leaf, fi,
1008 							 trans->transid);
1009 			path->slots[0]++;
1010 			new_key.offset = start;
1011 			btrfs_set_item_key_safe(fs_info, path, &new_key);
1012 
1013 			fi = btrfs_item_ptr(leaf, path->slots[0],
1014 					    struct btrfs_file_extent_item);
1015 			btrfs_set_file_extent_generation(leaf, fi,
1016 							 trans->transid);
1017 			btrfs_set_file_extent_num_bytes(leaf, fi,
1018 							other_end - start);
1019 			btrfs_set_file_extent_offset(leaf, fi,
1020 						     start - orig_offset);
1021 			btrfs_mark_buffer_dirty(leaf);
1022 			goto out;
1023 		}
1024 	}
1025 
1026 	while (start > key.offset || end < extent_end) {
1027 		if (key.offset == start)
1028 			split = end;
1029 
1030 		new_key.offset = split;
1031 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1032 		if (ret == -EAGAIN) {
1033 			btrfs_release_path(path);
1034 			goto again;
1035 		}
1036 		if (ret < 0) {
1037 			btrfs_abort_transaction(trans, ret);
1038 			goto out;
1039 		}
1040 
1041 		leaf = path->nodes[0];
1042 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1043 				    struct btrfs_file_extent_item);
1044 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1045 		btrfs_set_file_extent_num_bytes(leaf, fi,
1046 						split - key.offset);
1047 
1048 		fi = btrfs_item_ptr(leaf, path->slots[0],
1049 				    struct btrfs_file_extent_item);
1050 
1051 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1052 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1053 		btrfs_set_file_extent_num_bytes(leaf, fi,
1054 						extent_end - split);
1055 		btrfs_mark_buffer_dirty(leaf);
1056 
1057 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1058 				       num_bytes, 0);
1059 		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1060 				    orig_offset, 0, false);
1061 		ret = btrfs_inc_extent_ref(trans, &ref);
1062 		if (ret) {
1063 			btrfs_abort_transaction(trans, ret);
1064 			goto out;
1065 		}
1066 
1067 		if (split == start) {
1068 			key.offset = start;
1069 		} else {
1070 			if (start != key.offset) {
1071 				ret = -EINVAL;
1072 				btrfs_abort_transaction(trans, ret);
1073 				goto out;
1074 			}
1075 			path->slots[0]--;
1076 			extent_end = end;
1077 		}
1078 		recow = 1;
1079 	}
1080 
1081 	other_start = end;
1082 	other_end = 0;
1083 	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1084 			       num_bytes, 0);
1085 	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
1086 			    0, false);
1087 	if (extent_mergeable(leaf, path->slots[0] + 1,
1088 			     ino, bytenr, orig_offset,
1089 			     &other_start, &other_end)) {
1090 		if (recow) {
1091 			btrfs_release_path(path);
1092 			goto again;
1093 		}
1094 		extent_end = other_end;
1095 		del_slot = path->slots[0] + 1;
1096 		del_nr++;
1097 		ret = btrfs_free_extent(trans, &ref);
1098 		if (ret) {
1099 			btrfs_abort_transaction(trans, ret);
1100 			goto out;
1101 		}
1102 	}
1103 	other_start = 0;
1104 	other_end = start;
1105 	if (extent_mergeable(leaf, path->slots[0] - 1,
1106 			     ino, bytenr, orig_offset,
1107 			     &other_start, &other_end)) {
1108 		if (recow) {
1109 			btrfs_release_path(path);
1110 			goto again;
1111 		}
1112 		key.offset = other_start;
1113 		del_slot = path->slots[0];
1114 		del_nr++;
1115 		ret = btrfs_free_extent(trans, &ref);
1116 		if (ret) {
1117 			btrfs_abort_transaction(trans, ret);
1118 			goto out;
1119 		}
1120 	}
1121 	if (del_nr == 0) {
1122 		fi = btrfs_item_ptr(leaf, path->slots[0],
1123 			   struct btrfs_file_extent_item);
1124 		btrfs_set_file_extent_type(leaf, fi,
1125 					   BTRFS_FILE_EXTENT_REG);
1126 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1127 		btrfs_mark_buffer_dirty(leaf);
1128 	} else {
1129 		fi = btrfs_item_ptr(leaf, del_slot - 1,
1130 			   struct btrfs_file_extent_item);
1131 		btrfs_set_file_extent_type(leaf, fi,
1132 					   BTRFS_FILE_EXTENT_REG);
1133 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1134 		btrfs_set_file_extent_num_bytes(leaf, fi,
1135 						extent_end - key.offset);
1136 		btrfs_mark_buffer_dirty(leaf);
1137 
1138 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1139 		if (ret < 0) {
1140 			btrfs_abort_transaction(trans, ret);
1141 			goto out;
1142 		}
1143 	}
1144 out:
1145 	btrfs_free_path(path);
1146 	return ret;
1147 }
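
/*
 * Illustrative sketch (not part of the kernel build), with made-up
 * offsets: marking [8K, 12K) as written inside a pre-allocated extent
 * covering [0K, 16K) splits the file extent item into three:
 *
 *	[0K, 8K)    PREALLOC  (still unwritten head)
 *	[8K, 12K)   REG       (the range that was written)
 *	[12K, 16K)  PREALLOC  (still unwritten tail)
 *
 * The extent_mergeable() checks above then merge the REG piece with an
 * adjacent REG neighbor of the same underlying extent when possible.
 */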
1148 
1149 /*
1150  * On error we return an unlocked page and the error value;
1151  * on success we return a locked page and 0.
1152  */
1153 static int prepare_uptodate_page(struct inode *inode,
1154 				 struct page *page, u64 pos,
1155 				 bool force_uptodate)
1156 {
1157 	struct folio *folio = page_folio(page);
1158 	int ret = 0;
1159 
1160 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1161 	    !PageUptodate(page)) {
1162 		ret = btrfs_read_folio(NULL, folio);
1163 		if (ret)
1164 			return ret;
1165 		lock_page(page);
1166 		if (!PageUptodate(page)) {
1167 			unlock_page(page);
1168 			return -EIO;
1169 		}
1170 
1171 		/*
1172 		 * Since btrfs_read_folio() will unlock the folio before it
1173 		 * returns, there is a window where btrfs_release_folio() can be
1174 		 * called to release the page.  Here we check both inode
1175 		 * mapping and PagePrivate() to make sure the page was not
1176 		 * released.
1177 		 *
1178 		 * The private flag check is essential for subpage as we need
1179 		 * to store extra bitmap using page->private.
1180 		 * to store an extra bitmap using page->private.
1181 		if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
1182 			unlock_page(page);
1183 			return -EAGAIN;
1184 		}
1185 	}
1186 	return 0;
1187 }
1188 
1189 static unsigned int get_prepare_fgp_flags(bool nowait)
1190 {
1191 	unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
1192 
1193 	if (nowait)
1194 		fgp_flags |= FGP_NOWAIT;
1195 
1196 	return fgp_flags;
1197 }
1198 
1199 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
1200 {
1201 	gfp_t gfp;
1202 
1203 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
1204 	if (nowait) {
1205 		gfp &= ~__GFP_DIRECT_RECLAIM;
1206 		gfp |= GFP_NOWAIT;
1207 	}
1208 
1209 	return gfp;
1210 }
1211 
1212 /*
1213  * this just gets pages into the page cache and locks them down.
1214  */
1215 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1216 				  size_t num_pages, loff_t pos,
1217 				  size_t write_bytes, bool force_uptodate,
1218 				  bool nowait)
1219 {
1220 	int i;
1221 	unsigned long index = pos >> PAGE_SHIFT;
1222 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
1223 	unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
1224 	int err = 0;
1225 	int faili;
1226 
1227 	for (i = 0; i < num_pages; i++) {
1228 again:
1229 		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
1230 					      fgp_flags, mask | __GFP_WRITE);
1231 		if (!pages[i]) {
1232 			faili = i - 1;
1233 			if (nowait)
1234 				err = -EAGAIN;
1235 			else
1236 				err = -ENOMEM;
1237 			goto fail;
1238 		}
1239 
1240 		err = set_page_extent_mapped(pages[i]);
1241 		if (err < 0) {
1242 			faili = i;
1243 			goto fail;
1244 		}
1245 
1246 		if (i == 0)
1247 			err = prepare_uptodate_page(inode, pages[i], pos,
1248 						    force_uptodate);
1249 		if (!err && i == num_pages - 1)
1250 			err = prepare_uptodate_page(inode, pages[i],
1251 						    pos + write_bytes, false);
1252 		if (err) {
1253 			put_page(pages[i]);
1254 			if (!nowait && err == -EAGAIN) {
1255 				err = 0;
1256 				goto again;
1257 			}
1258 			faili = i - 1;
1259 			goto fail;
1260 		}
1261 		wait_on_page_writeback(pages[i]);
1262 	}
1263 
1264 	return 0;
1265 fail:
1266 	while (faili >= 0) {
1267 		unlock_page(pages[faili]);
1268 		put_page(pages[faili]);
1269 		faili--;
1270 	}
1271 	return err;
1272 
1273 }
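
/*
 * Illustrative arithmetic (made-up values, assuming PAGE_SIZE == 4096):
 * a buffered write at pos == 6000 of write_bytes == 5000 gives
 *
 *	index     = 6000 >> PAGE_SHIFT                 = 1
 *	num_pages = DIV_ROUND_UP(5000 + 1904, 4096)    = 2
 *
 * (num_pages as computed by the caller), so prepare_pages() locks down
 * page indexes 1 and 2, covering file bytes 4096..12287.
 */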
1274 
1275 /*
1276  * This function locks the extent and properly waits for data=ordered extents
1277  * to finish before allowing the pages to be modified if needed.
1278  *
1279  * The return value:
1280  * 1 - the extent is locked
1281  * 0 - the extent is not locked, and everything is OK
1282  * -EAGAIN - need to re-prepare the pages
1283  * any other value < 0 - something went wrong
1284  */
1285 static noinline int
1286 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1287 				size_t num_pages, loff_t pos,
1288 				size_t write_bytes,
1289 				u64 *lockstart, u64 *lockend, bool nowait,
1290 				struct extent_state **cached_state)
1291 {
1292 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1293 	u64 start_pos;
1294 	u64 last_pos;
1295 	int i;
1296 	int ret = 0;
1297 
1298 	start_pos = round_down(pos, fs_info->sectorsize);
1299 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1300 
1301 	if (start_pos < inode->vfs_inode.i_size) {
1302 		struct btrfs_ordered_extent *ordered;
1303 
1304 		if (nowait) {
1305 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
1306 				for (i = 0; i < num_pages; i++) {
1307 					unlock_page(pages[i]);
1308 					put_page(pages[i]);
1309 					pages[i] = NULL;
1310 				}
1311 
1312 				return -EAGAIN;
1313 			}
1314 		} else {
1315 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
1316 		}
1317 
1318 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1319 						     last_pos - start_pos + 1);
1320 		if (ordered &&
1321 		    ordered->file_offset + ordered->num_bytes > start_pos &&
1322 		    ordered->file_offset <= last_pos) {
1323 			unlock_extent(&inode->io_tree, start_pos, last_pos,
1324 				      cached_state);
1325 			for (i = 0; i < num_pages; i++) {
1326 				unlock_page(pages[i]);
1327 				put_page(pages[i]);
1328 			}
1329 			btrfs_start_ordered_extent(ordered, 1);
1330 			btrfs_put_ordered_extent(ordered);
1331 			return -EAGAIN;
1332 		}
1333 		if (ordered)
1334 			btrfs_put_ordered_extent(ordered);
1335 
1336 		*lockstart = start_pos;
1337 		*lockend = last_pos;
1338 		ret = 1;
1339 	}
1340 
1341 	/*
1342 	 * We should be called after prepare_pages() which should have locked
1343 	 * all pages in the range.
1344 	 */
1345 	for (i = 0; i < num_pages; i++)
1346 		WARN_ON(!PageLocked(pages[i]));
1347 
1348 	return ret;
1349 }
1350 
1351 /*
1352  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1353  *
1354  * @pos:         File offset.
1355  * @write_bytes: The length to write, will be updated to the nocow writeable
1356  *               range.
1357  *
1358  * This function will flush ordered extents in the range to ensure proper
1359  * nocow checks.
1360  *
1361  * Return:
1362  * > 0          If we can nocow, and updates @write_bytes.
1363  *  0           If we can't do a nocow write.
1364  * -EAGAIN      If we can't do a nocow write because snapshotting of the inode's
1365  *              root is in progress.
1366  * < 0          If an error happened.
1367  *
1368  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
1369  */
1370 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1371 			   size_t *write_bytes, bool nowait)
1372 {
1373 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1374 	struct btrfs_root *root = inode->root;
1375 	u64 lockstart, lockend;
1376 	u64 num_bytes;
1377 	int ret;
1378 
1379 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1380 		return 0;
1381 
1382 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1383 		return -EAGAIN;
1384 
1385 	lockstart = round_down(pos, fs_info->sectorsize);
1386 	lockend = round_up(pos + *write_bytes,
1387 			   fs_info->sectorsize) - 1;
1388 	num_bytes = lockend - lockstart + 1;
1389 
1390 	if (nowait) {
1391 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend)) {
1392 			btrfs_drew_write_unlock(&root->snapshot_lock);
1393 			return -EAGAIN;
1394 		}
1395 	} else {
1396 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL);
1397 	}
1398 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1399 			NULL, NULL, NULL, nowait, false);
1400 	if (ret <= 0)
1401 		btrfs_drew_write_unlock(&root->snapshot_lock);
1402 	else
1403 		*write_bytes = min_t(size_t, *write_bytes,
1404 				     num_bytes - pos + lockstart);
1405 	unlock_extent(&inode->io_tree, lockstart, lockend, NULL);
1406 
1407 	return ret;
1408 }
1409 
1410 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1411 {
1412 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1413 }
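
/*
 * Illustrative usage sketch (not part of the kernel build): callers pair
 * the two helpers above around a nocow write attempt:
 *
 *	size_t write_bytes = len;
 *
 *	ret = btrfs_check_nocow_lock(inode, pos, &write_bytes, false);
 *	if (ret > 0) {
 *		write up to write_bytes bytes without COW, then:
 *		btrfs_check_nocow_unlock(inode);
 *	}
 */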
1414 
1415 static void update_time_for_write(struct inode *inode)
1416 {
1417 	struct timespec64 now;
1418 
1419 	if (IS_NOCMTIME(inode))
1420 		return;
1421 
1422 	now = current_time(inode);
1423 	if (!timespec64_equal(&inode->i_mtime, &now))
1424 		inode->i_mtime = now;
1425 
1426 	if (!timespec64_equal(&inode->i_ctime, &now))
1427 		inode->i_ctime = now;
1428 
1429 	if (IS_I_VERSION(inode))
1430 		inode_inc_iversion(inode);
1431 }
1432 
1433 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1434 			     size_t count)
1435 {
1436 	struct file *file = iocb->ki_filp;
1437 	struct inode *inode = file_inode(file);
1438 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1439 	loff_t pos = iocb->ki_pos;
1440 	int ret;
1441 	loff_t oldsize;
1442 	loff_t start_pos;
1443 
1444 	/*
1445 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1446 	 * prealloc flags, as without those flags we always have to COW. We will
1447 	 * later check if we can really NOCOW into the target range (using
1448 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1449 	 */
1450 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1451 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1452 		return -EAGAIN;
1453 
1454 	current->backing_dev_info = inode_to_bdi(inode);
1455 	ret = file_remove_privs(file);
1456 	if (ret)
1457 		return ret;
1458 
1459 	/*
1460 	 * We reserve space for updating the inode when we reserve space for the
1461 	 * extent we are going to write, so we will get any ENOSPC there.  We don't
1462 	 * need to start yet another transaction to update the inode as we will
1463 	 * update the inode when we finish writing whatever data we write.
1464 	 */
1465 	update_time_for_write(inode);
1466 
1467 	start_pos = round_down(pos, fs_info->sectorsize);
1468 	oldsize = i_size_read(inode);
1469 	if (start_pos > oldsize) {
1470 		/* Expand hole size to cover write data, preventing empty gap */
1471 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1472 
1473 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1474 		if (ret) {
1475 			current->backing_dev_info = NULL;
1476 			return ret;
1477 		}
1478 	}
1479 
1480 	return 0;
1481 }
1482 
1483 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1484 					       struct iov_iter *i)
1485 {
1486 	struct file *file = iocb->ki_filp;
1487 	loff_t pos;
1488 	struct inode *inode = file_inode(file);
1489 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1490 	struct page **pages = NULL;
1491 	struct extent_changeset *data_reserved = NULL;
1492 	u64 release_bytes = 0;
1493 	u64 lockstart;
1494 	u64 lockend;
1495 	size_t num_written = 0;
1496 	int nrptrs;
1497 	ssize_t ret;
1498 	bool only_release_metadata = false;
1499 	bool force_page_uptodate = false;
1500 	loff_t old_isize = i_size_read(inode);
1501 	unsigned int ilock_flags = 0;
1502 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1503 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1504 
1505 	if (nowait)
1506 		ilock_flags |= BTRFS_ILOCK_TRY;
1507 
1508 	ret = btrfs_inode_lock(inode, ilock_flags);
1509 	if (ret < 0)
1510 		return ret;
1511 
1512 	ret = generic_write_checks(iocb, i);
1513 	if (ret <= 0)
1514 		goto out;
1515 
1516 	ret = btrfs_write_check(iocb, i, ret);
1517 	if (ret < 0)
1518 		goto out;
1519 
1520 	pos = iocb->ki_pos;
1521 	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1522 			PAGE_SIZE / (sizeof(struct page *)));
1523 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1524 	nrptrs = max(nrptrs, 8);
1525 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1526 	if (!pages) {
1527 		ret = -ENOMEM;
1528 		goto out;
1529 	}
1530 
1531 	while (iov_iter_count(i) > 0) {
1532 		struct extent_state *cached_state = NULL;
1533 		size_t offset = offset_in_page(pos);
1534 		size_t sector_offset;
1535 		size_t write_bytes = min(iov_iter_count(i),
1536 					 nrptrs * (size_t)PAGE_SIZE -
1537 					 offset);
1538 		size_t num_pages;
1539 		size_t reserve_bytes;
1540 		size_t dirty_pages;
1541 		size_t copied;
1542 		size_t dirty_sectors;
1543 		size_t num_sectors;
1544 		int extents_locked;
1545 
1546 		/*
1547 		 * Fault in pages before locking them in prepare_pages()
1548 		 * to avoid a recursive lock.
1549 		 */
1550 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1551 			ret = -EFAULT;
1552 			break;
1553 		}
1554 
1555 		only_release_metadata = false;
1556 		sector_offset = pos & (fs_info->sectorsize - 1);
1557 
1558 		extent_changeset_release(data_reserved);
1559 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1560 						  &data_reserved, pos,
1561 						  write_bytes, nowait);
1562 		if (ret < 0) {
1563 			int can_nocow;
1564 
1565 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1566 				ret = -EAGAIN;
1567 				break;
1568 			}
1569 
1570 			/*
1571 			 * If we don't have to COW at the offset, reserve
1572 			 * metadata only. write_bytes may get smaller than
1573 			 * requested here.
1574 			 */
1575 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1576 							   &write_bytes, nowait);
1577 			if (can_nocow < 0)
1578 				ret = can_nocow;
1579 			if (can_nocow > 0)
1580 				ret = 0;
1581 			if (ret)
1582 				break;
1583 			only_release_metadata = true;
1584 		}
1585 
1586 		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1587 		WARN_ON(num_pages > nrptrs);
1588 		reserve_bytes = round_up(write_bytes + sector_offset,
1589 					 fs_info->sectorsize);
1590 		WARN_ON(reserve_bytes == 0);
1591 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1592 						      reserve_bytes,
1593 						      reserve_bytes, nowait);
1594 		if (ret) {
1595 			if (!only_release_metadata)
1596 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1597 						data_reserved, pos,
1598 						write_bytes);
1599 			else
1600 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1601 
1602 			if (nowait && ret == -ENOSPC)
1603 				ret = -EAGAIN;
1604 			break;
1605 		}
1606 
1607 		release_bytes = reserve_bytes;
1608 again:
1609 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1610 		if (ret) {
1611 			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1612 			break;
1613 		}
1614 
1615 		/*
1616 		 * This is going to set up the pages array with the number of
1617 		 * pages we want, so we don't really need to worry about the
1618 		 * contents of pages from loop to loop
1619 		 */
1620 		ret = prepare_pages(inode, pages, num_pages,
1621 				    pos, write_bytes, force_page_uptodate, false);
1622 		if (ret) {
1623 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1624 						       reserve_bytes);
1625 			break;
1626 		}
1627 
1628 		extents_locked = lock_and_cleanup_extent_if_need(
1629 				BTRFS_I(inode), pages,
1630 				num_pages, pos, write_bytes, &lockstart,
1631 				&lockend, nowait, &cached_state);
1632 		if (extents_locked < 0) {
1633 			if (!nowait && extents_locked == -EAGAIN)
1634 				goto again;
1635 
1636 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1637 						       reserve_bytes);
1638 			ret = extents_locked;
1639 			break;
1640 		}
1641 
1642 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1643 
1644 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1645 		dirty_sectors = round_up(copied + sector_offset,
1646 					fs_info->sectorsize);
1647 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1648 
1649 		/*
1650 		 * if we have trouble faulting in the pages, fall
1651 		 * back to one page at a time
1652 		 */
1653 		if (copied < write_bytes)
1654 			nrptrs = 1;
1655 
1656 		if (copied == 0) {
1657 			force_page_uptodate = true;
1658 			dirty_sectors = 0;
1659 			dirty_pages = 0;
1660 		} else {
1661 			force_page_uptodate = false;
1662 			dirty_pages = DIV_ROUND_UP(copied + offset,
1663 						   PAGE_SIZE);
1664 		}
1665 
1666 		if (num_sectors > dirty_sectors) {
1667 			/* release everything except the sectors we dirtied */
1668 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1669 			if (only_release_metadata) {
1670 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1671 							release_bytes, true);
1672 			} else {
1673 				u64 __pos;
1674 
1675 				__pos = round_down(pos,
1676 						   fs_info->sectorsize) +
1677 					(dirty_pages << PAGE_SHIFT);
1678 				btrfs_delalloc_release_space(BTRFS_I(inode),
1679 						data_reserved, __pos,
1680 						release_bytes, true);
1681 			}
1682 		}
1683 
1684 		release_bytes = round_up(copied + sector_offset,
1685 					fs_info->sectorsize);
1686 
1687 		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1688 					dirty_pages, pos, copied,
1689 					&cached_state, only_release_metadata);
1690 
1691 		/*
1692 		 * If we have not locked the extent range, because the range's
1693 		 * start offset is >= i_size, we might still have a non-NULL
1694 		 * cached extent state, acquired while marking the extent range
1695 		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1696 		 * possible cached extent state to avoid a memory leak.
1697 		 */
1698 		if (extents_locked)
1699 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1700 				      lockend, &cached_state);
1701 		else
1702 			free_extent_state(cached_state);
1703 
1704 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1705 		if (ret) {
1706 			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1707 			break;
1708 		}
1709 
1710 		release_bytes = 0;
1711 		if (only_release_metadata)
1712 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1713 
1714 		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1715 
1716 		cond_resched();
1717 
1718 		pos += copied;
1719 		num_written += copied;
1720 	}
1721 
1722 	kfree(pages);
1723 
1724 	if (release_bytes) {
1725 		if (only_release_metadata) {
1726 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1727 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1728 					release_bytes, true);
1729 		} else {
1730 			btrfs_delalloc_release_space(BTRFS_I(inode),
1731 					data_reserved,
1732 					round_down(pos, fs_info->sectorsize),
1733 					release_bytes, true);
1734 		}
1735 	}
1736 
1737 	extent_changeset_free(data_reserved);
1738 	if (num_written > 0) {
1739 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1740 		iocb->ki_pos += num_written;
1741 	}
1742 out:
1743 	btrfs_inode_unlock(inode, ilock_flags);
1744 	return num_written ? num_written : ret;
1745 }
1746 
1747 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1748 			       const struct iov_iter *iter, loff_t offset)
1749 {
1750 	const u32 blocksize_mask = fs_info->sectorsize - 1;
1751 
1752 	if (offset & blocksize_mask)
1753 		return -EINVAL;
1754 
1755 	if (iov_iter_alignment(iter) & blocksize_mask)
1756 		return -EINVAL;
1757 
1758 	return 0;
1759 }
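
/*
 * Illustrative arithmetic (made-up values, assuming a 4K sectorsize, so
 * blocksize_mask == 0xfff): offset == 8192 and a fully 4K-aligned iter
 * pass both checks above, while offset == 512 fails the first check
 * (512 & 0xfff != 0) and btrfs_direct_write() falls back to buffered IO.
 */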
1760 
1761 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1762 {
1763 	struct file *file = iocb->ki_filp;
1764 	struct inode *inode = file_inode(file);
1765 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1766 	loff_t pos;
1767 	ssize_t written = 0;
1768 	ssize_t written_buffered;
1769 	size_t prev_left = 0;
1770 	loff_t endbyte;
1771 	ssize_t err;
1772 	unsigned int ilock_flags = 0;
1773 	struct iomap_dio *dio;
1774 
1775 	if (iocb->ki_flags & IOCB_NOWAIT)
1776 		ilock_flags |= BTRFS_ILOCK_TRY;
1777 
1778 	/* If the write DIO is within EOF, use a shared lock */
1779 	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1780 		ilock_flags |= BTRFS_ILOCK_SHARED;
1781 
1782 relock:
1783 	err = btrfs_inode_lock(inode, ilock_flags);
1784 	if (err < 0)
1785 		return err;
1786 
1787 	err = generic_write_checks(iocb, from);
1788 	if (err <= 0) {
1789 		btrfs_inode_unlock(inode, ilock_flags);
1790 		return err;
1791 	}
1792 
1793 	err = btrfs_write_check(iocb, from, err);
1794 	if (err < 0) {
1795 		btrfs_inode_unlock(inode, ilock_flags);
1796 		goto out;
1797 	}
1798 
1799 	pos = iocb->ki_pos;
1800 	/*
1801 	 * Re-check since file size may have changed just before taking the
1802 	 * lock, or pos may have changed because of O_APPEND in generic_write_checks().
1803 	 */
1804 	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1805 	    pos + iov_iter_count(from) > i_size_read(inode)) {
1806 		btrfs_inode_unlock(inode, ilock_flags);
1807 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1808 		goto relock;
1809 	}
1810 
1811 	if (check_direct_IO(fs_info, from, pos)) {
1812 		btrfs_inode_unlock(inode, ilock_flags);
1813 		goto buffered;
1814 	}
1815 
1816 	/*
1817 	 * The iov_iter can be mapped to the same file range we are writing to.
1818 	 * If that's the case, then we will deadlock in the iomap code, because
1819 	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
1820 	 * an ordered extent, and after that it will fault in the pages that the
1821 	 * iov_iter refers to. During the fault in we end up in the readahead
1822 	 * pages code (starting at btrfs_readahead()), which will lock the range,
1823 	 * find that ordered extent and then wait for it to complete (at
1824 	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1825 	 * obviously the ordered extent can never complete as we didn't submit
1826 	 * yet the respective bio(s). This always happens when the buffer is
1827 	 * memory mapped to the same file range, since the iomap DIO code always
1828 	 * invalidates pages in the target file range (after starting and waiting
1829 	 * for any writeback).
1830 	 *
1831 	 * So here we disable page faults in the iov_iter and then retry if we
1832 	 * got -EFAULT, faulting in the pages before the retry.
1833 	 */
1834 	from->nofault = true;
1835 	dio = btrfs_dio_write(iocb, from, written);
1836 	from->nofault = false;
1837 
1838 	/*
1839 	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
1840 	 * iocb, and that needs to lock the inode. So unlock it before calling
1841 	 * iomap_dio_complete() to avoid a deadlock.
1842 	 */
1843 	btrfs_inode_unlock(inode, ilock_flags);
1844 
1845 	if (IS_ERR_OR_NULL(dio))
1846 		err = PTR_ERR_OR_ZERO(dio);
1847 	else
1848 		err = iomap_dio_complete(dio);
1849 
1850 	/* No increment (+=) because iomap returns a cumulative value. */
1851 	if (err > 0)
1852 		written = err;
1853 
1854 	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
1855 		const size_t left = iov_iter_count(from);
1856 		/*
1857 		 * We have more data left to write. Try to fault in as many of
1858 		 * the remaining pages as possible and retry. We do this without
1859 		 * releasing and locking the inode again, to prevent races with
1860 		 * truncate.
1861 		 *
1862 		 * Also, in case the iov refers to pages in the file range we
1863 		 * want to write to (due to a mmap), we could enter an infinite
1864 		 * loop if we retry after faulting the pages in, since iomap
1865 		 * will invalidate any pages in the range early on, before it
1866 		 * tries to fault in the pages of the iov. So we keep track of
1867 		 * how much of the iov was left after the previous EFAULT and
1868 		 * fall back to buffered IO if we haven't made any progress.
1869 		 */
1870 		if (left == prev_left) {
1871 			err = -ENOTBLK;
1872 		} else {
1873 			fault_in_iov_iter_readable(from, left);
1874 			prev_left = left;
1875 			goto relock;
1876 		}
1877 	}
1878 
1879 	/*
1880 	 * If 'err' is -ENOTBLK or we have not written all data, then it means
1881 	 * we must fall back to buffered IO.
1882 	 */
1883 	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
1884 		goto out;
1885 
1886 buffered:
1887 	/*
1888 	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
1889 	 * it must retry the operation in a context where blocking is acceptable,
1890 	 * since we currently don't have NOWAIT semantics support for buffered IO
1891 	 * and may block there for many reasons (reserving space for example).
1892 	 */
1893 	if (iocb->ki_flags & IOCB_NOWAIT) {
1894 		err = -EAGAIN;
1895 		goto out;
1896 	}
1897 
1898 	pos = iocb->ki_pos;
1899 	written_buffered = btrfs_buffered_write(iocb, from);
1900 	if (written_buffered < 0) {
1901 		err = written_buffered;
1902 		goto out;
1903 	}
1904 	/*
1905 	 * Ensure all data is persisted. We want the next direct IO read to be
1906 	 * able to read what was just written.
1907 	 */
1908 	endbyte = pos + written_buffered - 1;
1909 	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1910 	if (err)
1911 		goto out;
1912 	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1913 	if (err)
1914 		goto out;
1915 	written += written_buffered;
1916 	iocb->ki_pos = pos + written_buffered;
1917 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1918 				 endbyte >> PAGE_SHIFT);
1919 out:
1920 	return err < 0 ? err : written;
1921 }
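
/*
 * Editor's note: a hypothetical skeleton, not part of btrfs and kept
 * compiled out, of the "disable page faults, fault pages in manually,
 * retry" pattern that btrfs_direct_write() above implements.
 * do_one_dio_attempt() is an assumption of this sketch, standing in for
 * the btrfs_dio_write() + iomap_dio_complete() pair; it is not a real
 * function.
 */
#if 0	/* illustrative skeleton */
static ssize_t example_nofault_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t prev_left = 0;
	ssize_t ret;

	for (;;) {
		from->nofault = true;
		ret = do_one_dio_attempt(iocb, from);	/* hypothetical */
		from->nofault = false;

		/* Done, or failed for a reason other than a page fault. */
		if (iov_iter_count(from) == 0 || (ret < 0 && ret != -EFAULT))
			return ret;

		/* No progress means the iov maps the target range itself. */
		if (iov_iter_count(from) == prev_left)
			return -ENOTBLK;	/* fall back to buffered IO */

		prev_left = iov_iter_count(from);
		fault_in_iov_iter_readable(from, prev_left);
	}
}
#endif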
1922 
1923 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1924 			const struct btrfs_ioctl_encoded_io_args *encoded)
1925 {
1926 	struct file *file = iocb->ki_filp;
1927 	struct inode *inode = file_inode(file);
1928 	loff_t count;
1929 	ssize_t ret;
1930 
1931 	btrfs_inode_lock(inode, 0);
1932 	count = encoded->len;
1933 	ret = generic_write_checks_count(iocb, &count);
1934 	if (ret == 0 && count != encoded->len) {
1935 		/*
1936 		 * The write got truncated by generic_write_checks_count(). We
1937 		 * can't do a partial encoded write.
1938 		 */
1939 		ret = -EFBIG;
1940 	}
1941 	if (ret || encoded->len == 0)
1942 		goto out;
1943 
1944 	ret = btrfs_write_check(iocb, from, encoded->len);
1945 	if (ret < 0)
1946 		goto out;
1947 
1948 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1949 out:
1950 	btrfs_inode_unlock(inode, 0);
1951 	return ret;
1952 }
1953 
1954 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1955 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1956 {
1957 	struct file *file = iocb->ki_filp;
1958 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1959 	ssize_t num_written, num_sync;
1960 	const bool sync = iocb_is_dsync(iocb);
1961 
1962 	/*
1963 	 * If the fs flips readonly due to some impossible error, although we
1964 	 * have opened a file as writable, we have to stop this write operation
1965 	 * to ensure consistency.
1966 	 */
1967 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1968 		return -EROFS;
1969 
1970 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1971 		return -EOPNOTSUPP;
1972 
1973 	if (sync)
1974 		atomic_inc(&inode->sync_writers);
1975 
1976 	if (encoded) {
1977 		num_written = btrfs_encoded_write(iocb, from, encoded);
1978 		num_sync = encoded->len;
1979 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1980 		num_written = btrfs_direct_write(iocb, from);
1981 		num_sync = num_written;
1982 	} else {
1983 		num_written = btrfs_buffered_write(iocb, from);
1984 		num_sync = num_written;
1985 	}
1986 
1987 	btrfs_set_inode_last_sub_trans(inode);
1988 
1989 	if (num_sync > 0) {
1990 		num_sync = generic_write_sync(iocb, num_sync);
1991 		if (num_sync < 0)
1992 			num_written = num_sync;
1993 	}
1994 
1995 	if (sync)
1996 		atomic_dec(&inode->sync_writers);
1997 
1998 	current->backing_dev_info = NULL;
1999 	return num_written;
2000 }
2001 
2002 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2003 {
2004 	return btrfs_do_write_iter(iocb, from, NULL);
2005 }
2006 
2007 int btrfs_release_file(struct inode *inode, struct file *filp)
2008 {
2009 	struct btrfs_file_private *private = filp->private_data;
2010 
2011 	if (private)
2012 		kfree(private->filldir_buf);
2013 	kfree(private);
2014 	filp->private_data = NULL;
2015 
2016 	/*
2017 	 * Set by setattr when we are about to truncate a file from a non-zero
2018 	 * size to a zero size.  This tries to flush down new bytes that may
2019 	 * have been written in case the application is using truncate to
2020 	 * replace a file in place.
2021 	 */
2022 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2023 			       &BTRFS_I(inode)->runtime_flags))
2024 		filemap_flush(inode->i_mapping);
2025 	return 0;
2026 }
2027 
2028 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2029 {
2030 	int ret;
2031 	struct blk_plug plug;
2032 
2033 	/*
2034 	 * This is only called in fsync, which would do synchronous writes, so
2035 	 * a plug can merge adjacent IOs as much as possible.  Especially with
2036 	 * multiple disks using a RAID profile, a large IO can be split into
2037 	 * several segments of stripe length (currently 64K).
2038 	 */
2039 	blk_start_plug(&plug);
2040 	atomic_inc(&BTRFS_I(inode)->sync_writers);
2041 	ret = btrfs_fdatawrite_range(inode, start, end);
2042 	atomic_dec(&BTRFS_I(inode)->sync_writers);
2043 	blk_finish_plug(&plug);
2044 
2045 	return ret;
2046 }
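
/*
 * Editor's note: the plugging idiom used by start_ordered_ops() in generic
 * form; a minimal sketch, not part of btrfs.  Everything submitted between
 * blk_start_plug() and blk_finish_plug() is queued per task, so the block
 * layer can merge adjacent requests before they reach the device.
 */
static int __maybe_unused example_plugged_writeback(struct address_space *mapping,
						    loff_t start, loff_t end)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	/* Kick off writeback; the requests accumulate in the plug. */
	ret = filemap_fdatawrite_range(mapping, start, end);
	blk_finish_plug(&plug);	/* submit the merged batch */

	return ret;
}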
2047 
2048 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2049 {
2050 	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2051 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2052 
2053 	if (btrfs_inode_in_log(inode, fs_info->generation) &&
2054 	    list_empty(&ctx->ordered_extents))
2055 		return true;
2056 
2057 	/*
2058 	 * If we are doing a fast fsync we cannot bail out if the inode's
2059 	 * last_trans is <= the last committed transaction, because we only
2060 	 * update the last_trans of the inode during ordered extent completion,
2061 	 * and for a fast fsync we don't wait for that, we only wait for the
2062 	 * writeback to complete.
2063 	 */
2064 	if (inode->last_trans <= fs_info->last_trans_committed &&
2065 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2066 	     list_empty(&ctx->ordered_extents)))
2067 		return true;
2068 
2069 	return false;
2070 }
2071 
2072 /*
2073  * fsync call for both files and directories.  This logs the inode into
2074  * the tree log instead of forcing full commits whenever possible.
2075  *
2076  * It needs to call filemap_fdatawait so that all the ordered extent updates
2077  * in the metadata btree are up to date for copying to the log.
2078  *
2079  * It drops the inode mutex before doing the tree log commit.  This is an
2080  * important optimization for directories because holding the mutex prevents
2081  * new operations on the dir while we write to disk.
2082  */
2083 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2084 {
2085 	struct dentry *dentry = file_dentry(file);
2086 	struct inode *inode = d_inode(dentry);
2087 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2088 	struct btrfs_root *root = BTRFS_I(inode)->root;
2089 	struct btrfs_trans_handle *trans;
2090 	struct btrfs_log_ctx ctx;
2091 	int ret = 0, err;
2092 	u64 len;
2093 	bool full_sync;
2094 
2095 	trace_btrfs_sync_file(file, datasync);
2096 
2097 	btrfs_init_log_ctx(&ctx, inode);
2098 
2099 	/*
2100 	 * Always set the range to a full range, otherwise we can get into
2101 	 * several problems, from missing file extent items to represent holes
2102 	 * when not using the NO_HOLES feature, to log tree corruption due to
2103 	 * races between hole detection during logging and completion of ordered
2104 	 * extents outside the range, to missing checksums due to ordered extents
2105 	 * for which we flushed only a subset of their pages.
2106 	 */
2107 	start = 0;
2108 	end = LLONG_MAX;
2109 	len = (u64)LLONG_MAX + 1;
2110 
2111 	/*
2112 	 * We write the dirty pages in the range and wait until they complete
2113 	 * outside of the ->i_mutex. This way multiple tasks can flush dirty
2114 	 * pages concurrently, improving performance.  See
2115 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2116 	 */
2117 	ret = start_ordered_ops(inode, start, end);
2118 	if (ret)
2119 		goto out;
2120 
2121 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2122 
2123 	atomic_inc(&root->log_batch);
2124 
2125 	/*
2126 	 * Before we acquired the inode's lock and the mmap lock, someone may
2127 	 * have dirtied more pages in the target range. We need to make sure
2128 	 * that writeback for any such pages does not start while we are logging
2129 	 * the inode, because if it does, any of the following might happen when
2130 	 * we are not doing a full inode sync:
2131 	 *
2132 	 * 1) We log an extent after its writeback finishes but before its
2133 	 *    checksums are added to the csum tree, leading to -EIO errors
2134 	 *    when attempting to read the extent after a log replay.
2135 	 *
2136 	 * 2) We can end up logging an extent before its writeback finishes.
2137 	 *    Therefore after the log replay we will have a file extent item
2138 	 *    pointing to an unwritten extent (and no data checksums as well).
2139 	 *
2140 	 * So trigger writeback for any eventual new dirty pages and then we
2141 	 * wait for all ordered extents to complete below.
2142 	 */
2143 	ret = start_ordered_ops(inode, start, end);
2144 	if (ret) {
2145 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2146 		goto out;
2147 	}
2148 
2149 	/*
2150 	 * Always check for the full sync flag while holding the inode's lock,
2151 	 * to avoid races with other tasks. The flag must either stay set for
2152 	 * the whole duration of the logging or stay cleared for all of it.
2153 	 * We check the flag here after starting delalloc above, because when
2154 	 * running delalloc the full sync flag may be set if we need to drop
2155 	 * extra extent map ranges due to temporary memory allocation failures.
2156 	 */
2157 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2158 			     &BTRFS_I(inode)->runtime_flags);
2159 
2160 	/*
2161 	 * We have to do this here to avoid the priority inversion of waiting on
2162 	 * IO of a lower priority task while holding a transaction open.
2163 	 *
2164 	 * For a full fsync we wait for the ordered extents to complete while
2165 	 * for a fast fsync we wait just for writeback to complete, and then
2166 	 * attach the ordered extents to the transaction so that a transaction
2167 	 * commit waits for their completion, to avoid data loss if we fsync,
2168 	 * the current transaction commits before the ordered extents complete
2169 	 * and a power failure happens right after that.
2170 	 *
2171 	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2172 	 * logical address recorded in the ordered extent may change. We need
2173 	 * to wait for the IO to stabilize the logical address.
2174 	 */
2175 	if (full_sync || btrfs_is_zoned(fs_info)) {
2176 		ret = btrfs_wait_ordered_range(inode, start, len);
2177 	} else {
2178 		/*
2179 		 * Get our ordered extents as soon as possible to avoid doing
2180 		 * checksum lookups in the csum tree, and use instead the
2181 		 * checksums attached to the ordered extents.
2182 		 */
2183 		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2184 						      &ctx.ordered_extents);
2185 		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2186 	}
2187 
2188 	if (ret)
2189 		goto out_release_extents;
2190 
2191 	atomic_inc(&root->log_batch);
2192 
2193 	smp_mb();
2194 	if (skip_inode_logging(&ctx)) {
2195 		/*
2196 		 * We've had everything committed since the last time we were
2197 		 * modified, so clear this flag in case it was set for whatever
2198 		 * reason; it's no longer relevant.
2199 		 */
2200 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2201 			  &BTRFS_I(inode)->runtime_flags);
2202 		/*
2203 		 * An ordered extent might have started before and completed
2204 		 * already with io errors, in which case the inode was not
2205 		 * updated and we end up here. So check the inode's mapping
2206 		 * for any errors that might have happened since the last
2207 		 * time fsync was called.
2208 		 */
2209 		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2210 		goto out_release_extents;
2211 	}
2212 
2213 	/*
2214 	 * We use start here because we will need to wait on the IO to complete
2215 	 * in btrfs_sync_log, which could require joining a transaction (for
2216 	 * example checking cross references in the nocow path).  If we use join
2217 	 * here we could get into a situation where we're waiting on IO to
2218 	 * happen that is blocked on a transaction trying to commit.  With start
2219 	 * we inc the extwriter counter, so we wait for all extwriters to exit
2220 	 * before we start blocking joiners.  This comment is to keep somebody
2221 	 * from thinking they are super smart and changing this to
2222 	 * btrfs_join_transaction *cough*Josef*cough*.
2223 	 */
2224 	trans = btrfs_start_transaction(root, 0);
2225 	if (IS_ERR(trans)) {
2226 		ret = PTR_ERR(trans);
2227 		goto out_release_extents;
2228 	}
2229 	trans->in_fsync = true;
2230 
2231 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2232 	btrfs_release_log_ctx_extents(&ctx);
2233 	if (ret < 0) {
2234 		/* Fallthrough and commit/free transaction. */
2235 		ret = BTRFS_LOG_FORCE_COMMIT;
2236 	}
2237 
2238 	/* we've logged all the items and now have a consistent
2239 	 * version of the file in the log.  It is possible that
2240 	 * someone will come in and modify the file, but that's
2241 	 * fine because the log is consistent on disk, and we
2242 	 * have references to all of the file's extents
2243 	 *
2244 	 * It is possible that someone will come in and log the
2245 	 * file again, but that will end up using the synchronization
2246 	 * inside btrfs_sync_log to keep things safe.
2247 	 */
2248 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2249 
2250 	if (ret == BTRFS_NO_LOG_SYNC) {
2251 		ret = btrfs_end_transaction(trans);
2252 		goto out;
2253 	}
2254 
2255 	/* We successfully logged the inode, attempt to sync the log. */
2256 	if (!ret) {
2257 		ret = btrfs_sync_log(trans, root, &ctx);
2258 		if (!ret) {
2259 			ret = btrfs_end_transaction(trans);
2260 			goto out;
2261 		}
2262 	}
2263 
2264 	/*
2265 	 * At this point we need to commit the transaction because we had
2266 	 * btrfs_need_log_full_commit() or some other error.
2267 	 *
2268 	 * If we didn't do a full sync we have to stop the trans handle, wait on
2269 	 * the ordered extents, start it again and commit the transaction.  If
2270 	 * we attempt to wait on the ordered extents here we could deadlock with
2271 	 * something like fallocate() that is holding the extent lock trying to
2272 	 * start a transaction while some other thread is trying to commit the
2273 	 * transaction while we (fsync) are currently holding the transaction
2274 	 * open.
2275 	 */
2276 	if (!full_sync) {
2277 		ret = btrfs_end_transaction(trans);
2278 		if (ret)
2279 			goto out;
2280 		ret = btrfs_wait_ordered_range(inode, start, len);
2281 		if (ret)
2282 			goto out;
2283 
2284 		/*
2285 		 * This is safe to use here because we're only interested in
2286 		 * making sure the transaction that had the ordered extents is
2287 		 * committed.  We aren't waiting on anything past this point,
2288 		 * we're purely getting the transaction and committing it.
2289 		 */
2290 		trans = btrfs_attach_transaction_barrier(root);
2291 		if (IS_ERR(trans)) {
2292 			ret = PTR_ERR(trans);
2293 
2294 			/*
2295 			 * We committed the transaction and there's no currently
2296 			 * running transaction, this means everything we care
2297 			 * about made it to disk and we are done.
2298 			 */
2299 			if (ret == -ENOENT)
2300 				ret = 0;
2301 			goto out;
2302 		}
2303 	}
2304 
2305 	ret = btrfs_commit_transaction(trans);
2306 out:
2307 	ASSERT(list_empty(&ctx.list));
2308 	ASSERT(list_empty(&ctx.conflict_inodes));
2309 	err = file_check_and_advance_wb_err(file);
2310 	if (!ret)
2311 		ret = err;
2312 	return ret > 0 ? -EIO : ret;
2313 
2314 out_release_extents:
2315 	btrfs_release_log_ctx_extents(&ctx);
2316 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2317 	goto out;
2318 }
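
/*
 * Editor's note: a hedged userspace illustration (not kernel code, kept
 * compiled out) of the contract btrfs_sync_file() above implements: once
 * fsync() returns 0, both the data and the metadata needed to find it must
 * survive a power failure.
 */
#if 0	/* userspace sketch */
#include <fcntl.h>
#include <unistd.h>

int write_durably(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;
	return fsync(fd);	/* ends up in btrfs_sync_file() on btrfs */
}
#endif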
2319 
2320 static const struct vm_operations_struct btrfs_file_vm_ops = {
2321 	.fault		= filemap_fault,
2322 	.map_pages	= filemap_map_pages,
2323 	.page_mkwrite	= btrfs_page_mkwrite,
2324 };
2325 
2326 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2327 {
2328 	struct address_space *mapping = filp->f_mapping;
2329 
2330 	if (!mapping->a_ops->read_folio)
2331 		return -ENOEXEC;
2332 
2333 	file_accessed(filp);
2334 	vma->vm_ops = &btrfs_file_vm_ops;
2335 
2336 	return 0;
2337 }
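
/*
 * Editor's note: the userspace view (not kernel code, kept compiled out)
 * of the ->page_mkwrite hook installed above.  The first store through a
 * shared mapping faults and lands in btrfs_page_mkwrite(), which reserves
 * space and marks the page dirty.
 */
#if 0	/* userspace sketch */
#include <sys/mman.h>

int dirty_first_byte(int fd, size_t len)
{
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	p[0] = 'x';	/* triggers ->page_mkwrite() on a clean page */
	return munmap(p, len);
}
#endif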
2338 
2339 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2340 			  int slot, u64 start, u64 end)
2341 {
2342 	struct btrfs_file_extent_item *fi;
2343 	struct btrfs_key key;
2344 
2345 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2346 		return 0;
2347 
2348 	btrfs_item_key_to_cpu(leaf, &key, slot);
2349 	if (key.objectid != btrfs_ino(inode) ||
2350 	    key.type != BTRFS_EXTENT_DATA_KEY)
2351 		return 0;
2352 
2353 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2354 
2355 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2356 		return 0;
2357 
2358 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2359 		return 0;
2360 
2361 	if (key.offset == end)
2362 		return 1;
2363 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2364 		return 1;
2365 	return 0;
2366 }
2367 
2368 static int fill_holes(struct btrfs_trans_handle *trans,
2369 		struct btrfs_inode *inode,
2370 		struct btrfs_path *path, u64 offset, u64 end)
2371 {
2372 	struct btrfs_fs_info *fs_info = trans->fs_info;
2373 	struct btrfs_root *root = inode->root;
2374 	struct extent_buffer *leaf;
2375 	struct btrfs_file_extent_item *fi;
2376 	struct extent_map *hole_em;
2377 	struct btrfs_key key;
2378 	int ret;
2379 
2380 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2381 		goto out;
2382 
2383 	key.objectid = btrfs_ino(inode);
2384 	key.type = BTRFS_EXTENT_DATA_KEY;
2385 	key.offset = offset;
2386 
2387 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2388 	if (ret <= 0) {
2389 		/*
2390 		 * We should have dropped this offset, so if we find it then
2391 		 * something has gone horribly wrong.
2392 		 */
2393 		if (ret == 0)
2394 			ret = -EINVAL;
2395 		return ret;
2396 	}
2397 
2398 	leaf = path->nodes[0];
2399 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2400 		u64 num_bytes;
2401 
2402 		path->slots[0]--;
2403 		fi = btrfs_item_ptr(leaf, path->slots[0],
2404 				    struct btrfs_file_extent_item);
2405 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2406 			end - offset;
2407 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2408 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2409 		btrfs_set_file_extent_offset(leaf, fi, 0);
2410 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2411 		btrfs_mark_buffer_dirty(leaf);
2412 		goto out;
2413 	}
2414 
2415 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2416 		u64 num_bytes;
2417 
2418 		key.offset = offset;
2419 		btrfs_set_item_key_safe(fs_info, path, &key);
2420 		fi = btrfs_item_ptr(leaf, path->slots[0],
2421 				    struct btrfs_file_extent_item);
2422 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2423 			offset;
2424 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2425 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2426 		btrfs_set_file_extent_offset(leaf, fi, 0);
2427 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2428 		btrfs_mark_buffer_dirty(leaf);
2429 		goto out;
2430 	}
2431 	btrfs_release_path(path);
2432 
2433 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2434 				       end - offset);
2435 	if (ret)
2436 		return ret;
2437 
2438 out:
2439 	btrfs_release_path(path);
2440 
2441 	hole_em = alloc_extent_map();
2442 	if (!hole_em) {
2443 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2444 		btrfs_set_inode_full_sync(inode);
2445 	} else {
2446 		hole_em->start = offset;
2447 		hole_em->len = end - offset;
2448 		hole_em->ram_bytes = hole_em->len;
2449 		hole_em->orig_start = offset;
2450 
2451 		hole_em->block_start = EXTENT_MAP_HOLE;
2452 		hole_em->block_len = 0;
2453 		hole_em->orig_block_len = 0;
2454 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2455 		hole_em->generation = trans->transid;
2456 
2457 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2458 		free_extent_map(hole_em);
2459 		if (ret)
2460 			btrfs_set_inode_full_sync(inode);
2461 	}
2462 
2463 	return 0;
2464 }
2465 
2466 /*
2467  * Find a hole extent on the given inode and change start/len to the end of
2468  * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2469  *		      em->start + em->len > start).
2470  * When a hole extent is found, return 1 and modify start/len.
2471  */
2472 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2473 {
2474 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2475 	struct extent_map *em;
2476 	int ret = 0;
2477 
2478 	em = btrfs_get_extent(inode, NULL, 0,
2479 			      round_down(*start, fs_info->sectorsize),
2480 			      round_up(*len, fs_info->sectorsize));
2481 	if (IS_ERR(em))
2482 		return PTR_ERR(em);
2483 
2484 	/* Hole or vacuum extent (the latter only exists in no-holes mode) */
2485 	if (em->block_start == EXTENT_MAP_HOLE) {
2486 		ret = 1;
2487 		*len = em->start + em->len > *start + *len ?
2488 		       0 : *start + *len - em->start - em->len;
2489 		*start = em->start + em->len;
2490 	}
2491 	free_extent_map(em);
2492 	return ret;
2493 }
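
/*
 * Editor's note: a hypothetical sketch, not part of btrfs, of how callers
 * such as btrfs_punch_hole() consume find_first_non_hole(): *start and
 * *len are advanced past a leading hole, and a return of 1 with *len == 0
 * means the whole range was already a hole.
 */
static int __maybe_unused example_skip_leading_hole(struct btrfs_inode *inode,
						    u64 *start, u64 *len)
{
	int ret = find_first_non_hole(inode, start, len);

	if (ret < 0)
		return ret;	/* extent lookup failed */
	if (ret && *len == 0)
		return 1;	/* nothing left to operate on */
	return 0;		/* *start now points at data */
}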
2494 
2495 static void btrfs_punch_hole_lock_range(struct inode *inode,
2496 					const u64 lockstart,
2497 					const u64 lockend,
2498 					struct extent_state **cached_state)
2499 {
2500 	/*
2501 	 * For the subpage case, if the range is not at a page boundary, we
2502 	 * could have pages at the leading/trailing part of the range.
2503 	 * This could lead to an infinite loop since filemap_range_has_page()
2504 	 * will always return true.
2505 	 * So here we need to do extra page alignment for
2506 	 * filemap_range_has_page().
2507 	 */
2508 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2509 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2510 
2511 	while (1) {
2512 		truncate_pagecache_range(inode, lockstart, lockend);
2513 
2514 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2515 			    cached_state);
2516 		/*
2517 		 * We can't have ordered extents in the range, nor dirty/writeback
2518 		 * pages, because we have locked the inode's VFS lock in exclusive
2519 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2520 		 * we have flushed all delalloc in the range and we have waited
2521 		 * for any ordered extents in the range to complete.
2522 		 * We can race with anyone reading pages from this range, so after
2523 		 * locking the range check if we have pages in the range, and if
2524 		 * we do, unlock the range and retry.
2525 		 */
2526 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2527 					    page_lockend))
2528 			break;
2529 
2530 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2531 			      cached_state);
2532 	}
2533 
2534 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2535 }
2536 
2537 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2538 				     struct btrfs_inode *inode,
2539 				     struct btrfs_path *path,
2540 				     struct btrfs_replace_extent_info *extent_info,
2541 				     const u64 replace_len,
2542 				     const u64 bytes_to_drop)
2543 {
2544 	struct btrfs_fs_info *fs_info = trans->fs_info;
2545 	struct btrfs_root *root = inode->root;
2546 	struct btrfs_file_extent_item *extent;
2547 	struct extent_buffer *leaf;
2548 	struct btrfs_key key;
2549 	int slot;
2550 	struct btrfs_ref ref = { 0 };
2551 	int ret;
2552 
2553 	if (replace_len == 0)
2554 		return 0;
2555 
2556 	if (extent_info->disk_offset == 0 &&
2557 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2558 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2559 		return 0;
2560 	}
2561 
2562 	key.objectid = btrfs_ino(inode);
2563 	key.type = BTRFS_EXTENT_DATA_KEY;
2564 	key.offset = extent_info->file_offset;
2565 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2566 				      sizeof(struct btrfs_file_extent_item));
2567 	if (ret)
2568 		return ret;
2569 	leaf = path->nodes[0];
2570 	slot = path->slots[0];
2571 	write_extent_buffer(leaf, extent_info->extent_buf,
2572 			    btrfs_item_ptr_offset(leaf, slot),
2573 			    sizeof(struct btrfs_file_extent_item));
2574 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2575 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2576 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2577 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2578 	if (extent_info->is_new_extent)
2579 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2580 	btrfs_mark_buffer_dirty(leaf);
2581 	btrfs_release_path(path);
2582 
2583 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2584 						replace_len);
2585 	if (ret)
2586 		return ret;
2587 
2588 	/* If it's a hole, nothing more needs to be done. */
2589 	if (extent_info->disk_offset == 0) {
2590 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2591 		return 0;
2592 	}
2593 
2594 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2595 
2596 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2597 		key.objectid = extent_info->disk_offset;
2598 		key.type = BTRFS_EXTENT_ITEM_KEY;
2599 		key.offset = extent_info->disk_len;
2600 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2601 						       btrfs_ino(inode),
2602 						       extent_info->file_offset,
2603 						       extent_info->qgroup_reserved,
2604 						       &key);
2605 	} else {
2606 		u64 ref_offset;
2607 
2608 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2609 				       extent_info->disk_offset,
2610 				       extent_info->disk_len, 0);
2611 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2612 		btrfs_init_data_ref(&ref, root->root_key.objectid,
2613 				    btrfs_ino(inode), ref_offset, 0, false);
2614 		ret = btrfs_inc_extent_ref(trans, &ref);
2615 	}
2616 
2617 	extent_info->insertions++;
2618 
2619 	return ret;
2620 }
2621 
2622 /*
2623  * The respective range must have been previously locked, as well as the inode.
2624  * The end offset is inclusive (last byte of the range).
2625  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2626  * the file range with an extent.
2627  * When not punching a hole, we don't want to end up in a state where we dropped
2628  * extents without inserting a new one, so we must abort the transaction to avoid
2629  * a corruption.
2630  */
2631 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2632 			       struct btrfs_path *path, const u64 start,
2633 			       const u64 end,
2634 			       struct btrfs_replace_extent_info *extent_info,
2635 			       struct btrfs_trans_handle **trans_out)
2636 {
2637 	struct btrfs_drop_extents_args drop_args = { 0 };
2638 	struct btrfs_root *root = inode->root;
2639 	struct btrfs_fs_info *fs_info = root->fs_info;
2640 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2641 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2642 	struct btrfs_trans_handle *trans = NULL;
2643 	struct btrfs_block_rsv *rsv;
2644 	unsigned int rsv_count;
2645 	u64 cur_offset;
2646 	u64 len = end - start;
2647 	int ret = 0;
2648 
2649 	if (end <= start)
2650 		return -EINVAL;
2651 
2652 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2653 	if (!rsv) {
2654 		ret = -ENOMEM;
2655 		goto out;
2656 	}
2657 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2658 	rsv->failfast = true;
2659 
2660 	/*
2661 	 * 1 - update the inode
2662 	 * 1 - removing the extents in the range
2663 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2664 	 *     replacing the range with a new extent
2665 	 */
2666 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2667 		rsv_count = 3;
2668 	else
2669 		rsv_count = 2;
2670 
2671 	trans = btrfs_start_transaction(root, rsv_count);
2672 	if (IS_ERR(trans)) {
2673 		ret = PTR_ERR(trans);
2674 		trans = NULL;
2675 		goto out_free;
2676 	}
2677 
2678 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2679 				      min_size, false);
2680 	if (WARN_ON(ret))
2681 		goto out_trans;
2682 	trans->block_rsv = rsv;
2683 
2684 	cur_offset = start;
2685 	drop_args.path = path;
2686 	drop_args.end = end + 1;
2687 	drop_args.drop_cache = true;
2688 	while (cur_offset < end) {
2689 		drop_args.start = cur_offset;
2690 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2691 		/* If we are punching a hole decrement the inode's byte count */
2692 		if (!extent_info)
2693 			btrfs_update_inode_bytes(inode, 0,
2694 						 drop_args.bytes_found);
2695 		if (ret != -ENOSPC) {
2696 			/*
2697 			 * The only time we don't want to abort is if we are
2698 			 * attempting to clone a partial inline extent, in which
2699 			 * case we'll get EOPNOTSUPP.  However if we aren't
2700 			 * cloning we need to abort no matter what, because if we
2701 			 * got EOPNOTSUPP via prealloc then we messed up and
2702 			 * need to abort.
2703 			 */
2704 			if (ret &&
2705 			    (ret != -EOPNOTSUPP ||
2706 			     (extent_info && extent_info->is_new_extent)))
2707 				btrfs_abort_transaction(trans, ret);
2708 			break;
2709 		}
2710 
2711 		trans->block_rsv = &fs_info->trans_block_rsv;
2712 
2713 		if (!extent_info && cur_offset < drop_args.drop_end &&
2714 		    cur_offset < ino_size) {
2715 			ret = fill_holes(trans, inode, path, cur_offset,
2716 					 drop_args.drop_end);
2717 			if (ret) {
2718 				/*
2719 				 * If we failed then we didn't insert our hole
2720 				 * entries for the area we dropped, so now the
2721 				 * fs is corrupted, so we must abort the
2722 				 * transaction.
2723 				 */
2724 				btrfs_abort_transaction(trans, ret);
2725 				break;
2726 			}
2727 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2728 			/*
2729 			 * We are past the i_size here, but since we didn't
2730 			 * insert holes we need to clear the mapped area so we
2731 			 * know to not set disk_i_size in this area until a new
2732 			 * file extent is inserted here.
2733 			 */
2734 			ret = btrfs_inode_clear_file_extent_range(inode,
2735 					cur_offset,
2736 					drop_args.drop_end - cur_offset);
2737 			if (ret) {
2738 				/*
2739 				 * We couldn't clear our area, so we could
2740 				 * presumably adjust up and corrupt the fs, and
2741 				 * thus we need to abort.
2742 				 */
2743 				btrfs_abort_transaction(trans, ret);
2744 				break;
2745 			}
2746 		}
2747 
2748 		if (extent_info &&
2749 		    drop_args.drop_end > extent_info->file_offset) {
2750 			u64 replace_len = drop_args.drop_end -
2751 					  extent_info->file_offset;
2752 
2753 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2754 					extent_info, replace_len,
2755 					drop_args.bytes_found);
2756 			if (ret) {
2757 				btrfs_abort_transaction(trans, ret);
2758 				break;
2759 			}
2760 			extent_info->data_len -= replace_len;
2761 			extent_info->data_offset += replace_len;
2762 			extent_info->file_offset += replace_len;
2763 		}
2764 
2765 		/*
2766 		 * We are releasing our handle on the transaction, balance the
2767 		 * dirty pages of the btree inode and flush delayed items, and
2768 		 * then get a new transaction handle, which may now point to a
2769 		 * new transaction in case someone else has committed the
2770 		 * transaction we used to replace/drop file extent items. So
2771 		 * bump the inode's iversion and update mtime and ctime except
2772 		 * if we are called from a dedupe context. This is because a
2773 		 * power failure/crash may happen after the transaction is
2774 		 * committed and before we finish replacing/dropping all the
2775 		 * file extent items we need.
2776 		 */
2777 		inode_inc_iversion(&inode->vfs_inode);
2778 
2779 		if (!extent_info || extent_info->update_times) {
2780 			inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode);
2781 			inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime;
2782 		}
2783 
2784 		ret = btrfs_update_inode(trans, root, inode);
2785 		if (ret)
2786 			break;
2787 
2788 		btrfs_end_transaction(trans);
2789 		btrfs_btree_balance_dirty(fs_info);
2790 
2791 		trans = btrfs_start_transaction(root, rsv_count);
2792 		if (IS_ERR(trans)) {
2793 			ret = PTR_ERR(trans);
2794 			trans = NULL;
2795 			break;
2796 		}
2797 
2798 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2799 					      rsv, min_size, false);
2800 		if (WARN_ON(ret))
2801 			break;
2802 		trans->block_rsv = rsv;
2803 
2804 		cur_offset = drop_args.drop_end;
2805 		len = end - cur_offset;
2806 		if (!extent_info && len) {
2807 			ret = find_first_non_hole(inode, &cur_offset, &len);
2808 			if (unlikely(ret < 0))
2809 				break;
2810 			if (ret && !len) {
2811 				ret = 0;
2812 				break;
2813 			}
2814 		}
2815 	}
2816 
2817 	/*
2818 	 * If we were cloning, force the next fsync to be a full one since we
2819 	 * replaced (or just dropped in the case of cloning holes when
2820 	 * NO_HOLES is enabled) file extent items and did not set up new extent
2821 	 * maps for the replacement extents (or holes).
2822 	 */
2823 	if (extent_info && !extent_info->is_new_extent)
2824 		btrfs_set_inode_full_sync(inode);
2825 
2826 	if (ret)
2827 		goto out_trans;
2828 
2829 	trans->block_rsv = &fs_info->trans_block_rsv;
2830 	/*
2831 	 * If we are using the NO_HOLES feature we might already have had a
2832 	 * hole that overlaps a part of the region [lockstart, lockend] and
2833 	 * ends at (or beyond) lockend. Since we have no file extent items to
2834 	 * represent holes, drop_end can be less than lockend and so we must
2835 	 * make sure we have an extent map representing the existing hole (the
2836 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2837 	 * map representing the existing hole), otherwise the fast fsync path
2838 	 * will not record the existence of the hole region
2839 	 * [existing_hole_start, lockend].
2840 	 */
2841 	if (drop_args.drop_end <= end)
2842 		drop_args.drop_end = end + 1;
2843 	/*
2844 	 * Don't insert a file hole extent item if it's for a range beyond eof
2845 	 * (because it's useless) or if it represents a 0-byte range (when
2846 	 * cur_offset == drop_end).
2847 	 */
2848 	if (!extent_info && cur_offset < ino_size &&
2849 	    cur_offset < drop_args.drop_end) {
2850 		ret = fill_holes(trans, inode, path, cur_offset,
2851 				 drop_args.drop_end);
2852 		if (ret) {
2853 			/* Same comment as above. */
2854 			btrfs_abort_transaction(trans, ret);
2855 			goto out_trans;
2856 		}
2857 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2858 		/* See the comment in the loop above for the reasoning here. */
2859 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2860 					drop_args.drop_end - cur_offset);
2861 		if (ret) {
2862 			btrfs_abort_transaction(trans, ret);
2863 			goto out_trans;
2864 		}
2865 
2866 	}
2867 	if (extent_info) {
2868 		ret = btrfs_insert_replace_extent(trans, inode, path,
2869 				extent_info, extent_info->data_len,
2870 				drop_args.bytes_found);
2871 		if (ret) {
2872 			btrfs_abort_transaction(trans, ret);
2873 			goto out_trans;
2874 		}
2875 	}
2876 
2877 out_trans:
2878 	if (!trans)
2879 		goto out_free;
2880 
2881 	trans->block_rsv = &fs_info->trans_block_rsv;
2882 	if (ret)
2883 		btrfs_end_transaction(trans);
2884 	else
2885 		*trans_out = trans;
2886 out_free:
2887 	btrfs_free_block_rsv(fs_info, rsv);
2888 out:
2889 	return ret;
2890 }
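
/*
 * Editor's note: a hypothetical skeleton, not part of btrfs and kept
 * compiled out, of the multi-transaction pattern btrfs_replace_file_extents()
 * above uses: do a bounded amount of work per transaction, end it, balance
 * dirty metadata, then start a fresh handle so a single huge range never
 * pins one transaction open.  do_one_chunk() is an assumption of this
 * sketch, not a real function.
 */
#if 0	/* illustrative skeleton */
static int example_chunked_work(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_trans_handle *trans;
	u64 cur = start;
	int ret = 0;

	while (cur < end) {
		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = do_one_chunk(trans, &cur, end);	/* hypothetical */

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(root->fs_info);
		if (ret)
			break;
	}
	return ret;
}
#endif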
2891 
2892 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2893 {
2894 	struct inode *inode = file_inode(file);
2895 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2896 	struct btrfs_root *root = BTRFS_I(inode)->root;
2897 	struct extent_state *cached_state = NULL;
2898 	struct btrfs_path *path;
2899 	struct btrfs_trans_handle *trans = NULL;
2900 	u64 lockstart;
2901 	u64 lockend;
2902 	u64 tail_start;
2903 	u64 tail_len;
2904 	u64 orig_start = offset;
2905 	int ret = 0;
2906 	bool same_block;
2907 	u64 ino_size;
2908 	bool truncated_block = false;
2909 	bool updated_inode = false;
2910 
2911 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2912 
2913 	ret = btrfs_wait_ordered_range(inode, offset, len);
2914 	if (ret)
2915 		goto out_only_mutex;
2916 
2917 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2918 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2919 	if (ret < 0)
2920 		goto out_only_mutex;
2921 	if (ret && !len) {
2922 		/* Already in a large hole */
2923 		ret = 0;
2924 		goto out_only_mutex;
2925 	}
2926 
2927 	ret = file_modified(file);
2928 	if (ret)
2929 		goto out_only_mutex;
2930 
2931 	lockstart = round_up(offset, fs_info->sectorsize);
2932 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2933 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2934 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2935 	/*
2936 	 * We needn't truncate any block which is beyond the end of the file
2937 	 * because we are sure there is no data there.
2938 	 *
2939 	 * Only do the partial block truncation below if we are in the same
2940 	 * block and we aren't covering the entire block; ranges that span
2941 	 * whole blocks are handled by the hole punching itself.
2942 	 */
2943 	if (same_block && len < fs_info->sectorsize) {
2944 		if (offset < ino_size) {
2945 			truncated_block = true;
2946 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2947 						   0);
2948 		} else {
2949 			ret = 0;
2950 		}
2951 		goto out_only_mutex;
2952 	}
2953 
2954 	/* zero back part of the first block */
2955 	if (offset < ino_size) {
2956 		truncated_block = true;
2957 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2958 		if (ret) {
2959 			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2960 			return ret;
2961 		}
2962 	}
2963 
2964 	/* Check the aligned pages after the first unaligned page,
2965 	 * if offset != orig_start, which means the first unaligned page
2966 	 * and several following pages are already in holes, so the
2967 	 * extra check can be skipped. */
2968 	if (offset == orig_start) {
2969 		/* after truncate page, check hole again */
2970 		len = offset + len - lockstart;
2971 		offset = lockstart;
2972 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2973 		if (ret < 0)
2974 			goto out_only_mutex;
2975 		if (ret && !len) {
2976 			ret = 0;
2977 			goto out_only_mutex;
2978 		}
2979 		lockstart = offset;
2980 	}
2981 
2982 	/* Check the tail unaligned part is in a hole */
2983 	tail_start = lockend + 1;
2984 	tail_len = offset + len - tail_start;
2985 	if (tail_len) {
2986 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2987 		if (unlikely(ret < 0))
2988 			goto out_only_mutex;
2989 		if (!ret) {
2990 			/* zero the front end of the last page */
2991 			if (tail_start + tail_len < ino_size) {
2992 				truncated_block = true;
2993 				ret = btrfs_truncate_block(BTRFS_I(inode),
2994 							tail_start + tail_len,
2995 							0, 1);
2996 				if (ret)
2997 					goto out_only_mutex;
2998 			}
2999 		}
3000 	}
3001 
3002 	if (lockend < lockstart) {
3003 		ret = 0;
3004 		goto out_only_mutex;
3005 	}
3006 
3007 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
3008 
3009 	path = btrfs_alloc_path();
3010 	if (!path) {
3011 		ret = -ENOMEM;
3012 		goto out;
3013 	}
3014 
3015 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
3016 					 lockend, NULL, &trans);
3017 	btrfs_free_path(path);
3018 	if (ret)
3019 		goto out;
3020 
3021 	ASSERT(trans != NULL);
3022 	inode_inc_iversion(inode);
3023 	inode->i_mtime = current_time(inode);
3024 	inode->i_ctime = inode->i_mtime;
3025 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3026 	updated_inode = true;
3027 	btrfs_end_transaction(trans);
3028 	btrfs_btree_balance_dirty(fs_info);
3029 out:
3030 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3031 		      &cached_state);
3032 out_only_mutex:
3033 	if (!updated_inode && truncated_block && !ret) {
3034 		/*
3035 		 * If we only end up zeroing part of a page, we still need to
3036 		 * update the inode item, so that all the time fields are
3037 		 * updated as well as the necessary btrfs inode in memory fields
3038 		 * for detecting, at fsync time, if the inode isn't yet in the
3039 		 * log tree or it's there but not up to date.
3040 		 */
3041 		struct timespec64 now = current_time(inode);
3042 
3043 		inode_inc_iversion(inode);
3044 		inode->i_mtime = now;
3045 		inode->i_ctime = now;
3046 		trans = btrfs_start_transaction(root, 1);
3047 		if (IS_ERR(trans)) {
3048 			ret = PTR_ERR(trans);
3049 		} else {
3050 			int ret2;
3051 
3052 			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3053 			ret2 = btrfs_end_transaction(trans);
3054 			if (!ret)
3055 				ret = ret2;
3056 		}
3057 	}
3058 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3059 	return ret;
3060 }
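
/*
 * Editor's note: the userspace entry point (not kernel code, kept compiled
 * out) for the function above.  FALLOC_FL_PUNCH_HOLE must be combined with
 * FALLOC_FL_KEEP_SIZE, so punching a hole never changes i_size.
 */
#if 0	/* userspace sketch */
#define _GNU_SOURCE
#include <fcntl.h>

int punch_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif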
3061 
3062 /* Helper structure to record which range is already reserved */
3063 struct falloc_range {
3064 	struct list_head list;
3065 	u64 start;
3066 	u64 len;
3067 };
3068 
3069 /*
3070  * Helper function to add falloc range
3071  *
3072  * The caller should have locked a larger extent range containing
3073  * [start, start + len).
3074  */
3075 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3076 {
3077 	struct falloc_range *range = NULL;
3078 
3079 	if (!list_empty(head)) {
3080 		/*
3081 		 * As fallocate iterates in increasing file offset order, we only
3082 		 * need to check the last range.
3083 		 */
3084 		range = list_last_entry(head, struct falloc_range, list);
3085 		if (range->start + range->len == start) {
3086 			range->len += len;
3087 			return 0;
3088 		}
3089 	}
3090 
3091 	range = kmalloc(sizeof(*range), GFP_KERNEL);
3092 	if (!range)
3093 		return -ENOMEM;
3094 	range->start = start;
3095 	range->len = len;
3096 	list_add_tail(&range->list, head);
3097 	return 0;
3098 }
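
/*
 * Editor's note: a hypothetical usage sketch, not part of btrfs, showing
 * the coalescing behaviour of add_falloc_range().  Because fallocate walks
 * the file forward, a range starting where the previous one ended is
 * folded into the existing list entry instead of allocating a new one.
 */
static int __maybe_unused example_build_reserve_list(struct list_head *head)
{
	int ret;

	ret = add_falloc_range(head, 0, 4096);
	if (ret)
		return ret;
	/* Contiguous with the previous range: extends it to [0, 8192). */
	return add_falloc_range(head, 4096, 4096);
}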
3099 
3100 static int btrfs_fallocate_update_isize(struct inode *inode,
3101 					const u64 end,
3102 					const int mode)
3103 {
3104 	struct btrfs_trans_handle *trans;
3105 	struct btrfs_root *root = BTRFS_I(inode)->root;
3106 	int ret;
3107 	int ret2;
3108 
3109 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3110 		return 0;
3111 
3112 	trans = btrfs_start_transaction(root, 1);
3113 	if (IS_ERR(trans))
3114 		return PTR_ERR(trans);
3115 
3116 	inode->i_ctime = current_time(inode);
3117 	i_size_write(inode, end);
3118 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3119 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3120 	ret2 = btrfs_end_transaction(trans);
3121 
3122 	return ret ? ret : ret2;
3123 }
3124 
3125 enum {
3126 	RANGE_BOUNDARY_WRITTEN_EXTENT,
3127 	RANGE_BOUNDARY_PREALLOC_EXTENT,
3128 	RANGE_BOUNDARY_HOLE,
3129 };
3130 
3131 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3132 						 u64 offset)
3133 {
3134 	const u64 sectorsize = inode->root->fs_info->sectorsize;
3135 	struct extent_map *em;
3136 	int ret;
3137 
3138 	offset = round_down(offset, sectorsize);
3139 	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3140 	if (IS_ERR(em))
3141 		return PTR_ERR(em);
3142 
3143 	if (em->block_start == EXTENT_MAP_HOLE)
3144 		ret = RANGE_BOUNDARY_HOLE;
3145 	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3146 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3147 	else
3148 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3149 
3150 	free_extent_map(em);
3151 	return ret;
3152 }
3153 
3154 static int btrfs_zero_range(struct inode *inode,
3155 			    loff_t offset,
3156 			    loff_t len,
3157 			    const int mode)
3158 {
3159 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3160 	struct extent_map *em;
3161 	struct extent_changeset *data_reserved = NULL;
3162 	int ret;
3163 	u64 alloc_hint = 0;
3164 	const u64 sectorsize = fs_info->sectorsize;
3165 	u64 alloc_start = round_down(offset, sectorsize);
3166 	u64 alloc_end = round_up(offset + len, sectorsize);
3167 	u64 bytes_to_reserve = 0;
3168 	bool space_reserved = false;
3169 
3170 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3171 			      alloc_end - alloc_start);
3172 	if (IS_ERR(em)) {
3173 		ret = PTR_ERR(em);
3174 		goto out;
3175 	}
3176 
3177 	/*
3178 	 * Avoid hole punching and extent allocation for some cases. More cases
3179 	 * could be considered, but those are unlikely to be common and we keep
3180 	 * things as simple as possible for now. Also, intentionally, if the target
3181 	 * range contains one or more prealloc extents together with regular
3182 	 * extents and holes, we drop all the existing extents and allocate a
3183 	 * new prealloc extent, so that we get a larger contiguous disk extent.
3184 	 */
3185 	if (em->start <= alloc_start &&
3186 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3187 		const u64 em_end = em->start + em->len;
3188 
3189 		if (em_end >= offset + len) {
3190 			/*
3191 			 * The whole range is already a prealloc extent,
3192 			 * do nothing except updating the inode's i_size if
3193 			 * needed.
3194 			 */
3195 			free_extent_map(em);
3196 			ret = btrfs_fallocate_update_isize(inode, offset + len,
3197 							   mode);
3198 			goto out;
3199 		}
3200 		/*
3201 		 * Part of the range is already a prealloc extent, so operate
3202 		 * only on the remaining part of the range.
3203 		 */
3204 		alloc_start = em_end;
3205 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3206 		len = offset + len - alloc_start;
3207 		offset = alloc_start;
3208 		alloc_hint = em->block_start + em->len;
3209 	}
3210 	free_extent_map(em);
3211 
3212 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3213 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3214 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3215 				      sectorsize);
3216 		if (IS_ERR(em)) {
3217 			ret = PTR_ERR(em);
3218 			goto out;
3219 		}
3220 
3221 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3222 			free_extent_map(em);
3223 			ret = btrfs_fallocate_update_isize(inode, offset + len,
3224 							   mode);
3225 			goto out;
3226 		}
3227 		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3228 			free_extent_map(em);
3229 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3230 						   0);
3231 			if (!ret)
3232 				ret = btrfs_fallocate_update_isize(inode,
3233 								   offset + len,
3234 								   mode);
3235 			return ret;
3236 		}
3237 		free_extent_map(em);
3238 		alloc_start = round_down(offset, sectorsize);
3239 		alloc_end = alloc_start + sectorsize;
3240 		goto reserve_space;
3241 	}
3242 
3243 	alloc_start = round_up(offset, sectorsize);
3244 	alloc_end = round_down(offset + len, sectorsize);
3245 
3246 	/*
3247 	 * For unaligned ranges, check the pages at the boundaries, they might
3248 	 * map to an extent, in which case we need to partially zero them, or
3249 	 * they might map to a hole, in which case we need our allocation range
3250 	 * to cover them.
3251 	 */
3252 	if (!IS_ALIGNED(offset, sectorsize)) {
3253 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3254 							    offset);
3255 		if (ret < 0)
3256 			goto out;
3257 		if (ret == RANGE_BOUNDARY_HOLE) {
3258 			alloc_start = round_down(offset, sectorsize);
3259 			ret = 0;
3260 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3261 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3262 			if (ret)
3263 				goto out;
3264 		} else {
3265 			ret = 0;
3266 		}
3267 	}
3268 
3269 	if (!IS_ALIGNED(offset + len, sectorsize)) {
3270 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3271 							    offset + len);
3272 		if (ret < 0)
3273 			goto out;
3274 		if (ret == RANGE_BOUNDARY_HOLE) {
3275 			alloc_end = round_up(offset + len, sectorsize);
3276 			ret = 0;
3277 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3278 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3279 						   0, 1);
3280 			if (ret)
3281 				goto out;
3282 		} else {
3283 			ret = 0;
3284 		}
3285 	}
3286 
3287 reserve_space:
3288 	if (alloc_start < alloc_end) {
3289 		struct extent_state *cached_state = NULL;
3290 		const u64 lockstart = alloc_start;
3291 		const u64 lockend = alloc_end - 1;
3292 
3293 		bytes_to_reserve = alloc_end - alloc_start;
3294 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3295 						      bytes_to_reserve);
3296 		if (ret < 0)
3297 			goto out;
3298 		space_reserved = true;
3299 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3300 					    &cached_state);
3301 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3302 						alloc_start, bytes_to_reserve);
3303 		if (ret) {
3304 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3305 				      lockend, &cached_state);
3306 			goto out;
3307 		}
3308 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3309 						alloc_end - alloc_start,
3310 						i_blocksize(inode),
3311 						offset + len, &alloc_hint);
3312 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3313 			      &cached_state);
3314 		/* btrfs_prealloc_file_range releases reserved space on error */
3315 		if (ret) {
3316 			space_reserved = false;
3317 			goto out;
3318 		}
3319 	}
3320 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3321  out:
3322 	if (ret && space_reserved)
3323 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3324 					       alloc_start, bytes_to_reserve);
3325 	extent_changeset_free(data_reserved);
3326 
3327 	return ret;
3328 }
3329 
3330 static long btrfs_fallocate(struct file *file, int mode,
3331 			    loff_t offset, loff_t len)
3332 {
3333 	struct inode *inode = file_inode(file);
3334 	struct extent_state *cached_state = NULL;
3335 	struct extent_changeset *data_reserved = NULL;
3336 	struct falloc_range *range;
3337 	struct falloc_range *tmp;
3338 	struct list_head reserve_list;
3339 	u64 cur_offset;
3340 	u64 last_byte;
3341 	u64 alloc_start;
3342 	u64 alloc_end;
3343 	u64 alloc_hint = 0;
3344 	u64 locked_end;
3345 	u64 actual_end = 0;
3346 	u64 data_space_needed = 0;
3347 	u64 data_space_reserved = 0;
3348 	u64 qgroup_reserved = 0;
3349 	struct extent_map *em;
3350 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3351 	int ret;
3352 
3353 	/* Do not allow fallocate in ZONED mode */
3354 	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3355 		return -EOPNOTSUPP;
3356 
3357 	alloc_start = round_down(offset, blocksize);
3358 	alloc_end = round_up(offset + len, blocksize);
3359 	cur_offset = alloc_start;
3360 
3361 	/* Make sure we aren't being given some crap mode */
3362 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3363 		     FALLOC_FL_ZERO_RANGE))
3364 		return -EOPNOTSUPP;
3365 
3366 	if (mode & FALLOC_FL_PUNCH_HOLE)
3367 		return btrfs_punch_hole(file, offset, len);
3368 
3369 	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3370 
3371 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3372 		ret = inode_newsize_ok(inode, offset + len);
3373 		if (ret)
3374 			goto out;
3375 	}
3376 
3377 	ret = file_modified(file);
3378 	if (ret)
3379 		goto out;
3380 
3381 	/*
3382 	 * TODO: Move these two operations after we have checked
3383 	 * accurate reserved space, or fallocate can still fail but
3384 	 * with the page cache truncated or the size expanded.
3385 	 *
3386 	 * But that's a minor problem and won't do much harm BTW.
3387 	 */
3388 	if (alloc_start > inode->i_size) {
3389 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3390 					alloc_start);
3391 		if (ret)
3392 			goto out;
3393 	} else if (offset + len > inode->i_size) {
3394 		/*
3395 		 * If we are fallocating from the end of the file onward we
3396 		 * need to zero out the end of the block if i_size lands in the
3397 		 * middle of a block.
3398 		 */
3399 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3400 		if (ret)
3401 			goto out;
3402 	}
3403 
3404 	/*
3405 	 * We have locked the inode at the VFS level (in exclusive mode) and we
3406 	 * have locked the i_mmap_lock (in exclusive mode). Now, before
3407 	 * locking the file range, flush all delalloc in the range and wait for
3408 	 * all ordered extents in the range to complete. After this we can lock
3409 	 * the file range and, due to the previous locking we did, we know there
3410 	 * can't be more delalloc or ordered extents in the range.
3411 	 */
3412 	ret = btrfs_wait_ordered_range(inode, alloc_start,
3413 				       alloc_end - alloc_start);
3414 	if (ret)
3415 		goto out;
3416 
3417 	if (mode & FALLOC_FL_ZERO_RANGE) {
3418 		ret = btrfs_zero_range(inode, offset, len, mode);
3419 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3420 		return ret;
3421 	}
3422 
3423 	locked_end = alloc_end - 1;
3424 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3425 		    &cached_state);
3426 
3427 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3428 
3429 	/* First, check if we exceed the qgroup limit */
3430 	INIT_LIST_HEAD(&reserve_list);
3431 	while (cur_offset < alloc_end) {
3432 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3433 				      alloc_end - cur_offset);
3434 		if (IS_ERR(em)) {
3435 			ret = PTR_ERR(em);
3436 			break;
3437 		}
3438 		last_byte = min(extent_map_end(em), alloc_end);
3439 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3440 		last_byte = ALIGN(last_byte, blocksize);
3441 		if (em->block_start == EXTENT_MAP_HOLE ||
3442 		    (cur_offset >= inode->i_size &&
3443 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3444 			const u64 range_len = last_byte - cur_offset;
3445 
3446 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3447 			if (ret < 0) {
3448 				free_extent_map(em);
3449 				break;
3450 			}
3451 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3452 					&data_reserved, cur_offset, range_len);
3453 			if (ret < 0) {
3454 				free_extent_map(em);
3455 				break;
3456 			}
3457 			qgroup_reserved += range_len;
3458 			data_space_needed += range_len;
3459 		}
3460 		free_extent_map(em);
3461 		cur_offset = last_byte;
3462 	}
3463 
3464 	if (!ret && data_space_needed > 0) {
3465 		/*
3466 		 * We are safe to reserve space here as we can't have delalloc
3467 		 * in the range, see above.
3468 		 */
3469 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3470 						      data_space_needed);
3471 		if (!ret)
3472 			data_space_reserved = data_space_needed;
3473 	}
3474 
3475 	/*
3476 	 * If ret is still 0, it means we're OK to fallocate.
3477 	 * Otherwise just clean up the list and exit.
3478 	 */
3479 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3480 		if (!ret) {
3481 			ret = btrfs_prealloc_file_range(inode, mode,
3482 					range->start,
3483 					range->len, i_blocksize(inode),
3484 					offset + len, &alloc_hint);
3485 			/*
3486 			 * btrfs_prealloc_file_range() releases space even
3487 			 * if it returns an error.
3488 			 */
3489 			data_space_reserved -= range->len;
3490 			qgroup_reserved -= range->len;
3491 		} else if (data_space_reserved > 0) {
3492 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3493 					       data_reserved, range->start,
3494 					       range->len);
3495 			data_space_reserved -= range->len;
3496 			qgroup_reserved -= range->len;
3497 		} else if (qgroup_reserved > 0) {
3498 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3499 					       range->start, range->len);
3500 			qgroup_reserved -= range->len;
3501 		}
3502 		list_del(&range->list);
3503 		kfree(range);
3504 	}
3505 	if (ret < 0)
3506 		goto out_unlock;
3507 
3508 	/*
3509 	 * We didn't need to allocate any more space, but we still extended the
3510 	 * size of the file so we need to update i_size and the inode item.
3511 	 */
3512 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3513 out_unlock:
3514 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3515 		      &cached_state);
3516 out:
3517 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3518 	extent_changeset_free(data_reserved);
3519 	return ret;
3520 }
3521 
3522 /*
3523  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3524  * that has unflushed and/or flushing delalloc. There might be other adjacent
3525  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3526  * looping as long as it finds adjacent subranges, merging them together.
3527  */
3528 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3529 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3530 {
3531 	const u64 len = end + 1 - start;
3532 	struct extent_map_tree *em_tree = &inode->extent_tree;
3533 	struct extent_map *em;
3534 	u64 em_end;
3535 	u64 delalloc_len;
3536 
3537 	/*
3538 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3539 	 * means we have delalloc (dirty pages) for which writeback has not
3540 	 * started yet.
3541 	 */
3542 	*delalloc_start_ret = start;
3543 	delalloc_len = count_range_bits(&inode->io_tree, delalloc_start_ret, end,
3544 					len, EXTENT_DELALLOC, 1);
3545 	/*
3546 	 * If delalloc was found then *delalloc_start_ret has a sector size
3547 	 * aligned value (rounded down).
3548 	 */
3549 	if (delalloc_len > 0)
3550 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3551 
3552 	/*
3553 	 * Now also check if there's any extent map in the range that does not
3554 	 * map to a hole or prealloc extent. We do this because:
3555 	 *
3556 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3557 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map for
3558 	 *    an allocated extent. So we might just have been called after
3559 	 *    delalloc is flushed and before the ordered extent completes and
3560 	 *    inserts the new file extent item in the subvolume's btree;
3561 	 *
3562 	 * 2) We may have an extent map created by flushing delalloc for a
3563 	 *    subrange that starts before the subrange we found marked with
3564 	 *    EXTENT_DELALLOC in the io tree.
3565 	 */
3566 	read_lock(&em_tree->lock);
3567 	em = lookup_extent_mapping(em_tree, start, len);
3568 	read_unlock(&em_tree->lock);
3569 
3570 	/* extent_map_end() returns a non-inclusive end offset. */
3571 	em_end = em ? extent_map_end(em) : 0;
3572 
3573 	/*
3574 	 * If we have a hole/prealloc extent map, check the next one if this one
3575 	 * ends before our range's end.
3576 	 */
3577 	if (em && (em->block_start == EXTENT_MAP_HOLE ||
3578 		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
3579 		struct extent_map *next_em;
3580 
3581 		read_lock(&em_tree->lock);
3582 		next_em = lookup_extent_mapping(em_tree, em_end, end + 1 - em_end);
3583 		read_unlock(&em_tree->lock);
3584 
3585 		free_extent_map(em);
3586 		em_end = next_em ? extent_map_end(next_em) : 0;
3587 		em = next_em;
3588 	}
3589 
3590 	if (em && (em->block_start == EXTENT_MAP_HOLE ||
3591 		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3592 		free_extent_map(em);
3593 		em = NULL;
3594 	}
3595 
3596 	/*
3597 	 * No extent map or one for a hole or prealloc extent. Use the delalloc
3598 	 * range we found in the io tree if we have one.
3599 	 */
3600 	if (!em)
3601 		return (delalloc_len > 0);
3602 
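	/*
	 * From here on we merge the extent map's range with the delalloc range
	 * found in the io tree (if any). For example, an extent map for
	 * [0, 64K) plus an EXTENT_DELALLOC range starting at 64K is reported
	 * as a single delalloc subrange starting at offset 0.
	 */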
3603 	/*
3604 	 * We don't have any range marked with EXTENT_DELALLOC in the io tree,
3605 	 * so the extent map is the only subrange representing delalloc.
3606 	 */
3607 	if (delalloc_len == 0) {
3608 		*delalloc_start_ret = em->start;
3609 		*delalloc_end_ret = min(end, em_end - 1);
3610 		free_extent_map(em);
3611 		return true;
3612 	}
3613 
3614 	/*
3615 	 * The extent map represents a delalloc range that starts before the
3616 	 * delalloc range we found in the io tree.
3617 	 */
3618 	if (em->start < *delalloc_start_ret) {
3619 		*delalloc_start_ret = em->start;
3620 		/*
3621 		 * If the ranges are adjacent, return a combined range.
3622 		 * Otherwise return the extent map's range.
3623 		 */
3624 		if (em_end < *delalloc_start_ret)
3625 			*delalloc_end_ret = min(end, em_end - 1);
3626 
3627 		free_extent_map(em);
3628 		return true;
3629 	}
3630 
3631 	/*
3632 	 * The extent map starts after the delalloc range we found in the io
3633 	 * tree. If it's adjacent, return a combined range, otherwise return
3634 	 * the range found in the io tree.
3635 	 */
3636 	if (*delalloc_end_ret + 1 == em->start)
3637 		*delalloc_end_ret = min(end, em_end - 1);
3638 
3639 	free_extent_map(em);
3640 	return true;
3641 }
3642 
3643 /*
3644  * Check if there's delalloc in a given range.
3645  *
3646  * @inode:               The inode.
3647  * @start:               The start offset of the range. It does not need to be
3648  *                       sector size aligned.
3649  * @end:                 The end offset (inclusive value) of the search range.
3650  *                       It does not need to be sector size aligned.
3651  * @delalloc_start_ret:  Output argument, set to the start offset of the
3652  *                       subrange found with delalloc (may not be sector size
3653  *                       aligned).
3654  * @delalloc_end_ret:    Output argument, set to the end offset (inclusive value)
3655  *                       of the subrange found with delalloc.
3656  *
3657  * Returns true if a subrange with delalloc is found within the given range, and
3658  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3659  * end offsets of the subrange.
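 *
 * Adjacent subranges are merged: if delalloc is found at [4K, 8K - 1] and then
 * at [8K, 12K - 1], a single combined subrange [4K, 12K - 1] is returned.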
3660  */
3661 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3662 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3663 {
3664 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3665 	u64 prev_delalloc_end = 0;
3666 	bool ret = false;
3667 
3668 	while (cur_offset <= end) {
3669 		u64 delalloc_start;
3670 		u64 delalloc_end;
3671 		bool delalloc;
3672 
3673 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3674 						  &delalloc_start,
3675 						  &delalloc_end);
3676 		if (!delalloc)
3677 			break;
3678 
3679 		if (prev_delalloc_end == 0) {
3680 			/* First subrange found. */
3681 			*delalloc_start_ret = max(delalloc_start, start);
3682 			*delalloc_end_ret = delalloc_end;
3683 			ret = true;
3684 		} else if (delalloc_start == prev_delalloc_end + 1) {
3685 			/* Subrange adjacent to the previous one, merge them. */
3686 			*delalloc_end_ret = delalloc_end;
3687 		} else {
3688 			/* Subrange not adjacent to the previous one, exit. */
3689 			break;
3690 		}
3691 
3692 		prev_delalloc_end = delalloc_end;
3693 		cur_offset = delalloc_end + 1;
3694 		cond_resched();
3695 	}
3696 
3697 	return ret;
3698 }
3699 
3700 /*
3701  * Check if there's data (delalloc) or an actual hole in a file range that the
3702  * inode's subvolume btree represents as a hole (or prealloc extent).
3703  *
3704  * @inode:      The inode.
3705  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3706  * @start:      Start offset of the hole region. It does not need to be sector
3707  *              size aligned.
3708  * @end:        End offset (inclusive value) of the hole region. It does not
3709  *              need to be sector size aligned.
3710  * @start_ret:  Return parameter, used to set the start of the subrange in the
3711  *              hole that matches the search criteria (seek mode), if such
3712  *              subrange is found (return value of the function is true).
3713  *              The value returned here may not be sector size aligned.
3714  *
3715  * Returns true if a subrange matching the given seek mode is found, and if one
3716  * is found, it updates @start_ret with the start of the subrange.
3717  */
3718 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3719 					u64 start, u64 end, u64 *start_ret)
3720 {
3721 	u64 delalloc_start;
3722 	u64 delalloc_end;
3723 	bool delalloc;
3724 
3725 	delalloc = btrfs_find_delalloc_in_range(inode, start, end,
3726 						&delalloc_start, &delalloc_end);
3727 	if (delalloc && whence == SEEK_DATA) {
3728 		*start_ret = delalloc_start;
3729 		return true;
3730 	}
3731 
3732 	if (delalloc && whence == SEEK_HOLE) {
3733 		/*
3734 		 * We found delalloc but it starts after our start offset. So we
3735 		 * have a hole between our start offset and the delalloc start.
3736 		 */
3737 		if (start < delalloc_start) {
3738 			*start_ret = start;
3739 			return true;
3740 		}
3741 		/*
3742 		 * Delalloc range starts at our start offset.
3743 		 * If the delalloc range's length is smaller than our range,
3744 		 * then it means we have a hole that starts where the delalloc
3745 		 * subrange ends.
3746 		 */
3747 		if (delalloc_end < end) {
3748 			*start_ret = delalloc_end + 1;
3749 			return true;
3750 		}
3751 
3752 		/* There's delalloc for the whole range. */
3753 		return false;
3754 	}
3755 
3756 	if (!delalloc && whence == SEEK_HOLE) {
3757 		*start_ret = start;
3758 		return true;
3759 	}
3760 
3761 	/*
3762 	 * No delalloc in the range and we are seeking for data. The caller has
3763 	 * to iterate to the next extent item in the subvolume btree.
3764 	 */
3765 	return false;
3766 }
3767 
3768 static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3769 				  int whence)
3770 {
3771 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3772 	struct extent_state *cached_state = NULL;
3773 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3774 	const u64 ino = btrfs_ino(inode);
3775 	struct btrfs_root *root = inode->root;
3776 	struct btrfs_path *path;
3777 	struct btrfs_key key;
3778 	u64 last_extent_end;
3779 	u64 lockstart;
3780 	u64 lockend;
3781 	u64 start;
3782 	int ret;
3783 	bool found = false;
3784 
3785 	if (i_size == 0 || offset >= i_size)
3786 		return -ENXIO;
3787 
3788 	/*
3789 	 * Quick path. If the inode has no prealloc extents and its number of
3790 	 * bytes used matches its i_size, then it cannot have holes.
3791 	 */
3792 	if (whence == SEEK_HOLE &&
3793 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3794 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3795 		return i_size;
3796 
3797 	/*
3798 	 * offset can be negative; in this case we start the DATA/HOLE search from
3799 	 * the very start of the file.
3800 	 */
3801 	start = max_t(loff_t, 0, offset);
3802 
3803 	lockstart = round_down(start, fs_info->sectorsize);
3804 	lockend = round_up(i_size, fs_info->sectorsize);
3805 	if (lockend <= lockstart)
3806 		lockend = lockstart + fs_info->sectorsize;
3807 	lockend--;
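	/* lockstart is aligned down and lockend is now an inclusive end offset. */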
3808 
3809 	path = btrfs_alloc_path();
3810 	if (!path)
3811 		return -ENOMEM;
3812 	path->reada = READA_FORWARD;
3813 
3814 	key.objectid = ino;
3815 	key.type = BTRFS_EXTENT_DATA_KEY;
3816 	key.offset = start;
3817 
3818 	last_extent_end = lockstart;
3819 
3820 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3821 
3822 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3823 	if (ret < 0) {
3824 		goto out;
3825 	} else if (ret > 0 && path->slots[0] > 0) {
3826 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3827 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3828 			path->slots[0]--;
3829 	}
3830 
3831 	while (start < i_size) {
3832 		struct extent_buffer *leaf = path->nodes[0];
3833 		struct btrfs_file_extent_item *extent;
3834 		u64 extent_end;
3835 
3836 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3837 			ret = btrfs_next_leaf(root, path);
3838 			if (ret < 0)
3839 				goto out;
3840 			else if (ret > 0)
3841 				break;
3842 
3843 			leaf = path->nodes[0];
3844 		}
3845 
3846 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3847 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3848 			break;
3849 
3850 		extent_end = btrfs_file_extent_end(path);
3851 
3852 		/*
3853 		 * In the first iteration we may have a slot that points to an
3854 		 * extent that ends before our start offset, so skip it.
3855 		 */
3856 		if (extent_end <= start) {
3857 			path->slots[0]++;
3858 			continue;
3859 		}
3860 
3861 		/* We have an implicit hole; the NO_HOLES feature is likely set. */
3862 		if (last_extent_end < key.offset) {
3863 			u64 search_start = last_extent_end;
3864 			u64 found_start;
3865 
3866 			/*
3867 			 * First iteration, @start matches @offset and it's
3868 			 * within the hole.
3869 			 */
3870 			if (start == offset)
3871 				search_start = offset;
3872 
3873 			found = find_desired_extent_in_hole(inode, whence,
3874 							    search_start,
3875 							    key.offset - 1,
3876 							    &found_start);
3877 			if (found) {
3878 				start = found_start;
3879 				break;
3880 			}
3881 			/*
3882 			 * Didn't find data or a hole (due to delalloc) in the
3883 			 * implicit hole range, so we need to analyze the extent.
3884 			 */
3885 		}
3886 
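		/*
		 * The file extent item at the current slot covers our offset,
		 * either as an explicit hole, a prealloc extent or a
		 * regular/inline extent.
		 */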
3887 		extent = btrfs_item_ptr(leaf, path->slots[0],
3888 					struct btrfs_file_extent_item);
3889 
3890 		if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
3891 		    btrfs_file_extent_type(leaf, extent) ==
3892 		    BTRFS_FILE_EXTENT_PREALLOC) {
3893 			/*
3894 			 * Explicit hole or prealloc extent, search for delalloc.
3895 			 * A prealloc extent is treated like a hole.
3896 			 */
3897 			u64 search_start = key.offset;
3898 			u64 found_start;
3899 
3900 			/*
3901 			 * First iteration, @start matches @offset and it's
3902 			 * within the hole.
3903 			 */
3904 			if (start == offset)
3905 				search_start = offset;
3906 
3907 			found = find_desired_extent_in_hole(inode, whence,
3908 							    search_start,
3909 							    extent_end - 1,
3910 							    &found_start);
3911 			if (found) {
3912 				start = found_start;
3913 				break;
3914 			}
3915 			/*
3916 			 * Didn't find data or a hole (due to delalloc) in the
3917 			 * hole or prealloc range, so we need to analyze the next
3918 			 * extent item.
3919 			 */
3920 		} else {
3921 			/*
3922 			 * Found a regular or inline extent.
3923 			 * If we are seeking for data, adjust the start offset
3924 			 * and stop, we're done.
3925 			 */
3926 			if (whence == SEEK_DATA) {
3927 				start = max_t(u64, key.offset, offset);
3928 				found = true;
3929 				break;
3930 			}
3931 			/*
3932 			 * Else, we are seeking for a hole, check the next file
3933 			 * extent item.
3934 			 */
3935 		}
3936 
3937 		start = extent_end;
3938 		last_extent_end = extent_end;
3939 		path->slots[0]++;
3940 		if (fatal_signal_pending(current)) {
3941 			ret = -EINTR;
3942 			goto out;
3943 		}
3944 		cond_resched();
3945 	}
3946 
3947 	/* We have an implicit hole from the last extent found up to i_size. */
3948 	if (!found && start < i_size) {
3949 		found = find_desired_extent_in_hole(inode, whence, start,
3950 						    i_size - 1, &start);
3951 		if (!found)
3952 			start = i_size;
3953 	}
3954 
3955 out:
3956 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3957 	btrfs_free_path(path);
3958 
3959 	if (ret < 0)
3960 		return ret;
3961 
3962 	if (whence == SEEK_DATA && start >= i_size)
3963 		return -ENXIO;
3964 
3965 	return min_t(loff_t, start, i_size);
3966 }
3967 
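/*
 * These seek hooks are reached from userspace via lseek(2). As a sketch using
 * only the standard lseek(2) API, a program can walk the data regions of a
 * sparse file like this:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	while (data != -1) {
 *		off_t hole = lseek(fd, data, SEEK_HOLE);
 *		... process the byte range [data, hole) ...
 *		data = lseek(fd, hole, SEEK_DATA);
 *	}
 *
 * SEEK_DATA returns -1 with errno set to ENXIO once no data remains beyond the
 * given offset.
 */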
3968 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3969 {
3970 	struct inode *inode = file->f_mapping->host;
3971 
3972 	switch (whence) {
3973 	default:
3974 		return generic_file_llseek(file, offset, whence);
3975 	case SEEK_DATA:
3976 	case SEEK_HOLE:
3977 		btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3978 		offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3979 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3980 		break;
3981 	}
3982 
3983 	if (offset < 0)
3984 		return offset;
3985 
3986 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3987 }
3988 
3989 static int btrfs_file_open(struct inode *inode, struct file *filp)
3990 {
3991 	int ret;
3992 
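	/*
	 * Advertise support for non-blocking I/O (RWF_NOWAIT) and for async
	 * buffered reads and writes (used by io_uring).
	 */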
3993 	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
3994 
3995 	ret = fsverity_file_open(inode, filp);
3996 	if (ret)
3997 		return ret;
3998 	return generic_file_open(inode, filp);
3999 }
4000 
4001 static int check_direct_read(struct btrfs_fs_info *fs_info,
4002 			     const struct iov_iter *iter, loff_t offset)
4003 {
4004 	int ret;
4005 	int i, seg;
4006 
4007 	ret = check_direct_IO(fs_info, iter, offset);
4008 	if (ret < 0)
4009 		return ret;
4010 
4011 	if (!iter_is_iovec(iter))
4012 		return 0;
4013 
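	/*
	 * Reject iovec arrays where two segments start at the same user
	 * address, since otherwise the fault-in and retry logic in
	 * btrfs_direct_read() may fail to make progress. The quadratic scan
	 * is acceptable because iovec arrays are small.
	 */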
4014 	for (seg = 0; seg < iter->nr_segs; seg++)
4015 		for (i = seg + 1; i < iter->nr_segs; i++)
4016 			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
4017 				return -EINVAL;
4018 	return 0;
4019 }
4020 
4021 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
4022 {
4023 	struct inode *inode = file_inode(iocb->ki_filp);
4024 	size_t prev_left = 0;
4025 	ssize_t read = 0;
4026 	ssize_t ret;
4027 
4028 	if (fsverity_active(inode))
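	/*
	 * Direct IO reads are not supported for verity files. Returning 0
	 * with the iov_iter untouched makes btrfs_file_read_iter() fall back
	 * to a buffered read, where verification happens.
	 */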
4029 		return 0;
4030 
4031 	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
4032 		return 0;
4033 
4034 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
4035 again:
4036 	/*
4037 	 * This is similar to what we do for direct IO writes, see the comment
4038 	 * at btrfs_direct_write(), but we also disable page faults in addition
4039 	 * to disabling them at the iov_iter level. This is because when
4040 	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
4041 	 * which can still trigger page fault-ins despite having set ->nofault
4042 	 * to true on our 'to' iov_iter.
4043 	 *
4044 	 * The difference to direct IO writes is that we deadlock when trying
4045 	 * to lock the extent range in the inode's tree during the page reads
4046 	 * triggered by the fault in (while for writes it is due to waiting for
4047 	 * our own ordered extent). This is because for direct IO reads,
4048 	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
4049 	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
4050 	 */
4051 	pagefault_disable();
4052 	to->nofault = true;
4053 	ret = btrfs_dio_read(iocb, to, read);
4054 	to->nofault = false;
4055 	pagefault_enable();
4056 
4057 	/* No increment (+=) because iomap returns a cumulative value. */
4058 	if (ret > 0)
4059 		read = ret;
4060 
4061 	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
4062 		const size_t left = iov_iter_count(to);
4063 
4064 		if (left == prev_left) {
4065 			/*
4066 			 * We didn't make any progress since the last attempt,
4067 			 * fall back to a buffered read for the remainder of the
4068 			 * range. This is just to avoid any possibility of looping
4069 			 * for too long.
4070 			 */
4071 			ret = read;
4072 		} else {
4073 			/*
4074 			 * We made some progress since the last retry or this is
4075 			 * the first time we are retrying. Fault in as many pages
4076 			 * as possible and retry.
4077 			 */
4078 			fault_in_iov_iter_writeable(to, left);
4079 			prev_left = left;
4080 			goto again;
4081 		}
4082 	}
4083 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
4084 	return ret < 0 ? ret : read;
4085 }
4086 
4087 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4088 {
4089 	ssize_t ret = 0;
4090 
4091 	if (iocb->ki_flags & IOCB_DIRECT) {
4092 		ret = btrfs_direct_read(iocb, to);
4093 		if (ret < 0 || !iov_iter_count(to) ||
4094 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
4095 			return ret;
4096 	}
4097 
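	/*
	 * Complete the remainder with a buffered read. 'ret' carries the
	 * bytes already copied by the direct IO path so that filemap_read()
	 * accounts for them in its return value.
	 */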
4098 	return filemap_read(iocb, to, ret);
4099 }
4100 
4101 const struct file_operations btrfs_file_operations = {
4102 	.llseek		= btrfs_file_llseek,
4103 	.read_iter      = btrfs_file_read_iter,
4104 	.splice_read	= generic_file_splice_read,
4105 	.write_iter	= btrfs_file_write_iter,
4106 	.splice_write	= iter_file_splice_write,
4107 	.mmap		= btrfs_file_mmap,
4108 	.open		= btrfs_file_open,
4109 	.release	= btrfs_release_file,
4110 	.get_unmapped_area = thp_get_unmapped_area,
4111 	.fsync		= btrfs_sync_file,
4112 	.fallocate	= btrfs_fallocate,
4113 	.unlocked_ioctl	= btrfs_ioctl,
4114 #ifdef CONFIG_COMPAT
4115 	.compat_ioctl	= btrfs_compat_ioctl,
4116 #endif
4117 	.remap_file_range = btrfs_remap_file_range,
4118 };
4119 
4120 void __cold btrfs_auto_defrag_exit(void)
4121 {
4122 	kmem_cache_destroy(btrfs_inode_defrag_cachep);
4123 }
4124 
4125 int __init btrfs_auto_defrag_init(void)
4126 {
4127 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
4128 					sizeof(struct inode_defrag), 0,
4129 					SLAB_MEM_SPREAD,
4130 					NULL);
4131 	if (!btrfs_inode_defrag_cachep)
4132 		return -ENOMEM;
4133 
4134 	return 0;
4135 }
4136 
4137 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
4138 {
4139 	int ret;
4140 
4141 	/*
4142 	 * So with compression we will find and lock a dirty page and clear the
4143 	 * first one as dirty, set up an async extent, and immediately return
4144 	 * with the entire range locked but with nobody actually marked with
4145 	 * writeback.  So we can't just filemap_write_and_wait_range() and
4146 	 * expect it to work since it will just kick off a thread to do the
4147 	 * actual work.  So we need to call filemap_fdatawrite_range _again_
4148 	 * since it will wait on the page lock, which won't be unlocked until
4149 	 * after the pages have been marked as writeback and so we're good to go
4150 	 * from there.  We have to do this otherwise we'll miss the ordered
4151 	 * extents and that results in badness.  Please Josef, do not think you
4152 	 * know better and pull this out at some point in the future, it is
4153 	 * right and you are wrong.
4154 	 */
4155 	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
4156 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
4157 			     &BTRFS_I(inode)->runtime_flags))
4158 		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
4159 
4160 	return ret;
4161 }
4162